xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17 	struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19 	struct mpi3mr_drv_cmd *drv_cmd);
20 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
21 static int poll_queues;
22 module_param(poll_queues, int, 0444);
23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24 static bool threaded_isr_poll = true;
25 module_param(threaded_isr_poll, bool, 0444);
26 MODULE_PARM_DESC(threaded_isr_poll,
27 			"Enablement of IRQ polling thread (default=true)");
28 
#if defined(writeq) && defined(CONFIG_64BIT)
/*
 * mpi3mr_writeq - 64-bit MMIO register write helper
 * @b: 64-bit value to write
 * @addr: mapped register address
 * @write_queue_lock: unused here; the native writeq() is a single
 *	atomic 8-byte store, so no locking is needed
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without a native writeq(): emulate the
 * 64-bit store with two 32-bit writes — low dword first, then the
 * high dword at addr + 4.  The spinlock keeps the pair atomic with
 * respect to other writers of the same register.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
48 
49 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)50 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
51 {
52 	u16 pi, ci, max_entries;
53 	bool is_qfull = false;
54 
55 	pi = op_req_q->pi;
56 	ci = READ_ONCE(op_req_q->ci);
57 	max_entries = op_req_q->num_requests;
58 
59 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
60 		is_qfull = true;
61 
62 	return is_qfull;
63 }
64 
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)65 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
66 {
67 	u16 i, max_vectors;
68 
69 	max_vectors = mrioc->intr_info_count;
70 
71 	for (i = 0; i < max_vectors; i++)
72 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
73 }
74 
/**
 * mpi3mr_ioc_disable_intr - disable interrupt processing
 * @mrioc: Adapter instance reference
 *
 * Clears the software enable flag first so mpi3mr_isr_primary()
 * bails out, then synchronizes all vectors so that no handler is
 * still running when this returns.  Order matters.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
80 
/**
 * mpi3mr_ioc_enable_intr - allow ISRs to process interrupts
 * @mrioc: Adapter instance reference
 *
 * Sets the software enable flag checked by mpi3mr_isr_primary();
 * it does not touch hardware interrupt masking.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
85 
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)86 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
87 {
88 	u16 i;
89 
90 	mpi3mr_ioc_disable_intr(mrioc);
91 
92 	if (!mrioc->intr_info)
93 		return;
94 
95 	for (i = 0; i < mrioc->intr_info_count; i++)
96 		free_irq(pci_irq_vector(mrioc->pdev, i),
97 		    (mrioc->intr_info + i));
98 
99 	kfree(mrioc->intr_info);
100 	mrioc->intr_info = NULL;
101 	mrioc->intr_info_count = 0;
102 	mrioc->is_intr_info_set = false;
103 	pci_free_irq_vectors(mrioc->pdev);
104 }
105 
/**
 * mpi3mr_add_sg_single - populate one simple SGE in place
 * @paddr: host virtual address of the SGE to fill
 * @flags: SGE flags
 * @length: data length in bytes
 * @dma_addr: DMA address of the data buffer
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = paddr;

	sge->flags = flags;
	sge->length = cpu_to_le32(length);
	sge->address = cpu_to_le64(dma_addr);
}
115 
mpi3mr_build_zero_len_sge(void * paddr)116 void mpi3mr_build_zero_len_sge(void *paddr)
117 {
118 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
119 
120 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
121 }
122 
/**
 * mpi3mr_get_reply_virt_addr - reply frame DMA to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of a reply frame
 *
 * Return: host virtual address of the reply frame, or NULL when the
 * DMA address is zero or falls outside the reply buffer pool.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	bool in_pool;

	if (!phys_addr)
		return NULL;

	in_pool = (phys_addr >= mrioc->reply_buf_dma) &&
		  (phys_addr <= mrioc->reply_buf_dma_max_address);
	if (!in_pool)
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}
135 
/**
 * mpi3mr_get_sensebuf_virt_addr - sense buffer DMA to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of a sense buffer
 *
 * Return: host virtual address of the sense buffer, or NULL when the
 * DMA address is zero.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	return phys_addr ?
	    (mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma)) : NULL;
}
144 
/*
 * mpi3mr_repost_reply_buf - return a reply frame to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply frame
 *
 * Under the reply-free-queue lock, stores the frame address in the
 * current host-index slot, advances the index with wrap-around and
 * publishes it to the controller register.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	slot = mrioc->reply_free_queue_host_index;
	if (++mrioc->reply_free_queue_host_index == mrioc->reply_free_qsz)
		mrioc->reply_free_queue_host_index = 0;
	mrioc->reply_free_q[slot] = cpu_to_le64(reply_dma);
	/* entry must be visible before the new index reaches the IOC */
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
162 
/**
 * mpi3mr_repost_sense_buf - return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Under the sense-buffer-queue lock, stores the buffer address in the
 * current host-index slot, advances the index with wrap-around and
 * publishes it to the controller register.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	slot = mrioc->sbq_host_index;
	if (++mrioc->sbq_host_index == mrioc->sense_buf_q_sz)
		mrioc->sbq_host_index = 0;
	mrioc->sense_buf_q[slot] = cpu_to_le64(sense_buf_dma);
	/* entry must be visible before the new index reaches the IOC */
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
179 
/*
 * mpi3mr_print_event_data - debug-print a firmware event notification
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame from the IOC
 *
 * Emits a one-line description of the event when MPI3_DEBUG_EVENT
 * logging is enabled.  Events carrying extra payload print their key
 * fields and return directly; unrecognized events are silently
 * ignored.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	/* event tracing must be explicitly enabled via logging_level */
	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
		return;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* events without a description string are not logged */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
299 
mpi3mr_handle_events(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply * def_reply)300 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
301 	struct mpi3_default_reply *def_reply)
302 {
303 	struct mpi3_event_notification_reply *event_reply =
304 	    (struct mpi3_event_notification_reply *)def_reply;
305 
306 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
307 	mpi3mr_print_event_data(mrioc, event_reply);
308 	mpi3mr_os_handle_events(mrioc, event_reply);
309 }
310 
311 static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc * mrioc,u16 host_tag,struct mpi3_default_reply * def_reply)312 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
313 	struct mpi3_default_reply *def_reply)
314 {
315 	u16 idx;
316 
317 	switch (host_tag) {
318 	case MPI3MR_HOSTTAG_INITCMDS:
319 		return &mrioc->init_cmds;
320 	case MPI3MR_HOSTTAG_CFG_CMDS:
321 		return &mrioc->cfg_cmds;
322 	case MPI3MR_HOSTTAG_BSG_CMDS:
323 		return &mrioc->bsg_cmds;
324 	case MPI3MR_HOSTTAG_BLK_TMS:
325 		return &mrioc->host_tm_cmds;
326 	case MPI3MR_HOSTTAG_PEL_ABORT:
327 		return &mrioc->pel_abort_cmd;
328 	case MPI3MR_HOSTTAG_PEL_WAIT:
329 		return &mrioc->pel_cmds;
330 	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
331 		return &mrioc->transport_cmds;
332 	case MPI3MR_HOSTTAG_INVALID:
333 		if (def_reply && def_reply->function ==
334 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
335 			mpi3mr_handle_events(mrioc, def_reply);
336 		return NULL;
337 	default:
338 		break;
339 	}
340 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
341 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
342 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
343 		return &mrioc->dev_rmhs_cmds[idx];
344 	}
345 
346 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
347 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
348 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
349 		return &mrioc->evtack_cmds[idx];
350 	}
351 
352 	return NULL;
353 }
354 
/*
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor fetched from the admin reply queue
 * @reply_dma: output; DMA address of the full reply frame, if any
 *	(zero when the descriptor carried no address reply)
 *
 * Decodes the descriptor type, extracts host tag / IOC status /
 * log info (and SCSI sense data for SCSI IO replies), then completes
 * the matching internal driver command: copies the reply frame and
 * sense buffer into the command, and either wakes the waiter or
 * invokes the command's callback.  Any consumed sense buffer is
 * reposted to the firmware on exit.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* inline status descriptor: no separate reply frame */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* descriptor points at a full reply frame in host memory */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			/* pull sense data for SCSI IO replies */
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG passthrough wants the unmasked IOC status */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				       MPI3MR_SENSE_BUF_SZ);
			}
			if (cmdptr->is_waiting) {
				cmdptr->is_waiting = 0;
				complete(&cmdptr->done);
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* return the consumed sense buffer to the firmware's free pool */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
451 
/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drains the admin reply queue: walks descriptors while the phase
 * bit matches the expected phase, processes each one, reposts the
 * reply frame, and periodically publishes the consumer index so the
 * controller can reuse the slots.  Guarded so that only one context
 * drains the queue at a time.
 *
 * Return: number of reply descriptors processed (0 if the queue was
 *	    busy or empty).
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* single-consumer guard: note the missed ISR and bail if busy */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
		atomic_inc(&mrioc->admin_pend_isr);
		return 0;
	}

	atomic_set(&mrioc->admin_pend_isr, 0);
	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* phase-bit mismatch means no new entry at the consumer index */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* wrap-around flips the expected phase bit */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* periodically publish CI so the IOC can reclaim slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
510 
511 /**
512  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
513  *	queue's consumer index from operational reply descriptor queue.
514  * @op_reply_q: op_reply_qinfo object
515  * @reply_ci: operational reply descriptor's queue consumer index
516  *
517  * Returns: reply descriptor frame address
518  */
519 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)520 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
521 {
522 	void *segment_base_addr;
523 	struct segments *segments = op_reply_q->q_segments;
524 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
525 
526 	segment_base_addr =
527 	    segments[reply_ci / op_reply_q->segment_qd].segment;
528 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
529 	    (reply_ci % op_reply_q->segment_qd);
530 	return reply_desc;
531 }
532 
533 /**
534  * mpi3mr_process_op_reply_q - Operational reply queue handler
535  * @mrioc: Adapter instance reference
536  * @op_reply_q: Operational reply queue info
537  *
538  * Checks the specific operational reply queue and drains the
539  * reply queue entries until the queue is empty and process the
540  * individual reply descriptors.
541  *
542  * Return: 0 if queue is already processed,or number of reply
543  *	    descriptors processed.
544  */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;

	reply_qidx = op_reply_q->qid - 1;

	/* single-consumer guard: only one context drains this queue */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	/* phase-bit mismatch means the queue is empty */
	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* publish the request queue CI reported by the firmware */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);

		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;
		threshold_comps++;

		/* wrap-around flips the expected phase bit */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if ((num_op_reply > mrioc->max_host_ios) &&
			(threaded_isr_poll == true)) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
		/* periodically publish CI so the IOC can reclaim slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(reply_ci,
			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
			threshold_comps = 0;
		}
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
624 
625 /**
626  * mpi3mr_blk_mq_poll - Operational reply queue handler
627  * @shost: SCSI Host reference
628  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
629  *
630  * Checks the specific operational reply queue and drains the
631  * reply queue entries until the queue is empty and process the
632  * individual reply descriptors.
633  *
634  * Return: 0 if queue is already processed,or number of reply
635  *	    descriptors processed.
636  */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)637 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
638 {
639 	int num_entries = 0;
640 	struct mpi3mr_ioc *mrioc;
641 
642 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
643 
644 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
645 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
646 		return 0;
647 
648 	num_entries = mpi3mr_process_op_reply_q(mrioc,
649 			&mrioc->op_reply_qinfo[queue_num]);
650 
651 	return num_entries;
652 }
653 
mpi3mr_isr_primary(int irq,void * privdata)654 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
655 {
656 	struct mpi3mr_intr_info *intr_info = privdata;
657 	struct mpi3mr_ioc *mrioc;
658 	u16 midx;
659 	u32 num_admin_replies = 0, num_op_reply = 0;
660 
661 	if (!intr_info)
662 		return IRQ_NONE;
663 
664 	mrioc = intr_info->mrioc;
665 
666 	if (!mrioc->intr_enabled)
667 		return IRQ_NONE;
668 
669 	midx = intr_info->msix_index;
670 
671 	if (!midx)
672 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
673 	if (intr_info->op_reply_q)
674 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
675 		    intr_info->op_reply_q);
676 
677 	if (num_admin_replies || num_op_reply)
678 		return IRQ_HANDLED;
679 	else
680 		return IRQ_NONE;
681 }
682 
683 #ifndef CONFIG_PREEMPT_RT
684 
mpi3mr_isr(int irq,void * privdata)685 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
686 {
687 	struct mpi3mr_intr_info *intr_info = privdata;
688 	int ret;
689 
690 	if (!intr_info)
691 		return IRQ_NONE;
692 
693 	/* Call primary ISR routine */
694 	ret = mpi3mr_isr_primary(irq, privdata);
695 
696 	/*
697 	 * If more IOs are expected, schedule IRQ polling thread.
698 	 * Otherwise exit from ISR.
699 	 */
700 	if ((threaded_isr_poll == false) || !intr_info->op_reply_q)
701 		return ret;
702 
703 	if (!intr_info->op_reply_q->enable_irq_poll ||
704 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
705 		return ret;
706 
707 	disable_irq_nosync(intr_info->os_irq);
708 
709 	return IRQ_WAKE_THREAD;
710 }
711 
712 /**
713  * mpi3mr_isr_poll - Reply queue polling routine
714  * @irq: IRQ
715  * @privdata: Interrupt info
716  *
717  * poll for pending I/O completions in a loop until pending I/Os
718  * present or controller queue depth I/Os are processed.
719  *
720  * Return: IRQ_NONE or IRQ_HANDLED
721  */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		/* stop polling if interrupts were disabled or IOC died */
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* MSI-x 0 also services the admin reply queue */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* brief sleep between poll iterations to yield the CPU */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* disarm polling and unmask the line disabled by mpi3mr_isr() */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}
757 
758 #endif
759 
760 /**
761  * mpi3mr_request_irq - Request IRQ and register ISR
762  * @mrioc: Adapter instance reference
763  * @index: IRQ vector index
764  *
765  * Request threaded ISR with primary ISR and secondary
766  *
767  * Return: 0 on success and non zero on failures.
768  */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	/* name shows up in /proc/interrupts: <driver><id>-msix<index> */
	scnprintf(intr_info->name, MPI3MR_NAME_LENGTH,
	    "%.32s%d-msix%u", mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	/* primary handler plus threaded poll handler (IRQ_WAKE_THREAD) */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	/* on PREEMPT_RT the primary handler runs threaded; no poll thread */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	/* cached for disable_irq_nosync()/enable_irq() in the poll path */
	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}
798 
/*
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-x vectors available
 *
 * Keeps the requested io_uring poll queue count only when enough
 * vectors remain after reserving the admin and default queues;
 * otherwise disables polled queues.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	u16 requested = mrioc->requested_poll_qcount;

	if (!requested)
		return;

	/* Reserved for Admin and Default Queue */
	if (max_vectors > 2 && (requested < max_vectors - 2)) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    requested, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    requested, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}
817 
818 /**
819  * mpi3mr_setup_isr - Setup ISR for the controller
820  * @mrioc: Adapter instance reference
821  * @setup_one: Request one IRQ or more
822  *
823  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
824  *
825  * Return: 0 on success and non zero on failures.
826  */
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
	unsigned int irq_flags = PCI_IRQ_MSIX;
	int max_vectors, min_vec;
	int retval;
	int i;
	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };

	if (mrioc->is_intr_info_set)
		return 0;

	mpi3mr_cleanup_isr(mrioc);

	if (setup_one || reset_devices) {
		/* minimal setup: one vector shared by all queues */
		max_vectors = 1;
		retval = pci_alloc_irq_vectors(mrioc->pdev,
		    1, max_vectors, irq_flags);
		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}
	} else {
		/* one per CPU + admin vector + requested poll queues */
		max_vectors =
		    min_t(int, mrioc->cpu_count + 1 +
			mrioc->requested_poll_qcount, mrioc->msix_count);

		mpi3mr_calc_poll_queues(mrioc, max_vectors);

		ioc_info(mrioc,
		    "MSI-X vectors supported: %d, no of cores: %d,",
		    mrioc->msix_count, mrioc->cpu_count);
		ioc_info(mrioc,
		    "MSI-x vectors requested: %d poll_queues %d\n",
		    max_vectors, mrioc->requested_poll_qcount);

		desc.post_vectors = mrioc->requested_poll_qcount;
		min_vec = desc.pre_vectors + desc.post_vectors;
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;

		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
			min_vec, max_vectors, irq_flags, &desc);

		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}


		/*
		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
		 * between Admin queue and operational queue
		 */
		if (retval == min_vec)
			mrioc->op_reply_q_offset = 0;
		else if (retval != (max_vectors)) {
			ioc_info(mrioc,
			    "allocated vectors (%d) are less than configured (%d)\n",
			    retval, max_vectors);
		}

		max_vectors = retval;
		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;

		/* recheck poll queue feasibility with granted vector count */
		mpi3mr_calc_poll_queues(mrioc, max_vectors);

	}

	/*
	 * kcalloc() zero-initializes and checks the count * size
	 * multiplication for overflow (preferred over open-coded
	 * kzalloc(sizeof(x) * n)).
	 */
	mrioc->intr_info = kcalloc(max_vectors,
	    sizeof(struct mpi3mr_intr_info), GFP_KERNEL);
	if (!mrioc->intr_info) {
		retval = -ENOMEM;
		pci_free_irq_vectors(mrioc->pdev);
		goto out_failed;
	}
	for (i = 0; i < max_vectors; i++) {
		retval = mpi3mr_request_irq(mrioc, i);
		if (retval) {
			/* record how many were set up so cleanup frees them */
			mrioc->intr_info_count = i;
			goto out_failed;
		}
	}
	if (reset_devices || !setup_one)
		mrioc->is_intr_info_set = true;
	mrioc->intr_info_count = max_vectors;
	mpi3mr_ioc_enable_intr(mrioc);
	return 0;

out_failed:
	mpi3mr_cleanup_isr(mrioc);

	return retval;
}
921 
/* IOC state to human-readable name map, consumed by mpi3mr_iocstate_name() */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
933 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)934 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
935 {
936 	int i;
937 	char *name = NULL;
938 
939 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
940 		if (mrioc_states[i].value == mrioc_state) {
941 			name = mrioc_states[i].name;
942 			break;
943 		}
944 	}
945 	return name;
946 }
947 
/* Reset reason to name mapper structure */
949 static const struct {
950 	enum mpi3mr_reset_reason value;
951 	char *name;
952 } mpi3mr_reset_reason_codes[] = {
953 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
954 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
955 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
956 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
957 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
958 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
959 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
960 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
961 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
962 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
963 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
964 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
965 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
966 	{
967 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
968 		"create request queue timeout"
969 	},
970 	{
971 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
972 		"create reply queue timeout"
973 	},
974 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
975 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
976 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
977 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
978 	{
979 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
980 		"component image activation timeout"
981 	},
982 	{
983 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
984 		"get package version timeout"
985 	},
986 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
987 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
988 	{
989 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
990 		"diagnostic buffer post timeout"
991 	},
992 	{
993 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
994 		"diagnostic buffer release timeout"
995 	},
996 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
997 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
998 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
999 	{ MPI3MR_RESET_FROM_INVALID_COMPLETION, "invalid cmd completion" },
1000 };
1001 
1002 /**
1003  * mpi3mr_reset_rc_name - get reset reason code name
1004  * @reason_code: reset reason code value
1005  *
1006  * Map reset reason to an NULL terminated ASCII string
1007  *
1008  * Return: name corresponding to reset reason value or NULL.
1009  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1010 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1011 {
1012 	int i;
1013 	char *name = NULL;
1014 
1015 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1016 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1017 			name = mpi3mr_reset_reason_codes[i].name;
1018 			break;
1019 		}
1020 	}
1021 	return name;
1022 }
1023 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1032 
1033 /**
1034  * mpi3mr_reset_type_name - get reset type name
1035  * @reset_type: reset type value
1036  *
1037  * Map reset type to an NULL terminated ASCII string
1038  *
1039  * Return: name corresponding to reset type value or NULL.
1040  */
mpi3mr_reset_type_name(u16 reset_type)1041 static const char *mpi3mr_reset_type_name(u16 reset_type)
1042 {
1043 	int i;
1044 	char *name = NULL;
1045 
1046 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1047 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1048 			name = mpi3mr_reset_types[i].name;
1049 			break;
1050 		}
1051 	}
1052 	return name;
1053 }
1054 
1055 /**
1056  * mpi3mr_is_fault_recoverable - Read fault code and decide
1057  * whether the controller can be recoverable
1058  * @mrioc: Adapter instance reference
1059  * Return: true if fault is recoverable, false otherwise.
1060  */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1061 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1062 {
1063 	u32 fault;
1064 
1065 	fault = (readl(&mrioc->sysif_regs->fault) &
1066 		      MPI3_SYSIF_FAULT_CODE_MASK);
1067 
1068 	switch (fault) {
1069 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1070 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1071 		ioc_warn(mrioc,
1072 		    "controller requires system power cycle, marking controller as unrecoverable\n");
1073 		return false;
1074 	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1075 		ioc_warn(mrioc,
1076 		    "controller faulted due to insufficient power,\n"
1077 		    " try by connecting it to a different slot\n");
1078 		return false;
1079 	default:
1080 		break;
1081 	}
1082 	return true;
1083 }
1084 
1085 /**
1086  * mpi3mr_print_fault_info - Display fault information
1087  * @mrioc: Adapter instance reference
1088  *
1089  * Display the controller fault information if there is a
1090  * controller fault.
1091  *
1092  * Return: Nothing.
1093  */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1094 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1095 {
1096 	u32 ioc_status, code, code1, code2, code3;
1097 
1098 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1099 
1100 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1101 		code = readl(&mrioc->sysif_regs->fault);
1102 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1103 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1104 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1105 
1106 		ioc_info(mrioc,
1107 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1108 		    code, code1, code2, code3);
1109 	}
1110 }
1111 
1112 /**
1113  * mpi3mr_save_fault_info - Save fault information
1114  * @mrioc: Adapter instance reference
1115  *
1116  * Save the controller fault information if there is a
1117  * controller fault.
1118  *
1119  * Return: Nothing.
1120  */
mpi3mr_save_fault_info(struct mpi3mr_ioc * mrioc)1121 static void mpi3mr_save_fault_info(struct mpi3mr_ioc *mrioc)
1122 {
1123 	u32 ioc_status, i;
1124 
1125 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1126 
1127 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1128 		mrioc->saved_fault_code = readl(&mrioc->sysif_regs->fault) &
1129 		    MPI3_SYSIF_FAULT_CODE_MASK;
1130 		for (i = 0; i < 3; i++) {
1131 			mrioc->saved_fault_info[i] =
1132 			readl(&mrioc->sysif_regs->fault_info[i]);
1133 		}
1134 	}
1135 }
1136 
1137 /**
1138  * mpi3mr_get_iocstate - Get IOC State
1139  * @mrioc: Adapter instance reference
1140  *
1141  * Return a proper IOC state enum based on the IOC status and
1142  * IOC configuration and unrcoverable state of the controller.
1143  *
1144  * Return: Current IOC state.
1145  */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1146 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1147 {
1148 	u32 ioc_status, ioc_config;
1149 	u8 ready, enabled;
1150 
1151 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1152 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1153 
1154 	if (mrioc->unrecoverable)
1155 		return MRIOC_STATE_UNRECOVERABLE;
1156 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1157 		return MRIOC_STATE_FAULT;
1158 
1159 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1160 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1161 
1162 	if (ready && enabled)
1163 		return MRIOC_STATE_READY;
1164 	if ((!ready) && (!enabled))
1165 		return MRIOC_STATE_RESET;
1166 	if ((!ready) && (enabled))
1167 		return MRIOC_STATE_BECOMING_READY;
1168 
1169 	return MRIOC_STATE_RESET_REQUESTED;
1170 }
1171 
1172 /**
1173  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1174  * @mrioc: Adapter instance reference
1175  *
1176  * Free the DMA memory allocated for IOCTL handling purpose.
1177  *
1178  * Return: None
1179  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1180 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1181 {
1182 	struct dma_memory_desc *mem_desc;
1183 	u16 i;
1184 
1185 	if (!mrioc->ioctl_dma_pool)
1186 		return;
1187 
1188 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1189 		mem_desc = &mrioc->ioctl_sge[i];
1190 		if (mem_desc->addr) {
1191 			dma_pool_free(mrioc->ioctl_dma_pool,
1192 				      mem_desc->addr,
1193 				      mem_desc->dma_addr);
1194 			mem_desc->addr = NULL;
1195 		}
1196 	}
1197 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1198 	mrioc->ioctl_dma_pool = NULL;
1199 	mem_desc = &mrioc->ioctl_chain_sge;
1200 
1201 	if (mem_desc->addr) {
1202 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1203 				  mem_desc->addr, mem_desc->dma_addr);
1204 		mem_desc->addr = NULL;
1205 	}
1206 	mem_desc = &mrioc->ioctl_resp_sge;
1207 	if (mem_desc->addr) {
1208 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1209 				  mem_desc->addr, mem_desc->dma_addr);
1210 		mem_desc->addr = NULL;
1211 	}
1212 
1213 	mrioc->ioctl_sges_allocated = false;
1214 }
1215 
1216 /**
1217  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1218  * @mrioc: Adapter instance reference
1219  *
1220  * This function allocates dmaable memory required to handle the
1221  * application issued MPI3 IOCTL requests.
1222  *
1223  * Return: None
1224  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1225 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1226 
1227 {
1228 	struct dma_memory_desc *mem_desc;
1229 	u16 i;
1230 
1231 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1232 						&mrioc->pdev->dev,
1233 						MPI3MR_IOCTL_SGE_SIZE,
1234 						MPI3MR_PAGE_SIZE_4K, 0);
1235 
1236 	if (!mrioc->ioctl_dma_pool) {
1237 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1238 		goto out_failed;
1239 	}
1240 
1241 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1242 		mem_desc = &mrioc->ioctl_sge[i];
1243 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1244 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1245 						 GFP_KERNEL,
1246 						 &mem_desc->dma_addr);
1247 		if (!mem_desc->addr)
1248 			goto out_failed;
1249 	}
1250 
1251 	mem_desc = &mrioc->ioctl_chain_sge;
1252 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1253 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1254 					    mem_desc->size,
1255 					    &mem_desc->dma_addr,
1256 					    GFP_KERNEL);
1257 	if (!mem_desc->addr)
1258 		goto out_failed;
1259 
1260 	mem_desc = &mrioc->ioctl_resp_sge;
1261 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1262 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1263 					    mem_desc->size,
1264 					    &mem_desc->dma_addr,
1265 					    GFP_KERNEL);
1266 	if (!mem_desc->addr)
1267 		goto out_failed;
1268 
1269 	mrioc->ioctl_sges_allocated = true;
1270 
1271 	return;
1272 out_failed:
1273 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1274 		 "from the applications, application interface for MPT command is disabled\n");
1275 	mpi3mr_free_ioctl_dma_memory(mrioc);
1276 }
1277 
1278 /**
1279  * mpi3mr_fault_uevent_emit - Emit uevent for any controller
1280  * fault
1281  * @mrioc: Pointer to the mpi3mr_ioc structure for the controller instance
1282  *
1283  * This function is invoked when the controller undergoes any
1284  * type of fault.
1285  */
1286 
mpi3mr_fault_uevent_emit(struct mpi3mr_ioc * mrioc)1287 static void mpi3mr_fault_uevent_emit(struct mpi3mr_ioc *mrioc)
1288 {
1289 	struct kobj_uevent_env *env;
1290 	int ret;
1291 
1292 	env = kzalloc_obj(*env);
1293 	if (!env)
1294 		return;
1295 
1296 	ret = add_uevent_var(env, "DRIVER=%s", mrioc->driver_name);
1297 	if (ret)
1298 		goto out_free;
1299 
1300 	ret = add_uevent_var(env, "IOC_ID=%u", mrioc->id);
1301 	if (ret)
1302 		goto out_free;
1303 
1304 	ret = add_uevent_var(env, "FAULT_CODE=0x%08x",
1305 			    mrioc->saved_fault_code);
1306 	if (ret)
1307 		goto out_free;
1308 
1309 	ret = add_uevent_var(env, "FAULT_INFO0=0x%08x",
1310 			     mrioc->saved_fault_info[0]);
1311 	if (ret)
1312 		goto out_free;
1313 
1314 	ret = add_uevent_var(env, "FAULT_INFO1=0x%08x",
1315 			    mrioc->saved_fault_info[1]);
1316 	if (ret)
1317 		goto out_free;
1318 
1319 	ret = add_uevent_var(env, "FAULT_INFO2=0x%08x",
1320 			    mrioc->saved_fault_info[2]);
1321 	if (ret)
1322 		goto out_free;
1323 
1324 	kobject_uevent_env(&mrioc->shost->shost_gendev.kobj,
1325 			KOBJ_CHANGE, env->envp);
1326 
1327 out_free:
1328 	kfree(env);
1329 
1330 }
1331 
1332 /**
1333  * mpi3mr_clear_reset_history - clear reset history
1334  * @mrioc: Adapter instance reference
1335  *
1336  * Write the reset history bit in IOC status to clear the bit,
1337  * if it is already set.
1338  *
1339  * Return: Nothing.
1340  */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1341 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1342 {
1343 	u32 ioc_status;
1344 
1345 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1346 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1347 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1348 }
1349 
/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete. The reset reason is recorded in scratchpad[0]
 * before the reset so firmware diagnostics can report it.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status, scratch_pad0;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Encode OS type, IOC number and reset reason for the firmware */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
			(mrioc->facts.ioc_num <<
			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing the IOC enable bit triggers the message unit reset */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll in 100ms steps until reset history or fault is reported */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* MUR succeeded only if not timed out, not ready, not faulted
	 * and the enable bit stayed clear
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
1405 
/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Compare the freshly read IOCFacts values against the values
 * the driver was originally initialized with. Parameters the
 * driver cannot change at run time (reply size, queue counts,
 * bitmap sizes) must not shrink; others only produce warnings.
 *
 * Return: zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	unsigned long *removepend_bitmap;

	/* Reply buffers were sized at init; a larger reply would overflow */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	/* Already-created operational queues cannot be taken away */
	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* max_sectors was set at init time and stays fixed; warn only */
	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
			    "\tchanged after reset: previous(%d), new(%d),\n"
			    "the driver cannot change this at run time\n",
			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);

	/* SAS transport support and multipath are mutually exclusive */
	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* Track whether segmented trace buffer support survived the reset */
	if (mrioc->seg_tb_support) {
		if (!(mrioc->facts.ioc_capabilities &
		     MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
			ioc_err(mrioc,
			    "critical error: previously enabled segmented trace\n"
			    " buffer capability is disabled after reset. Please\n"
			    " update the firmware or reboot the system or\n"
			    " reload the driver to enable trace diag buffer\n");
			mrioc->diag_buffers[0].disabled_after_reset = true;
		} else
			mrioc->diag_buffers[0].disabled_after_reset = false;
	}

	/* Grow (never shrink) the remove-pending bitmap to the new handle count */
	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
						  GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
				"failed to increase removepend_bitmap bits from %d to %d\n",
				mrioc->dev_handle_bitmap_bits,
				mrioc->facts.max_devhandle);
			return -EPERM;
		}
		bitmap_free(mrioc->removepend_bitmap);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
			 "increased bits of dev_handle_bitmap from %d to %d\n",
			 mrioc->dev_handle_bitmap_bits,
			 mrioc->facts.max_devhandle);
		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	}

	return 0;
}
1488 
1489 /**
1490  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1491  * @mrioc: Adapter instance reference
1492  *
1493  * Set Enable IOC bit in IOC configuration register and wait for
1494  * the controller to become ready.
1495  *
1496  * Return: 0 on success, appropriate error on failure.
1497  */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1498 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1499 {
1500 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1501 	int retval = 0;
1502 	enum mpi3mr_iocstate ioc_state;
1503 	u64 base_info;
1504 	u8 retry = 0;
1505 	u64 start_time, elapsed_time_sec;
1506 
1507 retry_bring_ioc_ready:
1508 
1509 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1510 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1511 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1512 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1513 	    ioc_status, ioc_config, base_info);
1514 
1515 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
1516 		mrioc->unrecoverable = 1;
1517 		goto out_device_not_present;
1518 	}
1519 
1520 	/*The timeout value is in 2sec unit, changing it to seconds*/
1521 	mrioc->ready_timeout =
1522 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1523 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1524 
1525 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1526 
1527 	ioc_state = mpi3mr_get_iocstate(mrioc);
1528 	ioc_info(mrioc, "controller is in %s state during detection\n",
1529 	    mpi3mr_iocstate_name(ioc_state));
1530 
1531 	timeout = mrioc->ready_timeout * 10;
1532 
1533 	do {
1534 		ioc_state = mpi3mr_get_iocstate(mrioc);
1535 
1536 		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1537 		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
1538 			break;
1539 
1540 		if (!pci_device_is_present(mrioc->pdev)) {
1541 			mrioc->unrecoverable = 1;
1542 			ioc_err(mrioc, "controller is not present while waiting to reset\n");
1543 			goto out_device_not_present;
1544 		}
1545 
1546 		msleep(100);
1547 	} while (--timeout);
1548 
1549 	if (ioc_state == MRIOC_STATE_READY) {
1550 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1551 		retval = mpi3mr_issue_and_process_mur(mrioc,
1552 		    MPI3MR_RESET_FROM_BRINGUP);
1553 		ioc_state = mpi3mr_get_iocstate(mrioc);
1554 		if (retval)
1555 			ioc_err(mrioc,
1556 			    "message unit reset failed with error %d current state %s\n",
1557 			    retval, mpi3mr_iocstate_name(ioc_state));
1558 	}
1559 	if (ioc_state != MRIOC_STATE_RESET) {
1560 		if (ioc_state == MRIOC_STATE_FAULT) {
1561 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1562 			mpi3mr_print_fault_info(mrioc);
1563 			mpi3mr_save_fault_info(mrioc);
1564 			mrioc->fault_during_init = 1;
1565 			mrioc->fwfault_counter++;
1566 
1567 			do {
1568 				host_diagnostic =
1569 					readl(&mrioc->sysif_regs->host_diagnostic);
1570 				if (!(host_diagnostic &
1571 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1572 					break;
1573 				if (!pci_device_is_present(mrioc->pdev)) {
1574 					mrioc->unrecoverable = 1;
1575 					ioc_err(mrioc, "controller is not present at the bringup\n");
1576 					goto out_device_not_present;
1577 				}
1578 				msleep(100);
1579 			} while (--timeout);
1580 		}
1581 		mpi3mr_print_fault_info(mrioc);
1582 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1583 		retval = mpi3mr_issue_reset(mrioc,
1584 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1585 		    MPI3MR_RESET_FROM_BRINGUP);
1586 		if (retval) {
1587 			ioc_err(mrioc,
1588 			    "soft reset failed with error %d\n", retval);
1589 			goto out_failed;
1590 		}
1591 	}
1592 	ioc_state = mpi3mr_get_iocstate(mrioc);
1593 	if (ioc_state != MRIOC_STATE_RESET) {
1594 		ioc_err(mrioc,
1595 		    "cannot bring controller to reset state, current state: %s\n",
1596 		    mpi3mr_iocstate_name(ioc_state));
1597 		goto out_failed;
1598 	}
1599 	mpi3mr_clear_reset_history(mrioc);
1600 	retval = mpi3mr_setup_admin_qpair(mrioc);
1601 	if (retval) {
1602 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1603 		    retval);
1604 		goto out_failed;
1605 	}
1606 
1607 	ioc_info(mrioc, "bringing controller to ready state\n");
1608 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1609 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1610 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1611 
1612 	if (retry == 0)
1613 		start_time = jiffies;
1614 
1615 	timeout = mrioc->ready_timeout * 10;
1616 	do {
1617 		ioc_state = mpi3mr_get_iocstate(mrioc);
1618 		if (ioc_state == MRIOC_STATE_READY) {
1619 			ioc_info(mrioc,
1620 			    "successfully transitioned to %s state\n",
1621 			    mpi3mr_iocstate_name(ioc_state));
1622 			mpi3mr_clear_reset_history(mrioc);
1623 			return 0;
1624 		}
1625 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1626 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1627 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1628 			mpi3mr_print_fault_info(mrioc);
1629 			goto out_failed;
1630 		}
1631 		if (!pci_device_is_present(mrioc->pdev)) {
1632 			mrioc->unrecoverable = 1;
1633 			ioc_err(mrioc,
1634 			    "controller is not present at the bringup\n");
1635 			retval = -1;
1636 			goto out_device_not_present;
1637 		}
1638 		msleep(100);
1639 		elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1640 	} while (elapsed_time_sec < mrioc->ready_timeout);
1641 
1642 	ioc_state = mpi3mr_get_iocstate(mrioc);
1643 	if (ioc_state == MRIOC_STATE_READY) {
1644 		ioc_info(mrioc,
1645 		    "successfully transitioned to %s state after %llu seconds\n",
1646 		    mpi3mr_iocstate_name(ioc_state), elapsed_time_sec);
1647 		mpi3mr_clear_reset_history(mrioc);
1648 		return 0;
1649 	}
1650 
1651 out_failed:
1652 	elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1653 	if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1654 		retry++;
1655 
1656 		ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1657 				" elapsed time =%llu\n", retry, elapsed_time_sec);
1658 
1659 		goto retry_bring_ioc_ready;
1660 	}
1661 	ioc_state = mpi3mr_get_iocstate(mrioc);
1662 	ioc_err(mrioc,
1663 	    "failed to bring to ready state,  current state: %s\n",
1664 	    mpi3mr_iocstate_name(ioc_state));
1665 out_device_not_present:
1666 	return retval;
1667 }
1668 
1669 /**
1670  * mpi3mr_soft_reset_success - Check softreset is success or not
1671  * @ioc_status: IOC status register value
1672  * @ioc_config: IOC config register value
1673  *
1674  * Check whether the soft reset is successful or not based on
1675  * IOC status and IOC config register values.
1676  *
1677  * Return: True when the soft reset is success, false otherwise.
1678  */
1679 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1680 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1681 {
1682 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1683 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1684 		return true;
1685 	return false;
1686 }
1687 
1688 /**
1689  * mpi3mr_diagfault_success - Check diag fault is success or not
1690  * @mrioc: Adapter reference
1691  * @ioc_status: IOC status register value
1692  *
1693  * Check whether the controller hit diag reset fault code.
1694  *
1695  * Return: True when there is diag fault, false otherwise.
1696  */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1697 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1698 	u32 ioc_status)
1699 {
1700 	u32 fault;
1701 
1702 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1703 		return false;
1704 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1705 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1706 		mpi3mr_print_fault_info(mrioc);
1707 		return true;
1708 	}
1709 	return false;
1710 }
1711 
1712 /**
1713  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1714  * @mrioc: Adapter reference
1715  *
1716  * Set diag save bit in IOC configuration register to enable
1717  * snapdump.
1718  *
1719  * Return: Nothing.
1720  */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1721 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1722 {
1723 	u32 ioc_config;
1724 
1725 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1726 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1727 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1728 }
1729 
/**
 * mpi3mr_issue_reset - Issue reset to the controller
 * @mrioc: Adapter reference
 * @reset_type: Reset type
 * @reset_reason: Reset reason code
 *
 * Unlock the host diagnostic registers and write the specific
 * reset type to that, wait for reset acknowledgment from the
 * controller, if the reset is not successful retry for the
 * predefined number of times.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
	u16 reset_reason)
{
	int retval = -1;
	u8 unlock_retry_count = 0;
	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;

	/* Only soft reset and diag fault reset actions are supported */
	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
		return retval;
	if (mrioc->unrecoverable)
		return retval;
	/* Firmware initiated the reset itself; nothing to trigger here */
	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
		retval = 0;
		return retval;
	}

	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
	    mpi3mr_reset_type_name(reset_type),
	    mpi3mr_reset_rc_name(reset_reason), reset_reason);

	mpi3mr_clear_reset_history(mrioc);
	do {
		ioc_info(mrioc,
		    "Write magic sequence to unlock host diag register (retry=%d)\n",
		    ++unlock_retry_count);
		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
			/*
			 * NOTE(review): host_diagnostic is printed here; it
			 * is only assigned further down in this loop, so this
			 * relies on MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT being
			 * greater than 1 - confirm against mpi3mr.h.
			 */
			ioc_err(mrioc,
			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
			    mpi3mr_reset_type_name(reset_type),
			    host_diagnostic);
			mrioc->unrecoverable = 1;
			return retval;
		}

		/*
		 * Write the magic key-value sequence that unlocks the host
		 * diagnostic register for writes.
		 */
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
		    &mrioc->sysif_regs->write_sequence);
		usleep_range(1000, 1100);
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		ioc_info(mrioc,
		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
		    unlock_retry_count, host_diagnostic);
	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));

	/* Record OS type, IOC number and reason for firmware diagnostics */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
		mpi3mr_set_diagsave(mrioc);
	/* Writing the reset action to host diagnostic triggers the reset */
	writel(host_diagnostic | reset_type,
	    &mrioc->sysif_regs->host_diagnostic);
	switch (reset_type) {
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
		/* Poll for reset-history plus ready/enable bits cleared */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
			    ) {
				mpi3mr_clear_reset_history(mrioc);
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		mpi3mr_print_fault_info(mrioc);
		break;
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
		/* Poll for the controller to report the diag fault code */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		break;
	default:
		break;
	}

	/* A lone second-key write re-locks the host diagnostic register */
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
	    &mrioc->sysif_regs->write_sequence);

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_info(mrioc,
	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
	if (retval)
		mrioc->unrecoverable = 1;
	return retval;
}
1852 
1853 /**
1854  * mpi3mr_admin_request_post - Post request to admin queue
1855  * @mrioc: Adapter reference
1856  * @admin_req: MPI3 request
1857  * @admin_req_sz: Request size
1858  * @ignore_reset: Ignore reset in process
1859  *
1860  * Post the MPI3 request into admin request queue and
1861  * inform the controller, if the queue is full return
1862  * appropriate error.
1863  *
1864  * Return: 0 on success, non-zero on failure.
1865  */
mpi3mr_admin_request_post(struct mpi3mr_ioc * mrioc,void * admin_req,u16 admin_req_sz,u8 ignore_reset)1866 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1867 	u16 admin_req_sz, u8 ignore_reset)
1868 {
1869 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1870 	int retval = 0;
1871 	unsigned long flags;
1872 	u8 *areq_entry;
1873 
1874 	if (mrioc->unrecoverable) {
1875 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1876 		return -EFAULT;
1877 	}
1878 
1879 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1880 	areq_pi = mrioc->admin_req_pi;
1881 	areq_ci = mrioc->admin_req_ci;
1882 	max_entries = mrioc->num_admin_req;
1883 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1884 	    (areq_pi == (max_entries - 1)))) {
1885 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1886 		retval = -EAGAIN;
1887 		goto out;
1888 	}
1889 	if (!ignore_reset && mrioc->reset_in_progress) {
1890 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1891 		retval = -EAGAIN;
1892 		goto out;
1893 	}
1894 	if (mrioc->pci_err_recovery) {
1895 		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1896 		retval = -EAGAIN;
1897 		goto out;
1898 	}
1899 
1900 	areq_entry = (u8 *)mrioc->admin_req_base +
1901 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1902 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1903 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1904 
1905 	if (++areq_pi == max_entries)
1906 		areq_pi = 0;
1907 	mrioc->admin_req_pi = areq_pi;
1908 
1909 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1910 
1911 out:
1912 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1913 
1914 	return retval;
1915 }
1916 
1917 /**
1918  * mpi3mr_free_op_req_q_segments - free request memory segments
1919  * @mrioc: Adapter instance reference
1920  * @q_idx: operational request queue index
1921  *
1922  * Free memory segments allocated for operational request queue
1923  *
1924  * Return: Nothing.
1925  */
mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1926 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1927 {
1928 	u16 j;
1929 	int size;
1930 	struct segments *segments;
1931 
1932 	segments = mrioc->req_qinfo[q_idx].q_segments;
1933 	if (!segments)
1934 		return;
1935 
1936 	if (mrioc->enable_segqueue) {
1937 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1938 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1939 			dma_free_coherent(&mrioc->pdev->dev,
1940 			    MPI3MR_MAX_SEG_LIST_SIZE,
1941 			    mrioc->req_qinfo[q_idx].q_segment_list,
1942 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1943 			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1944 		}
1945 	} else
1946 		size = mrioc->req_qinfo[q_idx].segment_qd *
1947 		    mrioc->facts.op_req_sz;
1948 
1949 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1950 		if (!segments[j].segment)
1951 			continue;
1952 		dma_free_coherent(&mrioc->pdev->dev,
1953 		    size, segments[j].segment, segments[j].segment_dma);
1954 		segments[j].segment = NULL;
1955 	}
1956 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1957 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1958 	mrioc->req_qinfo[q_idx].qid = 0;
1959 }
1960 
1961 /**
1962  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1963  * @mrioc: Adapter instance reference
1964  * @q_idx: operational reply queue index
1965  *
1966  * Free memory segments allocated for operational reply queue
1967  *
1968  * Return: Nothing.
1969  */
mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1970 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1971 {
1972 	u16 j;
1973 	int size;
1974 	struct segments *segments;
1975 
1976 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1977 	if (!segments)
1978 		return;
1979 
1980 	if (mrioc->enable_segqueue) {
1981 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1982 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1983 			dma_free_coherent(&mrioc->pdev->dev,
1984 			    MPI3MR_MAX_SEG_LIST_SIZE,
1985 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1986 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1987 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1988 		}
1989 	} else
1990 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1991 		    mrioc->op_reply_desc_sz;
1992 
1993 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1994 		if (!segments[j].segment)
1995 			continue;
1996 		dma_free_coherent(&mrioc->pdev->dev,
1997 		    size, segments[j].segment, segments[j].segment_dma);
1998 		segments[j].segment = NULL;
1999 	}
2000 
2001 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
2002 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
2003 	mrioc->op_reply_qinfo[q_idx].qid = 0;
2004 }
2005 
/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operational reply queue by issuing an MPI request
 * through the admin queue using the driver's init_cmds slot,
 * then free the queue's segment memory on success.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created (or already deleted) */
	if (!reply_qid)	{
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/* Drop the queue from the per-type accounting before deletion */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue teardown must be postable even during reset */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so diagnostics are saved */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its interrupt vector before freeing it */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2086 
2087 /**
2088  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
2089  * @mrioc: Adapter instance reference
2090  * @qidx: request queue index
2091  *
2092  * Allocate segmented memory pools for operational reply
2093  * queue.
2094  *
2095  * Return: 0 on success, non-zero on failure.
2096  */
mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2097 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2098 {
2099 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2100 	int i, size;
2101 	u64 *q_segment_list_entry = NULL;
2102 	struct segments *segments;
2103 
2104 	if (mrioc->enable_segqueue) {
2105 		op_reply_q->segment_qd =
2106 		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
2107 
2108 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
2109 
2110 		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2111 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
2112 		    GFP_KERNEL);
2113 		if (!op_reply_q->q_segment_list)
2114 			return -ENOMEM;
2115 		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
2116 	} else {
2117 		op_reply_q->segment_qd = op_reply_q->num_replies;
2118 		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
2119 	}
2120 
2121 	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
2122 	    op_reply_q->segment_qd);
2123 
2124 	op_reply_q->q_segments = kzalloc_objs(struct segments,
2125 					      op_reply_q->num_segments);
2126 	if (!op_reply_q->q_segments)
2127 		return -ENOMEM;
2128 
2129 	segments = op_reply_q->q_segments;
2130 	for (i = 0; i < op_reply_q->num_segments; i++) {
2131 		segments[i].segment =
2132 		    dma_alloc_coherent(&mrioc->pdev->dev,
2133 		    size, &segments[i].segment_dma, GFP_KERNEL);
2134 		if (!segments[i].segment)
2135 			return -ENOMEM;
2136 		if (mrioc->enable_segqueue)
2137 			q_segment_list_entry[i] =
2138 			    (unsigned long)segments[i].segment_dma;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
2144 /**
2145  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2146  * @mrioc: Adapter instance reference
2147  * @qidx: request queue index
2148  *
2149  * Allocate segmented memory pools for operational request
2150  * queue.
2151  *
2152  * Return: 0 on success, non-zero on failure.
2153  */
mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2154 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2155 {
2156 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
2157 	int i, size;
2158 	u64 *q_segment_list_entry = NULL;
2159 	struct segments *segments;
2160 
2161 	if (mrioc->enable_segqueue) {
2162 		op_req_q->segment_qd =
2163 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
2164 
2165 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
2166 
2167 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2168 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
2169 		    GFP_KERNEL);
2170 		if (!op_req_q->q_segment_list)
2171 			return -ENOMEM;
2172 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
2173 
2174 	} else {
2175 		op_req_q->segment_qd = op_req_q->num_requests;
2176 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
2177 	}
2178 
2179 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
2180 	    op_req_q->segment_qd);
2181 
2182 	op_req_q->q_segments = kzalloc_objs(struct segments,
2183 					    op_req_q->num_segments);
2184 	if (!op_req_q->q_segments)
2185 		return -ENOMEM;
2186 
2187 	segments = op_req_q->q_segments;
2188 	for (i = 0; i < op_req_q->num_segments; i++) {
2189 		segments[i].segment =
2190 		    dma_alloc_coherent(&mrioc->pdev->dev,
2191 		    size, &segments[i].segment_dma, GFP_KERNEL);
2192 		if (!segments[i].segment)
2193 			return -ENOMEM;
2194 		if (mrioc->enable_segqueue)
2195 			q_segment_list_entry[i] =
2196 			    (unsigned long)segments[i].segment_dma;
2197 	}
2198 
2199 	return 0;
2200 }
2201 
/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operational reply queue by issuing an MPI request
 * through the admin queue: size the queue per controller type,
 * allocate its segments, classify it as default (interrupt) or
 * polled, and register it with its MSI-x vector on success.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means this queue already exists */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Firmware queue IDs are 1-based */
	reply_qid = qidx + 1;

	/* Queue depth depends on controller device/revision */
	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
		if (mrioc->pdev->revision)
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
		else
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	} else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;

	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;
	/* Threshold used by the submission path to push back I/O early */
	op_reply_q->qfull_watermark =
		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);

	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* The last requested_poll_qcount vectors host the polled queues */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
			cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Polled queues share the last vector with interrupts off */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
			reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): the request-queue SEGMENTED flag constant is
		 * used for a reply queue here — presumably the bit value
		 * matches MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED; confirm
		 * against the MPI3 header definitions.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue setup is part of (re)initialization */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = reply_qid;
	/* Attach the queue to its interrupt vector for reply processing */
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2340 
/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing an MPI request
 * through the admin queue, binding it to the given reply queue.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue already exists */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Firmware queue IDs are 1-based */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	/* Reset the queue-full tracking used by the submission path */
	op_req_q->last_full_host_tag =  MPI3MR_HOSTTAG_INVALID;
	op_req_q->qfull_io_count =  0;
	op_req_q->qfull_instances =  0;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue setup is part of (re)initialization */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2446 
2447 /**
2448  * mpi3mr_create_op_queues - create operational queue pairs
2449  * @mrioc: Adapter instance reference
2450  *
2451  * Allocate memory for operational queue meta data and call
2452  * create request and reply queue functions.
2453  *
2454  * Return: 0 on success, non-zero on failures.
2455  */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2456 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2457 {
2458 	int retval = 0;
2459 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2460 	u32 ioc_status;
2461 	enum mpi3mr_iocstate ioc_state;
2462 
2463 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2464 	    mrioc->facts.max_op_req_q);
2465 
2466 	msix_count_op_q =
2467 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2468 	if (!mrioc->num_queues)
2469 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2470 	/*
2471 	 * During reset set the num_queues to the number of queues
2472 	 * that was set before the reset.
2473 	 */
2474 	num_queues = mrioc->num_op_reply_q ?
2475 	    mrioc->num_op_reply_q : mrioc->num_queues;
2476 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2477 	    num_queues);
2478 
2479 	if (!mrioc->req_qinfo) {
2480 		mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues);
2481 		if (!mrioc->req_qinfo) {
2482 			retval = -1;
2483 			goto out_failed;
2484 		}
2485 
2486 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2487 		    num_queues, GFP_KERNEL);
2488 		if (!mrioc->op_reply_qinfo) {
2489 			retval = -1;
2490 			goto out_failed;
2491 		}
2492 	}
2493 
2494 	if (mrioc->enable_segqueue)
2495 		ioc_info(mrioc,
2496 		    "allocating operational queues through segmented queues\n");
2497 
2498 	for (i = 0; i < num_queues; i++) {
2499 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2500 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2501 			break;
2502 		}
2503 		if (mpi3mr_create_op_req_q(mrioc, i,
2504 		    mrioc->op_reply_qinfo[i].qid)) {
2505 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2506 			mpi3mr_delete_op_reply_q(mrioc, i);
2507 			break;
2508 		}
2509 	}
2510 
2511 	if (i == 0) {
2512 		/* Not even one queue is created successfully*/
2513 		retval = -1;
2514 		goto out_failed;
2515 	}
2516 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2517 	ioc_state = mpi3mr_get_iocstate(mrioc);
2518 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2519 	    ioc_state != MRIOC_STATE_READY) {
2520 		mpi3mr_print_fault_info(mrioc);
2521 		retval = -1;
2522 		goto out_failed;
2523 	}
2524 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2525 	ioc_info(mrioc,
2526 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2527 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2528 	    mrioc->active_poll_qcount);
2529 
2530 	return retval;
2531 out_failed:
2532 	kfree(mrioc->req_qinfo);
2533 	mrioc->req_qinfo = NULL;
2534 
2535 	kfree(mrioc->op_reply_qinfo);
2536 	mrioc->op_reply_qinfo = NULL;
2537 
2538 	return retval;
2539 }
2540 
/**
 * mpi3mr_op_request_post - Post request to operational queue
 * @mrioc: Adapter reference
 * @op_req_q: Operational request queue info
 * @req: MPI3 request
 *
 * Post the MPI3 request into operational request queue and
 * inform the controller, if the queue is full return
 * appropriate error (-EAGAIN so the midlayer retries).
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;
	struct op_reply_qinfo *op_reply_q = NULL;
	/*
	 * Cast used only to read host_tag for queue-full accounting;
	 * assumes host_tag sits at the same offset for all request
	 * types posted here — TODO confirm against the MPI3 headers.
	 */
	struct mpi3_scsi_io_request *scsiio_req =
		(struct mpi3_scsi_io_request *)req;

	reply_qidx = op_req_q->reply_qid - 1;
	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Try to drain the paired reply queue to free up entries */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {

			/* Count a new full episode only at its first I/O */
			if (op_req_q->last_full_host_tag ==
			    MPI3MR_HOSTTAG_INVALID)
				op_req_q->qfull_instances++;

			op_req_q->last_full_host_tag = scsiio_req->host_tag;
			op_req_q->qfull_io_count++;
			retval = -EAGAIN;
			goto out;
		}
	}

	/* Queue has space again: close out the queue-full episode */
	if (op_req_q->last_full_host_tag != MPI3MR_HOSTTAG_INVALID)
		op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID;

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reply queue is nearing to get full, push back IOs to SML */
	if ((mrioc->prevent_reply_qfull == true) &&
		(atomic_read(&op_reply_q->pend_ios) >
	     (op_reply_q->qfull_watermark))) {
		atomic_inc(&mrioc->reply_qfull_count);
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the request slot within the segmented queue memory */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Switch the reply queue to IRQ polling under heavy I/O load */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Ring the doorbell with the new producer index */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2644 
/**
 * mpi3mr_check_rh_fault_ioc - check reset history and fault
 * controller
 * @mrioc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will save snapdump and fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeout as in those cases
 * immediate soft reset invocation is not required.
 *
 * Return:  None.
 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Already reset asynchronously: nothing to fault, just record it */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		/* Already faulted: capture the existing fault code and info */
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		mpi3mr_save_fault_info(mrioc);
		mrioc->fault_during_init = 1;
		mrioc->fwfault_counter++;
		return;
	}

	/* Fault the controller ourselves with the supplied reason code */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll (100ms steps) until the diag-save in progress bit clears */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);

	mpi3mr_save_fault_info(mrioc);
	mrioc->fault_during_init = 1;
	mrioc->fwfault_counter++;
}
2713 
2714 /**
2715  * mpi3mr_sync_timestamp - Issue time stamp sync request
2716  * @mrioc: Adapter reference
2717  *
2718  * Issue IO unit control MPI request to synchronize firmware
2719  * timestamp with host time.
2720  *
2721  * Return: 0 on success, non-zero on failure.
2722  */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	/*
	 * init_cmds is a single shared command slot for internal driver
	 * commands; refuse to issue if another one is still outstanding.
	 */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware takes the host wall-clock time in milliseconds. */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/*
		 * Escalate to a fault check/reset only if a reset did not
		 * already flush this command.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Success also falls through here with retval still 0. */
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2782 
2783 /**
2784  * mpi3mr_print_pkg_ver - display controller fw package version
2785  * @mrioc: Adapter reference
2786  *
2787  * Retrieve firmware package version from the component image
2788  * header of the controller flash and display it.
2789  *
2790  * Return: 0 on success and non-zero on failure.
2791  */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer to receive the component image manifest. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single shared slot for internal driver commands. */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Upload the manifest that immediately follows the image header. */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	/*
	 * Best-effort: retval is set to 0 even when ioc_status was not
	 * SUCCESS and nothing got printed; only posting/timeout failures
	 * above are reported to the caller.
	 */
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2867 
2868 /**
2869  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2870  * @work: work struct
2871  *
2872  * Watch dog work periodically executed (1 second interval) to
2873  * monitor firmware fault and to issue periodic timer sync to
2874  * the firmware.
2875  *
2876  * Return: Nothing.
2877  */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/*
	 * A fault observed during initialization is announced via uevent
	 * on the first watchdog pass, then the flag is cleared.
	 */
	if (mrioc->fault_during_init) {
		mpi3mr_fault_uevent_emit(mrioc);
		mrioc->fault_during_init = 0;
	}

	/*
	 * Do not re-arm while a reset or PCI error recovery is running;
	 * the watchdog is restarted by those paths when appropriate.
	 */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	/* Surprise removal: the device vanished from the bus. */
	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* An invalid I/O completion was seen; recover via soft reset. */
	if (mrioc->invalid_io_comp) {
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_INVALID_COMPLETION, 1);
		return;
	}

	/* Drain admin replies left behind by an unprocessed ISR instance. */
	if (atomic_read(&mrioc->admin_pend_isr)) {
		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
				"flush admin replies\n");
		mpi3mr_process_admin_reply_q(mrioc);
	}

	/*
	 * Periodic timestamp sync with firmware, skipped for
	 * non-supervisor IOCs.
	 */
	if (!(mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {

		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/*
	 * Firmware asked us to prepare for a component-image activation
	 * reset but never followed through; force a soft reset.
	 */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Reset history set means firmware reset behind our back. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	/*
	 * Let an in-progress firmware diag save finish (up to the
	 * MPI3_SYSIF_DIAG_SAVE_TIMEOUT in watchdog ticks) before resetting.
	 */
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	if (!mpi3mr_is_fault_recoverable(mrioc)) {
		mrioc->unrecoverable = 1;
		goto schedule_work;
	}

	mpi3mr_save_fault_info(mrioc);
	mpi3mr_fault_uevent_emit(mrioc);
	mrioc->fwfault_counter++;

	/* Map the fault code to the appropriate recovery action. */
	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A soft reset is already underway; just keep watching. */
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Re-arm the watchdog unless the workqueue is being torn down. */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
3001 
3002 /**
3003  * mpi3mr_start_watchdog - Start watchdog
3004  * @mrioc: Adapter instance reference
3005  *
3006  * Create and start the watchdog thread to monitor controller
3007  * faults.
3008  *
3009  * Return: Nothing.
3010  */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)3011 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
3012 {
3013 	if (mrioc->watchdog_work_q)
3014 		return;
3015 
3016 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
3017 	mrioc->watchdog_work_q = alloc_ordered_workqueue(
3018 		"watchdog_%s%d", WQ_MEM_RECLAIM, mrioc->name, mrioc->id);
3019 	if (!mrioc->watchdog_work_q) {
3020 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
3021 		return;
3022 	}
3023 
3024 	if (mrioc->watchdog_work_q)
3025 		queue_delayed_work(mrioc->watchdog_work_q,
3026 		    &mrioc->watchdog_work,
3027 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
3028 }
3029 
3030 /**
3031  * mpi3mr_stop_watchdog - Stop watchdog
3032  * @mrioc: Adapter instance reference
3033  *
3034  * Stop the watchdog thread created to monitor controller
3035  * faults.
3036  *
3037  * Return: Nothing.
3038  */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)3039 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
3040 {
3041 	unsigned long flags;
3042 	struct workqueue_struct *wq;
3043 
3044 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
3045 	wq = mrioc->watchdog_work_q;
3046 	mrioc->watchdog_work_q = NULL;
3047 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
3048 	if (wq) {
3049 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
3050 			flush_workqueue(wq);
3051 		destroy_workqueue(wq);
3052 	}
3053 }
3054 
3055 /**
3056  * mpi3mr_setup_admin_qpair - Setup admin queue pair
3057  * @mrioc: Adapter instance reference
3058  *
3059  * Allocate memory for admin queue pair if required and register
3060  * the admin queue with the controller.
3061  *
3062  * Return: 0 on success, non-zero on failures.
3063  */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Fixed-size admin queues; entry counts derive from frame sizes. */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected phase bit starts at 1 for a freshly initialized queue. */
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/*
	 * Allocate both queues only when the request queue is absent;
	 * assumes req and reply bases are always allocated/freed as a
	 * pair — NOTE(review): verify against the teardown path.
	 */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Program queue depths (replies in the high 16 bits) and bases. */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
		&mrioc->sysif_regs->admin_request_queue_address,
		&mrioc->adm_req_q_bar_writeq_lock);
	mpi3mr_writeq(mrioc->admin_reply_dma,
		&mrioc->sysif_regs->admin_reply_queue_address,
		&mrioc->adm_reply_q_bar_writeq_lock);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	/* Free whichever queue buffers were successfully allocated. */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
3128 
3129 /**
3130  * mpi3mr_issue_iocfacts - Send IOC Facts
3131  * @mrioc: Adapter instance reference
3132  * @facts_data: Cached IOC facts data
3133  *
3134  * Issue IOC Facts MPI request through admin queue and wait for
3135  * the completion of it or time out.
3136  *
3137  * Return: 0 on success, non-zero on failures.
3138  */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer the controller fills with the facts payload. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single shared slot for internal driver commands. */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer, then parse/cache into mrioc->facts. */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
3211 
3212 /**
 * mpi3mr_check_reset_dma_mask - Check and set new DMA mask
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set; if so, set it.
3218  *
3219  * Return: 0 on success, non-zero on failure.
3220  */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3221 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3222 {
3223 	struct pci_dev *pdev = mrioc->pdev;
3224 	int r;
3225 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3226 
3227 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3228 		return 0;
3229 
3230 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3231 	    mrioc->dma_mask, facts_dma_mask);
3232 
3233 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3234 	if (r) {
3235 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3236 		    facts_dma_mask, r);
3237 		return r;
3238 	}
3239 	mrioc->dma_mask = facts_dma_mask;
3240 	return r;
3241 }
3242 
3243 /**
3244  * mpi3mr_process_factsdata - Process IOC facts data
3245  * @mrioc: Adapter instance reference
3246  * @facts_data: Cached IOC facts data
3247  *
3248  * Convert IOC facts data into cpu endianness and cache it in
3249  * the driver .
3250  *
3251  * Return: Nothing.
3252  */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3253 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3254 	struct mpi3_ioc_facts_data *facts_data)
3255 {
3256 	u32 ioc_config, req_sz, facts_flags;
3257 
3258 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3259 	    (sizeof(*facts_data) / 4)) {
3260 		ioc_warn(mrioc,
3261 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3262 		    sizeof(*facts_data),
3263 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3264 	}
3265 
3266 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3267 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3268 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3269 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3270 		ioc_err(mrioc,
3271 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3272 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3273 	}
3274 
3275 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3276 
3277 	facts_flags = le32_to_cpu(facts_data->flags);
3278 	mrioc->facts.op_req_sz = req_sz;
3279 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3280 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3281 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3282 
3283 	mrioc->facts.ioc_num = facts_data->ioc_number;
3284 	mrioc->facts.who_init = facts_data->who_init;
3285 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3286 	mrioc->facts.personality = (facts_flags &
3287 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3288 	mrioc->facts.dma_mask = (facts_flags &
3289 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3290 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3291 	mrioc->facts.dma_mask = (facts_flags &
3292 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3293 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3294 	mrioc->facts.max_req_limit = (facts_flags &
3295 			MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
3296 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
3297 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3298 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3299 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3300 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3301 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3302 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3303 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3304 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3305 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3306 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3307 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3308 	mrioc->facts.max_pcie_switches =
3309 	    le16_to_cpu(facts_data->max_pcie_switches);
3310 	mrioc->facts.max_sasexpanders =
3311 	    le16_to_cpu(facts_data->max_sas_expanders);
3312 	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3313 	mrioc->facts.max_sasinitiators =
3314 	    le16_to_cpu(facts_data->max_sas_initiators);
3315 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3316 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3317 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3318 	mrioc->facts.max_op_req_q =
3319 	    le16_to_cpu(facts_data->max_operational_request_queues);
3320 	mrioc->facts.max_op_reply_q =
3321 	    le16_to_cpu(facts_data->max_operational_reply_queues);
3322 	mrioc->facts.ioc_capabilities =
3323 	    le32_to_cpu(facts_data->ioc_capabilities);
3324 	mrioc->facts.fw_ver.build_num =
3325 	    le16_to_cpu(facts_data->fw_version.build_num);
3326 	mrioc->facts.fw_ver.cust_id =
3327 	    le16_to_cpu(facts_data->fw_version.customer_id);
3328 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3329 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3330 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3331 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3332 	mrioc->msix_count = min_t(int, mrioc->msix_count,
3333 	    mrioc->facts.max_msix_vectors);
3334 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3335 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3336 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3337 	mrioc->facts.shutdown_timeout =
3338 	    le16_to_cpu(facts_data->shutdown_timeout);
3339 	mrioc->facts.diag_trace_sz =
3340 	    le32_to_cpu(facts_data->diag_trace_size);
3341 	mrioc->facts.diag_fw_sz =
3342 	    le32_to_cpu(facts_data->diag_fw_size);
3343 	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3344 	mrioc->facts.max_dev_per_tg =
3345 	    facts_data->max_devices_per_throttle_group;
3346 	mrioc->facts.io_throttle_data_length =
3347 	    le16_to_cpu(facts_data->io_throttle_data_length);
3348 	mrioc->facts.max_io_throttle_group =
3349 	    le16_to_cpu(facts_data->max_io_throttle_group);
3350 	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3351 	mrioc->facts.io_throttle_high =
3352 	    le16_to_cpu(facts_data->io_throttle_high);
3353 
3354 	if (mrioc->facts.max_data_length ==
3355 	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3356 		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3357 	else
3358 		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3359 	/* Store in 512b block count */
3360 	if (mrioc->facts.io_throttle_data_length)
3361 		mrioc->io_throttle_data_length =
3362 		    (mrioc->facts.io_throttle_data_length * 2 * 4);
3363 	else
3364 		/* set the length to 1MB + 1K to disable throttle */
3365 		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3366 
3367 	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3368 	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3369 
3370 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3371 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3372 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3373 	ioc_info(mrioc,
3374 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3375 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3376 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3377 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3378 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3379 	    mrioc->facts.sge_mod_shift);
3380 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3381 	    mrioc->facts.dma_mask, (facts_flags &
3382 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3383 	ioc_info(mrioc,
3384 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3385 	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3386 	ioc_info(mrioc,
3387 	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3388 	   mrioc->facts.io_throttle_data_length * 4,
3389 	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3390 }
3391 
3392 /**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3394  * @mrioc: Adapter instance reference
3395  *
3396  * Allocate and initialize the reply free buffers, sense
3397  * buffers, reply free queue and sense buffer queue.
3398  *
3399  * Return: 0 on success, non-zero on failures.
3400  */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated on a previous pass (e.g. re-init after reset). */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	/* Per-slot reply buffers for device-removal handshake commands. */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	/* Per-slot reply buffers for event acknowledgment commands. */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Bitmaps sized by the device-handle range reported in IOC facts. */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
						 GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
						  GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: one extra slot each for the full/empty distinction. */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used to validate reply addresses from firmware. */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool,  4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * Partial allocations are intentionally left in place here;
	 * presumably the caller's teardown path frees them — TODO confirm.
	 */
	retval = -1;
	return retval;
}
3528 
3529 /**
3530  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3531  * buffers
3532  * @mrioc: Adapter instance reference
3533  *
3534  * Helper function to initialize reply and sense buffers along
3535  * with some debug prints.
3536  *
3537  * Return:  None.
3538  */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3539 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3540 {
3541 	u32 sz, i;
3542 	dma_addr_t phy_addr;
3543 
3544 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3545 	ioc_info(mrioc,
3546 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3547 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3548 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3549 	sz = mrioc->reply_free_qsz * 8;
3550 	ioc_info(mrioc,
3551 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3552 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3553 	    (unsigned long long)mrioc->reply_free_q_dma);
3554 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3555 	ioc_info(mrioc,
3556 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3557 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3558 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3559 	sz = mrioc->sense_buf_q_sz * 8;
3560 	ioc_info(mrioc,
3561 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3562 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3563 	    (unsigned long long)mrioc->sense_buf_q_dma);
3564 
3565 	/* initialize Reply buffer Queue */
3566 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3567 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3568 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3569 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3570 
3571 	/* initialize Sense Buffer Queue */
3572 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3573 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3574 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3575 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3576 }
3577 
3578 /**
3579  * mpi3mr_issue_iocinit - Send IOC Init
3580  * @mrioc: Adapter instance reference
3581  *
3582  * Issue IOC Init MPI request through admin queue and wait for
3583  * the completion of it or time out.
3584  *
3585  * Return: 0 on success, non-zero on failures.
3586  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/*
	 * DMA-coherent buffer holding the driver information layout; the
	 * firmware reads it through driver_information_address below.
	 */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* (Re)populate the reply free queue and sense buffer free queue */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of the information reported to the IOC */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is the shared tracker used to serialize internal admin commands */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	/* Describe the reply free queue and sense buffer free queue to the IOC */
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Hand the IOC the current wall-clock time in milliseconds */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Escalate to a diag-fault reset check on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/*
	 * Advance the host indexes past all populated entries so the IOC
	 * sees every reply buffer and sense buffer as available.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3696 
3697 /**
3698  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3699  * @mrioc: Adapter instance reference
3700  * @event: MPI event ID
3701  *
3702  * Un mask the specific event by resetting the event_mask
3703  * bitmap.
3704  *
3705  * Return: 0 on success, non-zero on failures.
3706  */
mpi3mr_unmask_events(struct mpi3mr_ioc * mrioc,u16 event)3707 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3708 {
3709 	u32 desired_event;
3710 	u8 word;
3711 
3712 	if (event >= 128)
3713 		return;
3714 
3715 	desired_event = (1 << (event % 32));
3716 	word = event / 32;
3717 
3718 	mrioc->event_masks[word] &= ~desired_event;
3719 }
3720 
3721 /**
3722  * mpi3mr_issue_event_notification - Send event notification
3723  * @mrioc: Adapter instance reference
3724  *
3725  * Issue event notification MPI request through admin queue and
3726  * wait for the completion of it or time out.
3727  *
3728  * Return: 0 on success, non-zero on failures.
3729  */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is the shared tracker used to serialize internal admin commands */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the driver's current event mask bitmap into the request */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		/* Escalate to a diag-fault reset check on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3784 
3785 /**
3786  * mpi3mr_process_event_ack - Process event acknowledgment
3787  * @mrioc: Adapter instance reference
3788  * @event: MPI3 event ID
3789  * @event_ctx: event context
3790  *
3791  * Send event acknowledgment through admin queue and wait for
3792  * it to complete.
3793  *
3794  * Return: 0 on success, non-zero on failures.
3795  */
mpi3mr_process_event_ack(struct mpi3mr_ioc * mrioc,u8 event,u32 event_ctx)3796 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3797 	u32 event_ctx)
3798 {
3799 	struct mpi3_event_ack_request evtack_req;
3800 	int retval = 0;
3801 
3802 	memset(&evtack_req, 0, sizeof(evtack_req));
3803 	mutex_lock(&mrioc->init_cmds.mutex);
3804 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3805 		retval = -1;
3806 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3807 		mutex_unlock(&mrioc->init_cmds.mutex);
3808 		goto out;
3809 	}
3810 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3811 	mrioc->init_cmds.is_waiting = 1;
3812 	mrioc->init_cmds.callback = NULL;
3813 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3814 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3815 	evtack_req.event = event;
3816 	evtack_req.event_context = cpu_to_le32(event_ctx);
3817 
3818 	init_completion(&mrioc->init_cmds.done);
3819 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3820 	    sizeof(evtack_req), 1);
3821 	if (retval) {
3822 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3823 		goto out_unlock;
3824 	}
3825 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3826 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3827 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3828 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3829 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3830 			mpi3mr_check_rh_fault_ioc(mrioc,
3831 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3832 		retval = -1;
3833 		goto out_unlock;
3834 	}
3835 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3836 	    != MPI3_IOCSTATUS_SUCCESS) {
3837 		ioc_err(mrioc,
3838 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3839 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3840 		    mrioc->init_cmds.ioc_loginfo);
3841 		retval = -1;
3842 		goto out_unlock;
3843 	}
3844 
3845 out_unlock:
3846 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3847 	mutex_unlock(&mrioc->init_cmds.mutex);
3848 out:
3849 	return retval;
3850 }
3851 
3852 /**
3853  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3854  * @mrioc: Adapter instance reference
3855  *
3856  * Allocate chain buffers and set a bitmap to indicate free
3857  * chain buffers. Chain buffers are used to pass the SGE
3858  * information along with MPI3 SCSI IO requests for host I/O.
3859  *
3860  * Return: 0 on success, non-zero on failure
3861  */
mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc * mrioc)3862 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3863 {
3864 	int retval = 0;
3865 	u32 sz, i;
3866 	u16 num_chains;
3867 
3868 	if (mrioc->chain_sgl_list)
3869 		return retval;
3870 
3871 	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3872 
3873 	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3874 	    | SHOST_DIX_TYPE1_PROTECTION
3875 	    | SHOST_DIX_TYPE2_PROTECTION
3876 	    | SHOST_DIX_TYPE3_PROTECTION))
3877 		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3878 
3879 	mrioc->chain_buf_count = num_chains;
3880 	sz = sizeof(struct chain_element) * num_chains;
3881 	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3882 	if (!mrioc->chain_sgl_list)
3883 		goto out_failed;
3884 
3885 	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
3886 		MPI3MR_PAGE_SIZE_4K))
3887 		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
3888 			MPI3MR_PAGE_SIZE_4K;
3889 	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
3890 	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
3891 			mrioc->max_sgl_entries, sz/1024);
3892 
3893 	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3894 	    &mrioc->pdev->dev, sz, 16, 0);
3895 	if (!mrioc->chain_buf_pool) {
3896 		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3897 		goto out_failed;
3898 	}
3899 
3900 	for (i = 0; i < num_chains; i++) {
3901 		mrioc->chain_sgl_list[i].addr =
3902 		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3903 		    &mrioc->chain_sgl_list[i].dma_addr);
3904 
3905 		if (!mrioc->chain_sgl_list[i].addr)
3906 			goto out_failed;
3907 	}
3908 	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
3909 	if (!mrioc->chain_bitmap)
3910 		goto out_failed;
3911 	return retval;
3912 out_failed:
3913 	retval = -1;
3914 	return retval;
3915 }
3916 
3917 /**
3918  * mpi3mr_port_enable_complete - Mark port enable complete
3919  * @mrioc: Adapter instance reference
3920  * @drv_cmd: Internal command tracker
3921  *
3922  * Call back for asynchronous port enable request sets the
3923  * driver command to indicate port enable request is complete.
3924  *
3925  * Return: Nothing
3926  */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3927 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3928 	struct mpi3mr_drv_cmd *drv_cmd)
3929 {
3930 	drv_cmd->callback = NULL;
3931 	mrioc->scan_started = 0;
3932 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3933 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3934 	else
3935 		mrioc->scan_failed = drv_cmd->ioc_status;
3936 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3937 }
3938 
3939 /**
3940  * mpi3mr_issue_port_enable - Issue Port Enable
3941  * @mrioc: Adapter instance reference
3942  * @async: Flag to wait for completion or not
3943  *
3944  * Issue Port Enable MPI request through admin queue and if the
3945  * async flag is not set wait for the completion of the port
3946  * enable or time out.
3947  *
3948  * Return: 0 on success, non-zero on failures.
3949  */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is the shared tracker used to serialize internal admin commands */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Completion reported via callback; do not block here */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/* Leave the command pending; the callback releases the tracker */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* Escalate to a diag-fault reset check on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Synchronous path: finalize scan state in-line */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
4001 
/*
 * Protocol type to name mapper structure: maps MPI3 IOC facts protocol
 * flag bits to printable names (used by mpi3mr_print_ioc_info()).
 */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
4011 
/*
 * Capability to name mapper structure: maps MPI3 IOC facts capability
 * bits to printable names (used by mpi3mr_print_ioc_info()).
 */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
4020 
4021 /**
4022  * mpi3mr_repost_diag_bufs - repost host diag buffers
4023  * @mrioc: Adapter instance reference
4024  *
4025  * repost firmware and trace diag buffers based on global
4026  * trigger flag from driver page 2
4027  *
4028  * Return: 0 on success, non-zero on failures.
4029  */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh trigger configuration from driver page 2 first */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/*
	 * A buffer is a repost candidate only if it is allocated and its
	 * last trigger was neither a global nor an element trigger.
	 */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* Honor global-trigger bits in driver page 2 that disable reposting */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/* Save trigger state so it can be restored if the post fails */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/restore pattern for the firmware diag buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
4108 
4109 /**
4110  * mpi3mr_read_tsu_interval - Update time stamp interval
4111  * @mrioc: Adapter instance reference
4112  *
4113  * Update time stamp interval if its defined in driver page 1,
4114  * otherwise use default value.
4115  *
4116  * Return: Nothing
4117  */
4118 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)4119 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
4120 {
4121 	struct mpi3_driver_page1 driver_pg1;
4122 	u16 pg_sz = sizeof(driver_pg1);
4123 	int retval = 0;
4124 
4125 	mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
4126 
4127 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
4128 	if (!retval && driver_pg1.time_stamp_update)
4129 		mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
4130 }
4131 
4132 /**
4133  * mpi3mr_print_ioc_info - Display controller information
4134  * @mrioc: Adapter instance reference
4135  *
4136  * Display controller personality, capability, supported
4137  * protocols etc.
4138  *
4139  * Return: Nothing
4140  */
4141 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)4142 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
4143 {
4144 	int i = 0, bytes_written = 0;
4145 	const char *personality;
4146 	char protocol[50] = {0};
4147 	char capabilities[100] = {0};
4148 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4149 
4150 	switch (mrioc->facts.personality) {
4151 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4152 		personality = "Enhanced HBA";
4153 		break;
4154 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4155 		personality = "RAID";
4156 		break;
4157 	default:
4158 		personality = "Unknown";
4159 		break;
4160 	}
4161 
4162 	ioc_info(mrioc, "Running in %s Personality", personality);
4163 
4164 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4165 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4166 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
4167 
4168 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4169 		if (mrioc->facts.protocol_flags &
4170 		    mpi3mr_protocols[i].protocol) {
4171 			bytes_written += scnprintf(protocol + bytes_written,
4172 				    sizeof(protocol) - bytes_written, "%s%s",
4173 				    bytes_written ? "," : "",
4174 				    mpi3mr_protocols[i].name);
4175 		}
4176 	}
4177 
4178 	bytes_written = 0;
4179 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4180 		if (mrioc->facts.protocol_flags &
4181 		    mpi3mr_capabilities[i].capability) {
4182 			bytes_written += scnprintf(capabilities + bytes_written,
4183 				    sizeof(capabilities) - bytes_written, "%s%s",
4184 				    bytes_written ? "," : "",
4185 				    mpi3mr_capabilities[i].name);
4186 		}
4187 	}
4188 
4189 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4190 		 protocol, capabilities);
4191 }
4192 
4193 /**
4194  * mpi3mr_cleanup_resources - Free PCI resources
4195  * @mrioc: Adapter instance reference
4196  *
4197  * Unmap PCI device memory and disable PCI device.
4198  *
4199  * Return: 0 on success and non-zero on failure.
4200  */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4201 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4202 {
4203 	struct pci_dev *pdev = mrioc->pdev;
4204 
4205 	mpi3mr_cleanup_isr(mrioc);
4206 
4207 	if (mrioc->sysif_regs) {
4208 		iounmap((void __iomem *)mrioc->sysif_regs);
4209 		mrioc->sysif_regs = NULL;
4210 	}
4211 
4212 	if (pci_is_enabled(pdev)) {
4213 		if (mrioc->bars)
4214 			pci_release_selected_regions(pdev, mrioc->bars);
4215 		pci_disable_device(pdev);
4216 	}
4217 }
4218 
4219 /**
4220  * mpi3mr_setup_resources - Enable PCI resources
4221  * @mrioc: Adapter instance reference
4222  *
4223  * Enable PCI device memory, MSI-x registers and set DMA mask.
4224  *
4225  * Return: 0 on success and non-zero on failure.
4226  */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer any previously negotiated mask; else 64-bit when dma_addr_t allows */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* The controller must expose MSI-X; there is no INTx fallback here */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR as the system interface register window */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	/* Try the preferred DMA mask first, fall back to 32-bit on failure */
	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/*
	 * MSI-X message control is at offset 2 in the capability; its low
	 * 11 bits encode the table size minus one.
	 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* Cap poll queues so at least two vectors remain for other uses */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
				mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4316 
4317 /**
4318  * mpi3mr_enable_events - Enable required events
4319  * @mrioc: Adapter instance reference
4320  *
4321  * This routine unmasks the events required by the driver by
4322  * sennding appropriate event mask bitmapt through an event
4323  * notification request.
4324  *
4325  * Return: 0 on success and non-zero on failure.
4326  */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4327 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4328 {
4329 	int retval = 0;
4330 	u32  i;
4331 
4332 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4333 		mrioc->event_masks[i] = -1;
4334 
4335 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4336 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4337 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4338 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4339 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4340 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4341 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4342 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4343 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4344 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4345 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4346 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4347 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4348 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4349 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4350 
4351 	retval = mpi3mr_issue_event_notification(mrioc);
4352 	if (retval)
4353 		ioc_err(mrioc, "failed to issue event notification %d\n",
4354 		    retval);
4355 	return retval;
4356 }
4357 
4358 /**
4359  * mpi3mr_init_ioc - Initialize the controller
4360  * @mrioc: Adapter instance reference
4361  *
4362  * This the controller initialization routine, executed either
4363  * after soft reset or from pci probe callback.
4364  * Setup the required resources, memory map the controller
4365  * registers, create admin and operational reply queue pairs,
4366  * allocate required memory for reply pool, sense buffer pool,
4367  * issue IOC init request to the firmware, unmask the events and
4368  * issue port enable to discover SAS/SATA/NVMe devies and RAID
4369  * volumes.
4370  *
4371  * Return: 0 on success and non-zero on failure.
4372  */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Transition the controller into the ready state */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* First-pass ISR setup (single vector) for the admin queue */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive driver limits from the reported IOC facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Minimize outstanding I/O in the kdump kernel */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* SAS transport layer is used only without multipath capability */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	/* Diag buffer posting is best-effort; warn but continue */
	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Second-pass ISR setup with the full vector count */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* DMA buffer for the persistent event log sequence numbers */
	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Retryable failures: reset buffers and restart the sequence (max 2 retries) */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* Fatal: fault the controller and mark it unrecoverable */
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4549 
4550 /**
4551  * mpi3mr_reinit_ioc - Re-Initialize the controller
4552  * @mrioc: Adapter instance reference
4553  * @is_resume: Called from resume or reset path
4554  *
4555  * This the controller re-initialization routine, executed from
4556  * the soft reset handler or resume callback. Creates
4557  * operational reply queue pairs, allocate required memory for
4558  * reply pool, sense buffer pool, issue IOC init request to the
4559  * firmware, unmask the events and issue port enable to discover
4560  * SAS/SATA/NVMe devices and RAID volumes.
4561  *
4562  * Return: 0 on success and non-zero on failure.
4563  */
mpi3mr_reinit_ioc(struct mpi3mr_ioc * mrioc,u8 is_resume)4564 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4565 {
4566 	int retval = 0;
4567 	u8 retry = 0;
4568 	struct mpi3_ioc_facts_data facts_data;
4569 	u32 pe_timeout, ioc_status;
4570 
4571 retry_init:
4572 	pe_timeout =
4573 	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4574 
4575 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
4576 	retval = mpi3mr_bring_ioc_ready(mrioc);
4577 	if (retval) {
4578 		ioc_err(mrioc, "failed to bring to ready state\n");
4579 		goto out_failed_noretry;
4580 	}
4581 
4582 	mrioc->io_admin_reset_sync = 0;
4583 	if (is_resume || mrioc->block_on_pci_err) {
4584 		dprint_reset(mrioc, "setting up single ISR\n");
4585 		retval = mpi3mr_setup_isr(mrioc, 1);
4586 		if (retval) {
4587 			ioc_err(mrioc, "failed to setup ISR\n");
4588 			goto out_failed_noretry;
4589 		}
4590 	} else
4591 		mpi3mr_ioc_enable_intr(mrioc);
4592 
4593 	dprint_reset(mrioc, "getting ioc_facts\n");
4594 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4595 	if (retval) {
4596 		ioc_err(mrioc, "failed to get ioc_facts\n");
4597 		goto out_failed;
4598 	}
4599 
4600 	dprint_reset(mrioc, "validating ioc_facts\n");
4601 	retval = mpi3mr_revalidate_factsdata(mrioc);
4602 	if (retval) {
4603 		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4604 		goto out_failed_noretry;
4605 	}
4606 
4607 	mpi3mr_read_tsu_interval(mrioc);
4608 	mpi3mr_print_ioc_info(mrioc);
4609 
4610 	if (is_resume) {
4611 		dprint_reset(mrioc, "posting host diag buffers\n");
4612 		retval = mpi3mr_post_diag_bufs(mrioc);
4613 		if (retval)
4614 			ioc_warn(mrioc, "failed to post host diag buffers\n");
4615 	} else {
4616 		retval = mpi3mr_repost_diag_bufs(mrioc);
4617 		if (retval)
4618 			ioc_warn(mrioc, "failed to re post host diag buffers\n");
4619 	}
4620 
4621 	dprint_reset(mrioc, "sending ioc_init\n");
4622 	retval = mpi3mr_issue_iocinit(mrioc);
4623 	if (retval) {
4624 		ioc_err(mrioc, "failed to send ioc_init\n");
4625 		goto out_failed;
4626 	}
4627 
4628 	dprint_reset(mrioc, "getting package version\n");
4629 	retval = mpi3mr_print_pkg_ver(mrioc);
4630 	if (retval) {
4631 		ioc_err(mrioc, "failed to get package version\n");
4632 		goto out_failed;
4633 	}
4634 
4635 	if (is_resume || mrioc->block_on_pci_err) {
4636 		dprint_reset(mrioc, "setting up multiple ISR\n");
4637 		retval = mpi3mr_setup_isr(mrioc, 0);
4638 		if (retval) {
4639 			ioc_err(mrioc, "failed to re-setup ISR\n");
4640 			goto out_failed_noretry;
4641 		}
4642 	}
4643 
4644 	dprint_reset(mrioc, "creating operational queue pairs\n");
4645 	retval = mpi3mr_create_op_queues(mrioc);
4646 	if (retval) {
4647 		ioc_err(mrioc, "failed to create operational queue pairs\n");
4648 		goto out_failed;
4649 	}
4650 
4651 	if (!mrioc->pel_seqnum_virt) {
4652 		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4653 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4654 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4655 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4656 		    GFP_KERNEL);
4657 		if (!mrioc->pel_seqnum_virt) {
4658 			retval = -ENOMEM;
4659 			goto out_failed_noretry;
4660 		}
4661 	}
4662 
4663 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4664 		ioc_err(mrioc,
4665 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4666 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4667 		retval = -1;
4668 		goto out_failed_noretry;
4669 	}
4670 
4671 	dprint_reset(mrioc, "enabling events\n");
4672 	retval = mpi3mr_enable_events(mrioc);
4673 	if (retval) {
4674 		ioc_err(mrioc, "failed to enable events\n");
4675 		goto out_failed;
4676 	}
4677 
4678 	mrioc->device_refresh_on = 1;
4679 	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4680 
4681 	ioc_info(mrioc, "sending port enable\n");
4682 	retval = mpi3mr_issue_port_enable(mrioc, 1);
4683 	if (retval) {
4684 		ioc_err(mrioc, "failed to issue port enable\n");
4685 		goto out_failed;
4686 	}
4687 	do {
4688 		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4689 		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4690 			break;
4691 		if (!pci_device_is_present(mrioc->pdev))
4692 			mrioc->unrecoverable = 1;
4693 		if (mrioc->unrecoverable) {
4694 			retval = -1;
4695 			goto out_failed_noretry;
4696 		}
4697 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4698 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4699 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4700 			mpi3mr_print_fault_info(mrioc);
4701 			mrioc->init_cmds.is_waiting = 0;
4702 			mrioc->init_cmds.callback = NULL;
4703 			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4704 			goto out_failed;
4705 		}
4706 	} while (--pe_timeout);
4707 
4708 	if (!pe_timeout) {
4709 		ioc_err(mrioc, "port enable timed out\n");
4710 		mpi3mr_check_rh_fault_ioc(mrioc,
4711 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4712 		mrioc->init_cmds.is_waiting = 0;
4713 		mrioc->init_cmds.callback = NULL;
4714 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4715 		goto out_failed;
4716 	} else if (mrioc->scan_failed) {
4717 		ioc_err(mrioc,
4718 		    "port enable failed with status=0x%04x\n",
4719 		    mrioc->scan_failed);
4720 	} else
4721 		ioc_info(mrioc, "port enable completed successfully\n");
4722 
4723 	ioc_info(mrioc, "controller %s completed successfully\n",
4724 	    (is_resume)?"resume":"re-initialization");
4725 	return retval;
4726 out_failed:
4727 	if (retry < 2) {
4728 		retry++;
4729 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4730 		    (is_resume)?"resume":"re-initialization", retry);
4731 		mpi3mr_memset_buffers(mrioc);
4732 		goto retry_init;
4733 	}
4734 	retval = -1;
4735 out_failed_noretry:
4736 	ioc_err(mrioc, "controller %s is failed\n",
4737 	    (is_resume)?"resume":"re-initialization");
4738 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4739 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4740 	mrioc->unrecoverable = 1;
4741 	return retval;
4742 }
4743 
4744 /**
4745  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4746  *					segments
4747  * @mrioc: Adapter instance reference
4748  * @qidx: Operational reply queue index
4749  *
4750  * Return: Nothing.
4751  */
mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4752 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4753 {
4754 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4755 	struct segments *segments;
4756 	int i, size;
4757 
4758 	if (!op_reply_q->q_segments)
4759 		return;
4760 
4761 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4762 	segments = op_reply_q->q_segments;
4763 	for (i = 0; i < op_reply_q->num_segments; i++)
4764 		memset(segments[i].segment, 0, size);
4765 }
4766 
4767 /**
4768  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4769  *					segments
4770  * @mrioc: Adapter instance reference
4771  * @qidx: Operational request queue index
4772  *
4773  * Return: Nothing.
4774  */
mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4775 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4776 {
4777 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4778 	struct segments *segments;
4779 	int i, size;
4780 
4781 	if (!op_req_q->q_segments)
4782 		return;
4783 
4784 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4785 	segments = op_req_q->q_segments;
4786 	for (i = 0; i < op_req_q->num_segments; i++)
4787 		memset(segments[i].segment, 0, size);
4788 }
4789 
4790 /**
4791  * mpi3mr_memset_buffers - memset memory for a controller
4792  * @mrioc: Adapter instance reference
4793  *
4794  * clear all the memory allocated for a controller, typically
4795  * called post reset to reuse the memory allocated during the
4796  * controller init.
4797  *
4798  * Return: Nothing.
4799  */
mpi3mr_memset_buffers(struct mpi3mr_ioc * mrioc)4800 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4801 {
4802 	u16 i;
4803 	struct mpi3mr_throttle_group_info *tg;
4804 
4805 	mrioc->change_count = 0;
4806 	mrioc->active_poll_qcount = 0;
4807 	mrioc->default_qcount = 0;
4808 	if (mrioc->admin_req_base)
4809 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4810 	if (mrioc->admin_reply_base)
4811 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4812 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4813 	atomic_set(&mrioc->admin_pend_isr, 0);
4814 
4815 	if (mrioc->init_cmds.reply) {
4816 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4817 		memset(mrioc->bsg_cmds.reply, 0,
4818 		    sizeof(*mrioc->bsg_cmds.reply));
4819 		memset(mrioc->host_tm_cmds.reply, 0,
4820 		    sizeof(*mrioc->host_tm_cmds.reply));
4821 		memset(mrioc->pel_cmds.reply, 0,
4822 		    sizeof(*mrioc->pel_cmds.reply));
4823 		memset(mrioc->pel_abort_cmd.reply, 0,
4824 		    sizeof(*mrioc->pel_abort_cmd.reply));
4825 		memset(mrioc->transport_cmds.reply, 0,
4826 		    sizeof(*mrioc->transport_cmds.reply));
4827 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4828 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4829 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4830 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4831 			memset(mrioc->evtack_cmds[i].reply, 0,
4832 			    sizeof(*mrioc->evtack_cmds[i].reply));
4833 		bitmap_clear(mrioc->removepend_bitmap, 0,
4834 			     mrioc->dev_handle_bitmap_bits);
4835 		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4836 		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4837 			     MPI3MR_NUM_EVTACKCMD);
4838 	}
4839 
4840 	for (i = 0; i < mrioc->num_queues; i++) {
4841 		if (mrioc->op_reply_qinfo) {
4842 			mrioc->op_reply_qinfo[i].qid = 0;
4843 			mrioc->op_reply_qinfo[i].ci = 0;
4844 			mrioc->op_reply_qinfo[i].num_replies = 0;
4845 			mrioc->op_reply_qinfo[i].ephase = 0;
4846 			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4847 			atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4848 			mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4849 		}
4850 
4851 		if (mrioc->req_qinfo) {
4852 			mrioc->req_qinfo[i].ci = 0;
4853 			mrioc->req_qinfo[i].pi = 0;
4854 			mrioc->req_qinfo[i].num_requests = 0;
4855 			mrioc->req_qinfo[i].qid = 0;
4856 			mrioc->req_qinfo[i].reply_qid = 0;
4857 			spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4858 			mrioc->req_qinfo[i].last_full_host_tag = 0;
4859 			mpi3mr_memset_op_req_q_buffers(mrioc, i);
4860 		}
4861 	}
4862 
4863 	atomic_set(&mrioc->pend_large_data_sz, 0);
4864 	if (mrioc->throttle_groups) {
4865 		tg = mrioc->throttle_groups;
4866 		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4867 			tg->id = 0;
4868 			tg->fw_qd = 0;
4869 			tg->modified_qd = 0;
4870 			tg->io_divert = 0;
4871 			tg->need_qd_reduction = 0;
4872 			tg->high = 0;
4873 			tg->low = 0;
4874 			tg->qd_reduction = 0;
4875 			atomic_set(&tg->pend_large_data_sz, 0);
4876 		}
4877 	}
4878 }
4879 
4880 /**
4881  * mpi3mr_free_mem - Free memory allocated for a controller
4882  * @mrioc: Adapter instance reference
4883  *
4884  * Free all the memory allocated for a controller.
4885  *
4886  * Return: Nothing.
4887  */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4888 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4889 {
4890 	u16 i, j;
4891 	struct mpi3mr_intr_info *intr_info;
4892 	struct diag_buffer_desc *diag_buffer;
4893 
4894 	mpi3mr_free_enclosure_list(mrioc);
4895 	mpi3mr_free_ioctl_dma_memory(mrioc);
4896 
4897 	if (mrioc->sense_buf_pool) {
4898 		if (mrioc->sense_buf)
4899 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4900 			    mrioc->sense_buf_dma);
4901 		dma_pool_destroy(mrioc->sense_buf_pool);
4902 		mrioc->sense_buf = NULL;
4903 		mrioc->sense_buf_pool = NULL;
4904 	}
4905 	if (mrioc->sense_buf_q_pool) {
4906 		if (mrioc->sense_buf_q)
4907 			dma_pool_free(mrioc->sense_buf_q_pool,
4908 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4909 		dma_pool_destroy(mrioc->sense_buf_q_pool);
4910 		mrioc->sense_buf_q = NULL;
4911 		mrioc->sense_buf_q_pool = NULL;
4912 	}
4913 
4914 	if (mrioc->reply_buf_pool) {
4915 		if (mrioc->reply_buf)
4916 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4917 			    mrioc->reply_buf_dma);
4918 		dma_pool_destroy(mrioc->reply_buf_pool);
4919 		mrioc->reply_buf = NULL;
4920 		mrioc->reply_buf_pool = NULL;
4921 	}
4922 	if (mrioc->reply_free_q_pool) {
4923 		if (mrioc->reply_free_q)
4924 			dma_pool_free(mrioc->reply_free_q_pool,
4925 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4926 		dma_pool_destroy(mrioc->reply_free_q_pool);
4927 		mrioc->reply_free_q = NULL;
4928 		mrioc->reply_free_q_pool = NULL;
4929 	}
4930 
4931 	for (i = 0; i < mrioc->num_op_req_q; i++)
4932 		mpi3mr_free_op_req_q_segments(mrioc, i);
4933 
4934 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4935 		mpi3mr_free_op_reply_q_segments(mrioc, i);
4936 
4937 	for (i = 0; i < mrioc->intr_info_count; i++) {
4938 		intr_info = mrioc->intr_info + i;
4939 		intr_info->op_reply_q = NULL;
4940 	}
4941 
4942 	kfree(mrioc->req_qinfo);
4943 	mrioc->req_qinfo = NULL;
4944 	mrioc->num_op_req_q = 0;
4945 
4946 	kfree(mrioc->op_reply_qinfo);
4947 	mrioc->op_reply_qinfo = NULL;
4948 	mrioc->num_op_reply_q = 0;
4949 
4950 	kfree(mrioc->init_cmds.reply);
4951 	mrioc->init_cmds.reply = NULL;
4952 
4953 	kfree(mrioc->bsg_cmds.reply);
4954 	mrioc->bsg_cmds.reply = NULL;
4955 
4956 	kfree(mrioc->host_tm_cmds.reply);
4957 	mrioc->host_tm_cmds.reply = NULL;
4958 
4959 	kfree(mrioc->pel_cmds.reply);
4960 	mrioc->pel_cmds.reply = NULL;
4961 
4962 	kfree(mrioc->pel_abort_cmd.reply);
4963 	mrioc->pel_abort_cmd.reply = NULL;
4964 
4965 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4966 		kfree(mrioc->evtack_cmds[i].reply);
4967 		mrioc->evtack_cmds[i].reply = NULL;
4968 	}
4969 
4970 	bitmap_free(mrioc->removepend_bitmap);
4971 	mrioc->removepend_bitmap = NULL;
4972 
4973 	bitmap_free(mrioc->devrem_bitmap);
4974 	mrioc->devrem_bitmap = NULL;
4975 
4976 	bitmap_free(mrioc->evtack_cmds_bitmap);
4977 	mrioc->evtack_cmds_bitmap = NULL;
4978 
4979 	bitmap_free(mrioc->chain_bitmap);
4980 	mrioc->chain_bitmap = NULL;
4981 
4982 	kfree(mrioc->transport_cmds.reply);
4983 	mrioc->transport_cmds.reply = NULL;
4984 
4985 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4986 		kfree(mrioc->dev_rmhs_cmds[i].reply);
4987 		mrioc->dev_rmhs_cmds[i].reply = NULL;
4988 	}
4989 
4990 	if (mrioc->chain_buf_pool) {
4991 		for (i = 0; i < mrioc->chain_buf_count; i++) {
4992 			if (mrioc->chain_sgl_list[i].addr) {
4993 				dma_pool_free(mrioc->chain_buf_pool,
4994 				    mrioc->chain_sgl_list[i].addr,
4995 				    mrioc->chain_sgl_list[i].dma_addr);
4996 				mrioc->chain_sgl_list[i].addr = NULL;
4997 			}
4998 		}
4999 		dma_pool_destroy(mrioc->chain_buf_pool);
5000 		mrioc->chain_buf_pool = NULL;
5001 	}
5002 
5003 	kfree(mrioc->chain_sgl_list);
5004 	mrioc->chain_sgl_list = NULL;
5005 
5006 	if (mrioc->admin_reply_base) {
5007 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
5008 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
5009 		mrioc->admin_reply_base = NULL;
5010 	}
5011 	if (mrioc->admin_req_base) {
5012 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
5013 		    mrioc->admin_req_base, mrioc->admin_req_dma);
5014 		mrioc->admin_req_base = NULL;
5015 	}
5016 
5017 	if (mrioc->pel_seqnum_virt) {
5018 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
5019 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
5020 		mrioc->pel_seqnum_virt = NULL;
5021 	}
5022 
5023 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
5024 		diag_buffer = &mrioc->diag_buffers[i];
5025 		if ((i == 0) && mrioc->seg_tb_support) {
5026 			if (mrioc->trace_buf_pool) {
5027 				for (j = 0; j < mrioc->num_tb_segs; j++) {
5028 					if (mrioc->trace_buf[j].segment) {
5029 						dma_pool_free(mrioc->trace_buf_pool,
5030 						    mrioc->trace_buf[j].segment,
5031 						    mrioc->trace_buf[j].segment_dma);
5032 						mrioc->trace_buf[j].segment = NULL;
5033 					}
5034 
5035 					mrioc->trace_buf[j].segment = NULL;
5036 				}
5037 				dma_pool_destroy(mrioc->trace_buf_pool);
5038 				mrioc->trace_buf_pool = NULL;
5039 			}
5040 
5041 			kfree(mrioc->trace_buf);
5042 			mrioc->trace_buf = NULL;
5043 			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
5044 		}
5045 		if (diag_buffer->addr) {
5046 			dma_free_coherent(&mrioc->pdev->dev,
5047 			    diag_buffer->size, diag_buffer->addr,
5048 			    diag_buffer->dma_addr);
5049 			diag_buffer->addr = NULL;
5050 			diag_buffer->size = 0;
5051 			diag_buffer->type = 0;
5052 			diag_buffer->status = 0;
5053 		}
5054 	}
5055 
5056 	kfree(mrioc->throttle_groups);
5057 	mrioc->throttle_groups = NULL;
5058 
5059 	kfree(mrioc->logdata_buf);
5060 	mrioc->logdata_buf = NULL;
5061 
5062 }
5063 
5064 /**
5065  * mpi3mr_issue_ioc_shutdown - shutdown controller
5066  * @mrioc: Adapter instance reference
5067  *
5068  * Send shutodwn notification to the controller and wait for the
5069  * shutdown_timeout for it to be completed.
5070  *
5071  * Return: Nothing.
5072  */
mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc * mrioc)5073 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
5074 {
5075 	u32 ioc_config, ioc_status, shutdown_action;
5076 	u8 retval = 1, retry = 0;
5077 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10, timeout_remaining = 0;
5078 
5079 	ioc_info(mrioc, "Issuing shutdown Notification\n");
5080 	if (mrioc->unrecoverable) {
5081 		ioc_warn(mrioc,
5082 		    "IOC is unrecoverable shutdown is not issued\n");
5083 		return;
5084 	}
5085 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5086 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5087 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
5088 		ioc_info(mrioc, "shutdown already in progress\n");
5089 		return;
5090 	}
5091 
5092 	shutdown_action = MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL |
5093 	    MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
5094 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5095 	ioc_config |= shutdown_action;
5096 
5097 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
5098 
5099 	if (mrioc->facts.shutdown_timeout)
5100 		timeout = mrioc->facts.shutdown_timeout * 10;
5101 	timeout_remaining = timeout;
5102 
5103 	do {
5104 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5105 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5106 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
5107 			retval = 0;
5108 			break;
5109 		}
5110 		if (mrioc->unrecoverable)
5111 			break;
5112 		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
5113 			mpi3mr_print_fault_info(mrioc);
5114 			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
5115 				break;
5116 			if (mpi3mr_issue_reset(mrioc,
5117 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5118 			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5119 				break;
5120 			ioc_config =
5121 			    readl(&mrioc->sysif_regs->ioc_configuration);
5122 			ioc_config |= shutdown_action;
5123 			writel(ioc_config,
5124 			    &mrioc->sysif_regs->ioc_configuration);
5125 			timeout_remaining = timeout;
5126 			retry++;
5127 		}
5128 		msleep(100);
5129 	} while (--timeout_remaining);
5130 
5131 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5132 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5133 
5134 	if (retval) {
5135 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5136 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
5137 			ioc_warn(mrioc,
5138 			    "shutdown still in progress after timeout\n");
5139 	}
5140 
5141 	ioc_info(mrioc,
5142 	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
5143 	    (!retval) ? "successful" : "failed", ioc_status,
5144 	    ioc_config);
5145 }
5146 
5147 /**
5148  * mpi3mr_cleanup_ioc - Cleanup controller
5149  * @mrioc: Adapter instance reference
5150  *
5151  * controller cleanup handler, Message unit reset or soft reset
5152  * and shutdown notification is issued to the controller.
5153  *
5154  * Return: Nothing.
5155  */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)5156 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
5157 {
5158 	enum mpi3mr_iocstate ioc_state;
5159 
5160 	dprint_exit(mrioc, "cleaning up the controller\n");
5161 	mpi3mr_ioc_disable_intr(mrioc);
5162 
5163 	ioc_state = mpi3mr_get_iocstate(mrioc);
5164 
5165 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
5166 	    !mrioc->pci_err_recovery &&
5167 	    (ioc_state == MRIOC_STATE_READY)) {
5168 		if (mpi3mr_issue_and_process_mur(mrioc,
5169 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5170 			mpi3mr_issue_reset(mrioc,
5171 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5172 			    MPI3MR_RESET_FROM_MUR_FAILURE);
5173 		mpi3mr_issue_ioc_shutdown(mrioc);
5174 	}
5175 	dprint_exit(mrioc, "controller cleanup completed\n");
5176 }
5177 
5178 /**
5179  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5180  * @mrioc: Adapter instance reference
5181  * @cmdptr: Internal command tracker
5182  *
5183  * Complete an internal driver commands with state indicating it
5184  * is completed due to reset.
5185  *
5186  * Return: Nothing.
5187  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5188 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5189 	struct mpi3mr_drv_cmd *cmdptr)
5190 {
5191 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5192 		cmdptr->state |= MPI3MR_CMD_RESET;
5193 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5194 		if (cmdptr->is_waiting) {
5195 			complete(&cmdptr->done);
5196 			cmdptr->is_waiting = 0;
5197 		} else if (cmdptr->callback)
5198 			cmdptr->callback(mrioc, cmdptr);
5199 	}
5200 }
5201 
5202 /**
5203  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
5204  * @mrioc: Adapter instance reference
5205  *
5206  * Flush all internal driver commands post reset
5207  *
5208  * Return: Nothing.
5209  */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5210 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5211 {
5212 	struct mpi3mr_drv_cmd *cmdptr;
5213 	u8 i;
5214 
5215 	cmdptr = &mrioc->init_cmds;
5216 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5217 
5218 	cmdptr = &mrioc->cfg_cmds;
5219 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5220 
5221 	cmdptr = &mrioc->bsg_cmds;
5222 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5223 	cmdptr = &mrioc->host_tm_cmds;
5224 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5225 
5226 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5227 		cmdptr = &mrioc->dev_rmhs_cmds[i];
5228 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5229 	}
5230 
5231 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5232 		cmdptr = &mrioc->evtack_cmds[i];
5233 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5234 	}
5235 
5236 	cmdptr = &mrioc->pel_cmds;
5237 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5238 
5239 	cmdptr = &mrioc->pel_abort_cmd;
5240 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5241 
5242 	cmdptr = &mrioc->transport_cmds;
5243 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5244 }
5245 
5246 /**
5247  * mpi3mr_pel_wait_post - Issue PEL Wait
5248  * @mrioc: Adapter instance reference
5249  * @drv_cmd: Internal command tracker
5250  *
5251  * Issue PEL Wait MPI request through admin queue and return.
5252  *
5253  * Return: Nothing.
5254  */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5255 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5256 	struct mpi3mr_drv_cmd *drv_cmd)
5257 {
5258 	struct mpi3_pel_req_action_wait pel_wait;
5259 
5260 	mrioc->pel_abort_requested = false;
5261 
5262 	memset(&pel_wait, 0, sizeof(pel_wait));
5263 	drv_cmd->state = MPI3MR_CMD_PENDING;
5264 	drv_cmd->is_waiting = 0;
5265 	drv_cmd->callback = mpi3mr_pel_wait_complete;
5266 	drv_cmd->ioc_status = 0;
5267 	drv_cmd->ioc_loginfo = 0;
5268 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5269 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5270 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
5271 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5272 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5273 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
5274 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5275 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5276 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5277 
5278 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5279 		dprint_bsg_err(mrioc,
5280 			    "Issuing PELWait: Admin post failed\n");
5281 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
5282 		drv_cmd->callback = NULL;
5283 		drv_cmd->retry_count = 0;
5284 		mrioc->pel_enabled = false;
5285 	}
5286 }
5287 
5288 /**
5289  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5290  * @mrioc: Adapter instance reference
5291  * @drv_cmd: Internal command tracker
5292  *
5293  * Issue PEL get sequence number MPI request through admin queue
5294  * and return.
5295  *
5296  * Return: 0 on success, non-zero on failure.
5297  */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5298 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5299 	struct mpi3mr_drv_cmd *drv_cmd)
5300 {
5301 	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5302 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5303 	int retval = 0;
5304 
5305 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5306 	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5307 	mrioc->pel_cmds.is_waiting = 0;
5308 	mrioc->pel_cmds.ioc_status = 0;
5309 	mrioc->pel_cmds.ioc_loginfo = 0;
5310 	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5311 	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5312 	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5313 	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5314 	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5315 	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5316 
5317 	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5318 			sizeof(pel_getseq_req), 0);
5319 	if (retval) {
5320 		if (drv_cmd) {
5321 			drv_cmd->state = MPI3MR_CMD_NOTUSED;
5322 			drv_cmd->callback = NULL;
5323 			drv_cmd->retry_count = 0;
5324 		}
5325 		mrioc->pel_enabled = false;
5326 	}
5327 
5328 	return retval;
5329 }
5330 
5331 /**
5332  * mpi3mr_pel_wait_complete - PELWait Completion callback
5333  * @mrioc: Adapter instance reference
5334  * @drv_cmd: Internal command tracker
5335  *
5336  * This is a callback handler for the PELWait request and
5337  * firmware completes a PELWait request when it is aborted or a
5338  * new PEL entry is available. This sends AEN to the application
5339  * and if the PELwait completion is not due to PELAbort then
5340  * this will send a request for new PEL Sequence number
5341  *
5342  * Return: Nothing.
5343  */
mpi3mr_pel_wait_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5344 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
5345 	struct mpi3mr_drv_cmd *drv_cmd)
5346 {
5347 	struct mpi3_pel_reply *pel_reply = NULL;
5348 	u16 ioc_status, pe_log_status;
5349 	bool do_retry = false;
5350 
5351 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5352 		goto cleanup_drv_cmd;
5353 
5354 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5355 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5356 		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
5357 			__func__, ioc_status, drv_cmd->ioc_loginfo);
5358 		dprint_bsg_err(mrioc,
5359 		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5360 		    ioc_status, drv_cmd->ioc_loginfo);
5361 		do_retry = true;
5362 	}
5363 
5364 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5365 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5366 
5367 	if (!pel_reply) {
5368 		dprint_bsg_err(mrioc,
5369 		    "pel_wait: failed due to no reply\n");
5370 		goto out_failed;
5371 	}
5372 
5373 	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
5374 	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
5375 	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
5376 		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
5377 			__func__, pe_log_status);
5378 		dprint_bsg_err(mrioc,
5379 		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
5380 		    pe_log_status);
5381 		do_retry = true;
5382 	}
5383 
5384 	if (do_retry) {
5385 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5386 			drv_cmd->retry_count++;
5387 			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
5388 			    drv_cmd->retry_count);
5389 			mpi3mr_pel_wait_post(mrioc, drv_cmd);
5390 			return;
5391 		}
5392 		dprint_bsg_err(mrioc,
5393 		    "pel_wait: failed after all retries(%d)\n",
5394 		    drv_cmd->retry_count);
5395 		goto out_failed;
5396 	}
5397 	atomic64_inc(&event_counter);
5398 	if (!mrioc->pel_abort_requested) {
5399 		mrioc->pel_cmds.retry_count = 0;
5400 		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
5401 	}
5402 
5403 	return;
5404 out_failed:
5405 	mrioc->pel_enabled = false;
5406 cleanup_drv_cmd:
5407 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5408 	drv_cmd->callback = NULL;
5409 	drv_cmd->retry_count = 0;
5410 }
5411 
5412 /**
5413  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5414  * @mrioc: Adapter instance reference
5415  * @drv_cmd: Internal command tracker
5416  *
5417  * This is a callback handler for the PEL get sequence number
5418  * request and a new PEL wait request will be issued to the
5419  * firmware from this
5420  *
5421  * Return: Nothing.
5422  */
mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5423 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
5424 	struct mpi3mr_drv_cmd *drv_cmd)
5425 {
5426 	struct mpi3_pel_reply *pel_reply = NULL;
5427 	struct mpi3_pel_seq *pel_seqnum_virt;
5428 	u16 ioc_status;
5429 	bool do_retry = false;
5430 
5431 	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
5432 
5433 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5434 		goto cleanup_drv_cmd;
5435 
5436 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5437 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5438 		dprint_bsg_err(mrioc,
5439 		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5440 		    ioc_status, drv_cmd->ioc_loginfo);
5441 		do_retry = true;
5442 	}
5443 
5444 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5445 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5446 	if (!pel_reply) {
5447 		dprint_bsg_err(mrioc,
5448 		    "pel_get_seqnum: failed due to no reply\n");
5449 		goto out_failed;
5450 	}
5451 
5452 	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
5453 		dprint_bsg_err(mrioc,
5454 		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
5455 		    le16_to_cpu(pel_reply->pe_log_status));
5456 		do_retry = true;
5457 	}
5458 
5459 	if (do_retry) {
5460 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5461 			drv_cmd->retry_count++;
5462 			dprint_bsg_err(mrioc,
5463 			    "pel_get_seqnum: retrying(%d)\n",
5464 			    drv_cmd->retry_count);
5465 			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
5466 			return;
5467 		}
5468 
5469 		dprint_bsg_err(mrioc,
5470 		    "pel_get_seqnum: failed after all retries(%d)\n",
5471 		    drv_cmd->retry_count);
5472 		goto out_failed;
5473 	}
5474 	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
5475 	drv_cmd->retry_count = 0;
5476 	mpi3mr_pel_wait_post(mrioc, drv_cmd);
5477 
5478 	return;
5479 out_failed:
5480 	mrioc->pel_enabled = false;
5481 cleanup_drv_cmd:
5482 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5483 	drv_cmd->callback = NULL;
5484 	drv_cmd->retry_count = 0;
5485 }
5486 
5487 /**
5488  * mpi3mr_check_op_admin_proc -
5489  * @mrioc: Adapter instance reference
5490  *
5491  * Check if any of the operation reply queues
5492  * or the admin reply queue are currently in use.
5493  * If any queue is in use, this function waits for
5494  * a maximum of 10 seconds for them to become available.
5495  *
5496  * Return: 0 on success, non-zero on failure.
5497  */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5498 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5499 {
5500 
5501 	u16 timeout = 10 * 10;
5502 	u16 elapsed_time = 0;
5503 	bool op_admin_in_use = false;
5504 
5505 	do {
5506 		op_admin_in_use = false;
5507 
5508 		/* Check admin_reply queue first to exit early */
5509 		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5510 			op_admin_in_use = true;
5511 		else {
5512 			/* Check op_reply queues */
5513 			int i;
5514 
5515 			for (i = 0; i < mrioc->num_queues; i++) {
5516 				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5517 					op_admin_in_use = true;
5518 					break;
5519 				}
5520 			}
5521 		}
5522 
5523 		if (!op_admin_in_use)
5524 			break;
5525 
5526 		msleep(100);
5527 
5528 	} while (++elapsed_time < timeout);
5529 
5530 	if (op_admin_in_use)
5531 		return 1;
5532 
5533 	return 0;
5534 }
5535 
5536 /**
5537  * mpi3mr_soft_reset_handler - Reset the controller
5538  * @mrioc: Adapter instance reference
5539  * @reset_reason: Reset reason code
5540  * @snapdump: Flag to generate snapdump in firmware or not
5541  *
5542  * This is an handler for recovering controller by issuing soft
5543  * reset are diag fault reset.  This is a blocking function and
5544  * when one reset is executed if any other resets they will be
5545  * blocked. All BSG requests will be blocked during the reset. If
5546  * controller reset is successful then the controller will be
5547  * reinitalized, otherwise the controller will be marked as not
5548  * recoverable
5549  *
5550  * In snapdump bit is set, the controller is issued with diag
5551  * fault reset so that the firmware can create a snap dump and
5552  * post that the firmware will result in F000 fault and the
5553  * driver will issue soft reset to recover from that.
5554  *
5555  * Return: 0 on success, non-zero on failure.
5556  */
mpi3mr_soft_reset_handler(struct mpi3mr_ioc * mrioc,u16 reset_reason,u8 snapdump)5557 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
5558 	u16 reset_reason, u8 snapdump)
5559 {
5560 	int retval = 0, i;
5561 	unsigned long flags;
5562 	enum mpi3mr_iocstate ioc_state;
5563 	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
5564 	union mpi3mr_trigger_data trigger_data;
5565 
5566 	/* Block the reset handler until diag save in progress*/
5567 	dprint_reset(mrioc,
5568 	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
5569 	    mrioc->diagsave_timeout);
5570 	while (mrioc->diagsave_timeout)
5571 		ssleep(1);
5572 	/*
5573 	 * Block new resets until the currently executing one is finished and
5574 	 * return the status of the existing reset for all blocked resets
5575 	 */
5576 	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
5577 	if (!mutex_trylock(&mrioc->reset_mutex)) {
5578 		ioc_info(mrioc,
5579 		    "controller reset triggered by %s is blocked due to another reset in progress\n",
5580 		    mpi3mr_reset_rc_name(reset_reason));
5581 		do {
5582 			ssleep(1);
5583 		} while (mrioc->reset_in_progress == 1);
5584 		ioc_info(mrioc,
5585 		    "returning previous reset result(%d) for the reset triggered by %s\n",
5586 		    mrioc->prev_reset_result,
5587 		    mpi3mr_reset_rc_name(reset_reason));
5588 		return mrioc->prev_reset_result;
5589 	}
5590 	ioc_info(mrioc, "controller reset is triggered by %s\n",
5591 	    mpi3mr_reset_rc_name(reset_reason));
5592 
5593 	mrioc->device_refresh_on = 0;
5594 	scsi_block_requests(mrioc->shost);
5595 	mrioc->reset_in_progress = 1;
5596 	mrioc->stop_bsgs = 1;
5597 	mrioc->prev_reset_result = -1;
5598 	memset(&trigger_data, 0, sizeof(trigger_data));
5599 
5600 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5601 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5602 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5603 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5604 		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
5605 		dprint_reset(mrioc,
5606 		    "soft_reset_handler: releasing host diagnostic buffers\n");
5607 		mpi3mr_release_diag_bufs(mrioc, 0);
5608 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5609 			mrioc->event_masks[i] = -1;
5610 
5611 		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
5612 		mpi3mr_issue_event_notification(mrioc);
5613 	}
5614 
5615 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
5616 
5617 	mpi3mr_ioc_disable_intr(mrioc);
5618 	mrioc->io_admin_reset_sync = 1;
5619 
5620 	if (snapdump) {
5621 		retval = mpi3mr_issue_reset(mrioc,
5622 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5623 		if (!retval) {
5624 			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
5625 				      MPI3_SYSIF_FAULT_CODE_MASK);
5626 			do {
5627 				host_diagnostic =
5628 				    readl(&mrioc->sysif_regs->host_diagnostic);
5629 				if (!(host_diagnostic &
5630 				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
5631 					break;
5632 				msleep(100);
5633 			} while (--timeout);
5634 
5635 			mpi3mr_save_fault_info(mrioc);
5636 			mpi3mr_fault_uevent_emit(mrioc);
5637 			mrioc->fwfault_counter++;
5638 			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5639 			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
5640 		}
5641 	}
5642 
5643 	retval = mpi3mr_issue_reset(mrioc,
5644 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5645 	if (retval) {
5646 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
5647 		goto out;
5648 	}
5649 
5650 	retval = mpi3mr_check_op_admin_proc(mrioc);
5651 	if (retval) {
5652 		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
5653 				"thread still processing replies even after a 10 second\n"
5654 				"timeout. Marking the controller as unrecoverable!\n");
5655 
5656 		goto out;
5657 	}
5658 
5659 	if (mrioc->num_io_throttle_group !=
5660 	    mrioc->facts.max_io_throttle_group) {
5661 		ioc_err(mrioc,
5662 		    "max io throttle group doesn't match old(%d), new(%d)\n",
5663 		    mrioc->num_io_throttle_group,
5664 		    mrioc->facts.max_io_throttle_group);
5665 		retval = -EPERM;
5666 		goto out;
5667 	}
5668 
5669 	mpi3mr_flush_delayed_cmd_lists(mrioc);
5670 	mpi3mr_flush_drv_cmds(mrioc);
5671 	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
5672 	bitmap_clear(mrioc->removepend_bitmap, 0,
5673 		     mrioc->dev_handle_bitmap_bits);
5674 	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
5675 	mpi3mr_flush_host_io(mrioc);
5676 	mpi3mr_cleanup_fwevt_list(mrioc);
5677 	mpi3mr_invalidate_devhandles(mrioc);
5678 	mpi3mr_free_enclosure_list(mrioc);
5679 
5680 	if (mrioc->prepare_for_reset) {
5681 		mrioc->prepare_for_reset = 0;
5682 		mrioc->prepare_for_reset_timeout_counter = 0;
5683 	}
5684 	mpi3mr_memset_buffers(mrioc);
5685 	mpi3mr_release_diag_bufs(mrioc, 1);
5686 	mrioc->fw_release_trigger_active = false;
5687 	mrioc->trace_release_trigger_active = false;
5688 	mrioc->snapdump_trigger_active = false;
5689 	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5690 	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
5691 
5692 	dprint_reset(mrioc,
5693 	    "soft_reset_handler: reinitializing the controller\n");
5694 	retval = mpi3mr_reinit_ioc(mrioc, 0);
5695 	if (retval) {
5696 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
5697 		    mrioc->name, reset_reason);
5698 		goto out;
5699 	}
5700 	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
5701 
5702 out:
5703 	mrioc->invalid_io_comp = 0;
5704 	if (!retval) {
5705 		mrioc->diagsave_timeout = 0;
5706 		mrioc->reset_in_progress = 0;
5707 		scsi_unblock_requests(mrioc->shost);
5708 		mrioc->pel_abort_requested = 0;
5709 		if (mrioc->pel_enabled) {
5710 			mrioc->pel_cmds.retry_count = 0;
5711 			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
5712 		}
5713 
5714 		mrioc->device_refresh_on = 0;
5715 
5716 		mrioc->ts_update_counter = 0;
5717 		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
5718 		if (mrioc->watchdog_work_q)
5719 			queue_delayed_work(mrioc->watchdog_work_q,
5720 			    &mrioc->watchdog_work,
5721 			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
5722 		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
5723 		mrioc->stop_bsgs = 0;
5724 		if (mrioc->pel_enabled)
5725 			atomic64_inc(&event_counter);
5726 	} else {
5727 		dprint_reset(mrioc,
5728 			"soft_reset_handler failed, marking controller as unrecoverable\n");
5729 		ioc_state = mpi3mr_get_iocstate(mrioc);
5730 
5731 		if (ioc_state != MRIOC_STATE_FAULT)
5732 			mpi3mr_issue_reset(mrioc,
5733 				MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5734 		mrioc->device_refresh_on = 0;
5735 		mrioc->unrecoverable = 1;
5736 		mrioc->reset_in_progress = 0;
5737 		scsi_unblock_requests(mrioc->shost);
5738 		mrioc->stop_bsgs = 0;
5739 		retval = -1;
5740 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5741 	}
5742 	mrioc->prev_reset_result = retval;
5743 	mutex_unlock(&mrioc->reset_mutex);
5744 	ioc_info(mrioc, "controller reset is %s\n",
5745 	    ((retval == 0) ? "successful" : "failed"));
5746 	return retval;
5747 }
5748 
5749 /**
5750  * mpi3mr_post_cfg_req - Issue config requests and wait
5751  * @mrioc: Adapter instance reference
5752  * @cfg_req: Configuration request
5753  * @timeout: Timeout in seconds
5754  * @ioc_status: Pointer to return ioc status
5755  *
5756  * A generic function for posting MPI3 configuration request to
5757  * the firmware. This blocks for the completion of request for
5758  * timeout seconds and if the request times out this function
5759  * faults the controller with proper reason code.
5760  *
5761  * On successful completion of the request this function returns
5762  * appropriate ioc status from the firmware back to the caller.
5763  *
5764  * Return: 0 on success, non-zero on failure.
5765  */
mpi3mr_post_cfg_req(struct mpi3mr_ioc * mrioc,struct mpi3_config_request * cfg_req,int timeout,u16 * ioc_status)5766 static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
5767 	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
5768 {
5769 	int retval = 0;
5770 
5771 	mutex_lock(&mrioc->cfg_cmds.mutex);
5772 	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
5773 		retval = -1;
5774 		ioc_err(mrioc, "sending config request failed due to command in use\n");
5775 		mutex_unlock(&mrioc->cfg_cmds.mutex);
5776 		goto out;
5777 	}
5778 	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
5779 	mrioc->cfg_cmds.is_waiting = 1;
5780 	mrioc->cfg_cmds.callback = NULL;
5781 	mrioc->cfg_cmds.ioc_status = 0;
5782 	mrioc->cfg_cmds.ioc_loginfo = 0;
5783 
5784 	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
5785 	cfg_req->function = MPI3_FUNCTION_CONFIG;
5786 
5787 	init_completion(&mrioc->cfg_cmds.done);
5788 	dprint_cfg_info(mrioc, "posting config request\n");
5789 	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5790 		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
5791 		    "mpi3_cfg_req");
5792 	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
5793 	if (retval) {
5794 		ioc_err(mrioc, "posting config request failed\n");
5795 		goto out_unlock;
5796 	}
5797 	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
5798 	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
5799 		mpi3mr_check_rh_fault_ioc(mrioc,
5800 		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
5801 		ioc_err(mrioc, "config request timed out\n");
5802 		retval = -1;
5803 		goto out_unlock;
5804 	}
5805 	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5806 	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
5807 		dprint_cfg_err(mrioc,
5808 		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
5809 		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
5810 
5811 out_unlock:
5812 	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
5813 	mutex_unlock(&mrioc->cfg_cmds.mutex);
5814 
5815 out:
5816 	return retval;
5817 }
5818 
5819 /**
5820  * mpi3mr_process_cfg_req - config page request processor
5821  * @mrioc: Adapter instance reference
5822  * @cfg_req: Configuration request
5823  * @cfg_hdr: Configuration page header
5824  * @timeout: Timeout in seconds
5825  * @ioc_status: Pointer to return ioc status
5826  * @cfg_buf: Memory pointer to copy config page or header
5827  * @cfg_buf_sz: Size of the memory to get config page or header
5828  *
5829  * This is handler for config page read, write and config page
5830  * header read operations.
5831  *
5832  * This function expects the cfg_req to be populated with page
5833  * type, page number, action for the header read and with page
5834  * address for all other operations.
5835  *
5836  * The cfg_hdr can be passed as null for reading required header
5837  * details for read/write pages the cfg_hdr should point valid
5838  * configuration page header.
5839  *
5840  * This allocates dmaable memory based on the size of the config
5841  * buffer and set the SGE of the cfg_req.
5842  *
5843  * For write actions, the config page data has to be passed in
5844  * the cfg_buf and size of the data has to be mentioned in the
5845  * cfg_buf_sz.
5846  *
5847  * For read/header actions, on successful completion of the
5848  * request with successful ioc_status the data will be copied
5849  * into the cfg_buf limited to a minimum of actual page size and
5850  * cfg_buf_sz
5851  *
5852  *
5853  * Return: 0 on success, non-zero on failure.
5854  */
mpi3mr_process_cfg_req(struct mpi3mr_ioc * mrioc,struct mpi3_config_request * cfg_req,struct mpi3_config_page_header * cfg_hdr,int timeout,u16 * ioc_status,void * cfg_buf,u32 cfg_buf_sz)5855 static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
5856 	struct mpi3_config_request *cfg_req,
5857 	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
5858 	void *cfg_buf, u32 cfg_buf_sz)
5859 {
5860 	struct dma_memory_desc mem_desc;
5861 	int retval = -1;
5862 	u8 invalid_action = 0;
5863 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5864 
5865 	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
5866 
5867 	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
5868 		mem_desc.size = sizeof(struct mpi3_config_page_header);
5869 	else {
5870 		if (!cfg_hdr) {
5871 			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
5872 			    cfg_req->action, cfg_req->page_type,
5873 			    cfg_req->page_number);
5874 			goto out;
5875 		}
5876 		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
5877 		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
5878 			if (cfg_req->action
5879 			    != MPI3_CONFIG_ACTION_READ_CURRENT)
5880 				invalid_action = 1;
5881 			break;
5882 		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
5883 			if ((cfg_req->action ==
5884 			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
5885 			    (cfg_req->action ==
5886 			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
5887 				invalid_action = 1;
5888 			break;
5889 		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
5890 		default:
5891 			break;
5892 		}
5893 		if (invalid_action) {
5894 			ioc_err(mrioc,
5895 			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5896 			    cfg_req->action, cfg_req->page_type,
5897 			    cfg_req->page_number, cfg_hdr->page_attribute);
5898 			goto out;
5899 		}
5900 		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5901 		cfg_req->page_length = cfg_hdr->page_length;
5902 		cfg_req->page_version = cfg_hdr->page_version;
5903 	}
5904 
5905 	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
5906 		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);
5907 
5908 	if (!mem_desc.addr)
5909 		return retval;
5910 
5911 	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5912 	    mem_desc.dma_addr);
5913 
5914 	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5915 	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5916 		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5917 		    cfg_buf_sz));
5918 		dprint_cfg_info(mrioc, "config buffer to be written\n");
5919 		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5920 			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5921 	}
5922 
5923 	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5924 		goto out;
5925 
5926 	retval = 0;
5927 	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5928 	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5929 	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5930 		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5931 		    cfg_buf_sz));
5932 		dprint_cfg_info(mrioc, "config buffer read\n");
5933 		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5934 			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5935 	}
5936 
5937 out:
5938 	if (mem_desc.addr) {
5939 		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
5940 			mem_desc.addr, mem_desc.dma_addr);
5941 		mem_desc.addr = NULL;
5942 	}
5943 
5944 	return retval;
5945 }
5946 
5947 /**
5948  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5949  * @mrioc: Adapter instance reference
5950  * @ioc_status: Pointer to return ioc status
5951  * @dev_pg0: Pointer to return device page 0
5952  * @pg_sz: Size of the memory allocated to the page pointer
5953  * @form: The form to be used for addressing the page
5954  * @form_spec: Form specific information like device handle
5955  *
5956  * This is handler for config page read for a specific device
5957  * page0. The ioc_status has the controller returned ioc_status.
5958  * This routine doesn't check ioc_status to decide whether the
5959  * page read is success or not and it is the callers
5960  * responsibility.
5961  *
5962  * Return: 0 on success, non-zero on failure.
5963  */
mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_device_page0 * dev_pg0,u16 pg_sz,u32 form,u32 form_spec)5964 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5965 	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5966 {
5967 	struct mpi3_config_page_header cfg_hdr;
5968 	struct mpi3_config_request cfg_req;
5969 	u32 page_address;
5970 
5971 	memset(dev_pg0, 0, pg_sz);
5972 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5973 	memset(&cfg_req, 0, sizeof(cfg_req));
5974 
5975 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5976 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5977 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5978 	cfg_req.page_number = 0;
5979 	cfg_req.page_address = 0;
5980 
5981 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5982 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5983 		ioc_err(mrioc, "device page0 header read failed\n");
5984 		goto out_failed;
5985 	}
5986 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5987 		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5988 		    *ioc_status);
5989 		goto out_failed;
5990 	}
5991 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5992 	page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5993 	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5994 	cfg_req.page_address = cpu_to_le32(page_address);
5995 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5996 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5997 		ioc_err(mrioc, "device page0 read failed\n");
5998 		goto out_failed;
5999 	}
6000 	return 0;
6001 out_failed:
6002 	return -1;
6003 }
6004 
6005 
6006 /**
6007  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
6008  * @mrioc: Adapter instance reference
6009  * @ioc_status: Pointer to return ioc status
6010  * @phy_pg0: Pointer to return SAS Phy page 0
6011  * @pg_sz: Size of the memory allocated to the page pointer
6012  * @form: The form to be used for addressing the page
6013  * @form_spec: Form specific information like phy number
6014  *
6015  * This is handler for config page read for a specific SAS Phy
6016  * page0. The ioc_status has the controller returned ioc_status.
6017  * This routine doesn't check ioc_status to decide whether the
6018  * page read is success or not and it is the callers
6019  * responsibility.
6020  *
6021  * Return: 0 on success, non-zero on failure.
6022  */
mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page0 * phy_pg0,u16 pg_sz,u32 form,u32 form_spec)6023 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6024 	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
6025 	u32 form_spec)
6026 {
6027 	struct mpi3_config_page_header cfg_hdr;
6028 	struct mpi3_config_request cfg_req;
6029 	u32 page_address;
6030 
6031 	memset(phy_pg0, 0, pg_sz);
6032 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6033 	memset(&cfg_req, 0, sizeof(cfg_req));
6034 
6035 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6036 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6037 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
6038 	cfg_req.page_number = 0;
6039 	cfg_req.page_address = 0;
6040 
6041 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6042 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6043 		ioc_err(mrioc, "sas phy page0 header read failed\n");
6044 		goto out_failed;
6045 	}
6046 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6047 		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
6048 		    *ioc_status);
6049 		goto out_failed;
6050 	}
6051 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6052 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
6053 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
6054 	cfg_req.page_address = cpu_to_le32(page_address);
6055 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6056 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
6057 		ioc_err(mrioc, "sas phy page0 read failed\n");
6058 		goto out_failed;
6059 	}
6060 	return 0;
6061 out_failed:
6062 	return -1;
6063 }
6064 
6065 /**
6066  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
6067  * @mrioc: Adapter instance reference
6068  * @ioc_status: Pointer to return ioc status
6069  * @phy_pg1: Pointer to return SAS Phy page 1
6070  * @pg_sz: Size of the memory allocated to the page pointer
6071  * @form: The form to be used for addressing the page
6072  * @form_spec: Form specific information like phy number
6073  *
6074  * This is handler for config page read for a specific SAS Phy
6075  * page1. The ioc_status has the controller returned ioc_status.
6076  * This routine doesn't check ioc_status to decide whether the
6077  * page read is success or not and it is the callers
6078  * responsibility.
6079  *
6080  * Return: 0 on success, non-zero on failure.
6081  */
mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page1 * phy_pg1,u16 pg_sz,u32 form,u32 form_spec)6082 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6083 	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
6084 	u32 form_spec)
6085 {
6086 	struct mpi3_config_page_header cfg_hdr;
6087 	struct mpi3_config_request cfg_req;
6088 	u32 page_address;
6089 
6090 	memset(phy_pg1, 0, pg_sz);
6091 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6092 	memset(&cfg_req, 0, sizeof(cfg_req));
6093 
6094 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6095 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6096 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
6097 	cfg_req.page_number = 1;
6098 	cfg_req.page_address = 0;
6099 
6100 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6101 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6102 		ioc_err(mrioc, "sas phy page1 header read failed\n");
6103 		goto out_failed;
6104 	}
6105 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6106 		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
6107 		    *ioc_status);
6108 		goto out_failed;
6109 	}
6110 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6111 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
6112 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
6113 	cfg_req.page_address = cpu_to_le32(page_address);
6114 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6115 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
6116 		ioc_err(mrioc, "sas phy page1 read failed\n");
6117 		goto out_failed;
6118 	}
6119 	return 0;
6120 out_failed:
6121 	return -1;
6122 }
6123 
6124 
6125 /**
6126  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
6127  * @mrioc: Adapter instance reference
6128  * @ioc_status: Pointer to return ioc status
6129  * @exp_pg0: Pointer to return SAS Expander page 0
6130  * @pg_sz: Size of the memory allocated to the page pointer
6131  * @form: The form to be used for addressing the page
6132  * @form_spec: Form specific information like device handle
6133  *
6134  * This is handler for config page read for a specific SAS
6135  * Expander page0. The ioc_status has the controller returned
6136  * ioc_status. This routine doesn't check ioc_status to decide
6137  * whether the page read is success or not and it is the callers
6138  * responsibility.
6139  *
6140  * Return: 0 on success, non-zero on failure.
6141  */
mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_expander_page0 * exp_pg0,u16 pg_sz,u32 form,u32 form_spec)6142 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6143 	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
6144 	u32 form_spec)
6145 {
6146 	struct mpi3_config_page_header cfg_hdr;
6147 	struct mpi3_config_request cfg_req;
6148 	u32 page_address;
6149 
6150 	memset(exp_pg0, 0, pg_sz);
6151 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6152 	memset(&cfg_req, 0, sizeof(cfg_req));
6153 
6154 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6155 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6156 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6157 	cfg_req.page_number = 0;
6158 	cfg_req.page_address = 0;
6159 
6160 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6161 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6162 		ioc_err(mrioc, "expander page0 header read failed\n");
6163 		goto out_failed;
6164 	}
6165 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6166 		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
6167 		    *ioc_status);
6168 		goto out_failed;
6169 	}
6170 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6171 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6172 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6173 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6174 	cfg_req.page_address = cpu_to_le32(page_address);
6175 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6176 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
6177 		ioc_err(mrioc, "expander page0 read failed\n");
6178 		goto out_failed;
6179 	}
6180 	return 0;
6181 out_failed:
6182 	return -1;
6183 }
6184 
6185 /**
6186  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6187  * @mrioc: Adapter instance reference
6188  * @ioc_status: Pointer to return ioc status
6189  * @exp_pg1: Pointer to return SAS Expander page 1
6190  * @pg_sz: Size of the memory allocated to the page pointer
6191  * @form: The form to be used for addressing the page
6192  * @form_spec: Form specific information like phy number
6193  *
6194  * This is handler for config page read for a specific SAS
6195  * Expander page1. The ioc_status has the controller returned
6196  * ioc_status. This routine doesn't check ioc_status to decide
6197  * whether the page read is success or not and it is the callers
6198  * responsibility.
6199  *
6200  * Return: 0 on success, non-zero on failure.
6201  */
int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(exp_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header so the read below can be validated
	 * against the controller reported page version/size.
	 */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page using the caller supplied form and
	 * form specific (phy number / device handle) addressing. Note the
	 * ioc_status of this read is intentionally left for the caller to
	 * interpret.
	 */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	pgad = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
			  MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
		ioc_err(mrioc, "expander page1 read failed\n");
		return -1;
	}
	return 0;
}
6244 
6245 /**
6246  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6247  * @mrioc: Adapter instance reference
6248  * @ioc_status: Pointer to return ioc status
6249  * @encl_pg0: Pointer to return Enclosure page 0
6250  * @pg_sz: Size of the memory allocated to the page pointer
6251  * @form: The form to be used for addressing the page
6252  * @form_spec: Form specific information like device handle
6253  *
6254  * This is handler for config page read for a specific Enclosure
6255  * page0. The ioc_status has the controller returned ioc_status.
6256  * This routine doesn't check ioc_status to decide whether the
6257  * page read is success or not and it is the callers
6258  * responsibility.
6259  *
6260  * Return: 0 on success, non-zero on failure.
6261  */
mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_enclosure_page0 * encl_pg0,u16 pg_sz,u32 form,u32 form_spec)6262 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6263 	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
6264 	u32 form_spec)
6265 {
6266 	struct mpi3_config_page_header cfg_hdr;
6267 	struct mpi3_config_request cfg_req;
6268 	u32 page_address;
6269 
6270 	memset(encl_pg0, 0, pg_sz);
6271 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6272 	memset(&cfg_req, 0, sizeof(cfg_req));
6273 
6274 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6275 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6276 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
6277 	cfg_req.page_number = 0;
6278 	cfg_req.page_address = 0;
6279 
6280 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6281 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6282 		ioc_err(mrioc, "enclosure page0 header read failed\n");
6283 		goto out_failed;
6284 	}
6285 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6286 		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
6287 		    *ioc_status);
6288 		goto out_failed;
6289 	}
6290 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6291 	page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
6292 	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
6293 	cfg_req.page_address = cpu_to_le32(page_address);
6294 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6295 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
6296 		ioc_err(mrioc, "enclosure page0 read failed\n");
6297 		goto out_failed;
6298 	}
6299 	return 0;
6300 out_failed:
6301 	return -1;
6302 }
6303 
6304 
6305 /**
6306  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6307  * @mrioc: Adapter instance reference
6308  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6309  * @pg_sz: Size of the memory allocated to the page pointer
6310  *
6311  * This is handler for config page read for the SAS IO Unit
6312  * page0. This routine checks ioc_status to decide whether the
6313  * page read is success or not.
6314  *
6315  * Return: 0 on success, non-zero on failure.
6316  */
mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page0 * sas_io_unit_pg0,u16 pg_sz)6317 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
6318 	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
6319 {
6320 	struct mpi3_config_page_header cfg_hdr;
6321 	struct mpi3_config_request cfg_req;
6322 	u16 ioc_status = 0;
6323 
6324 	memset(sas_io_unit_pg0, 0, pg_sz);
6325 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6326 	memset(&cfg_req, 0, sizeof(cfg_req));
6327 
6328 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6329 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6330 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6331 	cfg_req.page_number = 0;
6332 	cfg_req.page_address = 0;
6333 
6334 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6335 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6336 		ioc_err(mrioc, "sas io unit page0 header read failed\n");
6337 		goto out_failed;
6338 	}
6339 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6340 		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
6341 		    ioc_status);
6342 		goto out_failed;
6343 	}
6344 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6345 
6346 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6347 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
6348 		ioc_err(mrioc, "sas io unit page0 read failed\n");
6349 		goto out_failed;
6350 	}
6351 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6352 		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
6353 		    ioc_status);
6354 		goto out_failed;
6355 	}
6356 	return 0;
6357 out_failed:
6358 	return -1;
6359 }
6360 
6361 /**
6362  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6363  * @mrioc: Adapter instance reference
6364  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6365  * @pg_sz: Size of the memory allocated to the page pointer
6366  *
6367  * This is handler for config page read for the SAS IO Unit
6368  * page1. This routine checks ioc_status to decide whether the
6369  * page read is success or not.
6370  *
6371  * Return: 0 on success, non-zero on failure.
6372  */
mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6373 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6374 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6375 {
6376 	struct mpi3_config_page_header cfg_hdr;
6377 	struct mpi3_config_request cfg_req;
6378 	u16 ioc_status = 0;
6379 
6380 	memset(sas_io_unit_pg1, 0, pg_sz);
6381 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6382 	memset(&cfg_req, 0, sizeof(cfg_req));
6383 
6384 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6385 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6386 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6387 	cfg_req.page_number = 1;
6388 	cfg_req.page_address = 0;
6389 
6390 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6391 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6392 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6393 		goto out_failed;
6394 	}
6395 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6396 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6397 		    ioc_status);
6398 		goto out_failed;
6399 	}
6400 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6401 
6402 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6403 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6404 		ioc_err(mrioc, "sas io unit page1 read failed\n");
6405 		goto out_failed;
6406 	}
6407 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6408 		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6409 		    ioc_status);
6410 		goto out_failed;
6411 	}
6412 	return 0;
6413 out_failed:
6414 	return -1;
6415 }
6416 
/**
 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
 * @mrioc: Adapter instance reference
 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
 * @pg_sz: Size of the memory allocated to the page pointer
 *
 * This is handler for config page write for the SAS IO Unit
 * page1. This routine checks ioc_status to decide whether the
 * page write is success or not. This will modify both current
 * and persistent page.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_page_header cfg_hdr;
	struct mpi3_config_request cfg_req;
	u16 ioc_status = 0;

	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(&cfg_req, 0, sizeof(cfg_req));

	/* Read the page header first; the returned header is handed back
	 * to the controller with the subsequent write actions.
	 */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	/* Update the current (volatile) copy of the page first. */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write current failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}

	/* Then commit the same data to the persistent copy so the
	 * settings survive a controller reset.
	 */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	return 0;
out_failed:
	return -1;
}
6485 
6486 /**
6487  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6488  * @mrioc: Adapter instance reference
6489  * @driver_pg1: Pointer to return Driver page 1
6490  * @pg_sz: Size of the memory allocated to the page pointer
6491  *
6492  * This is handler for config page read for the Driver page1.
6493  * This routine checks ioc_status to decide whether the page
6494  * read is success or not.
6495  *
6496  * Return: 0 on success, non-zero on failure.
6497  */
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page1 * driver_pg1,u16 pg_sz)6498 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6499 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6500 {
6501 	struct mpi3_config_page_header cfg_hdr;
6502 	struct mpi3_config_request cfg_req;
6503 	u16 ioc_status = 0;
6504 
6505 	memset(driver_pg1, 0, pg_sz);
6506 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6507 	memset(&cfg_req, 0, sizeof(cfg_req));
6508 
6509 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6510 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6511 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6512 	cfg_req.page_number = 1;
6513 	cfg_req.page_address = 0;
6514 
6515 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6516 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6517 		ioc_err(mrioc, "driver page1 header read failed\n");
6518 		goto out_failed;
6519 	}
6520 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6521 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6522 		    ioc_status);
6523 		goto out_failed;
6524 	}
6525 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6526 
6527 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6528 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6529 		ioc_err(mrioc, "driver page1 read failed\n");
6530 		goto out_failed;
6531 	}
6532 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6533 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6534 		    ioc_status);
6535 		goto out_failed;
6536 	}
6537 	return 0;
6538 out_failed:
6539 	return -1;
6540 }
6541 
6542 /**
6543  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6544  * @mrioc: Adapter instance reference
6545  * @driver_pg2: Pointer to return driver page 2
6546  * @pg_sz: Size of the memory allocated to the page pointer
6547  * @page_action: Page action
6548  *
6549  * This is handler for config page read for the driver page2.
6550  * This routine checks ioc_status to decide whether the page
6551  * read is success or not.
6552  *
6553  * Return: 0 on success, non-zero on failure.
6554  */
mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page2 * driver_pg2,u16 pg_sz,u8 page_action)6555 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6556 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6557 {
6558 	struct mpi3_config_page_header cfg_hdr;
6559 	struct mpi3_config_request cfg_req;
6560 	u16 ioc_status = 0;
6561 
6562 	memset(driver_pg2, 0, pg_sz);
6563 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6564 	memset(&cfg_req, 0, sizeof(cfg_req));
6565 
6566 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6567 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6568 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6569 	cfg_req.page_number = 2;
6570 	cfg_req.page_address = 0;
6571 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6572 
6573 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6574 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6575 		ioc_err(mrioc, "driver page2 header read failed\n");
6576 		goto out_failed;
6577 	}
6578 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6579 		ioc_err(mrioc, "driver page2 header read failed with\n"
6580 			       "ioc_status(0x%04x)\n",
6581 		    ioc_status);
6582 		goto out_failed;
6583 	}
6584 	cfg_req.action = page_action;
6585 
6586 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6587 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6588 		ioc_err(mrioc, "driver page2 read failed\n");
6589 		goto out_failed;
6590 	}
6591 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6592 		ioc_err(mrioc, "driver page2 read failed with\n"
6593 			       "ioc_status(0x%04x)\n",
6594 		    ioc_status);
6595 		goto out_failed;
6596 	}
6597 	return 0;
6598 out_failed:
6599 	return -1;
6600 }
6601 
6602