1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for Broadcom MPI3 Storage Controllers
4 *
5 * Copyright (C) 2017-2023 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7 *
8 */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
/* Forward declarations for routines defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);

/* Number of reply queues reserved for io_uring poll mode (0 = disabled). */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24
#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write; single atomic store where the arch provides writeq(). */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without a native 64-bit MMIO write: emit the value
 * as two 32-bit writes, low dword first.
 * NOTE(review): this pair is not atomic — presumably the controller latches
 * the register on the high-dword write; confirm against the hardware spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif
39
40 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)41 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42 {
43 u16 pi, ci, max_entries;
44 bool is_qfull = false;
45
46 pi = op_req_q->pi;
47 ci = READ_ONCE(op_req_q->ci);
48 max_entries = op_req_q->num_requests;
49
50 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51 is_qfull = true;
52
53 return is_qfull;
54 }
55
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)56 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57 {
58 u16 i, max_vectors;
59
60 max_vectors = mrioc->intr_info_count;
61
62 for (i = 0; i < max_vectors; i++)
63 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64 }
65
/*
 * mpi3mr_ioc_disable_intr - Disable ISR processing and quiesce handlers
 * @mrioc: Adapter instance reference
 *
 * Clears the software interrupt-enabled flag (the ISRs bail out early when
 * it is zero) and then waits for handlers already running to complete.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
71
/*
 * mpi3mr_ioc_enable_intr - Allow ISR processing
 * @mrioc: Adapter instance reference
 *
 * Sets the software flag checked by the ISRs; the IRQ lines themselves
 * remain requested throughout, this only gates handler processing.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
76
/*
 * mpi3mr_cleanup_isr - Free IRQs and per-vector bookkeeping
 * @mrioc: Adapter instance reference
 *
 * Disables interrupt handling, releases every requested IRQ line, frees
 * the intr_info array and finally returns the MSI-X vectors to the PCI
 * core.  Safe to call when no ISR was set up (intr_info == NULL).
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	/* intr_info_count reflects how many vectors were actually requested. */
	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}
96
/*
 * mpi3mr_add_sg_single - Populate one simple SGE in host memory
 * @paddr: virtual address at which to build the SGE
 * @flags: SGE flags byte
 * @length: data length, converted to little-endian
 * @dma_addr: DMA address of the data, converted to little-endian
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = paddr;

	/* Independent field stores; ordering is immaterial here. */
	sge->address = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(length);
	sge->flags = flags;
}
106
/*
 * mpi3mr_build_zero_len_sge - Build a zero-length end-of-list SGE
 * @paddr: virtual address at which to build the SGE
 *
 * Used for requests carrying no data: one simple SGE with zero length,
 * an all-ones (-1) address and end-of-list flags.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}
113
/*
 * mpi3mr_get_reply_virt_addr - Translate a reply frame DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the firmware
 *
 * Return: virtual address of the reply frame inside the driver's reply
 * buffer pool, or NULL when @phys_addr is zero or out of range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	dma_addr_t base = mrioc->reply_buf_dma;

	/* Reject a NULL address or one outside the reply buffer pool. */
	if (!phys_addr || phys_addr < base ||
	    phys_addr > mrioc->reply_buf_dma_max_address)
		return NULL;

	return mrioc->reply_buf + (phys_addr - base);
}
126
/*
 * mpi3mr_get_sensebuf_virt_addr - Translate a sense buffer DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the firmware
 *
 * Return: virtual address of the sense buffer, or NULL when @phys_addr
 * is zero (no sense data was produced).
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	/* Offset from the pool base gives the host-side buffer. */
	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}
135
/*
 * mpi3mr_repost_reply_buf - Return a reply frame to the reply free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the reply frame being recycled
 *
 * Stores the frame's DMA address at the current host index of the reply
 * free queue, advances the index with wrap-around and publishes the new
 * index to the controller, all under the reply-free-queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	/* Advance host index, wrapping at queue size. */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	/* Tell the controller about the freed slot. */
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
153
/*
 * mpi3mr_repost_sense_buf - Return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the sense buffer being recycled
 *
 * Places the buffer's DMA address at the current host index of the sense
 * buffer free queue, advances the index (with wrap-around) and publishes
 * it to the controller, all under the sense-buffer-queue lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	slot = mrioc->sbq_host_index;
	/* Advance host index, wrapping at queue size. */
	if (slot == (mrioc->sense_buf_q_sz - 1))
		mrioc->sbq_host_index = 0;
	else
		mrioc->sbq_host_index = slot + 1;
	mrioc->sense_buf_q[slot] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
170
/*
 * mpi3mr_print_event_data - Log a one-line description of a firmware event
 * @mrioc: Adapter instance reference
 * @event_reply: Event notification reply frame
 *
 * Events with no interesting payload map to a static description; events
 * carrying useful data (device add/info/status change, SAS discovery,
 * PCIe enumeration) log selected fields from their event data and return
 * early.  Unrecognized events are silently ignored.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		/* Status is only logged when non-zero. */
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* Unknown event: nothing to log. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
287
/*
 * mpi3mr_handle_events - Top-level firmware event processing entry
 * @mrioc: Adapter instance reference
 * @def_reply: Default MPI reply frame carrying an event notification
 *
 * Caches the IOC change count from the event, logs a description of the
 * event and hands it to the OS-specific event handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}
298
/*
 * mpi3mr_get_drv_cmd - Map a host tag to its internal driver command
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag extracted from the reply descriptor
 * @def_reply: Reply frame for address replies (may be NULL)
 *
 * Returns the driver-internal command tracker associated with a reserved
 * host tag, or NULL when the tag does not belong to a driver command.
 * As a special case, MPI3MR_HOSTTAG_INVALID replies carrying event
 * notifications are dispatched to the event handler from here.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited reply: only event notifications are handled. */
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	/* Device removal handshake commands occupy a dedicated tag range. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	/* Event acknowledgment commands occupy a dedicated tag range. */
	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}
342
/*
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor to decode
 * @reply_dma: Output; DMA address of the reply frame when the descriptor
 *             is an address reply (0 otherwise), so the caller can repost it
 *
 * Decodes the descriptor type to recover host tag, IOC status, log info
 * and (for SCSI IO address replies) sense data, then completes the
 * matching driver-internal command: copies reply/sense data, wakes a
 * waiter or invokes the completion callback.  Any consumed sense buffer
 * is returned to the firmware's free pool before returning.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status: everything needed is in the descriptor. */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives in host memory at *reply_dma. */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			/* Pick up sense data for SCSI IO replies, if any. */
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: SUCCESS status defaults apply. */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG consumers want the unmasked IOC status. */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSE_BUF_SZ);
			}
			/* Wake a synchronous waiter or run the async callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* Recycle the sense buffer back to the firmware's free pool. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
439
/*
 * mpi3mr_process_admin_reply_q - Drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Single-consumer drain of the admin reply queue: a descriptor is valid
 * while its phase bit matches the expected phase.  Ownership of the queue
 * is serialized with admin_reply_q_in_use; a contending caller records a
 * pending ISR and leaves.  The consumer index is published to the
 * controller every MPI3MR_THRESHOLD_REPLY_COUNT completions and once at
 * the end.
 *
 * Return: number of reply descriptors processed (0 if empty or busy).
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* Only one context may drain the queue at a time. */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
		atomic_inc(&mrioc->admin_pend_isr);
		return 0;
	}

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch: the next descriptor is not valid yet. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* Wrap-around flips the expected phase bit. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* Periodically publish progress so firmware can reuse slots. */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
497
498 /**
499 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
500 * queue's consumer index from operational reply descriptor queue.
501 * @op_reply_q: op_reply_qinfo object
502 * @reply_ci: operational reply descriptor's queue consumer index
503 *
504 * Returns: reply descriptor frame address
505 */
506 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)507 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
508 {
509 void *segment_base_addr;
510 struct segments *segments = op_reply_q->q_segments;
511 struct mpi3_default_reply_descriptor *reply_desc = NULL;
512
513 segment_base_addr =
514 segments[reply_ci / op_reply_q->segment_qd].segment;
515 reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
516 (reply_ci % op_reply_q->segment_qd);
517 return reply_desc;
518 }
519
/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may drain a given queue at a time. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	/* Phase mismatch: the next descriptor is not valid yet. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request queue CI reported by the firmware. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;
		threshold_comps++;

		/* Wrap-around flips the expected phase bit. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
		/* Periodically publish progress so firmware can reuse slots. */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(reply_ci,
			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
			threshold_comps = 0;
		}
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
610
611 /**
612 * mpi3mr_blk_mq_poll - Operational reply queue handler
613 * @shost: SCSI Host reference
614 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
615 *
616 * Checks the specific operational reply queue and drains the
617 * reply queue entries until the queue is empty and process the
618 * individual reply descriptors.
619 *
620 * Return: 0 if queue is already processed,or number of reply
621 * descriptors processed.
622 */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)623 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
624 {
625 int num_entries = 0;
626 struct mpi3mr_ioc *mrioc;
627
628 mrioc = (struct mpi3mr_ioc *)shost->hostdata;
629
630 if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
631 mrioc->unrecoverable || mrioc->pci_err_recovery))
632 return 0;
633
634 num_entries = mpi3mr_process_op_reply_q(mrioc,
635 &mrioc->op_reply_qinfo[queue_num]);
636
637 return num_entries;
638 }
639
mpi3mr_isr_primary(int irq,void * privdata)640 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
641 {
642 struct mpi3mr_intr_info *intr_info = privdata;
643 struct mpi3mr_ioc *mrioc;
644 u16 midx;
645 u32 num_admin_replies = 0, num_op_reply = 0;
646
647 if (!intr_info)
648 return IRQ_NONE;
649
650 mrioc = intr_info->mrioc;
651
652 if (!mrioc->intr_enabled)
653 return IRQ_NONE;
654
655 midx = intr_info->msix_index;
656
657 if (!midx)
658 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
659 if (intr_info->op_reply_q)
660 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
661 intr_info->op_reply_q);
662
663 if (num_admin_replies || num_op_reply)
664 return IRQ_HANDLED;
665 else
666 return IRQ_NONE;
667 }
668
#ifndef CONFIG_PREEMPT_RT

/*
 * mpi3mr_isr - Hard IRQ handler (non-PREEMPT_RT builds)
 * @irq: IRQ number
 * @privdata: Per-vector interrupt info
 *
 * Runs the primary handler; when the operational queue has requested
 * polling (enable_irq_poll) and I/Os are still pending, disables the
 * line and wakes the threaded handler (mpi3mr_isr_poll) to continue.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Line is re-enabled by mpi3mr_isr_poll() when draining is done. */
	disable_irq_nosync(intr_info->os_irq);

	return IRQ_WAKE_THREAD;
}
697
/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* Vector 0 additionally services the admin reply queue. */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* Brief sleep between polls to avoid monopolizing the CPU. */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	intr_info->op_reply_q->enable_irq_poll = false;
	/* Re-arm the line that mpi3mr_isr() disabled before waking us. */
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}

#endif
745
/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 *
 * Return: 0 on success and non zero on failures.
 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	/* The operational reply queue is bound to the vector later. */
	intr_info->op_reply_q = NULL;

	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	/* Hard IRQ handler plus threaded poller for completion storms. */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	/* On PREEMPT_RT everything runs from the forced-threaded primary. */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}
784
/*
 * mpi3mr_calc_poll_queues - Validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: Number of MSI-X vectors available
 *
 * Keeps the requested io_uring poll queue count only if enough vectors
 * remain for the admin and default queues; otherwise zeroes it and logs
 * why polling was disabled.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	/* Admin and default queues each need a dedicated vector. */
	if (max_vectors > 2 &&
	    (mrioc->requested_poll_qcount < max_vectors - 2)) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
		return;
	}

	ioc_info(mrioc,
	    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
	    mrioc->requested_poll_qcount, max_vectors);
	mrioc->requested_poll_qcount = 0;
}
803
804 /**
805 * mpi3mr_setup_isr - Setup ISR for the controller
806 * @mrioc: Adapter instance reference
807 * @setup_one: Request one IRQ or more
808 *
809 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
810 *
811 * Return: 0 on success and non zero on failures.
812 */
mpi3mr_setup_isr(struct mpi3mr_ioc * mrioc,u8 setup_one)813 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
814 {
815 unsigned int irq_flags = PCI_IRQ_MSIX;
816 int max_vectors, min_vec;
817 int retval;
818 int i;
819 struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
820
821 if (mrioc->is_intr_info_set)
822 return 0;
823
824 mpi3mr_cleanup_isr(mrioc);
825
826 if (setup_one || reset_devices) {
827 max_vectors = 1;
828 retval = pci_alloc_irq_vectors(mrioc->pdev,
829 1, max_vectors, irq_flags);
830 if (retval < 0) {
831 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
832 retval);
833 goto out_failed;
834 }
835 } else {
836 max_vectors =
837 min_t(int, mrioc->cpu_count + 1 +
838 mrioc->requested_poll_qcount, mrioc->msix_count);
839
840 mpi3mr_calc_poll_queues(mrioc, max_vectors);
841
842 ioc_info(mrioc,
843 "MSI-X vectors supported: %d, no of cores: %d,",
844 mrioc->msix_count, mrioc->cpu_count);
845 ioc_info(mrioc,
846 "MSI-x vectors requested: %d poll_queues %d\n",
847 max_vectors, mrioc->requested_poll_qcount);
848
849 desc.post_vectors = mrioc->requested_poll_qcount;
850 min_vec = desc.pre_vectors + desc.post_vectors;
851 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
852
853 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
854 min_vec, max_vectors, irq_flags, &desc);
855
856 if (retval < 0) {
857 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
858 retval);
859 goto out_failed;
860 }
861
862
863 /*
864 * If only one MSI-x is allocated, then MSI-x 0 will be shared
865 * between Admin queue and operational queue
866 */
867 if (retval == min_vec)
868 mrioc->op_reply_q_offset = 0;
869 else if (retval != (max_vectors)) {
870 ioc_info(mrioc,
871 "allocated vectors (%d) are less than configured (%d)\n",
872 retval, max_vectors);
873 }
874
875 max_vectors = retval;
876 mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
877
878 mpi3mr_calc_poll_queues(mrioc, max_vectors);
879
880 }
881
882 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
883 GFP_KERNEL);
884 if (!mrioc->intr_info) {
885 retval = -ENOMEM;
886 pci_free_irq_vectors(mrioc->pdev);
887 goto out_failed;
888 }
889 for (i = 0; i < max_vectors; i++) {
890 retval = mpi3mr_request_irq(mrioc, i);
891 if (retval) {
892 mrioc->intr_info_count = i;
893 goto out_failed;
894 }
895 }
896 if (reset_devices || !setup_one)
897 mrioc->is_intr_info_set = true;
898 mrioc->intr_info_count = max_vectors;
899 mpi3mr_ioc_enable_intr(mrioc);
900 return 0;
901
902 out_failed:
903 mpi3mr_cleanup_isr(mrioc);
904
905 return retval;
906 }
907
/* IOC state to human-readable name mapper, used by mpi3mr_iocstate_name(). */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
919
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)920 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
921 {
922 int i;
923 char *name = NULL;
924
925 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
926 if (mrioc_states[i].value == mrioc_state) {
927 name = mrioc_states[i].name;
928 break;
929 }
930 }
931 return name;
932 }
933
934 /* Reset reason to name mapper structure*/
935 static const struct {
936 enum mpi3mr_reset_reason value;
937 char *name;
938 } mpi3mr_reset_reason_codes[] = {
939 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
940 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
941 { MPI3MR_RESET_FROM_APP, "application invocation" },
942 { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
943 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
944 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
945 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
946 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
947 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
948 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
949 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
950 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
951 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
952 {
953 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
954 "create request queue timeout"
955 },
956 {
957 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
958 "create reply queue timeout"
959 },
960 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
961 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
962 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
963 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
964 {
965 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
966 "component image activation timeout"
967 },
968 {
969 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
970 "get package version timeout"
971 },
972 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
973 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
974 {
975 MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
976 "diagnostic buffer post timeout"
977 },
978 {
979 MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
980 "diagnostic buffer release timeout"
981 },
982 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
983 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
984 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
985 };
986
987 /**
988 * mpi3mr_reset_rc_name - get reset reason code name
989 * @reason_code: reset reason code value
990 *
 * Map reset reason to a NULL terminated ASCII string
992 *
993 * Return: name corresponding to reset reason value or NULL.
994 */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)995 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
996 {
997 int i;
998 char *name = NULL;
999
1000 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1001 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1002 name = mpi3mr_reset_reason_codes[i].name;
1003 break;
1004 }
1005 }
1006 return name;
1007 }
1008
1009 /* Reset type to name mapper structure*/
static const struct {
	u16 reset_type;	/* MPI3_SYSIF_HOST_DIAG_RESET_ACTION_* value */
	char *name;	/* human-readable name used in log messages */
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1017
1018 /**
1019 * mpi3mr_reset_type_name - get reset type name
1020 * @reset_type: reset type value
1021 *
 * Map reset type to a NULL terminated ASCII string
1023 *
1024 * Return: name corresponding to reset type value or NULL.
1025 */
static const char *mpi3mr_reset_type_name(u16 reset_type)
{
	unsigned int i;

	/* Look the reset type up in the type-to-name table */
	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
		if (mpi3mr_reset_types[i].reset_type == reset_type)
			return mpi3mr_reset_types[i].name;
	}
	return NULL;
}
1039
1040 /**
1041 * mpi3mr_is_fault_recoverable - Read fault code and decide
1042 * whether the controller can be recoverable
1043 * @mrioc: Adapter instance reference
1044 * Return: true if fault is recoverable, false otherwise.
1045 */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1046 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1047 {
1048 u32 fault;
1049
1050 fault = (readl(&mrioc->sysif_regs->fault) &
1051 MPI3_SYSIF_FAULT_CODE_MASK);
1052
1053 switch (fault) {
1054 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1055 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1056 ioc_warn(mrioc,
1057 "controller requires system power cycle, marking controller as unrecoverable\n");
1058 return false;
1059 case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1060 ioc_warn(mrioc,
1061 "controller faulted due to insufficient power,\n"
1062 " try by connecting it to a different slot\n");
1063 return false;
1064 default:
1065 break;
1066 }
1067 return true;
1068 }
1069
1070 /**
1071 * mpi3mr_print_fault_info - Display fault information
1072 * @mrioc: Adapter instance reference
1073 *
1074 * Display the controller fault information if there is a
1075 * controller fault.
1076 *
1077 * Return: Nothing.
1078 */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1079 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1080 {
1081 u32 ioc_status, code, code1, code2, code3;
1082
1083 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1084
1085 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1086 code = readl(&mrioc->sysif_regs->fault);
1087 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1088 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1089 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1090
1091 ioc_info(mrioc,
1092 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1093 code, code1, code2, code3);
1094 }
1095 }
1096
1097 /**
1098 * mpi3mr_get_iocstate - Get IOC State
1099 * @mrioc: Adapter instance reference
1100 *
1101 * Return a proper IOC state enum based on the IOC status and
 * IOC configuration and unrecoverable state of the controller.
1103 *
1104 * Return: Current IOC state.
1105 */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1106 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1107 {
1108 u32 ioc_status, ioc_config;
1109 u8 ready, enabled;
1110
1111 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1112 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1113
1114 if (mrioc->unrecoverable)
1115 return MRIOC_STATE_UNRECOVERABLE;
1116 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1117 return MRIOC_STATE_FAULT;
1118
1119 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1120 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1121
1122 if (ready && enabled)
1123 return MRIOC_STATE_READY;
1124 if ((!ready) && (!enabled))
1125 return MRIOC_STATE_RESET;
1126 if ((!ready) && (enabled))
1127 return MRIOC_STATE_BECOMING_READY;
1128
1129 return MRIOC_STATE_RESET_REQUESTED;
1130 }
1131
1132 /**
1133 * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1134 * @mrioc: Adapter instance reference
1135 *
1136 * Free the DMA memory allocated for IOCTL handling purpose.
1137 *
1138 * Return: None
1139 */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1140 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1141 {
1142 struct dma_memory_desc *mem_desc;
1143 u16 i;
1144
1145 if (!mrioc->ioctl_dma_pool)
1146 return;
1147
1148 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1149 mem_desc = &mrioc->ioctl_sge[i];
1150 if (mem_desc->addr) {
1151 dma_pool_free(mrioc->ioctl_dma_pool,
1152 mem_desc->addr,
1153 mem_desc->dma_addr);
1154 mem_desc->addr = NULL;
1155 }
1156 }
1157 dma_pool_destroy(mrioc->ioctl_dma_pool);
1158 mrioc->ioctl_dma_pool = NULL;
1159 mem_desc = &mrioc->ioctl_chain_sge;
1160
1161 if (mem_desc->addr) {
1162 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1163 mem_desc->addr, mem_desc->dma_addr);
1164 mem_desc->addr = NULL;
1165 }
1166 mem_desc = &mrioc->ioctl_resp_sge;
1167 if (mem_desc->addr) {
1168 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1169 mem_desc->addr, mem_desc->dma_addr);
1170 mem_desc->addr = NULL;
1171 }
1172
1173 mrioc->ioctl_sges_allocated = false;
1174 }
1175
1176 /**
1177 * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1178 * @mrioc: Adapter instance reference
1179 *
1180 * This function allocates dmaable memory required to handle the
1181 * application issued MPI3 IOCTL requests.
1182 *
1183 * Return: None
1184 */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1185 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1186
1187 {
1188 struct dma_memory_desc *mem_desc;
1189 u16 i;
1190
1191 mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1192 &mrioc->pdev->dev,
1193 MPI3MR_IOCTL_SGE_SIZE,
1194 MPI3MR_PAGE_SIZE_4K, 0);
1195
1196 if (!mrioc->ioctl_dma_pool) {
1197 ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1198 goto out_failed;
1199 }
1200
1201 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1202 mem_desc = &mrioc->ioctl_sge[i];
1203 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1204 mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1205 GFP_KERNEL,
1206 &mem_desc->dma_addr);
1207 if (!mem_desc->addr)
1208 goto out_failed;
1209 }
1210
1211 mem_desc = &mrioc->ioctl_chain_sge;
1212 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1213 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1214 mem_desc->size,
1215 &mem_desc->dma_addr,
1216 GFP_KERNEL);
1217 if (!mem_desc->addr)
1218 goto out_failed;
1219
1220 mem_desc = &mrioc->ioctl_resp_sge;
1221 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1222 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1223 mem_desc->size,
1224 &mem_desc->dma_addr,
1225 GFP_KERNEL);
1226 if (!mem_desc->addr)
1227 goto out_failed;
1228
1229 mrioc->ioctl_sges_allocated = true;
1230
1231 return;
1232 out_failed:
1233 ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1234 "from the applications, application interface for MPT command is disabled\n");
1235 mpi3mr_free_ioctl_dma_memory(mrioc);
1236 }
1237
1238 /**
1239 * mpi3mr_clear_reset_history - clear reset history
1240 * @mrioc: Adapter instance reference
1241 *
1242 * Write the reset history bit in IOC status to clear the bit,
1243 * if it is already set.
1244 *
1245 * Return: Nothing.
1246 */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1247 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1248 {
1249 u32 ioc_status;
1250
1251 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1252 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1253 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1254 }
1255
1256 /**
1257 * mpi3mr_issue_and_process_mur - Message unit Reset handler
1258 * @mrioc: Adapter instance reference
1259 * @reset_reason: Reset reason code
1260 *
1261 * Issue Message unit Reset to the controller and wait for it to
1262 * be complete.
1263 *
1264 * Return: 0 on success, -1 on failure.
1265 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status, scratch_pad0;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/*
	 * Encode OS type, IOC number and the reset reason into scratchpad0
	 * before triggering the reset (presumably consumed by firmware for
	 * diagnostics — same encoding as mpi3mr_issue_reset()).
	 */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
	    (mrioc->facts.ioc_num <<
	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing the enable-IOC bit requests the message unit reset */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms; timeout counts 100ms ticks */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			/* Reset completed; clear the history bit and stop */
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Success only if not timed out and IOC is not ready/faulted/enabled */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
1311
1312 /**
1313 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1314 * during reset/resume
1315 * @mrioc: Adapter instance reference
1316 *
1317 * Return: zero if the new IOCFacts parameters value is compatible with
1318 * older values else return -EPERM
1319 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	unsigned long *removepend_bitmap;

	/* The new reply size must not exceed what was allocated earlier */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	/* Queue counts established at first init cannot shrink across reset */
	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* Warn-only: the max transfer length cannot be changed at run time */
	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
		    "\tchanged after reset: previous(%d), new(%d),\n"
		    "the driver cannot change this at run time\n",
		    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);

	/* Warn-only: multipath and SAS transport support are incompatible */
	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* Track loss of the segmented trace buffer capability across reset */
	if (mrioc->seg_tb_support) {
		if (!(mrioc->facts.ioc_capabilities &
		    MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
			ioc_err(mrioc,
			    "critical error: previously enabled segmented trace\n"
			    " buffer capability is disabled after reset. Please\n"
			    " update the firmware or reboot the system or\n"
			    " reload the driver to enable trace diag buffer\n");
			mrioc->diag_buffers[0].disabled_after_reset = true;
		} else
			mrioc->diag_buffers[0].disabled_after_reset = false;
	}

	/* Grow the remove-pending bitmap if firmware now reports more handles */
	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
		    GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap bits from %d to %d\n",
			    mrioc->dev_handle_bitmap_bits,
			    mrioc->facts.max_devhandle);
			return -EPERM;
		}
		/* Swap in the larger (zeroed) bitmap and free the old one */
		bitmap_free(mrioc->removepend_bitmap);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
		    "increased bits of dev_handle_bitmap from %d to %d\n",
		    mrioc->dev_handle_bitmap_bits,
		    mrioc->facts.max_devhandle);
		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	}

	return 0;
}
1394
1395 /**
1396 * mpi3mr_bring_ioc_ready - Bring controller to ready state
1397 * @mrioc: Adapter instance reference
1398 *
1399 * Set Enable IOC bit in IOC configuration register and wait for
1400 * the controller to become ready.
1401 *
1402 * Return: 0 on success, appropriate error on failure.
1403 */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1404 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1405 {
1406 u32 ioc_config, ioc_status, timeout, host_diagnostic;
1407 int retval = 0;
1408 enum mpi3mr_iocstate ioc_state;
1409 u64 base_info;
1410 u8 retry = 0;
1411 u64 start_time, elapsed_time_sec;
1412
1413 retry_bring_ioc_ready:
1414
1415 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1416 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1417 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1418 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1419 ioc_status, ioc_config, base_info);
1420
1421 if (!mpi3mr_is_fault_recoverable(mrioc)) {
1422 mrioc->unrecoverable = 1;
1423 goto out_device_not_present;
1424 }
1425
1426 /*The timeout value is in 2sec unit, changing it to seconds*/
1427 mrioc->ready_timeout =
1428 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1429 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1430
1431 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1432
1433 ioc_state = mpi3mr_get_iocstate(mrioc);
1434 ioc_info(mrioc, "controller is in %s state during detection\n",
1435 mpi3mr_iocstate_name(ioc_state));
1436
1437 timeout = mrioc->ready_timeout * 10;
1438
1439 do {
1440 ioc_state = mpi3mr_get_iocstate(mrioc);
1441
1442 if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1443 ioc_state != MRIOC_STATE_RESET_REQUESTED)
1444 break;
1445
1446 if (!pci_device_is_present(mrioc->pdev)) {
1447 mrioc->unrecoverable = 1;
1448 ioc_err(mrioc, "controller is not present while waiting to reset\n");
1449 goto out_device_not_present;
1450 }
1451
1452 msleep(100);
1453 } while (--timeout);
1454
1455 if (ioc_state == MRIOC_STATE_READY) {
1456 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1457 retval = mpi3mr_issue_and_process_mur(mrioc,
1458 MPI3MR_RESET_FROM_BRINGUP);
1459 ioc_state = mpi3mr_get_iocstate(mrioc);
1460 if (retval)
1461 ioc_err(mrioc,
1462 "message unit reset failed with error %d current state %s\n",
1463 retval, mpi3mr_iocstate_name(ioc_state));
1464 }
1465 if (ioc_state != MRIOC_STATE_RESET) {
1466 if (ioc_state == MRIOC_STATE_FAULT) {
1467 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1468 mpi3mr_print_fault_info(mrioc);
1469 do {
1470 host_diagnostic =
1471 readl(&mrioc->sysif_regs->host_diagnostic);
1472 if (!(host_diagnostic &
1473 MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1474 break;
1475 if (!pci_device_is_present(mrioc->pdev)) {
1476 mrioc->unrecoverable = 1;
1477 ioc_err(mrioc, "controller is not present at the bringup\n");
1478 goto out_device_not_present;
1479 }
1480 msleep(100);
1481 } while (--timeout);
1482 }
1483 mpi3mr_print_fault_info(mrioc);
1484 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1485 retval = mpi3mr_issue_reset(mrioc,
1486 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1487 MPI3MR_RESET_FROM_BRINGUP);
1488 if (retval) {
1489 ioc_err(mrioc,
1490 "soft reset failed with error %d\n", retval);
1491 goto out_failed;
1492 }
1493 }
1494 ioc_state = mpi3mr_get_iocstate(mrioc);
1495 if (ioc_state != MRIOC_STATE_RESET) {
1496 ioc_err(mrioc,
1497 "cannot bring controller to reset state, current state: %s\n",
1498 mpi3mr_iocstate_name(ioc_state));
1499 goto out_failed;
1500 }
1501 mpi3mr_clear_reset_history(mrioc);
1502 retval = mpi3mr_setup_admin_qpair(mrioc);
1503 if (retval) {
1504 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1505 retval);
1506 goto out_failed;
1507 }
1508
1509 ioc_info(mrioc, "bringing controller to ready state\n");
1510 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1511 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1512 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1513
1514 if (retry == 0)
1515 start_time = jiffies;
1516
1517 timeout = mrioc->ready_timeout * 10;
1518 do {
1519 ioc_state = mpi3mr_get_iocstate(mrioc);
1520 if (ioc_state == MRIOC_STATE_READY) {
1521 ioc_info(mrioc,
1522 "successfully transitioned to %s state\n",
1523 mpi3mr_iocstate_name(ioc_state));
1524 return 0;
1525 }
1526 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1527 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1528 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1529 mpi3mr_print_fault_info(mrioc);
1530 goto out_failed;
1531 }
1532 if (!pci_device_is_present(mrioc->pdev)) {
1533 mrioc->unrecoverable = 1;
1534 ioc_err(mrioc,
1535 "controller is not present at the bringup\n");
1536 retval = -1;
1537 goto out_device_not_present;
1538 }
1539 msleep(100);
1540 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1541 } while (elapsed_time_sec < mrioc->ready_timeout);
1542
1543 out_failed:
1544 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1545 if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1546 retry++;
1547
1548 ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1549 " elapsed time =%llu\n", retry, elapsed_time_sec);
1550
1551 goto retry_bring_ioc_ready;
1552 }
1553 ioc_state = mpi3mr_get_iocstate(mrioc);
1554 ioc_err(mrioc,
1555 "failed to bring to ready state, current state: %s\n",
1556 mpi3mr_iocstate_name(ioc_state));
1557 out_device_not_present:
1558 return retval;
1559 }
1560
1561 /**
1562 * mpi3mr_soft_reset_success - Check softreset is success or not
1563 * @ioc_status: IOC status register value
1564 * @ioc_config: IOC config register value
1565 *
1566 * Check whether the soft reset is successful or not based on
1567 * IOC status and IOC config register values.
1568 *
1569 * Return: True when the soft reset is success, false otherwise.
1570 */
1571 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1572 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1573 {
1574 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1575 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1576 return true;
1577 return false;
1578 }
1579
1580 /**
1581 * mpi3mr_diagfault_success - Check diag fault is success or not
1582 * @mrioc: Adapter reference
1583 * @ioc_status: IOC status register value
1584 *
1585 * Check whether the controller hit diag reset fault code.
1586 *
1587 * Return: True when there is diag fault, false otherwise.
1588 */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1589 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1590 u32 ioc_status)
1591 {
1592 u32 fault;
1593
1594 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1595 return false;
1596 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1597 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1598 mpi3mr_print_fault_info(mrioc);
1599 return true;
1600 }
1601 return false;
1602 }
1603
1604 /**
1605 * mpi3mr_set_diagsave - Set diag save bit for snapdump
1606 * @mrioc: Adapter reference
1607 *
1608 * Set diag save bit in IOC configuration register to enable
1609 * snapdump.
1610 *
1611 * Return: Nothing.
1612 */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1613 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1614 {
1615 u32 ioc_config;
1616
1617 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1618 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1619 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1620 }
1621
1622 /**
1623 * mpi3mr_issue_reset - Issue reset to the controller
1624 * @mrioc: Adapter reference
1625 * @reset_type: Reset type
1626 * @reset_reason: Reset reason code
1627 *
1628 * Unlock the host diagnostic registers and write the specific
1629 * reset type to that, wait for reset acknowledgment from the
1630 * controller, if the reset is not successful retry for the
1631 * predefined number of times.
1632 *
1633 * Return: 0 on success, non-zero on failure.
1634 */
static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
	u16 reset_reason)
{
	int retval = -1;
	u8 unlock_retry_count = 0;
	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;

	/* Only soft reset and diag fault actions are supported here */
	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
		return retval;
	if (mrioc->unrecoverable)
		return retval;
	/* A firmware-initiated reset needs no host action; report success */
	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
		retval = 0;
		return retval;
	}

	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
	    mpi3mr_reset_type_name(reset_type),
	    mpi3mr_reset_rc_name(reset_reason), reset_reason);

	mpi3mr_clear_reset_history(mrioc);
	/* Unlock the host diagnostic register with the magic write sequence */
	do {
		ioc_info(mrioc,
		    "Write magic sequence to unlock host diag register (retry=%d)\n",
		    ++unlock_retry_count);
		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
			ioc_err(mrioc,
			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
			    mpi3mr_reset_type_name(reset_type),
			    host_diagnostic);
			mrioc->unrecoverable = 1;
			return retval;
		}

		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
		    &mrioc->sysif_regs->write_sequence);
		usleep_range(1000, 1100);
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		ioc_info(mrioc,
		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
		    unlock_retry_count, host_diagnostic);
	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));

	/*
	 * Encode OS type, IOC number and the reset reason into scratchpad0,
	 * matching mpi3mr_issue_and_process_mur(). The previous code computed
	 * scratch_pad0 but then wrote the bare reset_reason to the register,
	 * losing the OS-type/IOC-number encoding (and leaving scratch_pad0 as
	 * a dead store).
	 */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	writel(host_diagnostic | reset_type,
	    &mrioc->sysif_regs->host_diagnostic);
	switch (reset_type) {
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
		/* Wait for reset-history plus not-ready/not-enabled */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
			    ) {
				mpi3mr_clear_reset_history(mrioc);
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		mpi3mr_print_fault_info(mrioc);
		break;
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
		/* Wait for the controller to report the diag-fault code */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		break;
	default:
		break;
	}

	/* Re-lock the host diagnostic register */
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
	    &mrioc->sysif_regs->write_sequence);

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_info(mrioc,
	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
	if (retval)
		mrioc->unrecoverable = 1;
	return retval;
}
1742
1743 /**
1744 * mpi3mr_admin_request_post - Post request to admin queue
1745 * @mrioc: Adapter reference
1746 * @admin_req: MPI3 request
1747 * @admin_req_sz: Request size
1748 * @ignore_reset: Ignore reset in process
1749 *
1750 * Post the MPI3 request into admin request queue and
1751 * inform the controller, if the queue is full return
1752 * appropriate error.
1753 *
1754 * Return: 0 on success, non-zero on failure.
1755 */
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
	u16 admin_req_sz, u8 ignore_reset)
{
	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	unsigned long flags;
	u8 *areq_entry;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
		return -EFAULT;
	}

	/* Serialize producer-index updates against concurrent posters */
	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
	areq_pi = mrioc->admin_req_pi;
	areq_ci = mrioc->admin_req_ci;
	max_entries = mrioc->num_admin_req;
	/* Full when advancing the producer index would meet the consumer */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
	    (areq_pi == (max_entries - 1)))) {
		ioc_err(mrioc, "AdminReqQ full condition detected\n");
		retval = -EAGAIN;
		goto out;
	}
	/* Reject new requests during reset unless the caller opted out */
	if (!ignore_reset && mrioc->reset_in_progress) {
		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Copy the request into the next free admin frame, zero-padded */
	areq_entry = (u8 *)mrioc->admin_req_base +
	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);

	/* Advance the producer index with wrap-around */
	if (++areq_pi == max_entries)
		areq_pi = 0;
	mrioc->admin_req_pi = areq_pi;

	/* Inform the controller of the new producer index */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);

out:
	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);

	return retval;
}
1806
1807 /**
1808 * mpi3mr_free_op_req_q_segments - free request memory segments
1809 * @mrioc: Adapter instance reference
1810 * @q_idx: operational request queue index
1811 *
1812 * Free memory segments allocated for operational request queue
1813 *
1814 * Return: Nothing.
1815 */
static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_req_qinfo *qinfo = &mrioc->req_qinfo[q_idx];
	struct segments *segs = qinfo->q_segments;
	int seg_size;
	u16 i;

	if (!segs)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed segment size plus a segment list */
		seg_size = MPI3MR_OP_REQ_Q_SEG_SIZE;
		if (qinfo->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    qinfo->q_segment_list,
			    qinfo->q_segment_list_dma);
			qinfo->q_segment_list = NULL;
		}
	} else {
		/* Contiguous mode: one segment of segment_qd frames */
		seg_size = qinfo->segment_qd * mrioc->facts.op_req_sz;
	}

	for (i = 0; i < qinfo->num_segments; i++) {
		if (!segs[i].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev, seg_size,
		    segs[i].segment, segs[i].segment_dma);
		segs[i].segment = NULL;
	}
	kfree(qinfo->q_segments);
	qinfo->q_segments = NULL;
	qinfo->qid = 0;
}
1850
1851 /**
1852 * mpi3mr_free_op_reply_q_segments - free reply memory segments
1853 * @mrioc: Adapter instance reference
1854 * @q_idx: operational reply queue index
1855 *
1856 * Free memory segments allocated for operational reply queue
1857 *
1858 * Return: Nothing.
1859 */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_reply_qinfo *qinfo = &mrioc->op_reply_qinfo[q_idx];
	struct segments *segs = qinfo->q_segments;
	int seg_size;
	u16 i;

	if (!segs)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed segment size plus a segment list */
		seg_size = MPI3MR_OP_REP_Q_SEG_SIZE;
		if (qinfo->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    qinfo->q_segment_list,
			    qinfo->q_segment_list_dma);
			qinfo->q_segment_list = NULL;
		}
	} else {
		/* Contiguous mode: one segment of segment_qd descriptors */
		seg_size = qinfo->segment_qd * mrioc->op_reply_desc_sz;
	}

	for (i = 0; i < qinfo->num_segments; i++) {
		if (!segs[i].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev, seg_size,
		    segs[i].segment, segs[i].segment_dma);
		segs[i].segment = NULL;
	}

	kfree(qinfo->q_segments);
	qinfo->q_segments = NULL;
	qinfo->qid = 0;
}
1895
1896 /**
1897 * mpi3mr_delete_op_reply_q - delete operational reply queue
1898 * @mrioc: Adapter instance reference
1899 * @qidx: operational reply queue index
1900 *
 * Delete operational reply queue by issuing MPI request
1902 * through admin queue.
1903 *
1904 * Return: 0 on success, non-zero on failure.
1905 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-x index that services this reply queue */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/* Keep the per-type active queue accounting in sync */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single shared slot; mutex guards its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1 so the post is not rejected while a reset is running */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: escalate via reset-handler fault processing */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its interrupt vector and free its memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1976
1977 /**
1978 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1979 * @mrioc: Adapter instance reference
1980 * @qidx: request queue index
1981 *
1982 * Allocate segmented memory pools for operational reply
1983 * queue.
1984 *
1985 * Return: 0 on success, non-zero on failure.
1986 */
mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)1987 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1988 {
1989 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1990 int i, size;
1991 u64 *q_segment_list_entry = NULL;
1992 struct segments *segments;
1993
1994 if (mrioc->enable_segqueue) {
1995 op_reply_q->segment_qd =
1996 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1997
1998 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1999
2000 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2001 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
2002 GFP_KERNEL);
2003 if (!op_reply_q->q_segment_list)
2004 return -ENOMEM;
2005 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
2006 } else {
2007 op_reply_q->segment_qd = op_reply_q->num_replies;
2008 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
2009 }
2010
2011 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
2012 op_reply_q->segment_qd);
2013
2014 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
2015 sizeof(struct segments), GFP_KERNEL);
2016 if (!op_reply_q->q_segments)
2017 return -ENOMEM;
2018
2019 segments = op_reply_q->q_segments;
2020 for (i = 0; i < op_reply_q->num_segments; i++) {
2021 segments[i].segment =
2022 dma_alloc_coherent(&mrioc->pdev->dev,
2023 size, &segments[i].segment_dma, GFP_KERNEL);
2024 if (!segments[i].segment)
2025 return -ENOMEM;
2026 if (mrioc->enable_segqueue)
2027 q_segment_list_entry[i] =
2028 (unsigned long)segments[i].segment_dma;
2029 }
2030
2031 return 0;
2032 }
2033
2034 /**
2035 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2036 * @mrioc: Adapter instance reference
2037 * @qidx: request queue index
2038 *
2039 * Allocate segmented memory pools for operational request
2040 * queue.
2041 *
2042 * Return: 0 on success, non-zero on failure.
2043 */
mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2044 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2045 {
2046 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
2047 int i, size;
2048 u64 *q_segment_list_entry = NULL;
2049 struct segments *segments;
2050
2051 if (mrioc->enable_segqueue) {
2052 op_req_q->segment_qd =
2053 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
2054
2055 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
2056
2057 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2058 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
2059 GFP_KERNEL);
2060 if (!op_req_q->q_segment_list)
2061 return -ENOMEM;
2062 q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
2063
2064 } else {
2065 op_req_q->segment_qd = op_req_q->num_requests;
2066 size = op_req_q->num_requests * mrioc->facts.op_req_sz;
2067 }
2068
2069 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
2070 op_req_q->segment_qd);
2071
2072 op_req_q->q_segments = kcalloc(op_req_q->num_segments,
2073 sizeof(struct segments), GFP_KERNEL);
2074 if (!op_req_q->q_segments)
2075 return -ENOMEM;
2076
2077 segments = op_req_q->q_segments;
2078 for (i = 0; i < op_req_q->num_segments; i++) {
2079 segments[i].segment =
2080 dma_alloc_coherent(&mrioc->pdev->dev,
2081 size, &segments[i].segment_dma, GFP_KERNEL);
2082 if (!segments[i].segment)
2083 return -ENOMEM;
2084 if (mrioc->enable_segqueue)
2085 q_segment_list_entry[i] =
2086 (unsigned long)segments[i].segment_dma;
2087 }
2088
2089 return 0;
2090 }
2091
/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-x vector index this reply queue will be attached to */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means the queue already exists */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Firmware queue IDs are 1-based */
	reply_qid = qidx + 1;

	/* Queue depth depends on controller device ID and revision */
	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
		if (mrioc->pdev->revision)
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
		else
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	} else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;

	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;
	/* Pending-IO level above which new submissions are pushed back */
	op_reply_q->qfull_watermark =
	    op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);

	/* Allocate queue memory only if not retained from a prior reset */
	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* Leading vectors are interrupt driven; trailing ones are polled */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
		    cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues share the last vector, with its IRQ disabled */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
		    reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): a CREATE_REQUEST_QUEUE flag constant is used
		 * for a reply queue here - presumably the segmented-flag bit
		 * encoding is identical for both queue types; confirm against
		 * the MPI3 headers.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Publish the queue: non-zero qid marks it live */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2230
/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means the queue already exists */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Firmware queue IDs are 1-based */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	/* Completions for this request queue arrive on reply_qid */
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	/* Allocate queue memory only if not retained from a prior reset */
	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		/* Segmented: firmware gets the segment-list address */
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Publish the queue: non-zero qid marks it live */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2333
2334 /**
2335 * mpi3mr_create_op_queues - create operational queue pairs
2336 * @mrioc: Adapter instance reference
2337 *
2338 * Allocate memory for operational queue meta data and call
2339 * create request and reply queue functions.
2340 *
2341 * Return: 0 on success, non-zero on failures.
2342 */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2343 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2344 {
2345 int retval = 0;
2346 u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2347
2348 num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2349 mrioc->facts.max_op_req_q);
2350
2351 msix_count_op_q =
2352 mrioc->intr_info_count - mrioc->op_reply_q_offset;
2353 if (!mrioc->num_queues)
2354 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2355 /*
2356 * During reset set the num_queues to the number of queues
2357 * that was set before the reset.
2358 */
2359 num_queues = mrioc->num_op_reply_q ?
2360 mrioc->num_op_reply_q : mrioc->num_queues;
2361 ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2362 num_queues);
2363
2364 if (!mrioc->req_qinfo) {
2365 mrioc->req_qinfo = kcalloc(num_queues,
2366 sizeof(struct op_req_qinfo), GFP_KERNEL);
2367 if (!mrioc->req_qinfo) {
2368 retval = -1;
2369 goto out_failed;
2370 }
2371
2372 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2373 num_queues, GFP_KERNEL);
2374 if (!mrioc->op_reply_qinfo) {
2375 retval = -1;
2376 goto out_failed;
2377 }
2378 }
2379
2380 if (mrioc->enable_segqueue)
2381 ioc_info(mrioc,
2382 "allocating operational queues through segmented queues\n");
2383
2384 for (i = 0; i < num_queues; i++) {
2385 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2386 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2387 break;
2388 }
2389 if (mpi3mr_create_op_req_q(mrioc, i,
2390 mrioc->op_reply_qinfo[i].qid)) {
2391 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2392 mpi3mr_delete_op_reply_q(mrioc, i);
2393 break;
2394 }
2395 }
2396
2397 if (i == 0) {
2398 /* Not even one queue is created successfully*/
2399 retval = -1;
2400 goto out_failed;
2401 }
2402 mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2403 ioc_info(mrioc,
2404 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2405 mrioc->num_op_reply_q, mrioc->default_qcount,
2406 mrioc->active_poll_qcount);
2407
2408 return retval;
2409 out_failed:
2410 kfree(mrioc->req_qinfo);
2411 mrioc->req_qinfo = NULL;
2412
2413 kfree(mrioc->op_reply_qinfo);
2414 mrioc->op_reply_qinfo = NULL;
2415
2416 return retval;
2417 }
2418
/**
 * mpi3mr_op_request_post - Post request to operational queue
 * @mrioc: Adapter reference
 * @op_req_q: Operational request queue info
 * @req: MPI3 request
 *
 * Post the MPI3 request into operational request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;
	struct op_reply_qinfo *op_reply_q = NULL;

	/* reply_qid is 1-based; derive the 0-based reply queue index */
	reply_qidx = op_req_q->reply_qid - 1;
	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;

	if (mrioc->unrecoverable)
		return -EFAULT;

	/* Producer index and full-check must be done under the queue lock */
	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue full: drain the paired reply queue and retry once */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reply queue is nearing to get full, push back IOs to SML */
	if ((mrioc->prevent_reply_qfull == true) &&
	    (atomic_read(&op_reply_q->pend_ios) >
	    (op_reply_q->qfull_watermark))) {
		atomic_inc(&mrioc->reply_qfull_count);
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the request frame slot within the segmented queue */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	/* Advance the producer index with wrap-around */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Switch to IRQ polling when pending completions pile up */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Ring the doorbell: tell the controller about the new PI */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2510
/**
 * mpi3mr_check_rh_fault_ioc - check reset history and fault
 * controller
 * @mrioc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will save snapdump and fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeout as in those cases
 * immediate soft reset invocation is not required.
 *
 * Return: None.
 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	/* A vanished PCI device cannot be recovered */
	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Already reset or already faulted: just record the trigger */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		    MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		return;
	}

	/* Request diag save, then fault the controller ourselves */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
	    MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll (100ms steps) until the firmware finishes the diag save */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}
2572
/**
 * mpi3mr_sync_timestamp - Issue time stamp sync request
 * @mrioc: Adapter reference
 *
 * Issue IO unit control MPI request to synchronize firmware
 * timestamp with host time.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is a single shared slot for internal admin commands */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware expects host wall-clock time in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Skip faulting if a reset already flushed this command */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2641
/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer the controller uploads the manifest into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Upload just the manifest section that follows the image header */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Version print is best-effort: failure here is not an error */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2726
/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* Reset/recovery path owns the controller; do not reschedule */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* Drain admin replies whose interrupt was seen but not serviced */
	if (atomic_read(&mrioc->admin_pend_isr)) {
		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
		    "flush admin replies\n");
		mpi3mr_process_admin_reply_q(mrioc);
	}

	/* Periodic firmware timestamp sync (supervisor IOCs only) */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
	    (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {

		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware asked us to prepare for reset but never followed up */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	    MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Firmware performed a reset on its own: re-initialize */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		/* Give the firmware time to finish the diag save */
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	if (!mpi3mr_is_fault_recoverable(mrioc)) {
		mrioc->unrecoverable = 1;
		goto schedule_work;
	}

	/* Decide recovery action based on the reported fault code */
	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Re-arm under the lock; watchdog_work_q is NULL once stopped */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
2846
2847 /**
2848 * mpi3mr_start_watchdog - Start watchdog
2849 * @mrioc: Adapter instance reference
2850 *
2851 * Create and start the watchdog thread to monitor controller
2852 * faults.
2853 *
2854 * Return: Nothing.
2855 */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2856 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2857 {
2858 if (mrioc->watchdog_work_q)
2859 return;
2860
2861 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2862 snprintf(mrioc->watchdog_work_q_name,
2863 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2864 mrioc->id);
2865 mrioc->watchdog_work_q = alloc_ordered_workqueue(
2866 "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
2867 if (!mrioc->watchdog_work_q) {
2868 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2869 return;
2870 }
2871
2872 if (mrioc->watchdog_work_q)
2873 queue_delayed_work(mrioc->watchdog_work_q,
2874 &mrioc->watchdog_work,
2875 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2876 }
2877
2878 /**
2879 * mpi3mr_stop_watchdog - Stop watchdog
2880 * @mrioc: Adapter instance reference
2881 *
2882 * Stop the watchdog thread created to monitor controller
2883 * faults.
2884 *
2885 * Return: Nothing.
2886 */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)2887 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2888 {
2889 unsigned long flags;
2890 struct workqueue_struct *wq;
2891
2892 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2893 wq = mrioc->watchdog_work_q;
2894 mrioc->watchdog_work_q = NULL;
2895 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2896 if (wq) {
2897 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2898 flush_workqueue(wq);
2899 destroy_workqueue(wq);
2900 }
2901 }
2902
2903 /**
2904 * mpi3mr_setup_admin_qpair - Setup admin queue pair
2905 * @mrioc: Adapter instance reference
2906 *
2907 * Allocate memory for admin queue pair if required and register
2908 * the admin queue with the controller.
2909 *
2910 * Return: 0 on success, non-zero on failures.
2911 */
mpi3mr_setup_admin_qpair(struct mpi3mr_ioc * mrioc)2912 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2913 {
2914 int retval = 0;
2915 u32 num_admin_entries = 0;
2916
2917 mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2918 mrioc->num_admin_req = mrioc->admin_req_q_sz /
2919 MPI3MR_ADMIN_REQ_FRAME_SZ;
2920 mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2921
2922 mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2923 mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2924 MPI3MR_ADMIN_REPLY_FRAME_SZ;
2925 mrioc->admin_reply_ci = 0;
2926 mrioc->admin_reply_ephase = 1;
2927 atomic_set(&mrioc->admin_reply_q_in_use, 0);
2928
2929 if (!mrioc->admin_req_base) {
2930 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2931 mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2932
2933 if (!mrioc->admin_req_base) {
2934 retval = -1;
2935 goto out_failed;
2936 }
2937
2938 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2939 mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2940 GFP_KERNEL);
2941
2942 if (!mrioc->admin_reply_base) {
2943 retval = -1;
2944 goto out_failed;
2945 }
2946 }
2947
2948 num_admin_entries = (mrioc->num_admin_replies << 16) |
2949 (mrioc->num_admin_req);
2950 writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2951 mpi3mr_writeq(mrioc->admin_req_dma,
2952 &mrioc->sysif_regs->admin_request_queue_address);
2953 mpi3mr_writeq(mrioc->admin_reply_dma,
2954 &mrioc->sysif_regs->admin_reply_queue_address);
2955 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2956 writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2957 return retval;
2958
2959 out_failed:
2960
2961 if (mrioc->admin_reply_base) {
2962 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2963 mrioc->admin_reply_base, mrioc->admin_reply_dma);
2964 mrioc->admin_reply_base = NULL;
2965 }
2966 if (mrioc->admin_req_base) {
2967 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2968 mrioc->admin_req_base, mrioc->admin_req_dma);
2969 mrioc->admin_req_base = NULL;
2970 }
2971 return retval;
2972 }
2973
2974 /**
2975 * mpi3mr_issue_iocfacts - Send IOC Facts
2976 * @mrioc: Adapter instance reference
2977 * @facts_data: Cached IOC facts data
2978 *
2979 * Issue IOC Facts MPI request through admin queue and wait for
2980 * the completion of it or time out.
2981 *
2982 * Return: 0 on success, non-zero on failures.
2983 */
mpi3mr_issue_iocfacts(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)2984 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2985 struct mpi3_ioc_facts_data *facts_data)
2986 {
2987 struct mpi3_ioc_facts_request iocfacts_req;
2988 void *data = NULL;
2989 dma_addr_t data_dma;
2990 u32 data_len = sizeof(*facts_data);
2991 int retval = 0;
2992 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2993
2994 data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2995 GFP_KERNEL);
2996
2997 if (!data) {
2998 retval = -1;
2999 goto out;
3000 }
3001
3002 memset(&iocfacts_req, 0, sizeof(iocfacts_req));
3003 mutex_lock(&mrioc->init_cmds.mutex);
3004 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3005 retval = -1;
3006 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
3007 mutex_unlock(&mrioc->init_cmds.mutex);
3008 goto out;
3009 }
3010 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3011 mrioc->init_cmds.is_waiting = 1;
3012 mrioc->init_cmds.callback = NULL;
3013 iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3014 iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
3015
3016 mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
3017 data_dma);
3018
3019 init_completion(&mrioc->init_cmds.done);
3020 retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
3021 sizeof(iocfacts_req), 1);
3022 if (retval) {
3023 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
3024 goto out_unlock;
3025 }
3026 wait_for_completion_timeout(&mrioc->init_cmds.done,
3027 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3028 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3029 ioc_err(mrioc, "ioc_facts timed out\n");
3030 mpi3mr_check_rh_fault_ioc(mrioc,
3031 MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
3032 retval = -1;
3033 goto out_unlock;
3034 }
3035 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3036 != MPI3_IOCSTATUS_SUCCESS) {
3037 ioc_err(mrioc,
3038 "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3039 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3040 mrioc->init_cmds.ioc_loginfo);
3041 retval = -1;
3042 goto out_unlock;
3043 }
3044 memcpy(facts_data, (u8 *)data, data_len);
3045 mpi3mr_process_factsdata(mrioc, facts_data);
3046 out_unlock:
3047 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3048 mutex_unlock(&mrioc->init_cmds.mutex);
3049
3050 out:
3051 if (data)
3052 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
3053
3054 return retval;
3055 }
3056
3057 /**
3058 * mpi3mr_check_reset_dma_mask - Process IOC facts data
3059 * @mrioc: Adapter instance reference
3060 *
3061 * Check whether the new DMA mask requested through IOCFacts by
3062 * firmware needs to be set, if so set it .
3063 *
3064 * Return: 0 on success, non-zero on failure.
3065 */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3066 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3067 {
3068 struct pci_dev *pdev = mrioc->pdev;
3069 int r;
3070 u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3071
3072 if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3073 return 0;
3074
3075 ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3076 mrioc->dma_mask, facts_dma_mask);
3077
3078 r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3079 if (r) {
3080 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3081 facts_dma_mask, r);
3082 return r;
3083 }
3084 mrioc->dma_mask = facts_dma_mask;
3085 return r;
3086 }
3087
3088 /**
3089 * mpi3mr_process_factsdata - Process IOC facts data
3090 * @mrioc: Adapter instance reference
3091 * @facts_data: Cached IOC facts data
3092 *
3093 * Convert IOC facts data into cpu endianness and cache it in
3094 * the driver .
3095 *
3096 * Return: Nothing.
3097 */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3098 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3099 struct mpi3_ioc_facts_data *facts_data)
3100 {
3101 u32 ioc_config, req_sz, facts_flags;
3102
3103 if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3104 (sizeof(*facts_data) / 4)) {
3105 ioc_warn(mrioc,
3106 "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3107 sizeof(*facts_data),
3108 le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3109 }
3110
3111 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3112 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3113 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3114 if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3115 ioc_err(mrioc,
3116 "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3117 req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3118 }
3119
3120 memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3121
3122 facts_flags = le32_to_cpu(facts_data->flags);
3123 mrioc->facts.op_req_sz = req_sz;
3124 mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3125 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3126 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3127
3128 mrioc->facts.ioc_num = facts_data->ioc_number;
3129 mrioc->facts.who_init = facts_data->who_init;
3130 mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3131 mrioc->facts.personality = (facts_flags &
3132 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3133 mrioc->facts.dma_mask = (facts_flags &
3134 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3135 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3136 mrioc->facts.dma_mask = (facts_flags &
3137 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3138 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3139 mrioc->facts.protocol_flags = facts_data->protocol_flags;
3140 mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3141 mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3142 mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3143 mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3144 mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3145 mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3146 mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3147 mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3148 mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3149 mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3150 mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3151 mrioc->facts.max_pcie_switches =
3152 le16_to_cpu(facts_data->max_pcie_switches);
3153 mrioc->facts.max_sasexpanders =
3154 le16_to_cpu(facts_data->max_sas_expanders);
3155 mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3156 mrioc->facts.max_sasinitiators =
3157 le16_to_cpu(facts_data->max_sas_initiators);
3158 mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3159 mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3160 mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3161 mrioc->facts.max_op_req_q =
3162 le16_to_cpu(facts_data->max_operational_request_queues);
3163 mrioc->facts.max_op_reply_q =
3164 le16_to_cpu(facts_data->max_operational_reply_queues);
3165 mrioc->facts.ioc_capabilities =
3166 le32_to_cpu(facts_data->ioc_capabilities);
3167 mrioc->facts.fw_ver.build_num =
3168 le16_to_cpu(facts_data->fw_version.build_num);
3169 mrioc->facts.fw_ver.cust_id =
3170 le16_to_cpu(facts_data->fw_version.customer_id);
3171 mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3172 mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3173 mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3174 mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3175 mrioc->msix_count = min_t(int, mrioc->msix_count,
3176 mrioc->facts.max_msix_vectors);
3177 mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3178 mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3179 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3180 mrioc->facts.shutdown_timeout =
3181 le16_to_cpu(facts_data->shutdown_timeout);
3182 mrioc->facts.diag_trace_sz =
3183 le32_to_cpu(facts_data->diag_trace_size);
3184 mrioc->facts.diag_fw_sz =
3185 le32_to_cpu(facts_data->diag_fw_size);
3186 mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3187 mrioc->facts.max_dev_per_tg =
3188 facts_data->max_devices_per_throttle_group;
3189 mrioc->facts.io_throttle_data_length =
3190 le16_to_cpu(facts_data->io_throttle_data_length);
3191 mrioc->facts.max_io_throttle_group =
3192 le16_to_cpu(facts_data->max_io_throttle_group);
3193 mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3194 mrioc->facts.io_throttle_high =
3195 le16_to_cpu(facts_data->io_throttle_high);
3196
3197 if (mrioc->facts.max_data_length ==
3198 MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3199 mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3200 else
3201 mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3202 /* Store in 512b block count */
3203 if (mrioc->facts.io_throttle_data_length)
3204 mrioc->io_throttle_data_length =
3205 (mrioc->facts.io_throttle_data_length * 2 * 4);
3206 else
3207 /* set the length to 1MB + 1K to disable throttle */
3208 mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3209
3210 mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3211 mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3212
3213 ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3214 mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3215 mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3216 ioc_info(mrioc,
3217 "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3218 mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3219 mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3220 ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3221 mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3222 mrioc->facts.sge_mod_shift);
3223 ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3224 mrioc->facts.dma_mask, (facts_flags &
3225 MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3226 ioc_info(mrioc,
3227 "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3228 mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3229 ioc_info(mrioc,
3230 "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3231 mrioc->facts.io_throttle_data_length * 4,
3232 mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3233 }
3234
3235 /**
3236 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
3237 * @mrioc: Adapter instance reference
3238 *
3239 * Allocate and initialize the reply free buffers, sense
3240 * buffers, reply free queue and sense buffer queue.
3241 *
3242 * Return: 0 on success, non-zero on failures.
3243 */
mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc * mrioc)3244 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
3245 {
3246 int retval = 0;
3247 u32 sz, i;
3248
3249 if (mrioc->init_cmds.reply)
3250 return retval;
3251
3252 mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3253 if (!mrioc->init_cmds.reply)
3254 goto out_failed;
3255
3256 mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3257 if (!mrioc->bsg_cmds.reply)
3258 goto out_failed;
3259
3260 mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3261 if (!mrioc->transport_cmds.reply)
3262 goto out_failed;
3263
3264 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3265 mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
3266 GFP_KERNEL);
3267 if (!mrioc->dev_rmhs_cmds[i].reply)
3268 goto out_failed;
3269 }
3270
3271 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
3272 mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
3273 GFP_KERNEL);
3274 if (!mrioc->evtack_cmds[i].reply)
3275 goto out_failed;
3276 }
3277
3278 mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3279 if (!mrioc->host_tm_cmds.reply)
3280 goto out_failed;
3281
3282 mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3283 if (!mrioc->pel_cmds.reply)
3284 goto out_failed;
3285
3286 mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3287 if (!mrioc->pel_abort_cmd.reply)
3288 goto out_failed;
3289
3290 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
3291 mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
3292 GFP_KERNEL);
3293 if (!mrioc->removepend_bitmap)
3294 goto out_failed;
3295
3296 mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
3297 if (!mrioc->devrem_bitmap)
3298 goto out_failed;
3299
3300 mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
3301 GFP_KERNEL);
3302 if (!mrioc->evtack_cmds_bitmap)
3303 goto out_failed;
3304
3305 mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
3306 mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
3307 mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
3308 mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
3309
3310 /* reply buffer pool, 16 byte align */
3311 sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3312 mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
3313 &mrioc->pdev->dev, sz, 16, 0);
3314 if (!mrioc->reply_buf_pool) {
3315 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
3316 goto out_failed;
3317 }
3318
3319 mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
3320 &mrioc->reply_buf_dma);
3321 if (!mrioc->reply_buf)
3322 goto out_failed;
3323
3324 mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
3325
3326 /* reply free queue, 8 byte align */
3327 sz = mrioc->reply_free_qsz * 8;
3328 mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
3329 &mrioc->pdev->dev, sz, 8, 0);
3330 if (!mrioc->reply_free_q_pool) {
3331 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
3332 goto out_failed;
3333 }
3334 mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
3335 GFP_KERNEL, &mrioc->reply_free_q_dma);
3336 if (!mrioc->reply_free_q)
3337 goto out_failed;
3338
3339 /* sense buffer pool, 4 byte align */
3340 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3341 mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
3342 &mrioc->pdev->dev, sz, 4, 0);
3343 if (!mrioc->sense_buf_pool) {
3344 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
3345 goto out_failed;
3346 }
3347 mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
3348 &mrioc->sense_buf_dma);
3349 if (!mrioc->sense_buf)
3350 goto out_failed;
3351
3352 /* sense buffer queue, 8 byte align */
3353 sz = mrioc->sense_buf_q_sz * 8;
3354 mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
3355 &mrioc->pdev->dev, sz, 8, 0);
3356 if (!mrioc->sense_buf_q_pool) {
3357 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
3358 goto out_failed;
3359 }
3360 mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
3361 GFP_KERNEL, &mrioc->sense_buf_q_dma);
3362 if (!mrioc->sense_buf_q)
3363 goto out_failed;
3364
3365 return retval;
3366
3367 out_failed:
3368 retval = -1;
3369 return retval;
3370 }
3371
3372 /**
3373 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3374 * buffers
3375 * @mrioc: Adapter instance reference
3376 *
3377 * Helper function to initialize reply and sense buffers along
3378 * with some debug prints.
3379 *
3380 * Return: None.
3381 */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3382 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3383 {
3384 u32 sz, i;
3385 dma_addr_t phy_addr;
3386
3387 sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3388 ioc_info(mrioc,
3389 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3390 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3391 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3392 sz = mrioc->reply_free_qsz * 8;
3393 ioc_info(mrioc,
3394 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3395 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3396 (unsigned long long)mrioc->reply_free_q_dma);
3397 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3398 ioc_info(mrioc,
3399 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3400 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3401 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3402 sz = mrioc->sense_buf_q_sz * 8;
3403 ioc_info(mrioc,
3404 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3405 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3406 (unsigned long long)mrioc->sense_buf_q_dma);
3407
3408 /* initialize Reply buffer Queue */
3409 for (i = 0, phy_addr = mrioc->reply_buf_dma;
3410 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3411 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3412 mrioc->reply_free_q[i] = cpu_to_le64(0);
3413
3414 /* initialize Sense Buffer Queue */
3415 for (i = 0, phy_addr = mrioc->sense_buf_dma;
3416 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3417 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3418 mrioc->sense_buf_q[i] = cpu_to_le64(0);
3419 }
3420
3421 /**
3422 * mpi3mr_issue_iocinit - Send IOC Init
3423 * @mrioc: Adapter instance reference
3424 *
3425 * Issue IOC Init MPI request through admin queue and wait for
3426 * the completion of it or time out.
3427 *
3428 * Return: 0 on success, non-zero on failures.
3429 */
mpi3mr_issue_iocinit(struct mpi3mr_ioc * mrioc)3430 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3431 {
3432 struct mpi3_ioc_init_request iocinit_req;
3433 struct mpi3_driver_info_layout *drv_info;
3434 dma_addr_t data_dma;
3435 u32 data_len = sizeof(*drv_info);
3436 int retval = 0;
3437 ktime_t current_time;
3438
3439 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3440 GFP_KERNEL);
3441 if (!drv_info) {
3442 retval = -1;
3443 goto out;
3444 }
3445 mpimr_initialize_reply_sbuf_queues(mrioc);
3446
3447 drv_info->information_length = cpu_to_le32(data_len);
3448 strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3449 strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3450 strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3451 strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3452 strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3453 strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3454 sizeof(drv_info->driver_release_date));
3455 drv_info->driver_capabilities = 0;
3456 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3457 sizeof(mrioc->driver_info));
3458
3459 memset(&iocinit_req, 0, sizeof(iocinit_req));
3460 mutex_lock(&mrioc->init_cmds.mutex);
3461 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3462 retval = -1;
3463 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3464 mutex_unlock(&mrioc->init_cmds.mutex);
3465 goto out;
3466 }
3467 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3468 mrioc->init_cmds.is_waiting = 1;
3469 mrioc->init_cmds.callback = NULL;
3470 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3471 iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3472 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3473 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3474 iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3475 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3476 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3477 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3478 iocinit_req.reply_free_queue_address =
3479 cpu_to_le64(mrioc->reply_free_q_dma);
3480 iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3481 iocinit_req.sense_buffer_free_queue_depth =
3482 cpu_to_le16(mrioc->sense_buf_q_sz);
3483 iocinit_req.sense_buffer_free_queue_address =
3484 cpu_to_le64(mrioc->sense_buf_q_dma);
3485 iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3486
3487 current_time = ktime_get_real();
3488 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3489
3490 iocinit_req.msg_flags |=
3491 MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
3492 iocinit_req.msg_flags |=
3493 MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
3494
3495 init_completion(&mrioc->init_cmds.done);
3496 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3497 sizeof(iocinit_req), 1);
3498 if (retval) {
3499 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3500 goto out_unlock;
3501 }
3502 wait_for_completion_timeout(&mrioc->init_cmds.done,
3503 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3504 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3505 mpi3mr_check_rh_fault_ioc(mrioc,
3506 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3507 ioc_err(mrioc, "ioc_init timed out\n");
3508 retval = -1;
3509 goto out_unlock;
3510 }
3511 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3512 != MPI3_IOCSTATUS_SUCCESS) {
3513 ioc_err(mrioc,
3514 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3515 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3516 mrioc->init_cmds.ioc_loginfo);
3517 retval = -1;
3518 goto out_unlock;
3519 }
3520
3521 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3522 writel(mrioc->reply_free_queue_host_index,
3523 &mrioc->sysif_regs->reply_free_host_index);
3524
3525 mrioc->sbq_host_index = mrioc->num_sense_bufs;
3526 writel(mrioc->sbq_host_index,
3527 &mrioc->sysif_regs->sense_buffer_free_host_index);
3528 out_unlock:
3529 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3530 mutex_unlock(&mrioc->init_cmds.mutex);
3531
3532 out:
3533 if (drv_info)
3534 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3535 data_dma);
3536
3537 return retval;
3538 }
3539
3540 /**
3541 * mpi3mr_unmask_events - Unmask events in event mask bitmap
3542 * @mrioc: Adapter instance reference
3543 * @event: MPI event ID
3544 *
3545 * Un mask the specific event by resetting the event_mask
3546 * bitmap.
3547 *
3548 * Return: 0 on success, non-zero on failures.
3549 */
mpi3mr_unmask_events(struct mpi3mr_ioc * mrioc,u16 event)3550 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3551 {
3552 u32 desired_event;
3553 u8 word;
3554
3555 if (event >= 128)
3556 return;
3557
3558 desired_event = (1 << (event % 32));
3559 word = event / 32;
3560
3561 mrioc->event_masks[word] &= ~desired_event;
3562 }
3563
3564 /**
3565 * mpi3mr_issue_event_notification - Send event notification
3566 * @mrioc: Adapter instance reference
3567 *
3568 * Issue event notification MPI request through admin queue and
3569 * wait for the completion of it or time out.
3570 *
3571 * Return: 0 on success, non-zero on failures.
3572 */
mpi3mr_issue_event_notification(struct mpi3mr_ioc * mrioc)3573 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3574 {
3575 struct mpi3_event_notification_request evtnotify_req;
3576 int retval = 0;
3577 u8 i;
3578
3579 memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3580 mutex_lock(&mrioc->init_cmds.mutex);
3581 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3582 retval = -1;
3583 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3584 mutex_unlock(&mrioc->init_cmds.mutex);
3585 goto out;
3586 }
3587 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3588 mrioc->init_cmds.is_waiting = 1;
3589 mrioc->init_cmds.callback = NULL;
3590 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3591 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3592 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3593 evtnotify_req.event_masks[i] =
3594 cpu_to_le32(mrioc->event_masks[i]);
3595 init_completion(&mrioc->init_cmds.done);
3596 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3597 sizeof(evtnotify_req), 1);
3598 if (retval) {
3599 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3600 goto out_unlock;
3601 }
3602 wait_for_completion_timeout(&mrioc->init_cmds.done,
3603 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3604 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3605 ioc_err(mrioc, "event notification timed out\n");
3606 mpi3mr_check_rh_fault_ioc(mrioc,
3607 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3608 retval = -1;
3609 goto out_unlock;
3610 }
3611 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3612 != MPI3_IOCSTATUS_SUCCESS) {
3613 ioc_err(mrioc,
3614 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3615 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3616 mrioc->init_cmds.ioc_loginfo);
3617 retval = -1;
3618 goto out_unlock;
3619 }
3620
3621 out_unlock:
3622 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3623 mutex_unlock(&mrioc->init_cmds.mutex);
3624 out:
3625 return retval;
3626 }
3627
3628 /**
3629 * mpi3mr_process_event_ack - Process event acknowledgment
3630 * @mrioc: Adapter instance reference
3631 * @event: MPI3 event ID
3632 * @event_ctx: event context
3633 *
3634 * Send event acknowledgment through admin queue and wait for
3635 * it to complete.
3636 *
3637 * Return: 0 on success, non-zero on failures.
3638 */
mpi3mr_process_event_ack(struct mpi3mr_ioc * mrioc,u8 event,u32 event_ctx)3639 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3640 u32 event_ctx)
3641 {
3642 struct mpi3_event_ack_request evtack_req;
3643 int retval = 0;
3644
3645 memset(&evtack_req, 0, sizeof(evtack_req));
3646 mutex_lock(&mrioc->init_cmds.mutex);
3647 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3648 retval = -1;
3649 ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3650 mutex_unlock(&mrioc->init_cmds.mutex);
3651 goto out;
3652 }
3653 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3654 mrioc->init_cmds.is_waiting = 1;
3655 mrioc->init_cmds.callback = NULL;
3656 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3657 evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3658 evtack_req.event = event;
3659 evtack_req.event_context = cpu_to_le32(event_ctx);
3660
3661 init_completion(&mrioc->init_cmds.done);
3662 retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3663 sizeof(evtack_req), 1);
3664 if (retval) {
3665 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3666 goto out_unlock;
3667 }
3668 wait_for_completion_timeout(&mrioc->init_cmds.done,
3669 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3670 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3671 ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3672 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3673 mpi3mr_check_rh_fault_ioc(mrioc,
3674 MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3675 retval = -1;
3676 goto out_unlock;
3677 }
3678 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3679 != MPI3_IOCSTATUS_SUCCESS) {
3680 ioc_err(mrioc,
3681 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3682 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3683 mrioc->init_cmds.ioc_loginfo);
3684 retval = -1;
3685 goto out_unlock;
3686 }
3687
3688 out_unlock:
3689 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3690 mutex_unlock(&mrioc->init_cmds.mutex);
3691 out:
3692 return retval;
3693 }
3694
3695 /**
3696 * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3697 * @mrioc: Adapter instance reference
3698 *
3699 * Allocate chain buffers and set a bitmap to indicate free
3700 * chain buffers. Chain buffers are used to pass the SGE
3701 * information along with MPI3 SCSI IO requests for host I/O.
3702 *
3703 * Return: 0 on success, non-zero on failure
3704 */
mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc * mrioc)3705 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3706 {
3707 int retval = 0;
3708 u32 sz, i;
3709 u16 num_chains;
3710
3711 if (mrioc->chain_sgl_list)
3712 return retval;
3713
3714 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3715
3716 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3717 | SHOST_DIX_TYPE1_PROTECTION
3718 | SHOST_DIX_TYPE2_PROTECTION
3719 | SHOST_DIX_TYPE3_PROTECTION))
3720 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3721
3722 mrioc->chain_buf_count = num_chains;
3723 sz = sizeof(struct chain_element) * num_chains;
3724 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3725 if (!mrioc->chain_sgl_list)
3726 goto out_failed;
3727
3728 if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
3729 MPI3MR_PAGE_SIZE_4K))
3730 mrioc->max_sgl_entries = mrioc->facts.max_data_length /
3731 MPI3MR_PAGE_SIZE_4K;
3732 sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
3733 ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
3734 mrioc->max_sgl_entries, sz/1024);
3735
3736 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3737 &mrioc->pdev->dev, sz, 16, 0);
3738 if (!mrioc->chain_buf_pool) {
3739 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3740 goto out_failed;
3741 }
3742
3743 for (i = 0; i < num_chains; i++) {
3744 mrioc->chain_sgl_list[i].addr =
3745 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3746 &mrioc->chain_sgl_list[i].dma_addr);
3747
3748 if (!mrioc->chain_sgl_list[i].addr)
3749 goto out_failed;
3750 }
3751 mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
3752 if (!mrioc->chain_bitmap)
3753 goto out_failed;
3754 return retval;
3755 out_failed:
3756 retval = -1;
3757 return retval;
3758 }
3759
3760 /**
3761 * mpi3mr_port_enable_complete - Mark port enable complete
3762 * @mrioc: Adapter instance reference
3763 * @drv_cmd: Internal command tracker
3764 *
3765 * Call back for asynchronous port enable request sets the
3766 * driver command to indicate port enable request is complete.
3767 *
3768 * Return: Nothing
3769 */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3770 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3771 struct mpi3mr_drv_cmd *drv_cmd)
3772 {
3773 drv_cmd->callback = NULL;
3774 mrioc->scan_started = 0;
3775 if (drv_cmd->state & MPI3MR_CMD_RESET)
3776 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3777 else
3778 mrioc->scan_failed = drv_cmd->ioc_status;
3779 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3780 }
3781
/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/*
	 * init_cmds is a single shared tracker; bail out if another
	 * internal command is still outstanding rather than corrupting
	 * its state.
	 */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/*
		 * Asynchronous mode: completion is reported later through
		 * the mpi3mr_port_enable_complete() callback.
		 */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Synchronous mode: block on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/*
		 * Leave the tracker in PENDING state for the callback;
		 * release only the mutex here.
		 */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	/*
	 * The tracker state (not the wait return value) is authoritative:
	 * the command may be completed or cleaned up by a concurrent
	 * reset while we were waiting.
	 */
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* Kick the fault handling path on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3844
/* Protocol type to name mapper, used when printing controller info */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
3854
/* Capability to name mapper, used when printing controller info */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3863
/**
 * mpi3mr_repost_diag_bufs - repost host diag buffers
 * @mrioc: Adapter instance reference
 *
 * repost firmware and trace diag buffers based on global
 * trigger flag from driver page 2
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh driver page 2 so the global trigger flags are current */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/*
	 * Repost only buffers that exist and were not released by a
	 * global or element trigger (those must stay for collection).
	 */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* The global trigger flags in driver page 2 can veto a repost */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/*
		 * Save trigger state so it can be restored if the post
		 * fails; a successful post resets it to UNKNOWN.
		 */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
			    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/restore dance for the firmware diag buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
			    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
3951
3952 /**
3953 * mpi3mr_read_tsu_interval - Update time stamp interval
3954 * @mrioc: Adapter instance reference
3955 *
3956 * Update time stamp interval if its defined in driver page 1,
3957 * otherwise use default value.
3958 *
3959 * Return: Nothing
3960 */
3961 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)3962 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
3963 {
3964 struct mpi3_driver_page1 driver_pg1;
3965 u16 pg_sz = sizeof(driver_pg1);
3966 int retval = 0;
3967
3968 mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
3969
3970 retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
3971 if (!retval && driver_pg1.time_stamp_update)
3972 mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
3973 }
3974
3975 /**
3976 * mpi3mr_print_ioc_info - Display controller information
3977 * @mrioc: Adapter instance reference
3978 *
3979 * Display controller personality, capability, supported
3980 * protocols etc.
3981 *
3982 * Return: Nothing
3983 */
3984 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)3985 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3986 {
3987 int i = 0, bytes_written = 0;
3988 const char *personality;
3989 char protocol[50] = {0};
3990 char capabilities[100] = {0};
3991 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3992
3993 switch (mrioc->facts.personality) {
3994 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3995 personality = "Enhanced HBA";
3996 break;
3997 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3998 personality = "RAID";
3999 break;
4000 default:
4001 personality = "Unknown";
4002 break;
4003 }
4004
4005 ioc_info(mrioc, "Running in %s Personality", personality);
4006
4007 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4008 fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4009 fwver->ph_minor, fwver->cust_id, fwver->build_num);
4010
4011 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4012 if (mrioc->facts.protocol_flags &
4013 mpi3mr_protocols[i].protocol) {
4014 bytes_written += scnprintf(protocol + bytes_written,
4015 sizeof(protocol) - bytes_written, "%s%s",
4016 bytes_written ? "," : "",
4017 mpi3mr_protocols[i].name);
4018 }
4019 }
4020
4021 bytes_written = 0;
4022 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4023 if (mrioc->facts.protocol_flags &
4024 mpi3mr_capabilities[i].capability) {
4025 bytes_written += scnprintf(capabilities + bytes_written,
4026 sizeof(capabilities) - bytes_written, "%s%s",
4027 bytes_written ? "," : "",
4028 mpi3mr_capabilities[i].name);
4029 }
4030 }
4031
4032 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4033 protocol, capabilities);
4034 }
4035
4036 /**
4037 * mpi3mr_cleanup_resources - Free PCI resources
4038 * @mrioc: Adapter instance reference
4039 *
4040 * Unmap PCI device memory and disable PCI device.
4041 *
4042 * Return: 0 on success and non-zero on failure.
4043 */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4044 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4045 {
4046 struct pci_dev *pdev = mrioc->pdev;
4047
4048 mpi3mr_cleanup_isr(mrioc);
4049
4050 if (mrioc->sysif_regs) {
4051 iounmap((void __iomem *)mrioc->sysif_regs);
4052 mrioc->sysif_regs = NULL;
4053 }
4054
4055 if (pci_is_enabled(pdev)) {
4056 if (mrioc->bars)
4057 pci_release_selected_regions(pdev, mrioc->bars);
4058 pci_disable_device(pdev);
4059 }
4060 }
4061
/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer a previously negotiated mask; else 64-bit if dma_addr_t allows */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* The controller is MSI-X only; treat a missing capability as fatal */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR; it holds the system interface registers */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	/* Try the wide mask first, then fall back to 32-bit DMA */
	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/*
	 * MSI-X Message Control register is at capability offset + 2;
	 * low 10 bits hold (table size - 1).
	 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* Keep interrupts masked until the ISR is installed */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/*
	 * Reserve poll queues only in a normal boot; two vectors are kept
	 * back for the admin/default queues.
	 */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
		    mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4159
4160 /**
4161 * mpi3mr_enable_events - Enable required events
4162 * @mrioc: Adapter instance reference
4163 *
4164 * This routine unmasks the events required by the driver by
4165 * sennding appropriate event mask bitmapt through an event
4166 * notification request.
4167 *
4168 * Return: 0 on success and non-zero on failure.
4169 */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4170 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4171 {
4172 int retval = 0;
4173 u32 i;
4174
4175 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4176 mrioc->event_masks[i] = -1;
4177
4178 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4179 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4180 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4181 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4182 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4183 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4184 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4185 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4186 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4187 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4188 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4189 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4190 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4191 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4192 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4193
4194 retval = mpi3mr_issue_event_notification(mrioc);
4195 if (retval)
4196 ioc_err(mrioc, "failed to issue event notification %d\n",
4197 retval);
4198 return retval;
4199 }
4200
/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Start with a single vector; the full MSI-X set is set up later */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive driver limits from the facts the firmware reported */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Shrink outstanding I/O count in kdump kernels to save memory */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	/* Diag buffer posting is best-effort; init continues either way */
	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Re-setup the ISR with the full vector set after IOC init */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/*
	 * Transient failures are retried up to twice after clearing all
	 * reusable buffers; allocation/fatal failures skip the retry.
	 */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4392
/**
 * mpi3mr_reinit_ioc - Re-Initialize the controller
 * @mrioc: Adapter instance reference
 * @is_resume: Called from resume or reset path
 *
 * This is the controller re-initialization routine, executed from
 * the soft reset handler or resume callback. Creates
 * operational reply queue pairs, allocate required memory for
 * reply pool, sense buffer pool, issue IOC init request to the
 * firmware, unmask the events and issue port enable to discover
 * SAS/SATA/NVMe devices and RAID volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 pe_timeout, ioc_status;

retry_init:
	/* Port enable is awaited by polling; compute the poll iteration count */
	pe_timeout =
	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);

	dprint_reset(mrioc, "bringing up the controller to ready state\n");
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to bring to ready state\n");
		goto out_failed_noretry;
	}

	mrioc->io_admin_reset_sync = 0;
	/*
	 * After resume or a PCI error the vectors were torn down and a
	 * single ISR must be re-installed; after a plain soft reset the
	 * vectors survived and only need to be re-enabled.
	 */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up single ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "failed to setup ISR\n");
			goto out_failed_noretry;
		}
	} else
		mpi3mr_ioc_enable_intr(mrioc);

	dprint_reset(mrioc, "getting ioc_facts\n");
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "failed to get ioc_facts\n");
		goto out_failed;
	}

	/* Firmware limits must not have shrunk below what is already in use */
	dprint_reset(mrioc, "validating ioc_facts\n");
	retval = mpi3mr_revalidate_factsdata(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	if (is_resume) {
		dprint_reset(mrioc, "posting host diag buffers\n");
		retval = mpi3mr_post_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to post host diag buffers\n");
	} else {
		retval = mpi3mr_repost_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to re post host diag buffers\n");
	}

	dprint_reset(mrioc, "sending ioc_init\n");
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to send ioc_init\n");
		goto out_failed;
	}

	dprint_reset(mrioc, "getting package version\n");
	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Switch from the single bring-up vector to the full MSI-X set */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up multiple ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "failed to re-setup ISR\n");
			goto out_failed_noretry;
		}
	}

	dprint_reset(mrioc, "creating operational queue pairs\n");
	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to create operational queue pairs\n");
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	/* blk-mq already sized hw queues; fewer op queues now is fatal */
	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot create minimum number of operational queues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		retval = -1;
		goto out_failed_noretry;
	}

	dprint_reset(mrioc, "enabling events\n");
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events\n");
		goto out_failed;
	}

	mrioc->device_refresh_on = 1;
	mpi3mr_add_event_wait_for_device_refresh(mrioc);

	ioc_info(mrioc, "sending port enable\n");
	retval = mpi3mr_issue_port_enable(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "failed to issue port enable\n");
		goto out_failed;
	}
	/*
	 * Port enable was issued asynchronously; poll the init_cmds
	 * tracker until completion while watching for device removal,
	 * unrecoverable state, controller fault or reset history.
	 */
	do {
		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
			break;
		if (!pci_device_is_present(mrioc->pdev))
			mrioc->unrecoverable = 1;
		if (mrioc->unrecoverable) {
			retval = -1;
			goto out_failed_noretry;
		}
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(mrioc);
			mrioc->init_cmds.is_waiting = 0;
			mrioc->init_cmds.callback = NULL;
			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
			goto out_failed;
		}
	} while (--pe_timeout);

	if (!pe_timeout) {
		ioc_err(mrioc, "port enable timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		/* Reclaim the tracker that the abandoned callback owned */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		goto out_failed;
	} else if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	ioc_info(mrioc, "controller %s completed successfully\n",
	    (is_resume)?"resume":"re-initialization");
	return retval;
out_failed:
	/* Transient failures: clear buffers and retry up to twice */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
		    (is_resume)?"resume":"re-initialization", retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller %s is failed\n",
	    (is_resume)?"resume":"re-initialization");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4586
4587 /**
4588 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4589 * segments
4590 * @mrioc: Adapter instance reference
4591 * @qidx: Operational reply queue index
4592 *
4593 * Return: Nothing.
4594 */
mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4595 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4596 {
4597 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4598 struct segments *segments;
4599 int i, size;
4600
4601 if (!op_reply_q->q_segments)
4602 return;
4603
4604 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4605 segments = op_reply_q->q_segments;
4606 for (i = 0; i < op_reply_q->num_segments; i++)
4607 memset(segments[i].segment, 0, size);
4608 }
4609
4610 /**
4611 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4612 * segments
4613 * @mrioc: Adapter instance reference
4614 * @qidx: Operational request queue index
4615 *
4616 * Return: Nothing.
4617 */
mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4618 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4619 {
4620 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4621 struct segments *segments;
4622 int i, size;
4623
4624 if (!op_req_q->q_segments)
4625 return;
4626
4627 size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4628 segments = op_req_q->q_segments;
4629 for (i = 0; i < op_req_q->num_segments; i++)
4630 memset(segments[i].segment, 0, size);
4631 }
4632
/**
 * mpi3mr_memset_buffers - memset memory for a controller
 * @mrioc: Adapter instance reference
 *
 * clear all the memory allocated for a controller, typically
 * called post reset to reuse the memory allocated during the
 * controller init.
 *
 * Return: Nothing.
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
	atomic_set(&mrioc->admin_reply_q_in_use, 0);

	/*
	 * The internal command trackers' reply buffers are allocated
	 * together, so checking init_cmds.reply covers all of them.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		/* Drop any stale pending device-removal/event-ack state */
		bitmap_clear(mrioc->removepend_bitmap, 0,
		    mrioc->dev_handle_bitmap_bits);
		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
		    MPI3MR_NUM_EVTACKCMD);
	}

	/* Reset per-queue bookkeeping and wipe the queue segments */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	/* Reset the I/O throttling state for every throttle group */
	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}
4716
4717 /**
4718 * mpi3mr_free_mem - Free memory allocated for a controller
4719 * @mrioc: Adapter instance reference
4720 *
4721 * Free all the memory allocated for a controller.
4722 *
4723 * Return: Nothing.
4724 */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4725 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4726 {
4727 u16 i, j;
4728 struct mpi3mr_intr_info *intr_info;
4729 struct diag_buffer_desc *diag_buffer;
4730
4731 mpi3mr_free_enclosure_list(mrioc);
4732 mpi3mr_free_ioctl_dma_memory(mrioc);
4733
4734 if (mrioc->sense_buf_pool) {
4735 if (mrioc->sense_buf)
4736 dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4737 mrioc->sense_buf_dma);
4738 dma_pool_destroy(mrioc->sense_buf_pool);
4739 mrioc->sense_buf = NULL;
4740 mrioc->sense_buf_pool = NULL;
4741 }
4742 if (mrioc->sense_buf_q_pool) {
4743 if (mrioc->sense_buf_q)
4744 dma_pool_free(mrioc->sense_buf_q_pool,
4745 mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4746 dma_pool_destroy(mrioc->sense_buf_q_pool);
4747 mrioc->sense_buf_q = NULL;
4748 mrioc->sense_buf_q_pool = NULL;
4749 }
4750
4751 if (mrioc->reply_buf_pool) {
4752 if (mrioc->reply_buf)
4753 dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4754 mrioc->reply_buf_dma);
4755 dma_pool_destroy(mrioc->reply_buf_pool);
4756 mrioc->reply_buf = NULL;
4757 mrioc->reply_buf_pool = NULL;
4758 }
4759 if (mrioc->reply_free_q_pool) {
4760 if (mrioc->reply_free_q)
4761 dma_pool_free(mrioc->reply_free_q_pool,
4762 mrioc->reply_free_q, mrioc->reply_free_q_dma);
4763 dma_pool_destroy(mrioc->reply_free_q_pool);
4764 mrioc->reply_free_q = NULL;
4765 mrioc->reply_free_q_pool = NULL;
4766 }
4767
4768 for (i = 0; i < mrioc->num_op_req_q; i++)
4769 mpi3mr_free_op_req_q_segments(mrioc, i);
4770
4771 for (i = 0; i < mrioc->num_op_reply_q; i++)
4772 mpi3mr_free_op_reply_q_segments(mrioc, i);
4773
4774 for (i = 0; i < mrioc->intr_info_count; i++) {
4775 intr_info = mrioc->intr_info + i;
4776 intr_info->op_reply_q = NULL;
4777 }
4778
4779 kfree(mrioc->req_qinfo);
4780 mrioc->req_qinfo = NULL;
4781 mrioc->num_op_req_q = 0;
4782
4783 kfree(mrioc->op_reply_qinfo);
4784 mrioc->op_reply_qinfo = NULL;
4785 mrioc->num_op_reply_q = 0;
4786
4787 kfree(mrioc->init_cmds.reply);
4788 mrioc->init_cmds.reply = NULL;
4789
4790 kfree(mrioc->bsg_cmds.reply);
4791 mrioc->bsg_cmds.reply = NULL;
4792
4793 kfree(mrioc->host_tm_cmds.reply);
4794 mrioc->host_tm_cmds.reply = NULL;
4795
4796 kfree(mrioc->pel_cmds.reply);
4797 mrioc->pel_cmds.reply = NULL;
4798
4799 kfree(mrioc->pel_abort_cmd.reply);
4800 mrioc->pel_abort_cmd.reply = NULL;
4801
4802 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4803 kfree(mrioc->evtack_cmds[i].reply);
4804 mrioc->evtack_cmds[i].reply = NULL;
4805 }
4806
4807 bitmap_free(mrioc->removepend_bitmap);
4808 mrioc->removepend_bitmap = NULL;
4809
4810 bitmap_free(mrioc->devrem_bitmap);
4811 mrioc->devrem_bitmap = NULL;
4812
4813 bitmap_free(mrioc->evtack_cmds_bitmap);
4814 mrioc->evtack_cmds_bitmap = NULL;
4815
4816 bitmap_free(mrioc->chain_bitmap);
4817 mrioc->chain_bitmap = NULL;
4818
4819 kfree(mrioc->transport_cmds.reply);
4820 mrioc->transport_cmds.reply = NULL;
4821
4822 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4823 kfree(mrioc->dev_rmhs_cmds[i].reply);
4824 mrioc->dev_rmhs_cmds[i].reply = NULL;
4825 }
4826
4827 if (mrioc->chain_buf_pool) {
4828 for (i = 0; i < mrioc->chain_buf_count; i++) {
4829 if (mrioc->chain_sgl_list[i].addr) {
4830 dma_pool_free(mrioc->chain_buf_pool,
4831 mrioc->chain_sgl_list[i].addr,
4832 mrioc->chain_sgl_list[i].dma_addr);
4833 mrioc->chain_sgl_list[i].addr = NULL;
4834 }
4835 }
4836 dma_pool_destroy(mrioc->chain_buf_pool);
4837 mrioc->chain_buf_pool = NULL;
4838 }
4839
4840 kfree(mrioc->chain_sgl_list);
4841 mrioc->chain_sgl_list = NULL;
4842
4843 if (mrioc->admin_reply_base) {
4844 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4845 mrioc->admin_reply_base, mrioc->admin_reply_dma);
4846 mrioc->admin_reply_base = NULL;
4847 }
4848 if (mrioc->admin_req_base) {
4849 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4850 mrioc->admin_req_base, mrioc->admin_req_dma);
4851 mrioc->admin_req_base = NULL;
4852 }
4853
4854 if (mrioc->pel_seqnum_virt) {
4855 dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4856 mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4857 mrioc->pel_seqnum_virt = NULL;
4858 }
4859
4860 for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
4861 diag_buffer = &mrioc->diag_buffers[i];
4862 if ((i == 0) && mrioc->seg_tb_support) {
4863 if (mrioc->trace_buf_pool) {
4864 for (j = 0; j < mrioc->num_tb_segs; j++) {
4865 if (mrioc->trace_buf[j].segment) {
4866 dma_pool_free(mrioc->trace_buf_pool,
4867 mrioc->trace_buf[j].segment,
4868 mrioc->trace_buf[j].segment_dma);
4869 mrioc->trace_buf[j].segment = NULL;
4870 }
4871
4872 mrioc->trace_buf[j].segment = NULL;
4873 }
4874 dma_pool_destroy(mrioc->trace_buf_pool);
4875 mrioc->trace_buf_pool = NULL;
4876 }
4877
4878 kfree(mrioc->trace_buf);
4879 mrioc->trace_buf = NULL;
4880 diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
4881 }
4882 if (diag_buffer->addr) {
4883 dma_free_coherent(&mrioc->pdev->dev,
4884 diag_buffer->size, diag_buffer->addr,
4885 diag_buffer->dma_addr);
4886 diag_buffer->addr = NULL;
4887 diag_buffer->size = 0;
4888 diag_buffer->type = 0;
4889 diag_buffer->status = 0;
4890 }
4891 }
4892
4893 kfree(mrioc->throttle_groups);
4894 mrioc->throttle_groups = NULL;
4895
4896 kfree(mrioc->logdata_buf);
4897 mrioc->logdata_buf = NULL;
4898
4899 }
4900
4901 /**
4902 * mpi3mr_issue_ioc_shutdown - shutdown controller
4903 * @mrioc: Adapter instance reference
4904 *
 * Send shutdown notification to the controller and wait for the
4906 * shutdown_timeout for it to be completed.
4907 *
4908 * Return: Nothing.
4909 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	/* retval: 1 until shutdown completion is observed, then 0 */
	u8 retval = 1;
	/* Polled every 100ms below, so timeout counts tenths of a second */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* Skip the request if firmware already reports shutdown in progress */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal shutdown via the IOC configuration register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-advertised shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	/* Poll for shutdown-complete status at 100ms intervals */
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Re-read final register state for the summary log below */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}
4963
4964 /**
4965 * mpi3mr_cleanup_ioc - Cleanup controller
4966 * @mrioc: Adapter instance reference
4967 *
4968 * controller cleanup handler, Message unit reset or soft reset
4969 * and shutdown notification is issued to the controller.
4970 *
4971 * Return: Nothing.
4972 */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)4973 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4974 {
4975 enum mpi3mr_iocstate ioc_state;
4976
4977 dprint_exit(mrioc, "cleaning up the controller\n");
4978 mpi3mr_ioc_disable_intr(mrioc);
4979
4980 ioc_state = mpi3mr_get_iocstate(mrioc);
4981
4982 if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
4983 !mrioc->pci_err_recovery &&
4984 (ioc_state == MRIOC_STATE_READY)) {
4985 if (mpi3mr_issue_and_process_mur(mrioc,
4986 MPI3MR_RESET_FROM_CTLR_CLEANUP))
4987 mpi3mr_issue_reset(mrioc,
4988 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4989 MPI3MR_RESET_FROM_MUR_FAILURE);
4990 mpi3mr_issue_ioc_shutdown(mrioc);
4991 }
4992 dprint_exit(mrioc, "controller cleanup completed\n");
4993 }
4994
4995 /**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4997 * @mrioc: Adapter instance reference
4998 * @cmdptr: Internal command tracker
4999 *
 * Complete an internal driver command with state indicating it
5001 * is completed due to reset.
5002 *
5003 * Return: Nothing.
5004 */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5005 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5006 struct mpi3mr_drv_cmd *cmdptr)
5007 {
5008 if (cmdptr->state & MPI3MR_CMD_PENDING) {
5009 cmdptr->state |= MPI3MR_CMD_RESET;
5010 cmdptr->state &= ~MPI3MR_CMD_PENDING;
5011 if (cmdptr->is_waiting) {
5012 complete(&cmdptr->done);
5013 cmdptr->is_waiting = 0;
5014 } else if (cmdptr->callback)
5015 cmdptr->callback(mrioc, cmdptr);
5016 }
5017 }
5018
5019 /**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
5021 * @mrioc: Adapter instance reference
5022 *
5023 * Flush all internal driver commands post reset
5024 *
5025 * Return: Nothing.
5026 */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5027 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5028 {
5029 struct mpi3mr_drv_cmd *cmdptr;
5030 u8 i;
5031
5032 cmdptr = &mrioc->init_cmds;
5033 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5034
5035 cmdptr = &mrioc->cfg_cmds;
5036 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5037
5038 cmdptr = &mrioc->bsg_cmds;
5039 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5040 cmdptr = &mrioc->host_tm_cmds;
5041 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5042
5043 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5044 cmdptr = &mrioc->dev_rmhs_cmds[i];
5045 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5046 }
5047
5048 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5049 cmdptr = &mrioc->evtack_cmds[i];
5050 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5051 }
5052
5053 cmdptr = &mrioc->pel_cmds;
5054 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5055
5056 cmdptr = &mrioc->pel_abort_cmd;
5057 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5058
5059 cmdptr = &mrioc->transport_cmds;
5060 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5061 }
5062
5063 /**
5064 * mpi3mr_pel_wait_post - Issue PEL Wait
5065 * @mrioc: Adapter instance reference
5066 * @drv_cmd: Internal command tracker
5067 *
5068 * Issue PEL Wait MPI request through admin queue and return.
5069 *
5070 * Return: Nothing.
5071 */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5072 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5073 struct mpi3mr_drv_cmd *drv_cmd)
5074 {
5075 struct mpi3_pel_req_action_wait pel_wait;
5076
5077 mrioc->pel_abort_requested = false;
5078
5079 memset(&pel_wait, 0, sizeof(pel_wait));
5080 drv_cmd->state = MPI3MR_CMD_PENDING;
5081 drv_cmd->is_waiting = 0;
5082 drv_cmd->callback = mpi3mr_pel_wait_complete;
5083 drv_cmd->ioc_status = 0;
5084 drv_cmd->ioc_loginfo = 0;
5085 pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5086 pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5087 pel_wait.action = MPI3_PEL_ACTION_WAIT;
5088 pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5089 pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5090 pel_wait.class = cpu_to_le16(mrioc->pel_class);
5091 pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5092 dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5093 mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5094
5095 if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5096 dprint_bsg_err(mrioc,
5097 "Issuing PELWait: Admin post failed\n");
5098 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5099 drv_cmd->callback = NULL;
5100 drv_cmd->retry_count = 0;
5101 mrioc->pel_enabled = false;
5102 }
5103 }
5104
5105 /**
5106 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5107 * @mrioc: Adapter instance reference
5108 * @drv_cmd: Internal command tracker
5109 *
5110 * Issue PEL get sequence number MPI request through admin queue
5111 * and return.
5112 *
5113 * Return: 0 on success, non-zero on failure.
5114 */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5115 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5116 struct mpi3mr_drv_cmd *drv_cmd)
5117 {
5118 struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5119 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5120 int retval = 0;
5121
5122 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5123 mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5124 mrioc->pel_cmds.is_waiting = 0;
5125 mrioc->pel_cmds.ioc_status = 0;
5126 mrioc->pel_cmds.ioc_loginfo = 0;
5127 mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5128 pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5129 pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5130 pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5131 mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5132 mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5133
5134 retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5135 sizeof(pel_getseq_req), 0);
5136 if (retval) {
5137 if (drv_cmd) {
5138 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5139 drv_cmd->callback = NULL;
5140 drv_cmd->retry_count = 0;
5141 }
5142 mrioc->pel_enabled = false;
5143 }
5144
5145 return retval;
5146 }
5147
5148 /**
5149 * mpi3mr_pel_wait_complete - PELWait Completion callback
5150 * @mrioc: Adapter instance reference
5151 * @drv_cmd: Internal command tracker
5152 *
5153 * This is a callback handler for the PELWait request and
5154 * firmware completes a PELWait request when it is aborted or a
5155 * new PEL entry is available. This sends AEN to the application
5156 * and if the PELwait completion is not due to PELAbort then
5157 * this will send a request for new PEL Sequence number
5158 *
5159 * Return: Nothing.
5160 */
mpi3mr_pel_wait_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5161 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
5162 struct mpi3mr_drv_cmd *drv_cmd)
5163 {
5164 struct mpi3_pel_reply *pel_reply = NULL;
5165 u16 ioc_status, pe_log_status;
5166 bool do_retry = false;
5167
5168 if (drv_cmd->state & MPI3MR_CMD_RESET)
5169 goto cleanup_drv_cmd;
5170
5171 ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5172 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5173 ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
5174 __func__, ioc_status, drv_cmd->ioc_loginfo);
5175 dprint_bsg_err(mrioc,
5176 "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5177 ioc_status, drv_cmd->ioc_loginfo);
5178 do_retry = true;
5179 }
5180
5181 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5182 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5183
5184 if (!pel_reply) {
5185 dprint_bsg_err(mrioc,
5186 "pel_wait: failed due to no reply\n");
5187 goto out_failed;
5188 }
5189
5190 pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
5191 if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
5192 (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
5193 ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
5194 __func__, pe_log_status);
5195 dprint_bsg_err(mrioc,
5196 "pel_wait: failed due to pel_log_status(0x%04x)\n",
5197 pe_log_status);
5198 do_retry = true;
5199 }
5200
5201 if (do_retry) {
5202 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5203 drv_cmd->retry_count++;
5204 dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
5205 drv_cmd->retry_count);
5206 mpi3mr_pel_wait_post(mrioc, drv_cmd);
5207 return;
5208 }
5209 dprint_bsg_err(mrioc,
5210 "pel_wait: failed after all retries(%d)\n",
5211 drv_cmd->retry_count);
5212 goto out_failed;
5213 }
5214 atomic64_inc(&event_counter);
5215 if (!mrioc->pel_abort_requested) {
5216 mrioc->pel_cmds.retry_count = 0;
5217 mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
5218 }
5219
5220 return;
5221 out_failed:
5222 mrioc->pel_enabled = false;
5223 cleanup_drv_cmd:
5224 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5225 drv_cmd->callback = NULL;
5226 drv_cmd->retry_count = 0;
5227 }
5228
5229 /**
5230 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5231 * @mrioc: Adapter instance reference
5232 * @drv_cmd: Internal command tracker
5233 *
5234 * This is a callback handler for the PEL get sequence number
5235 * request and a new PEL wait request will be issued to the
5236 * firmware from this
5237 *
5238 * Return: Nothing.
5239 */
mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5240 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
5241 struct mpi3mr_drv_cmd *drv_cmd)
5242 {
5243 struct mpi3_pel_reply *pel_reply = NULL;
5244 struct mpi3_pel_seq *pel_seqnum_virt;
5245 u16 ioc_status;
5246 bool do_retry = false;
5247
5248 pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
5249
5250 if (drv_cmd->state & MPI3MR_CMD_RESET)
5251 goto cleanup_drv_cmd;
5252
5253 ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5254 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5255 dprint_bsg_err(mrioc,
5256 "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5257 ioc_status, drv_cmd->ioc_loginfo);
5258 do_retry = true;
5259 }
5260
5261 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5262 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5263 if (!pel_reply) {
5264 dprint_bsg_err(mrioc,
5265 "pel_get_seqnum: failed due to no reply\n");
5266 goto out_failed;
5267 }
5268
5269 if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
5270 dprint_bsg_err(mrioc,
5271 "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
5272 le16_to_cpu(pel_reply->pe_log_status));
5273 do_retry = true;
5274 }
5275
5276 if (do_retry) {
5277 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5278 drv_cmd->retry_count++;
5279 dprint_bsg_err(mrioc,
5280 "pel_get_seqnum: retrying(%d)\n",
5281 drv_cmd->retry_count);
5282 mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
5283 return;
5284 }
5285
5286 dprint_bsg_err(mrioc,
5287 "pel_get_seqnum: failed after all retries(%d)\n",
5288 drv_cmd->retry_count);
5289 goto out_failed;
5290 }
5291 mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
5292 drv_cmd->retry_count = 0;
5293 mpi3mr_pel_wait_post(mrioc, drv_cmd);
5294
5295 return;
5296 out_failed:
5297 mrioc->pel_enabled = false;
5298 cleanup_drv_cmd:
5299 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5300 drv_cmd->callback = NULL;
5301 drv_cmd->retry_count = 0;
5302 }
5303
5304 /**
 * mpi3mr_check_op_admin_proc - Wait for reply queues to quiesce
5306 * @mrioc: Adapter instance reference
5307 *
5308 * Check if any of the operation reply queues
5309 * or the admin reply queue are currently in use.
5310 * If any queue is in use, this function waits for
5311 * a maximum of 10 seconds for them to become available.
5312 *
5313 * Return: 0 on success, non-zero on failure.
5314 */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5315 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5316 {
5317
5318 u16 timeout = 10 * 10;
5319 u16 elapsed_time = 0;
5320 bool op_admin_in_use = false;
5321
5322 do {
5323 op_admin_in_use = false;
5324
5325 /* Check admin_reply queue first to exit early */
5326 if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5327 op_admin_in_use = true;
5328 else {
5329 /* Check op_reply queues */
5330 int i;
5331
5332 for (i = 0; i < mrioc->num_queues; i++) {
5333 if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5334 op_admin_in_use = true;
5335 break;
5336 }
5337 }
5338 }
5339
5340 if (!op_admin_in_use)
5341 break;
5342
5343 msleep(100);
5344
5345 } while (++elapsed_time < timeout);
5346
5347 if (op_admin_in_use)
5348 return 1;
5349
5350 return 0;
5351 }
5352
5353 /**
5354 * mpi3mr_soft_reset_handler - Reset the controller
5355 * @mrioc: Adapter instance reference
5356 * @reset_reason: Reset reason code
5357 * @snapdump: Flag to generate snapdump in firmware or not
5358 *
 * This is a handler for recovering the controller by issuing a soft
 * reset or a diag fault reset. This is a blocking function and
5361 * when one reset is executed if any other resets they will be
5362 * blocked. All BSG requests will be blocked during the reset. If
5363 * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
5365 * recoverable
5366 *
 * If the snapdump bit is set, the controller is issued with diag
5368 * fault reset so that the firmware can create a snap dump and
5369 * post that the firmware will result in F000 fault and the
5370 * driver will issue soft reset to recover from that.
5371 *
5372 * Return: 0 on success, non-zero on failure.
5373 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Mark reset in progress and block BSG traffic for its duration */
	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated (non-fault, non-snapdump) resets: release host
	 * diagnostic buffers and mask all events before tearing down.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	/*
	 * Snapdump requested: issue a diag fault reset first so the firmware
	 * captures a snapdump, then poll until the diag save completes.
	 */
	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
			    MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Ensure no reply-processing thread is still running post reset */
	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
			"thread still processing replies even after a 10 second\n"
			"timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush all outstanding driver commands, host I/O and firmware
	 * events; invalidate device handles before reinitialization.
	 */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
	    mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Allow topology to settle before resuming normal operation */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: resume BSG, PEL waiting and the watchdog */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure: fault the firmware and mark unrecoverable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5552
5553 /**
5554 * mpi3mr_post_cfg_req - Issue config requests and wait
5555 * @mrioc: Adapter instance reference
5556 * @cfg_req: Configuration request
5557 * @timeout: Timeout in seconds
5558 * @ioc_status: Pointer to return ioc status
5559 *
5560 * A generic function for posting MPI3 configuration request to
5561 * the firmware. This blocks for the completion of request for
5562 * timeout seconds and if the request times out this function
5563 * faults the controller with proper reason code.
5564 *
5565 * On successful completion of the request this function returns
5566 * appropriate ioc status from the firmware back to the caller.
5567 *
5568 * Return: 0 on success, non-zero on failure.
5569 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* Serialize: only one config request may be outstanding at a time */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Arm the tracker for a synchronous (is_waiting) completion */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller with a specific reason */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc status (masked) back to the caller */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5622
5623 /**
5624 * mpi3mr_process_cfg_req - config page request processor
5625 * @mrioc: Adapter instance reference
5626 * @cfg_req: Configuration request
5627 * @cfg_hdr: Configuration page header
5628 * @timeout: Timeout in seconds
5629 * @ioc_status: Pointer to return ioc status
5630 * @cfg_buf: Memory pointer to copy config page or header
5631 * @cfg_buf_sz: Size of the memory to get config page or header
5632 *
5633 * This is handler for config page read, write and config page
5634 * header read operations.
5635 *
5636 * This function expects the cfg_req to be populated with page
5637 * type, page number, action for the header read and with page
5638 * address for all other operations.
5639 *
5640 * The cfg_hdr can be passed as null for reading required header
5641 * details for read/write pages the cfg_hdr should point valid
5642 * configuration page header.
5643 *
5644 * This allocates dmaable memory based on the size of the config
5645 * buffer and set the SGE of the cfg_req.
5646 *
5647 * For write actions, the config page data has to be passed in
5648 * the cfg_buf and size of the data has to be mentioned in the
5649 * cfg_buf_sz.
5650 *
5651 * For read/header actions, on successful completion of the
5652 * request with successful ioc_status the data will be copied
5653 * into the cfg_buf limited to a minimum of actual page size and
5654 * cfg_buf_sz
5655 *
5656 *
5657 * Return: 0 on success, non-zero on failure.
5658 */
mpi3mr_process_cfg_req(struct mpi3mr_ioc * mrioc,struct mpi3_config_request * cfg_req,struct mpi3_config_page_header * cfg_hdr,int timeout,u16 * ioc_status,void * cfg_buf,u32 cfg_buf_sz)5659 static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
5660 struct mpi3_config_request *cfg_req,
5661 struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
5662 void *cfg_buf, u32 cfg_buf_sz)
5663 {
5664 struct dma_memory_desc mem_desc;
5665 int retval = -1;
5666 u8 invalid_action = 0;
5667 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5668
5669 memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
5670
5671 if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
5672 mem_desc.size = sizeof(struct mpi3_config_page_header);
5673 else {
5674 if (!cfg_hdr) {
5675 ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
5676 cfg_req->action, cfg_req->page_type,
5677 cfg_req->page_number);
5678 goto out;
5679 }
5680 switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
5681 case MPI3_CONFIG_PAGEATTR_READ_ONLY:
5682 if (cfg_req->action
5683 != MPI3_CONFIG_ACTION_READ_CURRENT)
5684 invalid_action = 1;
5685 break;
5686 case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
5687 if ((cfg_req->action ==
5688 MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
5689 (cfg_req->action ==
5690 MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
5691 invalid_action = 1;
5692 break;
5693 case MPI3_CONFIG_PAGEATTR_PERSISTENT:
5694 default:
5695 break;
5696 }
5697 if (invalid_action) {
5698 ioc_err(mrioc,
5699 "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5700 cfg_req->action, cfg_req->page_type,
5701 cfg_req->page_number, cfg_hdr->page_attribute);
5702 goto out;
5703 }
5704 mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5705 cfg_req->page_length = cfg_hdr->page_length;
5706 cfg_req->page_version = cfg_hdr->page_version;
5707 }
5708
5709 mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
5710 mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);
5711
5712 if (!mem_desc.addr)
5713 return retval;
5714
5715 mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5716 mem_desc.dma_addr);
5717
5718 if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5719 (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5720 memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5721 cfg_buf_sz));
5722 dprint_cfg_info(mrioc, "config buffer to be written\n");
5723 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5724 dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5725 }
5726
5727 if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5728 goto out;
5729
5730 retval = 0;
5731 if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5732 (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5733 (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5734 memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5735 cfg_buf_sz));
5736 dprint_cfg_info(mrioc, "config buffer read\n");
5737 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5738 dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5739 }
5740
5741 out:
5742 if (mem_desc.addr) {
5743 dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
5744 mem_desc.addr, mem_desc.dma_addr);
5745 mem_desc.addr = NULL;
5746 }
5747
5748 return retval;
5749 }
5750
5751 /**
5752 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5753 * @mrioc: Adapter instance reference
5754 * @ioc_status: Pointer to return ioc status
5755 * @dev_pg0: Pointer to return device page 0
5756 * @pg_sz: Size of the memory allocated to the page pointer
5757 * @form: The form to be used for addressing the page
5758 * @form_spec: Form specific information like device handle
5759 *
5760 * This is handler for config page read for a specific device
5761 * page0. The ioc_status has the controller returned ioc_status.
5762 * This routine doesn't check ioc_status to decide whether the
5763 * page read is success or not and it is the callers
5764 * responsibility.
5765 *
5766 * Return: 0 on success, non-zero on failure.
5767 */
mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_device_page0 * dev_pg0,u16 pg_sz,u32 form,u32 form_spec)5768 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5769 struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5770 {
5771 struct mpi3_config_page_header cfg_hdr;
5772 struct mpi3_config_request cfg_req;
5773 u32 page_address;
5774
5775 memset(dev_pg0, 0, pg_sz);
5776 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5777 memset(&cfg_req, 0, sizeof(cfg_req));
5778
5779 cfg_req.function = MPI3_FUNCTION_CONFIG;
5780 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5781 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5782 cfg_req.page_number = 0;
5783 cfg_req.page_address = 0;
5784
5785 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5786 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5787 ioc_err(mrioc, "device page0 header read failed\n");
5788 goto out_failed;
5789 }
5790 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5791 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5792 *ioc_status);
5793 goto out_failed;
5794 }
5795 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5796 page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5797 (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5798 cfg_req.page_address = cpu_to_le32(page_address);
5799 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5800 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5801 ioc_err(mrioc, "device page0 read failed\n");
5802 goto out_failed;
5803 }
5804 return 0;
5805 out_failed:
5806 return -1;
5807 }
5808
5809
5810 /**
5811 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5812 * @mrioc: Adapter instance reference
5813 * @ioc_status: Pointer to return ioc status
5814 * @phy_pg0: Pointer to return SAS Phy page 0
5815 * @pg_sz: Size of the memory allocated to the page pointer
5816 * @form: The form to be used for addressing the page
5817 * @form_spec: Form specific information like phy number
5818 *
5819 * This is handler for config page read for a specific SAS Phy
5820 * page0. The ioc_status has the controller returned ioc_status.
5821 * This routine doesn't check ioc_status to decide whether the
5822 * page read is success or not and it is the callers
5823 * responsibility.
5824 *
5825 * Return: 0 on success, non-zero on failure.
5826 */
mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page0 * phy_pg0,u16 pg_sz,u32 form,u32 form_spec)5827 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5828 struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5829 u32 form_spec)
5830 {
5831 struct mpi3_config_page_header cfg_hdr;
5832 struct mpi3_config_request cfg_req;
5833 u32 page_address;
5834
5835 memset(phy_pg0, 0, pg_sz);
5836 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5837 memset(&cfg_req, 0, sizeof(cfg_req));
5838
5839 cfg_req.function = MPI3_FUNCTION_CONFIG;
5840 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5841 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5842 cfg_req.page_number = 0;
5843 cfg_req.page_address = 0;
5844
5845 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5846 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5847 ioc_err(mrioc, "sas phy page0 header read failed\n");
5848 goto out_failed;
5849 }
5850 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5851 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
5852 *ioc_status);
5853 goto out_failed;
5854 }
5855 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5856 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5857 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5858 cfg_req.page_address = cpu_to_le32(page_address);
5859 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5860 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
5861 ioc_err(mrioc, "sas phy page0 read failed\n");
5862 goto out_failed;
5863 }
5864 return 0;
5865 out_failed:
5866 return -1;
5867 }
5868
5869 /**
5870 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5871 * @mrioc: Adapter instance reference
5872 * @ioc_status: Pointer to return ioc status
5873 * @phy_pg1: Pointer to return SAS Phy page 1
5874 * @pg_sz: Size of the memory allocated to the page pointer
5875 * @form: The form to be used for addressing the page
5876 * @form_spec: Form specific information like phy number
5877 *
5878 * This is handler for config page read for a specific SAS Phy
5879 * page1. The ioc_status has the controller returned ioc_status.
5880 * This routine doesn't check ioc_status to decide whether the
5881 * page read is success or not and it is the callers
5882 * responsibility.
5883 *
5884 * Return: 0 on success, non-zero on failure.
5885 */
mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page1 * phy_pg1,u16 pg_sz,u32 form,u32 form_spec)5886 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5887 struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
5888 u32 form_spec)
5889 {
5890 struct mpi3_config_page_header cfg_hdr;
5891 struct mpi3_config_request cfg_req;
5892 u32 page_address;
5893
5894 memset(phy_pg1, 0, pg_sz);
5895 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5896 memset(&cfg_req, 0, sizeof(cfg_req));
5897
5898 cfg_req.function = MPI3_FUNCTION_CONFIG;
5899 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5900 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5901 cfg_req.page_number = 1;
5902 cfg_req.page_address = 0;
5903
5904 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5905 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5906 ioc_err(mrioc, "sas phy page1 header read failed\n");
5907 goto out_failed;
5908 }
5909 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5910 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
5911 *ioc_status);
5912 goto out_failed;
5913 }
5914 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5915 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5916 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5917 cfg_req.page_address = cpu_to_le32(page_address);
5918 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5919 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
5920 ioc_err(mrioc, "sas phy page1 read failed\n");
5921 goto out_failed;
5922 }
5923 return 0;
5924 out_failed:
5925 return -1;
5926 }
5927
5928
5929 /**
5930 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5931 * @mrioc: Adapter instance reference
5932 * @ioc_status: Pointer to return ioc status
5933 * @exp_pg0: Pointer to return SAS Expander page 0
5934 * @pg_sz: Size of the memory allocated to the page pointer
5935 * @form: The form to be used for addressing the page
5936 * @form_spec: Form specific information like device handle
5937 *
5938 * This is handler for config page read for a specific SAS
5939 * Expander page0. The ioc_status has the controller returned
5940 * ioc_status. This routine doesn't check ioc_status to decide
5941 * whether the page read is success or not and it is the callers
5942 * responsibility.
5943 *
5944 * Return: 0 on success, non-zero on failure.
5945 */
mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_expander_page0 * exp_pg0,u16 pg_sz,u32 form,u32 form_spec)5946 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5947 struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
5948 u32 form_spec)
5949 {
5950 struct mpi3_config_page_header cfg_hdr;
5951 struct mpi3_config_request cfg_req;
5952 u32 page_address;
5953
5954 memset(exp_pg0, 0, pg_sz);
5955 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5956 memset(&cfg_req, 0, sizeof(cfg_req));
5957
5958 cfg_req.function = MPI3_FUNCTION_CONFIG;
5959 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5960 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5961 cfg_req.page_number = 0;
5962 cfg_req.page_address = 0;
5963
5964 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5965 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5966 ioc_err(mrioc, "expander page0 header read failed\n");
5967 goto out_failed;
5968 }
5969 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5970 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
5971 *ioc_status);
5972 goto out_failed;
5973 }
5974 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5975 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5976 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5977 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5978 cfg_req.page_address = cpu_to_le32(page_address);
5979 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5980 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
5981 ioc_err(mrioc, "expander page0 read failed\n");
5982 goto out_failed;
5983 }
5984 return 0;
5985 out_failed:
5986 return -1;
5987 }
5988
5989 /**
5990 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
5991 * @mrioc: Adapter instance reference
5992 * @ioc_status: Pointer to return ioc status
5993 * @exp_pg1: Pointer to return SAS Expander page 1
5994 * @pg_sz: Size of the memory allocated to the page pointer
5995 * @form: The form to be used for addressing the page
5996 * @form_spec: Form specific information like phy number
5997 *
5998 * This is handler for config page read for a specific SAS
5999 * Expander page1. The ioc_status has the controller returned
6000 * ioc_status. This routine doesn't check ioc_status to decide
6001 * whether the page read is success or not and it is the callers
6002 * responsibility.
6003 *
6004 * Return: 0 on success, non-zero on failure.
6005 */
mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_expander_page1 * exp_pg1,u16 pg_sz,u32 form,u32 form_spec)6006 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6007 struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
6008 u32 form_spec)
6009 {
6010 struct mpi3_config_page_header cfg_hdr;
6011 struct mpi3_config_request cfg_req;
6012 u32 page_address;
6013
6014 memset(exp_pg1, 0, pg_sz);
6015 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6016 memset(&cfg_req, 0, sizeof(cfg_req));
6017
6018 cfg_req.function = MPI3_FUNCTION_CONFIG;
6019 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6020 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6021 cfg_req.page_number = 1;
6022 cfg_req.page_address = 0;
6023
6024 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6025 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6026 ioc_err(mrioc, "expander page1 header read failed\n");
6027 goto out_failed;
6028 }
6029 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6030 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
6031 *ioc_status);
6032 goto out_failed;
6033 }
6034 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6035 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6036 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6037 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6038 cfg_req.page_address = cpu_to_le32(page_address);
6039 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6040 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
6041 ioc_err(mrioc, "expander page1 read failed\n");
6042 goto out_failed;
6043 }
6044 return 0;
6045 out_failed:
6046 return -1;
6047 }
6048
6049 /**
6050 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6051 * @mrioc: Adapter instance reference
6052 * @ioc_status: Pointer to return ioc status
6053 * @encl_pg0: Pointer to return Enclosure page 0
6054 * @pg_sz: Size of the memory allocated to the page pointer
6055 * @form: The form to be used for addressing the page
6056 * @form_spec: Form specific information like device handle
6057 *
6058 * This is handler for config page read for a specific Enclosure
6059 * page0. The ioc_status has the controller returned ioc_status.
6060 * This routine doesn't check ioc_status to decide whether the
6061 * page read is success or not and it is the callers
6062 * responsibility.
6063 *
6064 * Return: 0 on success, non-zero on failure.
6065 */
mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_enclosure_page0 * encl_pg0,u16 pg_sz,u32 form,u32 form_spec)6066 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6067 struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
6068 u32 form_spec)
6069 {
6070 struct mpi3_config_page_header cfg_hdr;
6071 struct mpi3_config_request cfg_req;
6072 u32 page_address;
6073
6074 memset(encl_pg0, 0, pg_sz);
6075 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6076 memset(&cfg_req, 0, sizeof(cfg_req));
6077
6078 cfg_req.function = MPI3_FUNCTION_CONFIG;
6079 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6080 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
6081 cfg_req.page_number = 0;
6082 cfg_req.page_address = 0;
6083
6084 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6085 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6086 ioc_err(mrioc, "enclosure page0 header read failed\n");
6087 goto out_failed;
6088 }
6089 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6090 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
6091 *ioc_status);
6092 goto out_failed;
6093 }
6094 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6095 page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
6096 (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
6097 cfg_req.page_address = cpu_to_le32(page_address);
6098 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6099 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
6100 ioc_err(mrioc, "enclosure page0 read failed\n");
6101 goto out_failed;
6102 }
6103 return 0;
6104 out_failed:
6105 return -1;
6106 }
6107
6108
6109 /**
6110 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6111 * @mrioc: Adapter instance reference
6112 * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6113 * @pg_sz: Size of the memory allocated to the page pointer
6114 *
6115 * This is handler for config page read for the SAS IO Unit
6116 * page0. This routine checks ioc_status to decide whether the
6117 * page read is success or not.
6118 *
6119 * Return: 0 on success, non-zero on failure.
6120 */
mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page0 * sas_io_unit_pg0,u16 pg_sz)6121 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
6122 struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
6123 {
6124 struct mpi3_config_page_header cfg_hdr;
6125 struct mpi3_config_request cfg_req;
6126 u16 ioc_status = 0;
6127
6128 memset(sas_io_unit_pg0, 0, pg_sz);
6129 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6130 memset(&cfg_req, 0, sizeof(cfg_req));
6131
6132 cfg_req.function = MPI3_FUNCTION_CONFIG;
6133 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6134 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6135 cfg_req.page_number = 0;
6136 cfg_req.page_address = 0;
6137
6138 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6139 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6140 ioc_err(mrioc, "sas io unit page0 header read failed\n");
6141 goto out_failed;
6142 }
6143 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6144 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
6145 ioc_status);
6146 goto out_failed;
6147 }
6148 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6149
6150 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6151 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
6152 ioc_err(mrioc, "sas io unit page0 read failed\n");
6153 goto out_failed;
6154 }
6155 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6156 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
6157 ioc_status);
6158 goto out_failed;
6159 }
6160 return 0;
6161 out_failed:
6162 return -1;
6163 }
6164
6165 /**
6166 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6167 * @mrioc: Adapter instance reference
6168 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6169 * @pg_sz: Size of the memory allocated to the page pointer
6170 *
6171 * This is handler for config page read for the SAS IO Unit
6172 * page1. This routine checks ioc_status to decide whether the
6173 * page read is success or not.
6174 *
6175 * Return: 0 on success, non-zero on failure.
6176 */
mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6177 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6178 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6179 {
6180 struct mpi3_config_page_header cfg_hdr;
6181 struct mpi3_config_request cfg_req;
6182 u16 ioc_status = 0;
6183
6184 memset(sas_io_unit_pg1, 0, pg_sz);
6185 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6186 memset(&cfg_req, 0, sizeof(cfg_req));
6187
6188 cfg_req.function = MPI3_FUNCTION_CONFIG;
6189 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6190 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6191 cfg_req.page_number = 1;
6192 cfg_req.page_address = 0;
6193
6194 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6195 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6196 ioc_err(mrioc, "sas io unit page1 header read failed\n");
6197 goto out_failed;
6198 }
6199 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6200 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6201 ioc_status);
6202 goto out_failed;
6203 }
6204 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6205
6206 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6207 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6208 ioc_err(mrioc, "sas io unit page1 read failed\n");
6209 goto out_failed;
6210 }
6211 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6212 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6213 ioc_status);
6214 goto out_failed;
6215 }
6216 return 0;
6217 out_failed:
6218 return -1;
6219 }
6220
6221 /**
6222 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6223 * @mrioc: Adapter instance reference
6224 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6225 * @pg_sz: Size of the memory allocated to the page pointer
6226 *
 * This is handler for config page write for the SAS IO Unit
 * page1. This routine checks ioc_status to decide whether the
 * page write is success or not. This will modify both current
 * and persistent page.
6231 *
6232 * Return: 0 on success, non-zero on failure.
6233 */
mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6234 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6235 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6236 {
6237 struct mpi3_config_page_header cfg_hdr;
6238 struct mpi3_config_request cfg_req;
6239 u16 ioc_status = 0;
6240
6241 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6242 memset(&cfg_req, 0, sizeof(cfg_req));
6243
6244 cfg_req.function = MPI3_FUNCTION_CONFIG;
6245 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6246 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6247 cfg_req.page_number = 1;
6248 cfg_req.page_address = 0;
6249
6250 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6251 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6252 ioc_err(mrioc, "sas io unit page1 header read failed\n");
6253 goto out_failed;
6254 }
6255 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6256 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6257 ioc_status);
6258 goto out_failed;
6259 }
6260 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6261
6262 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6263 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6264 ioc_err(mrioc, "sas io unit page1 write current failed\n");
6265 goto out_failed;
6266 }
6267 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6268 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6269 ioc_status);
6270 goto out_failed;
6271 }
6272
6273 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6274
6275 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6276 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6277 ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6278 goto out_failed;
6279 }
6280 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6281 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6282 ioc_status);
6283 goto out_failed;
6284 }
6285 return 0;
6286 out_failed:
6287 return -1;
6288 }
6289
6290 /**
6291 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6292 * @mrioc: Adapter instance reference
6293 * @driver_pg1: Pointer to return Driver page 1
6294 * @pg_sz: Size of the memory allocated to the page pointer
6295 *
6296 * This is handler for config page read for the Driver page1.
6297 * This routine checks ioc_status to decide whether the page
6298 * read is success or not.
6299 *
6300 * Return: 0 on success, non-zero on failure.
6301 */
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page1 * driver_pg1,u16 pg_sz)6302 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6303 struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6304 {
6305 struct mpi3_config_page_header cfg_hdr;
6306 struct mpi3_config_request cfg_req;
6307 u16 ioc_status = 0;
6308
6309 memset(driver_pg1, 0, pg_sz);
6310 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6311 memset(&cfg_req, 0, sizeof(cfg_req));
6312
6313 cfg_req.function = MPI3_FUNCTION_CONFIG;
6314 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6315 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6316 cfg_req.page_number = 1;
6317 cfg_req.page_address = 0;
6318
6319 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6320 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6321 ioc_err(mrioc, "driver page1 header read failed\n");
6322 goto out_failed;
6323 }
6324 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6325 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6326 ioc_status);
6327 goto out_failed;
6328 }
6329 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6330
6331 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6332 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6333 ioc_err(mrioc, "driver page1 read failed\n");
6334 goto out_failed;
6335 }
6336 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6337 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6338 ioc_status);
6339 goto out_failed;
6340 }
6341 return 0;
6342 out_failed:
6343 return -1;
6344 }
6345
6346 /**
6347 * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6348 * @mrioc: Adapter instance reference
6349 * @driver_pg2: Pointer to return driver page 2
6350 * @pg_sz: Size of the memory allocated to the page pointer
6351 * @page_action: Page action
6352 *
6353 * This is handler for config page read for the driver page2.
6354 * This routine checks ioc_status to decide whether the page
6355 * read is success or not.
6356 *
6357 * Return: 0 on success, non-zero on failure.
6358 */
mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page2 * driver_pg2,u16 pg_sz,u8 page_action)6359 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6360 struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6361 {
6362 struct mpi3_config_page_header cfg_hdr;
6363 struct mpi3_config_request cfg_req;
6364 u16 ioc_status = 0;
6365
6366 memset(driver_pg2, 0, pg_sz);
6367 memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6368 memset(&cfg_req, 0, sizeof(cfg_req));
6369
6370 cfg_req.function = MPI3_FUNCTION_CONFIG;
6371 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6372 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6373 cfg_req.page_number = 2;
6374 cfg_req.page_address = 0;
6375 cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6376
6377 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6378 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6379 ioc_err(mrioc, "driver page2 header read failed\n");
6380 goto out_failed;
6381 }
6382 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6383 ioc_err(mrioc, "driver page2 header read failed with\n"
6384 "ioc_status(0x%04x)\n",
6385 ioc_status);
6386 goto out_failed;
6387 }
6388 cfg_req.action = page_action;
6389
6390 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6391 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6392 ioc_err(mrioc, "driver page2 read failed\n");
6393 goto out_failed;
6394 }
6395 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6396 ioc_err(mrioc, "driver page2 read failed with\n"
6397 "ioc_status(0x%04x)\n",
6398 ioc_status);
6399 goto out_failed;
6400 }
6401 return 0;
6402 out_failed:
6403 return -1;
6404 }
6405
6406