1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for Broadcom MPI3 Storage Controllers
4 *
5 * Copyright (C) 2017-2023 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7 *
8 */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
/* Forward declarations for routines defined later in this file */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);

/* Number of io_uring poll-mode queues; read-only module parameter */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24
#if defined(writeq) && defined(CONFIG_64BIT)
/*
 * mpi3mr_writeq - write a 64-bit value to a controller register
 * @b: 64-bit value to write
 * @addr: register address
 * @write_queue_lock: unused here; the native writeq is a single access
 *
 * On 64-bit platforms with a native writeq, the MMIO write is one
 * indivisible access, so no serialization is needed.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/*
 * mpi3mr_writeq - write a 64-bit value to a controller register
 * @b: 64-bit value to write
 * @addr: register address
 * @write_queue_lock: lock serializing the two-part write
 *
 * Fallback for platforms without writeq: emulate the 64-bit write as
 * two 32-bit writes (low dword first) under @write_queue_lock with
 * interrupts disabled, so concurrent writers cannot interleave the
 * two halves.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
44
45 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)46 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
47 {
48 u16 pi, ci, max_entries;
49 bool is_qfull = false;
50
51 pi = op_req_q->pi;
52 ci = READ_ONCE(op_req_q->ci);
53 max_entries = op_req_q->num_requests;
54
55 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
56 is_qfull = true;
57
58 return is_qfull;
59 }
60
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)61 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
62 {
63 u16 i, max_vectors;
64
65 max_vectors = mrioc->intr_info_count;
66
67 for (i = 0; i < max_vectors; i++)
68 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
69 }
70
/*
 * mpi3mr_ioc_disable_intr - mark interrupts disabled and drain handlers
 * @mrioc: Adapter instance reference
 *
 * Clears the driver-level enable flag (the ISR bails out when it is 0)
 * and then waits for any handler already running to finish.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
76
/*
 * mpi3mr_ioc_enable_intr - allow interrupt processing
 * @mrioc: Adapter instance reference
 *
 * Sets the driver-level flag consulted by the ISR before it processes
 * any reply queue.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
81
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)82 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
83 {
84 u16 i;
85
86 mpi3mr_ioc_disable_intr(mrioc);
87
88 if (!mrioc->intr_info)
89 return;
90
91 for (i = 0; i < mrioc->intr_info_count; i++)
92 free_irq(pci_irq_vector(mrioc->pdev, i),
93 (mrioc->intr_info + i));
94
95 kfree(mrioc->intr_info);
96 mrioc->intr_info = NULL;
97 mrioc->intr_info_count = 0;
98 mrioc->is_intr_info_set = false;
99 pci_free_irq_vectors(mrioc->pdev);
100 }
101
/*
 * mpi3mr_add_sg_single - populate one simple SGE
 * @paddr: virtual address of the SGE to fill
 * @flags: SGE flags
 * @length: data length in bytes
 * @dma_addr: DMA address of the data buffer
 *
 * Writes a single MPI3 common scatter-gather element in little-endian
 * wire format.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = paddr;

	sge->address = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(length);
	sge->flags = flags;
}
111
mpi3mr_build_zero_len_sge(void * paddr)112 void mpi3mr_build_zero_len_sge(void *paddr)
113 {
114 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
115
116 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
117 }
118
/*
 * mpi3mr_get_reply_virt_addr - translate a reply frame DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the firmware
 *
 * Return: virtual address of the reply frame within the reply buffer
 * pool, or NULL if @phys_addr is zero or outside the pool.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	/* Only translate addresses that fall inside the reply pool */
	if (phys_addr >= mrioc->reply_buf_dma &&
	    phys_addr <= mrioc->reply_buf_dma_max_address)
		return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);

	return NULL;
}
131
/*
 * mpi3mr_get_sensebuf_virt_addr - translate a sense buffer DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of the sense buffer (0 means no sense data)
 *
 * Return: virtual address of the sense buffer, or NULL when
 * @phys_addr is zero.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}
140
/*
 * mpi3mr_repost_reply_buf - return a reply frame to the firmware
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply frame
 *
 * Places the frame back on the reply free queue and publishes the new
 * host index to the controller, all under the reply free queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	slot = mrioc->reply_free_queue_host_index;
	/* advance the host index with wrap-around */
	if (slot == (mrioc->reply_free_qsz - 1))
		mrioc->reply_free_queue_host_index = 0;
	else
		mrioc->reply_free_queue_host_index = slot + 1;
	mrioc->reply_free_q[slot] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
158
/*
 * mpi3mr_repost_sense_buf - return a sense buffer to the firmware
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Places the buffer back on the sense buffer free queue and publishes
 * the new host index to the controller, under the sense queue lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	slot = mrioc->sbq_host_index;
	/* advance the host index with wrap-around */
	if (slot == (mrioc->sense_buf_q_sz - 1))
		mrioc->sbq_host_index = 0;
	else
		mrioc->sbq_host_index = slot + 1;
	mrioc->sense_buf_q[slot] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
175
/**
 * mpi3mr_print_event_data - Display event details for debugging
 * @mrioc: Adapter instance reference
 * @event_reply: Event notification reply frame
 *
 * When MPI3_DEBUG_EVENT logging is enabled, logs a human-readable
 * description of the received event. Events with decoded payloads
 * (device add/info/status change, SAS discovery, PCIe enumeration)
 * print fields from their event data and return directly; all others
 * print just the event name.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	/* event logging is opt-in via the adapter's logging level */
	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
		return;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* events not handled above have no description to print */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
295
mpi3mr_handle_events(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply * def_reply)296 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
297 struct mpi3_default_reply *def_reply)
298 {
299 struct mpi3_event_notification_reply *event_reply =
300 (struct mpi3_event_notification_reply *)def_reply;
301
302 mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
303 mpi3mr_print_event_data(mrioc, event_reply);
304 mpi3mr_os_handle_events(mrioc, event_reply);
305 }
306
/**
 * mpi3mr_get_drv_cmd - Map a host tag to its driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag from the reply descriptor
 * @def_reply: Reply frame, used only to dispatch unsolicited event
 *             notifications arriving with the invalid host tag
 *
 * Resolves well-known host tags to their dedicated internal command
 * trackers, and device-removal / event-ack tag ranges to the
 * corresponding tracker array slots. Event notification replies that
 * carry MPI3MR_HOSTTAG_INVALID are handed to mpi3mr_handle_events().
 *
 * Return: Pointer to the matching tracker, or NULL if none applies.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		/* unsolicited reply: only events need handling here */
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	/* device removal handshake commands occupy a dedicated tag range */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	/* event acknowledgment commands occupy a dedicated tag range */
	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}
350
/**
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Reply descriptor to decode
 * @reply_dma: Output; DMA address of the full reply frame if the
 *             descriptor is an address reply, else left at 0 so the
 *             caller knows there is no frame to repost
 *
 * Decodes the descriptor by type (status / address reply / success),
 * extracting the host tag, IOC status, log info and — for SCSI IO
 * address replies — the sense data. Then completes the matching
 * internal driver command: copies reply/sense data into the tracker
 * and either wakes the waiter or invokes the tracker's callback.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		/* log info is only valid when the flag bit says so */
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may carry a separate sense buffer */
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG commands keep the unmasked IOC status */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSE_BUF_SZ);
			}
			if (cmdptr->is_waiting) {
				cmdptr->is_waiting = 0;
				complete(&cmdptr->done);
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* return the consumed sense buffer to the firmware free pool */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
447
/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drains the admin reply queue: each posted descriptor (identified by
 * a matching phase bit) is processed and its reply frame reposted,
 * until the queue is empty. The consumer index register is updated
 * periodically and once more at the end.
 *
 * Return: Number of reply descriptors processed; 0 if the queue is
 * empty or another context is already draining it.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* single-drainer guard; note the missed wakeup for later */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
		atomic_inc(&mrioc->admin_pend_isr);
		return 0;
	}

	atomic_set(&mrioc->admin_pend_isr, 0);
	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* phase mismatch at the CI means nothing new is posted */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* wrap the CI and flip the expected phase at queue end */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* periodically publish the CI so firmware can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
506
507 /**
508 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
509 * queue's consumer index from operational reply descriptor queue.
510 * @op_reply_q: op_reply_qinfo object
511 * @reply_ci: operational reply descriptor's queue consumer index
512 *
513 * Returns: reply descriptor frame address
514 */
515 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)516 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
517 {
518 void *segment_base_addr;
519 struct segments *segments = op_reply_q->q_segments;
520 struct mpi3_default_reply_descriptor *reply_desc = NULL;
521
522 segment_base_addr =
523 segments[reply_ci / op_reply_q->segment_qd].segment;
524 reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
525 (reply_ci % op_reply_q->segment_qd);
526 return reply_desc;
527 }
528
529 /**
530 * mpi3mr_process_op_reply_q - Operational reply queue handler
531 * @mrioc: Adapter instance reference
532 * @op_reply_q: Operational reply queue info
533 *
534 * Checks the specific operational reply queue and drains the
535 * reply queue entries until the queue is empty and process the
536 * individual reply descriptors.
537 *
538 * Return: 0 if queue is already processed,or number of reply
539 * descriptors processed.
540 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;

	reply_qidx = op_reply_q->qid - 1;

	/* single-drainer guard: only one context per queue at a time */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	/* phase mismatch at the CI means the queue is empty */
	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		/* propagate firmware's request-queue CI to free req slots */
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);

		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;
		threshold_comps++;

		/* wrap the CI and flip the expected phase at queue end */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
		/* periodically publish the CI so firmware can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(reply_ci,
			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
			threshold_comps = 0;
		}
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
619
620 /**
621 * mpi3mr_blk_mq_poll - Operational reply queue handler
622 * @shost: SCSI Host reference
623 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
624 *
625 * Checks the specific operational reply queue and drains the
626 * reply queue entries until the queue is empty and process the
627 * individual reply descriptors.
628 *
629 * Return: 0 if queue is already processed,or number of reply
630 * descriptors processed.
631 */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)632 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
633 {
634 int num_entries = 0;
635 struct mpi3mr_ioc *mrioc;
636
637 mrioc = (struct mpi3mr_ioc *)shost->hostdata;
638
639 if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
640 mrioc->unrecoverable || mrioc->pci_err_recovery))
641 return 0;
642
643 num_entries = mpi3mr_process_op_reply_q(mrioc,
644 &mrioc->op_reply_qinfo[queue_num]);
645
646 return num_entries;
647 }
648
mpi3mr_isr_primary(int irq,void * privdata)649 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
650 {
651 struct mpi3mr_intr_info *intr_info = privdata;
652 struct mpi3mr_ioc *mrioc;
653 u16 midx;
654 u32 num_admin_replies = 0, num_op_reply = 0;
655
656 if (!intr_info)
657 return IRQ_NONE;
658
659 mrioc = intr_info->mrioc;
660
661 if (!mrioc->intr_enabled)
662 return IRQ_NONE;
663
664 midx = intr_info->msix_index;
665
666 if (!midx)
667 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
668 if (intr_info->op_reply_q)
669 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
670 intr_info->op_reply_q);
671
672 if (num_admin_replies || num_op_reply)
673 return IRQ_HANDLED;
674 else
675 return IRQ_NONE;
676 }
677
678 #ifndef CONFIG_PREEMPT_RT
679
/**
 * mpi3mr_isr - hard interrupt handler (non-RT kernels)
 * @irq: IRQ number
 * @privdata: per-vector interrupt info
 *
 * Runs the primary handler, then decides whether to hand off to the
 * threaded poll handler: if irq polling was enabled (heavy load) and
 * I/Os are still pending, the IRQ line is masked and the thread is
 * woken to continue draining.
 *
 * Return: primary handler's result, or IRQ_WAKE_THREAD for handoff.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* re-enabled by mpi3mr_isr_poll() once the queue is drained */
	disable_irq_nosync(intr_info->os_irq);

	return IRQ_WAKE_THREAD;
}
706
707 /**
708 * mpi3mr_isr_poll - Reply queue polling routine
709 * @irq: IRQ
710 * @privdata: Interrupt info
711 *
712 * poll for pending I/O completions in a loop until pending I/Os
713 * present or controller queue depth I/Os are processed.
714 *
715 * Return: IRQ_NONE or IRQ_HANDLED
716 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* vector 0 also services the admin reply queue */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* brief sleep between passes to avoid hogging the CPU */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* drained (or budget hit): hand control back to the hard IRQ */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}
752
753 #endif
754
755 /**
756 * mpi3mr_request_irq - Request IRQ and register ISR
757 * @mrioc: Adapter instance reference
758 * @index: IRQ vector index
759 *
760 * Request threaded ISR with primary ISR and secondary
761 *
762 * Return: 0 on success and non zero on failures.
763 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	/* e.g. "mpi3mr0-msix3"; shown in /proc/interrupts */
	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	/* hard handler plus threaded poll handler for heavy-load handoff */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	/* on RT kernels the primary handler runs threaded already */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}
793
/*
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-X vectors available
 *
 * Two vectors are reserved for the admin and default queues, so poll
 * queues may only use what is left beyond those; otherwise polling is
 * disabled by zeroing the requested count.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	bool fits;

	if (!mrioc->requested_poll_qcount)
		return;

	/* Reserved for Admin and Default Queue */
	fits = (max_vectors > 2) &&
	    (mrioc->requested_poll_qcount < max_vectors - 2);

	if (fits) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}
812
813 /**
814 * mpi3mr_setup_isr - Setup ISR for the controller
815 * @mrioc: Adapter instance reference
816 * @setup_one: Request one IRQ or more
817 *
818 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
819 *
820 * Return: 0 on success and non zero on failures.
821 */
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
	unsigned int irq_flags = PCI_IRQ_MSIX;
	int max_vectors, min_vec;
	int retval;
	int i;
	/* one pre vector (admin/default) and, initially, one post vector */
	struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };

	if (mrioc->is_intr_info_set)
		return 0;

	mpi3mr_cleanup_isr(mrioc);

	if (setup_one || reset_devices) {
		/* single-vector bring-up / kdump mode */
		max_vectors = 1;
		retval = pci_alloc_irq_vectors(mrioc->pdev,
		    1, max_vectors, irq_flags);
		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}
	} else {
		/* one vector per CPU plus admin, capped by what HW offers */
		max_vectors =
		    min_t(int, mrioc->cpu_count + 1 +
			mrioc->requested_poll_qcount, mrioc->msix_count);

		mpi3mr_calc_poll_queues(mrioc, max_vectors);

		ioc_info(mrioc,
		    "MSI-X vectors supported: %d, no of cores: %d,",
		    mrioc->msix_count, mrioc->cpu_count);
		ioc_info(mrioc,
		    "MSI-x vectors requested: %d poll_queues %d\n",
		    max_vectors, mrioc->requested_poll_qcount);

		/* poll queues are post vectors: no affinity spreading */
		desc.post_vectors = mrioc->requested_poll_qcount;
		min_vec = desc.pre_vectors + desc.post_vectors;
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;

		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
		    min_vec, max_vectors, irq_flags, &desc);

		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}


		/*
		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
		 * between Admin queue and operational queue
		 */
		if (retval == min_vec)
			mrioc->op_reply_q_offset = 0;
		else if (retval != (max_vectors)) {
			ioc_info(mrioc,
			    "allocated vectors (%d) are less than configured (%d)\n",
			    retval, max_vectors);
		}

		max_vectors = retval;
		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;

		/* re-validate poll queues against vectors actually granted */
		mpi3mr_calc_poll_queues(mrioc, max_vectors);

	}

	/*
	 * kcalloc instead of an open-coded kzalloc(size * count): the
	 * multiplication is overflow-checked and the intent is clearer.
	 */
	mrioc->intr_info = kcalloc(max_vectors,
	    sizeof(struct mpi3mr_intr_info), GFP_KERNEL);
	if (!mrioc->intr_info) {
		retval = -ENOMEM;
		pci_free_irq_vectors(mrioc->pdev);
		goto out_failed;
	}
	for (i = 0; i < max_vectors; i++) {
		retval = mpi3mr_request_irq(mrioc, i);
		if (retval) {
			/* record how many IRQs were set up so cleanup frees them */
			mrioc->intr_info_count = i;
			goto out_failed;
		}
	}
	if (reset_devices || !setup_one)
		mrioc->is_intr_info_set = true;
	mrioc->intr_info_count = max_vectors;
	mpi3mr_ioc_enable_intr(mrioc);
	return 0;

out_failed:
	mpi3mr_cleanup_isr(mrioc);

	return retval;
}
916
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
928
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)929 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
930 {
931 int i;
932 char *name = NULL;
933
934 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
935 if (mrioc_states[i].value == mrioc_state) {
936 name = mrioc_states[i].name;
937 break;
938 }
939 }
940 return name;
941 }
942
943 /* Reset reason to name mapper structure*/
944 static const struct {
945 enum mpi3mr_reset_reason value;
946 char *name;
947 } mpi3mr_reset_reason_codes[] = {
948 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
949 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
950 { MPI3MR_RESET_FROM_APP, "application invocation" },
951 { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
952 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
953 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
954 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
955 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
956 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
957 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
958 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
959 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
960 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
961 {
962 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
963 "create request queue timeout"
964 },
965 {
966 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
967 "create reply queue timeout"
968 },
969 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
970 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
971 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
972 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
973 {
974 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
975 "component image activation timeout"
976 },
977 {
978 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
979 "get package version timeout"
980 },
981 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
982 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
983 {
984 MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
985 "diagnostic buffer post timeout"
986 },
987 {
988 MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
989 "diagnostic buffer release timeout"
990 },
991 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
992 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
993 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
994 };
995
996 /**
997 * mpi3mr_reset_rc_name - get reset reason code name
998 * @reason_code: reset reason code value
999 *
1000 * Map reset reason to an NULL terminated ASCII string
1001 *
1002 * Return: name corresponding to reset reason value or NULL.
1003 */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1004 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1005 {
1006 int i;
1007 char *name = NULL;
1008
1009 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1010 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1011 name = mpi3mr_reset_reason_codes[i].name;
1012 break;
1013 }
1014 }
1015 return name;
1016 }
1017
/*
 * Reset type to name mapper structure - maps the sysif host diagnostic
 * reset action values to short human readable names
 * (see mpi3mr_reset_type_name()).
 */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1026
1027 /**
1028 * mpi3mr_reset_type_name - get reset type name
1029 * @reset_type: reset type value
1030 *
1031 * Map reset type to an NULL terminated ASCII string
1032 *
1033 * Return: name corresponding to reset type value or NULL.
1034 */
mpi3mr_reset_type_name(u16 reset_type)1035 static const char *mpi3mr_reset_type_name(u16 reset_type)
1036 {
1037 int i;
1038 char *name = NULL;
1039
1040 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1041 if (mpi3mr_reset_types[i].reset_type == reset_type) {
1042 name = mpi3mr_reset_types[i].name;
1043 break;
1044 }
1045 }
1046 return name;
1047 }
1048
1049 /**
1050 * mpi3mr_is_fault_recoverable - Read fault code and decide
1051 * whether the controller can be recoverable
1052 * @mrioc: Adapter instance reference
1053 * Return: true if fault is recoverable, false otherwise.
1054 */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1055 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1056 {
1057 u32 fault;
1058
1059 fault = (readl(&mrioc->sysif_regs->fault) &
1060 MPI3_SYSIF_FAULT_CODE_MASK);
1061
1062 switch (fault) {
1063 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1064 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1065 ioc_warn(mrioc,
1066 "controller requires system power cycle, marking controller as unrecoverable\n");
1067 return false;
1068 case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1069 ioc_warn(mrioc,
1070 "controller faulted due to insufficient power,\n"
1071 " try by connecting it to a different slot\n");
1072 return false;
1073 default:
1074 break;
1075 }
1076 return true;
1077 }
1078
1079 /**
1080 * mpi3mr_print_fault_info - Display fault information
1081 * @mrioc: Adapter instance reference
1082 *
1083 * Display the controller fault information if there is a
1084 * controller fault.
1085 *
1086 * Return: Nothing.
1087 */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1088 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1089 {
1090 u32 ioc_status, code, code1, code2, code3;
1091
1092 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1093
1094 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1095 code = readl(&mrioc->sysif_regs->fault);
1096 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1097 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1098 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1099
1100 ioc_info(mrioc,
1101 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1102 code, code1, code2, code3);
1103 }
1104 }
1105
1106 /**
1107 * mpi3mr_get_iocstate - Get IOC State
1108 * @mrioc: Adapter instance reference
1109 *
1110 * Return a proper IOC state enum based on the IOC status and
1111 * IOC configuration and unrcoverable state of the controller.
1112 *
1113 * Return: Current IOC state.
1114 */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1115 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1116 {
1117 u32 ioc_status, ioc_config;
1118 u8 ready, enabled;
1119
1120 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1121 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1122
1123 if (mrioc->unrecoverable)
1124 return MRIOC_STATE_UNRECOVERABLE;
1125 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1126 return MRIOC_STATE_FAULT;
1127
1128 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1129 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1130
1131 if (ready && enabled)
1132 return MRIOC_STATE_READY;
1133 if ((!ready) && (!enabled))
1134 return MRIOC_STATE_RESET;
1135 if ((!ready) && (enabled))
1136 return MRIOC_STATE_BECOMING_READY;
1137
1138 return MRIOC_STATE_RESET_REQUESTED;
1139 }
1140
1141 /**
1142 * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1143 * @mrioc: Adapter instance reference
1144 *
1145 * Free the DMA memory allocated for IOCTL handling purpose.
1146 *
1147 * Return: None
1148 */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1149 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1150 {
1151 struct dma_memory_desc *mem_desc;
1152 u16 i;
1153
1154 if (!mrioc->ioctl_dma_pool)
1155 return;
1156
1157 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1158 mem_desc = &mrioc->ioctl_sge[i];
1159 if (mem_desc->addr) {
1160 dma_pool_free(mrioc->ioctl_dma_pool,
1161 mem_desc->addr,
1162 mem_desc->dma_addr);
1163 mem_desc->addr = NULL;
1164 }
1165 }
1166 dma_pool_destroy(mrioc->ioctl_dma_pool);
1167 mrioc->ioctl_dma_pool = NULL;
1168 mem_desc = &mrioc->ioctl_chain_sge;
1169
1170 if (mem_desc->addr) {
1171 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1172 mem_desc->addr, mem_desc->dma_addr);
1173 mem_desc->addr = NULL;
1174 }
1175 mem_desc = &mrioc->ioctl_resp_sge;
1176 if (mem_desc->addr) {
1177 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1178 mem_desc->addr, mem_desc->dma_addr);
1179 mem_desc->addr = NULL;
1180 }
1181
1182 mrioc->ioctl_sges_allocated = false;
1183 }
1184
1185 /**
1186 * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1187 * @mrioc: Adapter instance reference
1188 *
1189 * This function allocates dmaable memory required to handle the
1190 * application issued MPI3 IOCTL requests.
1191 *
1192 * Return: None
1193 */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1194 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1195
1196 {
1197 struct dma_memory_desc *mem_desc;
1198 u16 i;
1199
1200 mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1201 &mrioc->pdev->dev,
1202 MPI3MR_IOCTL_SGE_SIZE,
1203 MPI3MR_PAGE_SIZE_4K, 0);
1204
1205 if (!mrioc->ioctl_dma_pool) {
1206 ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1207 goto out_failed;
1208 }
1209
1210 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1211 mem_desc = &mrioc->ioctl_sge[i];
1212 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1213 mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1214 GFP_KERNEL,
1215 &mem_desc->dma_addr);
1216 if (!mem_desc->addr)
1217 goto out_failed;
1218 }
1219
1220 mem_desc = &mrioc->ioctl_chain_sge;
1221 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1222 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1223 mem_desc->size,
1224 &mem_desc->dma_addr,
1225 GFP_KERNEL);
1226 if (!mem_desc->addr)
1227 goto out_failed;
1228
1229 mem_desc = &mrioc->ioctl_resp_sge;
1230 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1231 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1232 mem_desc->size,
1233 &mem_desc->dma_addr,
1234 GFP_KERNEL);
1235 if (!mem_desc->addr)
1236 goto out_failed;
1237
1238 mrioc->ioctl_sges_allocated = true;
1239
1240 return;
1241 out_failed:
1242 ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1243 "from the applications, application interface for MPT command is disabled\n");
1244 mpi3mr_free_ioctl_dma_memory(mrioc);
1245 }
1246
1247 /**
1248 * mpi3mr_clear_reset_history - clear reset history
1249 * @mrioc: Adapter instance reference
1250 *
1251 * Write the reset history bit in IOC status to clear the bit,
1252 * if it is already set.
1253 *
1254 * Return: Nothing.
1255 */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1256 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1257 {
1258 u32 ioc_status;
1259
1260 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1261 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1262 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1263 }
1264
1265 /**
1266 * mpi3mr_issue_and_process_mur - Message unit Reset handler
1267 * @mrioc: Adapter instance reference
1268 * @reset_reason: Reset reason code
1269 *
1270 * Issue Message unit Reset to the controller and wait for it to
1271 * be complete.
1272 *
1273 * Return: 0 on success, -1 on failure.
1274 */
mpi3mr_issue_and_process_mur(struct mpi3mr_ioc * mrioc,u32 reset_reason)1275 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1276 u32 reset_reason)
1277 {
1278 u32 ioc_config, timeout, ioc_status, scratch_pad0;
1279 int retval = -1;
1280
1281 ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1282 if (mrioc->unrecoverable) {
1283 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1284 return retval;
1285 }
1286 mpi3mr_clear_reset_history(mrioc);
1287 scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1288 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1289 (mrioc->facts.ioc_num <<
1290 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1291 writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1292 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1293 ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1294 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1295
1296 timeout = MPI3MR_MUR_TIMEOUT * 10;
1297 do {
1298 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1299 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1300 mpi3mr_clear_reset_history(mrioc);
1301 break;
1302 }
1303 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1304 mpi3mr_print_fault_info(mrioc);
1305 break;
1306 }
1307 msleep(100);
1308 } while (--timeout);
1309
1310 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1311 if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1312 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1313 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1314 retval = 0;
1315
1316 ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
1317 (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1318 return retval;
1319 }
1320
1321 /**
1322 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1323 * during reset/resume
1324 * @mrioc: Adapter instance reference
1325 *
1326 * Return: zero if the new IOCFacts parameters value is compatible with
1327 * older values else return -EPERM
1328 */
1329 static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc * mrioc)1330 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1331 {
1332 unsigned long *removepend_bitmap;
1333
1334 if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1335 ioc_err(mrioc,
1336 "cannot increase reply size from %d to %d\n",
1337 mrioc->reply_sz, mrioc->facts.reply_sz);
1338 return -EPERM;
1339 }
1340
1341 if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1342 ioc_err(mrioc,
1343 "cannot reduce number of operational reply queues from %d to %d\n",
1344 mrioc->num_op_reply_q,
1345 mrioc->facts.max_op_reply_q);
1346 return -EPERM;
1347 }
1348
1349 if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1350 ioc_err(mrioc,
1351 "cannot reduce number of operational request queues from %d to %d\n",
1352 mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1353 return -EPERM;
1354 }
1355
1356 if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1357 ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1358 "\tchanged after reset: previous(%d), new(%d),\n"
1359 "the driver cannot change this at run time\n",
1360 mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1361
1362 if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1363 MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1364 ioc_err(mrioc,
1365 "critical error: multipath capability is enabled at the\n"
1366 "\tcontroller while sas transport support is enabled at the\n"
1367 "\tdriver, please reboot the system or reload the driver\n");
1368
1369 if (mrioc->seg_tb_support) {
1370 if (!(mrioc->facts.ioc_capabilities &
1371 MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
1372 ioc_err(mrioc,
1373 "critical error: previously enabled segmented trace\n"
1374 " buffer capability is disabled after reset. Please\n"
1375 " update the firmware or reboot the system or\n"
1376 " reload the driver to enable trace diag buffer\n");
1377 mrioc->diag_buffers[0].disabled_after_reset = true;
1378 } else
1379 mrioc->diag_buffers[0].disabled_after_reset = false;
1380 }
1381
1382 if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1383 removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1384 GFP_KERNEL);
1385 if (!removepend_bitmap) {
1386 ioc_err(mrioc,
1387 "failed to increase removepend_bitmap bits from %d to %d\n",
1388 mrioc->dev_handle_bitmap_bits,
1389 mrioc->facts.max_devhandle);
1390 return -EPERM;
1391 }
1392 bitmap_free(mrioc->removepend_bitmap);
1393 mrioc->removepend_bitmap = removepend_bitmap;
1394 ioc_info(mrioc,
1395 "increased bits of dev_handle_bitmap from %d to %d\n",
1396 mrioc->dev_handle_bitmap_bits,
1397 mrioc->facts.max_devhandle);
1398 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1399 }
1400
1401 return 0;
1402 }
1403
1404 /**
1405 * mpi3mr_bring_ioc_ready - Bring controller to ready state
1406 * @mrioc: Adapter instance reference
1407 *
1408 * Set Enable IOC bit in IOC configuration register and wait for
1409 * the controller to become ready.
1410 *
1411 * Return: 0 on success, appropriate error on failure.
1412 */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1413 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1414 {
1415 u32 ioc_config, ioc_status, timeout, host_diagnostic;
1416 int retval = 0;
1417 enum mpi3mr_iocstate ioc_state;
1418 u64 base_info;
1419 u8 retry = 0;
1420 u64 start_time, elapsed_time_sec;
1421
1422 retry_bring_ioc_ready:
1423
1424 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1425 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1426 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1427 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1428 ioc_status, ioc_config, base_info);
1429
1430 if (!mpi3mr_is_fault_recoverable(mrioc)) {
1431 mrioc->unrecoverable = 1;
1432 goto out_device_not_present;
1433 }
1434
1435 /*The timeout value is in 2sec unit, changing it to seconds*/
1436 mrioc->ready_timeout =
1437 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1438 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1439
1440 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1441
1442 ioc_state = mpi3mr_get_iocstate(mrioc);
1443 ioc_info(mrioc, "controller is in %s state during detection\n",
1444 mpi3mr_iocstate_name(ioc_state));
1445
1446 timeout = mrioc->ready_timeout * 10;
1447
1448 do {
1449 ioc_state = mpi3mr_get_iocstate(mrioc);
1450
1451 if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1452 ioc_state != MRIOC_STATE_RESET_REQUESTED)
1453 break;
1454
1455 if (!pci_device_is_present(mrioc->pdev)) {
1456 mrioc->unrecoverable = 1;
1457 ioc_err(mrioc, "controller is not present while waiting to reset\n");
1458 goto out_device_not_present;
1459 }
1460
1461 msleep(100);
1462 } while (--timeout);
1463
1464 if (ioc_state == MRIOC_STATE_READY) {
1465 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1466 retval = mpi3mr_issue_and_process_mur(mrioc,
1467 MPI3MR_RESET_FROM_BRINGUP);
1468 ioc_state = mpi3mr_get_iocstate(mrioc);
1469 if (retval)
1470 ioc_err(mrioc,
1471 "message unit reset failed with error %d current state %s\n",
1472 retval, mpi3mr_iocstate_name(ioc_state));
1473 }
1474 if (ioc_state != MRIOC_STATE_RESET) {
1475 if (ioc_state == MRIOC_STATE_FAULT) {
1476 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1477 mpi3mr_print_fault_info(mrioc);
1478 do {
1479 host_diagnostic =
1480 readl(&mrioc->sysif_regs->host_diagnostic);
1481 if (!(host_diagnostic &
1482 MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1483 break;
1484 if (!pci_device_is_present(mrioc->pdev)) {
1485 mrioc->unrecoverable = 1;
1486 ioc_err(mrioc, "controller is not present at the bringup\n");
1487 goto out_device_not_present;
1488 }
1489 msleep(100);
1490 } while (--timeout);
1491 }
1492 mpi3mr_print_fault_info(mrioc);
1493 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1494 retval = mpi3mr_issue_reset(mrioc,
1495 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1496 MPI3MR_RESET_FROM_BRINGUP);
1497 if (retval) {
1498 ioc_err(mrioc,
1499 "soft reset failed with error %d\n", retval);
1500 goto out_failed;
1501 }
1502 }
1503 ioc_state = mpi3mr_get_iocstate(mrioc);
1504 if (ioc_state != MRIOC_STATE_RESET) {
1505 ioc_err(mrioc,
1506 "cannot bring controller to reset state, current state: %s\n",
1507 mpi3mr_iocstate_name(ioc_state));
1508 goto out_failed;
1509 }
1510 mpi3mr_clear_reset_history(mrioc);
1511 retval = mpi3mr_setup_admin_qpair(mrioc);
1512 if (retval) {
1513 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1514 retval);
1515 goto out_failed;
1516 }
1517
1518 ioc_info(mrioc, "bringing controller to ready state\n");
1519 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1520 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1521 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1522
1523 if (retry == 0)
1524 start_time = jiffies;
1525
1526 timeout = mrioc->ready_timeout * 10;
1527 do {
1528 ioc_state = mpi3mr_get_iocstate(mrioc);
1529 if (ioc_state == MRIOC_STATE_READY) {
1530 ioc_info(mrioc,
1531 "successfully transitioned to %s state\n",
1532 mpi3mr_iocstate_name(ioc_state));
1533 return 0;
1534 }
1535 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1536 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1537 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1538 mpi3mr_print_fault_info(mrioc);
1539 goto out_failed;
1540 }
1541 if (!pci_device_is_present(mrioc->pdev)) {
1542 mrioc->unrecoverable = 1;
1543 ioc_err(mrioc,
1544 "controller is not present at the bringup\n");
1545 retval = -1;
1546 goto out_device_not_present;
1547 }
1548 msleep(100);
1549 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1550 } while (elapsed_time_sec < mrioc->ready_timeout);
1551
1552 out_failed:
1553 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1554 if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1555 retry++;
1556
1557 ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1558 " elapsed time =%llu\n", retry, elapsed_time_sec);
1559
1560 goto retry_bring_ioc_ready;
1561 }
1562 ioc_state = mpi3mr_get_iocstate(mrioc);
1563 ioc_err(mrioc,
1564 "failed to bring to ready state, current state: %s\n",
1565 mpi3mr_iocstate_name(ioc_state));
1566 out_device_not_present:
1567 return retval;
1568 }
1569
1570 /**
1571 * mpi3mr_soft_reset_success - Check softreset is success or not
1572 * @ioc_status: IOC status register value
1573 * @ioc_config: IOC config register value
1574 *
1575 * Check whether the soft reset is successful or not based on
1576 * IOC status and IOC config register values.
1577 *
1578 * Return: True when the soft reset is success, false otherwise.
1579 */
1580 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1581 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1582 {
1583 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1584 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1585 return true;
1586 return false;
1587 }
1588
1589 /**
1590 * mpi3mr_diagfault_success - Check diag fault is success or not
1591 * @mrioc: Adapter reference
1592 * @ioc_status: IOC status register value
1593 *
1594 * Check whether the controller hit diag reset fault code.
1595 *
1596 * Return: True when there is diag fault, false otherwise.
1597 */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1598 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1599 u32 ioc_status)
1600 {
1601 u32 fault;
1602
1603 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1604 return false;
1605 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1606 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1607 mpi3mr_print_fault_info(mrioc);
1608 return true;
1609 }
1610 return false;
1611 }
1612
1613 /**
1614 * mpi3mr_set_diagsave - Set diag save bit for snapdump
1615 * @mrioc: Adapter reference
1616 *
1617 * Set diag save bit in IOC configuration register to enable
1618 * snapdump.
1619 *
1620 * Return: Nothing.
1621 */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1622 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1623 {
1624 u32 ioc_config;
1625
1626 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1627 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1628 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1629 }
1630
1631 /**
1632 * mpi3mr_issue_reset - Issue reset to the controller
1633 * @mrioc: Adapter reference
1634 * @reset_type: Reset type
1635 * @reset_reason: Reset reason code
1636 *
1637 * Unlock the host diagnostic registers and write the specific
1638 * reset type to that, wait for reset acknowledgment from the
1639 * controller, if the reset is not successful retry for the
1640 * predefined number of times.
1641 *
1642 * Return: 0 on success, non-zero on failure.
1643 */
mpi3mr_issue_reset(struct mpi3mr_ioc * mrioc,u16 reset_type,u16 reset_reason)1644 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1645 u16 reset_reason)
1646 {
1647 int retval = -1;
1648 u8 unlock_retry_count = 0;
1649 u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1650 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1651
1652 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1653 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1654 return retval;
1655 if (mrioc->unrecoverable)
1656 return retval;
1657 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1658 retval = 0;
1659 return retval;
1660 }
1661
1662 ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1663 mpi3mr_reset_type_name(reset_type),
1664 mpi3mr_reset_rc_name(reset_reason), reset_reason);
1665
1666 mpi3mr_clear_reset_history(mrioc);
1667 do {
1668 ioc_info(mrioc,
1669 "Write magic sequence to unlock host diag register (retry=%d)\n",
1670 ++unlock_retry_count);
1671 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1672 ioc_err(mrioc,
1673 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1674 mpi3mr_reset_type_name(reset_type),
1675 host_diagnostic);
1676 mrioc->unrecoverable = 1;
1677 return retval;
1678 }
1679
1680 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1681 &mrioc->sysif_regs->write_sequence);
1682 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1683 &mrioc->sysif_regs->write_sequence);
1684 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1685 &mrioc->sysif_regs->write_sequence);
1686 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1687 &mrioc->sysif_regs->write_sequence);
1688 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1689 &mrioc->sysif_regs->write_sequence);
1690 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1691 &mrioc->sysif_regs->write_sequence);
1692 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1693 &mrioc->sysif_regs->write_sequence);
1694 usleep_range(1000, 1100);
1695 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1696 ioc_info(mrioc,
1697 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1698 unlock_retry_count, host_diagnostic);
1699 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1700
1701 scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1702 MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1703 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1704 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1705 writel(host_diagnostic | reset_type,
1706 &mrioc->sysif_regs->host_diagnostic);
1707 switch (reset_type) {
1708 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1709 do {
1710 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1711 ioc_config =
1712 readl(&mrioc->sysif_regs->ioc_configuration);
1713 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1714 && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1715 ) {
1716 mpi3mr_clear_reset_history(mrioc);
1717 retval = 0;
1718 break;
1719 }
1720 msleep(100);
1721 } while (--timeout);
1722 mpi3mr_print_fault_info(mrioc);
1723 break;
1724 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1725 do {
1726 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1727 if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1728 retval = 0;
1729 break;
1730 }
1731 msleep(100);
1732 } while (--timeout);
1733 break;
1734 default:
1735 break;
1736 }
1737
1738 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1739 &mrioc->sysif_regs->write_sequence);
1740
1741 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1742 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1743 ioc_info(mrioc,
1744 "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
1745 (!retval)?"successful":"failed", ioc_status,
1746 ioc_config);
1747 if (retval)
1748 mrioc->unrecoverable = 1;
1749 return retval;
1750 }
1751
1752 /**
1753 * mpi3mr_admin_request_post - Post request to admin queue
1754 * @mrioc: Adapter reference
1755 * @admin_req: MPI3 request
1756 * @admin_req_sz: Request size
1757 * @ignore_reset: Ignore reset in process
1758 *
1759 * Post the MPI3 request into admin request queue and
1760 * inform the controller, if the queue is full return
1761 * appropriate error.
1762 *
1763 * Return: 0 on success, non-zero on failure.
1764 */
mpi3mr_admin_request_post(struct mpi3mr_ioc * mrioc,void * admin_req,u16 admin_req_sz,u8 ignore_reset)1765 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1766 u16 admin_req_sz, u8 ignore_reset)
1767 {
1768 u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1769 int retval = 0;
1770 unsigned long flags;
1771 u8 *areq_entry;
1772
1773 if (mrioc->unrecoverable) {
1774 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1775 return -EFAULT;
1776 }
1777
1778 spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1779 areq_pi = mrioc->admin_req_pi;
1780 areq_ci = mrioc->admin_req_ci;
1781 max_entries = mrioc->num_admin_req;
1782 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1783 (areq_pi == (max_entries - 1)))) {
1784 ioc_err(mrioc, "AdminReqQ full condition detected\n");
1785 retval = -EAGAIN;
1786 goto out;
1787 }
1788 if (!ignore_reset && mrioc->reset_in_progress) {
1789 ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1790 retval = -EAGAIN;
1791 goto out;
1792 }
1793 if (mrioc->pci_err_recovery) {
1794 ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1795 retval = -EAGAIN;
1796 goto out;
1797 }
1798
1799 areq_entry = (u8 *)mrioc->admin_req_base +
1800 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1801 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1802 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1803
1804 if (++areq_pi == max_entries)
1805 areq_pi = 0;
1806 mrioc->admin_req_pi = areq_pi;
1807
1808 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1809
1810 out:
1811 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1812
1813 return retval;
1814 }
1815
1816 /**
1817 * mpi3mr_free_op_req_q_segments - free request memory segments
1818 * @mrioc: Adapter instance reference
1819 * @q_idx: operational request queue index
1820 *
1821 * Free memory segments allocated for operational request queue
1822 *
1823 * Return: Nothing.
1824 */
mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1825 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1826 {
1827 u16 j;
1828 int size;
1829 struct segments *segments;
1830
1831 segments = mrioc->req_qinfo[q_idx].q_segments;
1832 if (!segments)
1833 return;
1834
1835 if (mrioc->enable_segqueue) {
1836 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1837 if (mrioc->req_qinfo[q_idx].q_segment_list) {
1838 dma_free_coherent(&mrioc->pdev->dev,
1839 MPI3MR_MAX_SEG_LIST_SIZE,
1840 mrioc->req_qinfo[q_idx].q_segment_list,
1841 mrioc->req_qinfo[q_idx].q_segment_list_dma);
1842 mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1843 }
1844 } else
1845 size = mrioc->req_qinfo[q_idx].segment_qd *
1846 mrioc->facts.op_req_sz;
1847
1848 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1849 if (!segments[j].segment)
1850 continue;
1851 dma_free_coherent(&mrioc->pdev->dev,
1852 size, segments[j].segment, segments[j].segment_dma);
1853 segments[j].segment = NULL;
1854 }
1855 kfree(mrioc->req_qinfo[q_idx].q_segments);
1856 mrioc->req_qinfo[q_idx].q_segments = NULL;
1857 mrioc->req_qinfo[q_idx].qid = 0;
1858 }
1859
1860 /**
1861 * mpi3mr_free_op_reply_q_segments - free reply memory segments
1862 * @mrioc: Adapter instance reference
1863 * @q_idx: operational reply queue index
1864 *
1865 * Free memory segments allocated for operational reply queue
1866 *
1867 * Return: Nothing.
1868 */
mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1869 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1870 {
1871 u16 j;
1872 int size;
1873 struct segments *segments;
1874
1875 segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1876 if (!segments)
1877 return;
1878
1879 if (mrioc->enable_segqueue) {
1880 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1881 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1882 dma_free_coherent(&mrioc->pdev->dev,
1883 MPI3MR_MAX_SEG_LIST_SIZE,
1884 mrioc->op_reply_qinfo[q_idx].q_segment_list,
1885 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1886 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1887 }
1888 } else
1889 size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1890 mrioc->op_reply_desc_sz;
1891
1892 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1893 if (!segments[j].segment)
1894 continue;
1895 dma_free_coherent(&mrioc->pdev->dev,
1896 size, segments[j].segment, segments[j].segment_dma);
1897 segments[j].segment = NULL;
1898 }
1899
1900 kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1901 mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1902 mrioc->op_reply_qinfo[q_idx].qid = 0;
1903 }
1904
1905 /**
1906 * mpi3mr_delete_op_reply_q - delete operational reply queue
1907 * @mrioc: Adapter instance reference
1908 * @qidx: operational reply queue index
1909 *
1910 * Delete operatinal reply queue by issuing MPI request
1911 * through admin queue.
1912 *
1913 * Return: 0 on success, non-zero on failure.
1914 */
mpi3mr_delete_op_reply_q(struct mpi3mr_ioc * mrioc,u16 qidx)1915 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1916 {
1917 struct mpi3_delete_reply_queue_request delq_req;
1918 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1919 int retval = 0;
1920 u16 reply_qid = 0, midx;
1921
1922 reply_qid = op_reply_q->qid;
1923
1924 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1925
1926 if (!reply_qid) {
1927 retval = -1;
1928 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
1929 goto out;
1930 }
1931
1932 (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1933 mrioc->active_poll_qcount--;
1934
1935 memset(&delq_req, 0, sizeof(delq_req));
1936 mutex_lock(&mrioc->init_cmds.mutex);
1937 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1938 retval = -1;
1939 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1940 mutex_unlock(&mrioc->init_cmds.mutex);
1941 goto out;
1942 }
1943 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1944 mrioc->init_cmds.is_waiting = 1;
1945 mrioc->init_cmds.callback = NULL;
1946 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1947 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1948 delq_req.queue_id = cpu_to_le16(reply_qid);
1949
1950 init_completion(&mrioc->init_cmds.done);
1951 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1952 1);
1953 if (retval) {
1954 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1955 goto out_unlock;
1956 }
1957 wait_for_completion_timeout(&mrioc->init_cmds.done,
1958 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1959 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1960 ioc_err(mrioc, "delete reply queue timed out\n");
1961 mpi3mr_check_rh_fault_ioc(mrioc,
1962 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1963 retval = -1;
1964 goto out_unlock;
1965 }
1966 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1967 != MPI3_IOCSTATUS_SUCCESS) {
1968 ioc_err(mrioc,
1969 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1970 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1971 mrioc->init_cmds.ioc_loginfo);
1972 retval = -1;
1973 goto out_unlock;
1974 }
1975 mrioc->intr_info[midx].op_reply_q = NULL;
1976
1977 mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1978 out_unlock:
1979 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1980 mutex_unlock(&mrioc->init_cmds.mutex);
1981 out:
1982
1983 return retval;
1984 }
1985
1986 /**
1987 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1988 * @mrioc: Adapter instance reference
1989 * @qidx: request queue index
1990 *
1991 * Allocate segmented memory pools for operational reply
1992 * queue.
1993 *
1994 * Return: 0 on success, non-zero on failure.
1995 */
mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)1996 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1997 {
1998 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1999 int i, size;
2000 u64 *q_segment_list_entry = NULL;
2001 struct segments *segments;
2002
2003 if (mrioc->enable_segqueue) {
2004 op_reply_q->segment_qd =
2005 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
2006
2007 size = MPI3MR_OP_REP_Q_SEG_SIZE;
2008
2009 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2010 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
2011 GFP_KERNEL);
2012 if (!op_reply_q->q_segment_list)
2013 return -ENOMEM;
2014 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
2015 } else {
2016 op_reply_q->segment_qd = op_reply_q->num_replies;
2017 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
2018 }
2019
2020 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
2021 op_reply_q->segment_qd);
2022
2023 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
2024 sizeof(struct segments), GFP_KERNEL);
2025 if (!op_reply_q->q_segments)
2026 return -ENOMEM;
2027
2028 segments = op_reply_q->q_segments;
2029 for (i = 0; i < op_reply_q->num_segments; i++) {
2030 segments[i].segment =
2031 dma_alloc_coherent(&mrioc->pdev->dev,
2032 size, &segments[i].segment_dma, GFP_KERNEL);
2033 if (!segments[i].segment)
2034 return -ENOMEM;
2035 if (mrioc->enable_segqueue)
2036 q_segment_list_entry[i] =
2037 (unsigned long)segments[i].segment_dma;
2038 }
2039
2040 return 0;
2041 }
2042
2043 /**
2044 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2045 * @mrioc: Adapter instance reference
2046 * @qidx: request queue index
2047 *
2048 * Allocate segmented memory pools for operational request
2049 * queue.
2050 *
2051 * Return: 0 on success, non-zero on failure.
2052 */
mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2053 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2054 {
2055 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
2056 int i, size;
2057 u64 *q_segment_list_entry = NULL;
2058 struct segments *segments;
2059
2060 if (mrioc->enable_segqueue) {
2061 op_req_q->segment_qd =
2062 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
2063
2064 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
2065
2066 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2067 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
2068 GFP_KERNEL);
2069 if (!op_req_q->q_segment_list)
2070 return -ENOMEM;
2071 q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
2072
2073 } else {
2074 op_req_q->segment_qd = op_req_q->num_requests;
2075 size = op_req_q->num_requests * mrioc->facts.op_req_sz;
2076 }
2077
2078 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
2079 op_req_q->segment_qd);
2080
2081 op_req_q->q_segments = kcalloc(op_req_q->num_segments,
2082 sizeof(struct segments), GFP_KERNEL);
2083 if (!op_req_q->q_segments)
2084 return -ENOMEM;
2085
2086 segments = op_req_q->q_segments;
2087 for (i = 0; i < op_req_q->num_segments; i++) {
2088 segments[i].segment =
2089 dma_alloc_coherent(&mrioc->pdev->dev,
2090 size, &segments[i].segment_dma, GFP_KERNEL);
2091 if (!segments[i].segment)
2092 return -ENOMEM;
2093 if (mrioc->enable_segqueue)
2094 q_segment_list_entry[i] =
2095 (unsigned long)segments[i].segment_dma;
2096 }
2097
2098 return 0;
2099 }
2100
2101 /**
2102 * mpi3mr_create_op_reply_q - create operational reply queue
2103 * @mrioc: Adapter instance reference
2104 * @qidx: operational reply queue index
2105 *
2106 * Create operatinal reply queue by issuing MPI request
2107 * through admin queue.
2108 *
2109 * Return: 0 on success, non-zero on failure.
2110 */
mpi3mr_create_op_reply_q(struct mpi3mr_ioc * mrioc,u16 qidx)2111 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
2112 {
2113 struct mpi3_create_reply_queue_request create_req;
2114 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2115 int retval = 0;
2116 u16 reply_qid = 0, midx;
2117
2118 reply_qid = op_reply_q->qid;
2119
2120 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
2121
2122 if (reply_qid) {
2123 retval = -1;
2124 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
2125 reply_qid);
2126
2127 return retval;
2128 }
2129
2130 reply_qid = qidx + 1;
2131
2132 if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
2133 if (mrioc->pdev->revision)
2134 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
2135 else
2136 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
2137 } else
2138 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
2139
2140 op_reply_q->ci = 0;
2141 op_reply_q->ephase = 1;
2142 atomic_set(&op_reply_q->pend_ios, 0);
2143 atomic_set(&op_reply_q->in_use, 0);
2144 op_reply_q->enable_irq_poll = false;
2145 op_reply_q->qfull_watermark =
2146 op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
2147
2148 if (!op_reply_q->q_segments) {
2149 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
2150 if (retval) {
2151 mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2152 goto out;
2153 }
2154 }
2155
2156 memset(&create_req, 0, sizeof(create_req));
2157 mutex_lock(&mrioc->init_cmds.mutex);
2158 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2159 retval = -1;
2160 ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
2161 goto out_unlock;
2162 }
2163 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2164 mrioc->init_cmds.is_waiting = 1;
2165 mrioc->init_cmds.callback = NULL;
2166 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2167 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
2168 create_req.queue_id = cpu_to_le16(reply_qid);
2169
2170 if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
2171 op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
2172 else
2173 op_reply_q->qtype = MPI3MR_POLL_QUEUE;
2174
2175 if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
2176 create_req.flags =
2177 MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
2178 create_req.msix_index =
2179 cpu_to_le16(mrioc->intr_info[midx].msix_index);
2180 } else {
2181 create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
2182 ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
2183 reply_qid, midx);
2184 if (!mrioc->active_poll_qcount)
2185 disable_irq_nosync(pci_irq_vector(mrioc->pdev,
2186 mrioc->intr_info_count - 1));
2187 }
2188
2189 if (mrioc->enable_segqueue) {
2190 create_req.flags |=
2191 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2192 create_req.base_address = cpu_to_le64(
2193 op_reply_q->q_segment_list_dma);
2194 } else
2195 create_req.base_address = cpu_to_le64(
2196 op_reply_q->q_segments[0].segment_dma);
2197
2198 create_req.size = cpu_to_le16(op_reply_q->num_replies);
2199
2200 init_completion(&mrioc->init_cmds.done);
2201 retval = mpi3mr_admin_request_post(mrioc, &create_req,
2202 sizeof(create_req), 1);
2203 if (retval) {
2204 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
2205 goto out_unlock;
2206 }
2207 wait_for_completion_timeout(&mrioc->init_cmds.done,
2208 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2209 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2210 ioc_err(mrioc, "create reply queue timed out\n");
2211 mpi3mr_check_rh_fault_ioc(mrioc,
2212 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
2213 retval = -1;
2214 goto out_unlock;
2215 }
2216 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2217 != MPI3_IOCSTATUS_SUCCESS) {
2218 ioc_err(mrioc,
2219 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2220 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2221 mrioc->init_cmds.ioc_loginfo);
2222 retval = -1;
2223 goto out_unlock;
2224 }
2225 op_reply_q->qid = reply_qid;
2226 if (midx < mrioc->intr_info_count)
2227 mrioc->intr_info[midx].op_reply_q = op_reply_q;
2228
2229 (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
2230 mrioc->active_poll_qcount++;
2231
2232 out_unlock:
2233 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2234 mutex_unlock(&mrioc->init_cmds.mutex);
2235 out:
2236
2237 return retval;
2238 }
2239
2240 /**
2241 * mpi3mr_create_op_req_q - create operational request queue
2242 * @mrioc: Adapter instance reference
2243 * @idx: operational request queue index
2244 * @reply_qid: Reply queue ID
2245 *
2246 * Create operatinal request queue by issuing MPI request
2247 * through admin queue.
2248 *
2249 * Return: 0 on success, non-zero on failure.
2250 */
mpi3mr_create_op_req_q(struct mpi3mr_ioc * mrioc,u16 idx,u16 reply_qid)2251 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
2252 u16 reply_qid)
2253 {
2254 struct mpi3_create_request_queue_request create_req;
2255 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
2256 int retval = 0;
2257 u16 req_qid = 0;
2258
2259 req_qid = op_req_q->qid;
2260
2261 if (req_qid) {
2262 retval = -1;
2263 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
2264 req_qid);
2265
2266 return retval;
2267 }
2268 req_qid = idx + 1;
2269
2270 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
2271 op_req_q->ci = 0;
2272 op_req_q->pi = 0;
2273 op_req_q->reply_qid = reply_qid;
2274 spin_lock_init(&op_req_q->q_lock);
2275
2276 if (!op_req_q->q_segments) {
2277 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
2278 if (retval) {
2279 mpi3mr_free_op_req_q_segments(mrioc, idx);
2280 goto out;
2281 }
2282 }
2283
2284 memset(&create_req, 0, sizeof(create_req));
2285 mutex_lock(&mrioc->init_cmds.mutex);
2286 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2287 retval = -1;
2288 ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
2289 goto out_unlock;
2290 }
2291 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2292 mrioc->init_cmds.is_waiting = 1;
2293 mrioc->init_cmds.callback = NULL;
2294 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2295 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
2296 create_req.queue_id = cpu_to_le16(req_qid);
2297 if (mrioc->enable_segqueue) {
2298 create_req.flags =
2299 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2300 create_req.base_address = cpu_to_le64(
2301 op_req_q->q_segment_list_dma);
2302 } else
2303 create_req.base_address = cpu_to_le64(
2304 op_req_q->q_segments[0].segment_dma);
2305 create_req.reply_queue_id = cpu_to_le16(reply_qid);
2306 create_req.size = cpu_to_le16(op_req_q->num_requests);
2307
2308 init_completion(&mrioc->init_cmds.done);
2309 retval = mpi3mr_admin_request_post(mrioc, &create_req,
2310 sizeof(create_req), 1);
2311 if (retval) {
2312 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
2313 goto out_unlock;
2314 }
2315 wait_for_completion_timeout(&mrioc->init_cmds.done,
2316 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2317 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2318 ioc_err(mrioc, "create request queue timed out\n");
2319 mpi3mr_check_rh_fault_ioc(mrioc,
2320 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2321 retval = -1;
2322 goto out_unlock;
2323 }
2324 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2325 != MPI3_IOCSTATUS_SUCCESS) {
2326 ioc_err(mrioc,
2327 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2328 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2329 mrioc->init_cmds.ioc_loginfo);
2330 retval = -1;
2331 goto out_unlock;
2332 }
2333 op_req_q->qid = req_qid;
2334
2335 out_unlock:
2336 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2337 mutex_unlock(&mrioc->init_cmds.mutex);
2338 out:
2339
2340 return retval;
2341 }
2342
2343 /**
2344 * mpi3mr_create_op_queues - create operational queue pairs
2345 * @mrioc: Adapter instance reference
2346 *
2347 * Allocate memory for operational queue meta data and call
2348 * create request and reply queue functions.
2349 *
2350 * Return: 0 on success, non-zero on failures.
2351 */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2352 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2353 {
2354 int retval = 0;
2355 u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2356 u32 ioc_status;
2357 enum mpi3mr_iocstate ioc_state;
2358
2359 num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2360 mrioc->facts.max_op_req_q);
2361
2362 msix_count_op_q =
2363 mrioc->intr_info_count - mrioc->op_reply_q_offset;
2364 if (!mrioc->num_queues)
2365 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2366 /*
2367 * During reset set the num_queues to the number of queues
2368 * that was set before the reset.
2369 */
2370 num_queues = mrioc->num_op_reply_q ?
2371 mrioc->num_op_reply_q : mrioc->num_queues;
2372 ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2373 num_queues);
2374
2375 if (!mrioc->req_qinfo) {
2376 mrioc->req_qinfo = kcalloc(num_queues,
2377 sizeof(struct op_req_qinfo), GFP_KERNEL);
2378 if (!mrioc->req_qinfo) {
2379 retval = -1;
2380 goto out_failed;
2381 }
2382
2383 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2384 num_queues, GFP_KERNEL);
2385 if (!mrioc->op_reply_qinfo) {
2386 retval = -1;
2387 goto out_failed;
2388 }
2389 }
2390
2391 if (mrioc->enable_segqueue)
2392 ioc_info(mrioc,
2393 "allocating operational queues through segmented queues\n");
2394
2395 for (i = 0; i < num_queues; i++) {
2396 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2397 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2398 break;
2399 }
2400 if (mpi3mr_create_op_req_q(mrioc, i,
2401 mrioc->op_reply_qinfo[i].qid)) {
2402 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2403 mpi3mr_delete_op_reply_q(mrioc, i);
2404 break;
2405 }
2406 }
2407
2408 if (i == 0) {
2409 /* Not even one queue is created successfully*/
2410 retval = -1;
2411 goto out_failed;
2412 }
2413 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2414 ioc_state = mpi3mr_get_iocstate(mrioc);
2415 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2416 ioc_state != MRIOC_STATE_READY) {
2417 mpi3mr_print_fault_info(mrioc);
2418 retval = -1;
2419 goto out_failed;
2420 }
2421 mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2422 ioc_info(mrioc,
2423 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2424 mrioc->num_op_reply_q, mrioc->default_qcount,
2425 mrioc->active_poll_qcount);
2426
2427 return retval;
2428 out_failed:
2429 kfree(mrioc->req_qinfo);
2430 mrioc->req_qinfo = NULL;
2431
2432 kfree(mrioc->op_reply_qinfo);
2433 mrioc->op_reply_qinfo = NULL;
2434
2435 return retval;
2436 }
2437
2438 /**
2439 * mpi3mr_op_request_post - Post request to operational queue
2440 * @mrioc: Adapter reference
2441 * @op_req_q: Operational request queue info
2442 * @req: MPI3 request
2443 *
2444 * Post the MPI3 request into operational request queue and
2445 * inform the controller, if the queue is full return
2446 * appropriate error.
2447 *
2448 * Return: 0 on success, non-zero on failure.
2449 */
mpi3mr_op_request_post(struct mpi3mr_ioc * mrioc,struct op_req_qinfo * op_req_q,u8 * req)2450 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2451 struct op_req_qinfo *op_req_q, u8 *req)
2452 {
2453 u16 pi = 0, max_entries, reply_qidx = 0, midx;
2454 int retval = 0;
2455 unsigned long flags;
2456 u8 *req_entry;
2457 void *segment_base_addr;
2458 u16 req_sz = mrioc->facts.op_req_sz;
2459 struct segments *segments = op_req_q->q_segments;
2460 struct op_reply_qinfo *op_reply_q = NULL;
2461
2462 reply_qidx = op_req_q->reply_qid - 1;
2463 op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
2464
2465 if (mrioc->unrecoverable)
2466 return -EFAULT;
2467
2468 spin_lock_irqsave(&op_req_q->q_lock, flags);
2469 pi = op_req_q->pi;
2470 max_entries = op_req_q->num_requests;
2471
2472 if (mpi3mr_check_req_qfull(op_req_q)) {
2473 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2474 reply_qidx, mrioc->op_reply_q_offset);
2475 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2476
2477 if (mpi3mr_check_req_qfull(op_req_q)) {
2478 retval = -EAGAIN;
2479 goto out;
2480 }
2481 }
2482
2483 if (mrioc->reset_in_progress) {
2484 ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2485 retval = -EAGAIN;
2486 goto out;
2487 }
2488 if (mrioc->pci_err_recovery) {
2489 ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
2490 retval = -EAGAIN;
2491 goto out;
2492 }
2493
2494 /* Reply queue is nearing to get full, push back IOs to SML */
2495 if ((mrioc->prevent_reply_qfull == true) &&
2496 (atomic_read(&op_reply_q->pend_ios) >
2497 (op_reply_q->qfull_watermark))) {
2498 atomic_inc(&mrioc->reply_qfull_count);
2499 retval = -EAGAIN;
2500 goto out;
2501 }
2502
2503 segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2504 req_entry = (u8 *)segment_base_addr +
2505 ((pi % op_req_q->segment_qd) * req_sz);
2506
2507 memset(req_entry, 0, req_sz);
2508 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2509
2510 if (++pi == max_entries)
2511 pi = 0;
2512 op_req_q->pi = pi;
2513
2514 #ifndef CONFIG_PREEMPT_RT
2515 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2516 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2517 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2518 #else
2519 atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
2520 #endif
2521
2522 writel(op_req_q->pi,
2523 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2524
2525 out:
2526 spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2527 return retval;
2528 }
2529
2530 /**
2531 * mpi3mr_check_rh_fault_ioc - check reset history and fault
2532 * controller
2533 * @mrioc: Adapter instance reference
2534 * @reason_code: reason code for the fault.
2535 *
2536 * This routine will save snapdump and fault the controller with
2537 * the given reason code if it is not already in the fault or
2538 * not asynchronosuly reset. This will be used to handle
2539 * initilaization time faults/resets/timeout as in those cases
2540 * immediate soft reset invocation is not required.
2541 *
2542 * Return: None.
2543 */
mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc * mrioc,u32 reason_code)2544 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2545 {
2546 u32 ioc_status, host_diagnostic, timeout;
2547 union mpi3mr_trigger_data trigger_data;
2548
2549 if (mrioc->unrecoverable) {
2550 ioc_err(mrioc, "controller is unrecoverable\n");
2551 return;
2552 }
2553
2554 if (!pci_device_is_present(mrioc->pdev)) {
2555 mrioc->unrecoverable = 1;
2556 ioc_err(mrioc, "controller is not present\n");
2557 return;
2558 }
2559 memset(&trigger_data, 0, sizeof(trigger_data));
2560 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2561
2562 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2563 mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2564 MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2565 return;
2566 } else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
2567 trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2568 MPI3_SYSIF_FAULT_CODE_MASK);
2569
2570 mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2571 MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2572 mpi3mr_print_fault_info(mrioc);
2573 return;
2574 }
2575
2576 mpi3mr_set_diagsave(mrioc);
2577 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2578 reason_code);
2579 trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2580 MPI3_SYSIF_FAULT_CODE_MASK);
2581 mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
2582 &trigger_data, 0);
2583 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2584 do {
2585 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2586 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2587 break;
2588 msleep(100);
2589 } while (--timeout);
2590 }
2591
2592 /**
2593 * mpi3mr_sync_timestamp - Issue time stamp sync request
2594 * @mrioc: Adapter reference
2595 *
2596 * Issue IO unit control MPI request to synchornize firmware
2597 * timestamp with host time.
2598 *
2599 * Return: 0 on success, non-zero on failure.
2600 */
mpi3mr_sync_timestamp(struct mpi3mr_ioc * mrioc)2601 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2602 {
2603 ktime_t current_time;
2604 struct mpi3_iounit_control_request iou_ctrl;
2605 int retval = 0;
2606
2607 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2608 mutex_lock(&mrioc->init_cmds.mutex);
2609 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2610 retval = -1;
2611 ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2612 mutex_unlock(&mrioc->init_cmds.mutex);
2613 goto out;
2614 }
2615 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2616 mrioc->init_cmds.is_waiting = 1;
2617 mrioc->init_cmds.callback = NULL;
2618 iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2619 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2620 iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2621 current_time = ktime_get_real();
2622 iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2623
2624 init_completion(&mrioc->init_cmds.done);
2625 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2626 sizeof(iou_ctrl), 0);
2627 if (retval) {
2628 ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2629 goto out_unlock;
2630 }
2631
2632 wait_for_completion_timeout(&mrioc->init_cmds.done,
2633 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2634 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2635 ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2636 mrioc->init_cmds.is_waiting = 0;
2637 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2638 mpi3mr_check_rh_fault_ioc(mrioc,
2639 MPI3MR_RESET_FROM_TSU_TIMEOUT);
2640 retval = -1;
2641 goto out_unlock;
2642 }
2643 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2644 != MPI3_IOCSTATUS_SUCCESS) {
2645 ioc_err(mrioc,
2646 "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2647 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2648 mrioc->init_cmds.ioc_loginfo);
2649 retval = -1;
2650 goto out_unlock;
2651 }
2652
2653 out_unlock:
2654 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2655 mutex_unlock(&mrioc->init_cmds.mutex);
2656
2657 out:
2658 return retval;
2659 }
2660
2661 /**
2662 * mpi3mr_print_pkg_ver - display controller fw package version
2663 * @mrioc: Adapter reference
2664 *
2665 * Retrieve firmware package version from the component image
2666 * header of the controller flash and display it.
2667 *
2668 * Return: 0 on success and non-zero on failure.
2669 */
mpi3mr_print_pkg_ver(struct mpi3mr_ioc * mrioc)2670 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2671 {
2672 struct mpi3_ci_upload_request ci_upload;
2673 int retval = -1;
2674 void *data = NULL;
2675 dma_addr_t data_dma;
2676 struct mpi3_ci_manifest_mpi *manifest;
2677 u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2678 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2679
2680 data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2681 GFP_KERNEL);
2682 if (!data)
2683 return -ENOMEM;
2684
2685 memset(&ci_upload, 0, sizeof(ci_upload));
2686 mutex_lock(&mrioc->init_cmds.mutex);
2687 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2688 ioc_err(mrioc, "sending get package version failed due to command in use\n");
2689 mutex_unlock(&mrioc->init_cmds.mutex);
2690 goto out;
2691 }
2692 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2693 mrioc->init_cmds.is_waiting = 1;
2694 mrioc->init_cmds.callback = NULL;
2695 ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2696 ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2697 ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2698 ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2699 ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2700 ci_upload.segment_size = cpu_to_le32(data_len);
2701
2702 mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2703 data_dma);
2704 init_completion(&mrioc->init_cmds.done);
2705 retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2706 sizeof(ci_upload), 1);
2707 if (retval) {
2708 ioc_err(mrioc, "posting get package version failed\n");
2709 goto out_unlock;
2710 }
2711 wait_for_completion_timeout(&mrioc->init_cmds.done,
2712 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2713 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2714 ioc_err(mrioc, "get package version timed out\n");
2715 mpi3mr_check_rh_fault_ioc(mrioc,
2716 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2717 retval = -1;
2718 goto out_unlock;
2719 }
2720 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2721 == MPI3_IOCSTATUS_SUCCESS) {
2722 manifest = (struct mpi3_ci_manifest_mpi *) data;
2723 if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2724 ioc_info(mrioc,
2725 "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2726 manifest->package_version.gen_major,
2727 manifest->package_version.gen_minor,
2728 manifest->package_version.phase_major,
2729 manifest->package_version.phase_minor,
2730 manifest->package_version.customer_id,
2731 manifest->package_version.build_num);
2732 }
2733 }
2734 retval = 0;
2735 out_unlock:
2736 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2737 mutex_unlock(&mrioc->init_cmds.mutex);
2738
2739 out:
2740 if (data)
2741 dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2742 data_dma);
2743 return retval;
2744 }
2745
2746 /**
2747 * mpi3mr_watchdog_work - watchdog thread to monitor faults
2748 * @work: work struct
2749 *
2750 * Watch dog work periodically executed (1 second interval) to
2751 * monitor firmware fault and to issue periodic timer sync to
2752 * the firmware.
2753 *
2754 * Return: Nothing.
2755 */
mpi3mr_watchdog_work(struct work_struct * work)2756 static void mpi3mr_watchdog_work(struct work_struct *work)
2757 {
2758 struct mpi3mr_ioc *mrioc =
2759 container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2760 unsigned long flags;
2761 enum mpi3mr_iocstate ioc_state;
2762 u32 host_diagnostic, ioc_status;
2763 union mpi3mr_trigger_data trigger_data;
2764 u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2765
2766 if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
2767 return;
2768
2769 if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
2770 ioc_err(mrioc, "watchdog could not detect the controller\n");
2771 mrioc->unrecoverable = 1;
2772 }
2773
2774 if (mrioc->unrecoverable) {
2775 ioc_err(mrioc,
2776 "flush pending commands for unrecoverable controller\n");
2777 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
2778 return;
2779 }
2780
2781 if (atomic_read(&mrioc->admin_pend_isr)) {
2782 ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
2783 "flush admin replies\n");
2784 mpi3mr_process_admin_reply_q(mrioc);
2785 }
2786
2787 if (!(mrioc->facts.ioc_capabilities &
2788 MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
2789 (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
2790
2791 mrioc->ts_update_counter = 0;
2792 mpi3mr_sync_timestamp(mrioc);
2793 }
2794
2795 if ((mrioc->prepare_for_reset) &&
2796 ((mrioc->prepare_for_reset_timeout_counter++) >=
2797 MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2798 mpi3mr_soft_reset_handler(mrioc,
2799 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2800 return;
2801 }
2802
2803 memset(&trigger_data, 0, sizeof(trigger_data));
2804 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2805 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2806 mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2807 MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2808 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2809 return;
2810 }
2811
2812 /*Check for fault state every one second and issue Soft reset*/
2813 ioc_state = mpi3mr_get_iocstate(mrioc);
2814 if (ioc_state != MRIOC_STATE_FAULT)
2815 goto schedule_work;
2816
2817 trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2818 mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2819 MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2820 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2821 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2822 if (!mrioc->diagsave_timeout) {
2823 mpi3mr_print_fault_info(mrioc);
2824 ioc_warn(mrioc, "diag save in progress\n");
2825 }
2826 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2827 goto schedule_work;
2828 }
2829
2830 mpi3mr_print_fault_info(mrioc);
2831 mrioc->diagsave_timeout = 0;
2832
2833 if (!mpi3mr_is_fault_recoverable(mrioc)) {
2834 mrioc->unrecoverable = 1;
2835 goto schedule_work;
2836 }
2837
2838 switch (trigger_data.fault) {
2839 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
2840 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2841 ioc_warn(mrioc,
2842 "controller requires system power cycle, marking controller as unrecoverable\n");
2843 mrioc->unrecoverable = 1;
2844 goto schedule_work;
2845 case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2846 goto schedule_work;
2847 case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2848 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2849 break;
2850 default:
2851 break;
2852 }
2853 mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2854 return;
2855
2856 schedule_work:
2857 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2858 if (mrioc->watchdog_work_q)
2859 queue_delayed_work(mrioc->watchdog_work_q,
2860 &mrioc->watchdog_work,
2861 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2862 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2863 return;
2864 }
2865
2866 /**
2867 * mpi3mr_start_watchdog - Start watchdog
2868 * @mrioc: Adapter instance reference
2869 *
2870 * Create and start the watchdog thread to monitor controller
2871 * faults.
2872 *
2873 * Return: Nothing.
2874 */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2875 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2876 {
2877 if (mrioc->watchdog_work_q)
2878 return;
2879
2880 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2881 snprintf(mrioc->watchdog_work_q_name,
2882 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2883 mrioc->id);
2884 mrioc->watchdog_work_q = alloc_ordered_workqueue(
2885 "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
2886 if (!mrioc->watchdog_work_q) {
2887 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2888 return;
2889 }
2890
2891 if (mrioc->watchdog_work_q)
2892 queue_delayed_work(mrioc->watchdog_work_q,
2893 &mrioc->watchdog_work,
2894 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2895 }
2896
2897 /**
2898 * mpi3mr_stop_watchdog - Stop watchdog
2899 * @mrioc: Adapter instance reference
2900 *
2901 * Stop the watchdog thread created to monitor controller
2902 * faults.
2903 *
2904 * Return: Nothing.
2905 */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)2906 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2907 {
2908 unsigned long flags;
2909 struct workqueue_struct *wq;
2910
2911 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2912 wq = mrioc->watchdog_work_q;
2913 mrioc->watchdog_work_q = NULL;
2914 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2915 if (wq) {
2916 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2917 flush_workqueue(wq);
2918 destroy_workqueue(wq);
2919 }
2920 }
2921
2922 /**
2923 * mpi3mr_setup_admin_qpair - Setup admin queue pair
2924 * @mrioc: Adapter instance reference
2925 *
2926 * Allocate memory for admin queue pair if required and register
2927 * the admin queue with the controller.
2928 *
2929 * Return: 0 on success, non-zero on failures.
2930 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Fixed-size admin request queue, carved into request frames */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	/* Fixed-size admin reply queue, carved into reply frames */
	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Initial expected-phase bit for admin reply descriptors */
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/*
	 * Queue memory is allocated only on first call; on
	 * re-initialization the previously allocated buffers are reused.
	 */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/*
	 * Program queue depths (replies in the upper 16 bits, requests in
	 * the lower 16) and DMA addresses into the controller registers.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address,
	    &mrioc->adm_req_q_bar_writeq_lock);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address,
	    &mrioc->adm_reply_q_bar_writeq_lock);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	/* Free whichever queue buffers were allocated before the failure */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
2995
2996 /**
2997 * mpi3mr_issue_iocfacts - Send IOC Facts
2998 * @mrioc: Adapter instance reference
2999 * @facts_data: Cached IOC facts data
3000 *
3001 * Issue IOC Facts MPI request through admin queue and wait for
3002 * the completion of it or time out.
3003 *
3004 * Return: 0 on success, non-zero on failures.
3005 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer the controller writes the facts data into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single-slot command tracker; serialize on its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	/* Single simple SGE pointing at the response buffer */
	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	/* Completion must be (re)initialized before posting the request */
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset/fault handling before failing */
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer, then convert/cache into mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
3078
3079 /**
 * mpi3mr_check_reset_dma_mask - Check and reset DMA mask if needed
3081 * @mrioc: Adapter instance reference
3082 *
3083 * Check whether the new DMA mask requested through IOCFacts by
3084 * firmware needs to be set, if so set it .
3085 *
3086 * Return: 0 on success, non-zero on failure.
3087 */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3088 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3089 {
3090 struct pci_dev *pdev = mrioc->pdev;
3091 int r;
3092 u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3093
3094 if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3095 return 0;
3096
3097 ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3098 mrioc->dma_mask, facts_dma_mask);
3099
3100 r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3101 if (r) {
3102 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3103 facts_dma_mask, r);
3104 return r;
3105 }
3106 mrioc->dma_mask = facts_dma_mask;
3107 return r;
3108 }
3109
3110 /**
3111 * mpi3mr_process_factsdata - Process IOC facts data
3112 * @mrioc: Adapter instance reference
3113 * @facts_data: Cached IOC facts data
3114 *
3115 * Convert IOC facts data into cpu endianness and cache it in
3116 * the driver .
3117 *
3118 * Return: Nothing.
3119 */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3120 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3121 struct mpi3_ioc_facts_data *facts_data)
3122 {
3123 u32 ioc_config, req_sz, facts_flags;
3124
3125 if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3126 (sizeof(*facts_data) / 4)) {
3127 ioc_warn(mrioc,
3128 "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3129 sizeof(*facts_data),
3130 le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3131 }
3132
3133 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3134 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3135 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3136 if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3137 ioc_err(mrioc,
3138 "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3139 req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3140 }
3141
3142 memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3143
3144 facts_flags = le32_to_cpu(facts_data->flags);
3145 mrioc->facts.op_req_sz = req_sz;
3146 mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3147 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3148 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3149
3150 mrioc->facts.ioc_num = facts_data->ioc_number;
3151 mrioc->facts.who_init = facts_data->who_init;
3152 mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3153 mrioc->facts.personality = (facts_flags &
3154 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3155 mrioc->facts.dma_mask = (facts_flags &
3156 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3157 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3158 mrioc->facts.dma_mask = (facts_flags &
3159 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3160 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3161 mrioc->facts.max_req_limit = (facts_flags &
3162 MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
3163 mrioc->facts.protocol_flags = facts_data->protocol_flags;
3164 mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3165 mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3166 mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3167 mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3168 mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3169 mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3170 mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3171 mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3172 mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3173 mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3174 mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3175 mrioc->facts.max_pcie_switches =
3176 le16_to_cpu(facts_data->max_pcie_switches);
3177 mrioc->facts.max_sasexpanders =
3178 le16_to_cpu(facts_data->max_sas_expanders);
3179 mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3180 mrioc->facts.max_sasinitiators =
3181 le16_to_cpu(facts_data->max_sas_initiators);
3182 mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3183 mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3184 mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3185 mrioc->facts.max_op_req_q =
3186 le16_to_cpu(facts_data->max_operational_request_queues);
3187 mrioc->facts.max_op_reply_q =
3188 le16_to_cpu(facts_data->max_operational_reply_queues);
3189 mrioc->facts.ioc_capabilities =
3190 le32_to_cpu(facts_data->ioc_capabilities);
3191 mrioc->facts.fw_ver.build_num =
3192 le16_to_cpu(facts_data->fw_version.build_num);
3193 mrioc->facts.fw_ver.cust_id =
3194 le16_to_cpu(facts_data->fw_version.customer_id);
3195 mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3196 mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3197 mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3198 mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3199 mrioc->msix_count = min_t(int, mrioc->msix_count,
3200 mrioc->facts.max_msix_vectors);
3201 mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3202 mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3203 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3204 mrioc->facts.shutdown_timeout =
3205 le16_to_cpu(facts_data->shutdown_timeout);
3206 mrioc->facts.diag_trace_sz =
3207 le32_to_cpu(facts_data->diag_trace_size);
3208 mrioc->facts.diag_fw_sz =
3209 le32_to_cpu(facts_data->diag_fw_size);
3210 mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3211 mrioc->facts.max_dev_per_tg =
3212 facts_data->max_devices_per_throttle_group;
3213 mrioc->facts.io_throttle_data_length =
3214 le16_to_cpu(facts_data->io_throttle_data_length);
3215 mrioc->facts.max_io_throttle_group =
3216 le16_to_cpu(facts_data->max_io_throttle_group);
3217 mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3218 mrioc->facts.io_throttle_high =
3219 le16_to_cpu(facts_data->io_throttle_high);
3220
3221 if (mrioc->facts.max_data_length ==
3222 MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3223 mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3224 else
3225 mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3226 /* Store in 512b block count */
3227 if (mrioc->facts.io_throttle_data_length)
3228 mrioc->io_throttle_data_length =
3229 (mrioc->facts.io_throttle_data_length * 2 * 4);
3230 else
3231 /* set the length to 1MB + 1K to disable throttle */
3232 mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3233
3234 mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3235 mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3236
3237 ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3238 mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3239 mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3240 ioc_info(mrioc,
3241 "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3242 mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3243 mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3244 ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3245 mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3246 mrioc->facts.sge_mod_shift);
3247 ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3248 mrioc->facts.dma_mask, (facts_flags &
3249 MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3250 ioc_info(mrioc,
3251 "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3252 mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3253 ioc_info(mrioc,
3254 "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3255 mrioc->facts.io_throttle_data_length * 4,
3256 mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3257 }
3258
3259 /**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3261 * @mrioc: Adapter instance reference
3262 *
3263 * Allocate and initialize the reply free buffers, sense
3264 * buffers, reply free queue and sense buffer queue.
3265 *
3266 * Return: 0 on success, non-zero on failures.
3267 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-initialization after reset) */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	/* Per-slot reply buffers for device-removal handshake commands */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	/* Per-slot reply buffers for event acknowledgment commands */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* One tracking bit per possible device handle */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/*
	 * Queue sizes are one entry larger than the buffer counts - a
	 * terminating zero entry is written at the extra slot by
	 * mpimr_initialize_reply_sbuf_queues().
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): partially allocated buffers are not freed here;
	 * presumably released by the driver's common teardown path - confirm.
	 */
	retval = -1;
	return retval;
}
3395
3396 /**
3397 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3398 * buffers
3399 * @mrioc: Adapter instance reference
3400 *
3401 * Helper function to initialize reply and sense buffers along
3402 * with some debug prints.
3403 *
3404 * Return: None.
3405 */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3406 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3407 {
3408 u32 sz, i;
3409 dma_addr_t phy_addr;
3410
3411 sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3412 ioc_info(mrioc,
3413 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3414 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3415 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3416 sz = mrioc->reply_free_qsz * 8;
3417 ioc_info(mrioc,
3418 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3419 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3420 (unsigned long long)mrioc->reply_free_q_dma);
3421 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3422 ioc_info(mrioc,
3423 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3424 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3425 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3426 sz = mrioc->sense_buf_q_sz * 8;
3427 ioc_info(mrioc,
3428 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3429 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3430 (unsigned long long)mrioc->sense_buf_q_dma);
3431
3432 /* initialize Reply buffer Queue */
3433 for (i = 0, phy_addr = mrioc->reply_buf_dma;
3434 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3435 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3436 mrioc->reply_free_q[i] = cpu_to_le64(0);
3437
3438 /* initialize Sense Buffer Queue */
3439 for (i = 0, phy_addr = mrioc->sense_buf_dma;
3440 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3441 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3442 mrioc->sense_buf_q[i] = cpu_to_le64(0);
3443 }
3444
3445 /**
3446 * mpi3mr_issue_iocinit - Send IOC Init
3447 * @mrioc: Adapter instance reference
3448 *
3449 * Issue IOC Init MPI request through admin queue and wait for
3450 * the completion of it or time out.
3451 *
3452 * Return: 0 on success, non-zero on failures.
3453 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able driver-information page handed to the controller */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* (Re)populate reply free queue and sense buffer queue contents */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of the information reported to firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single-slot command tracker; serialize on its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Current wall-clock time in milliseconds for the controller */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	/* Completion must be (re)initialized before posting the request */
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset/fault handling before failing */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand every reply buffer and sense buffer over to the controller */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3563
3564 /**
3565 * mpi3mr_unmask_events - Unmask events in event mask bitmap
3566 * @mrioc: Adapter instance reference
3567 * @event: MPI event ID
3568 *
3569 * Un mask the specific event by resetting the event_mask
3570 * bitmap.
3571 *
 * Return: Nothing.
3573 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* event_masks covers 128 events (four 32-bit words) */
	if (event >= 128)
		return;

	/*
	 * Use an unsigned constant: for event % 32 == 31 the previous
	 * (1 << 31) left-shifted a signed int into the sign bit, which is
	 * undefined behavior in C.
	 */
	desired_event = (1U << (event % 32));
	word = event / 32;

	/* Clearing the bit unmasks (enables) the event */
	mrioc->event_masks[word] &= ~desired_event;
}
3587
3588 /**
3589 * mpi3mr_issue_event_notification - Send event notification
3590 * @mrioc: Adapter instance reference
3591 *
3592 * Issue event notification MPI request through admin queue and
3593 * wait for the completion of it or time out.
3594 *
3595 * Return: 0 on success, non-zero on failures.
3596 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-slot command tracker; serialize on its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the driver's cached event masks into the request */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	/* Completion must be (re)initialized before posting the request */
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset/fault handling before failing */
		ioc_err(mrioc, "event notification timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3651
3652 /**
3653 * mpi3mr_process_event_ack - Process event acknowledgment
3654 * @mrioc: Adapter instance reference
3655 * @event: MPI3 event ID
3656 * @event_ctx: event context
3657 *
3658 * Send event acknowledgment through admin queue and wait for
3659 * it to complete.
3660 *
3661 * Return: 0 on success, non-zero on failures.
3662 */
int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single-slot command tracker; serialize on its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);

	/* Completion must be (re)initialized before posting the request */
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Fix: message previously said "Issue EvtNotify" (copy-paste) */
		ioc_err(mrioc, "Send EvtAck: command timed out\n");
		/* Skip fault escalation when a reset already owns the command */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3718
3719 /**
3720 * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3721 * @mrioc: Adapter instance reference
3722 *
3723 * Allocate chain buffers and set a bitmap to indicate free
3724 * chain buffers. Chain buffers are used to pass the SGE
3725 * information along with MPI3 SCSI IO requests for host I/O.
3726 *
3727 * Return: 0 on success, non-zero on failure
3728 */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. re-initialization after reset) */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* Reserve extra chain buffers when any DIX protection is enabled */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/* Cap SGL entries so a chain never describes more than max_data_length */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
	    MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
		    MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
	    mrioc->max_sgl_entries, sz/1024);

	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* Bitmap tracking which chain buffers are currently in use */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partial allocations are left for the driver's
	 * common teardown path to release - confirm.
	 */
	retval = -1;
	return retval;
}
3783
3784 /**
3785 * mpi3mr_port_enable_complete - Mark port enable complete
3786 * @mrioc: Adapter instance reference
3787 * @drv_cmd: Internal command tracker
3788 *
3789 * Call back for asynchronous port enable request sets the
3790 * driver command to indicate port enable request is complete.
3791 *
3792 * Return: Nothing
3793 */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3794 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3795 struct mpi3mr_drv_cmd *drv_cmd)
3796 {
3797 drv_cmd->callback = NULL;
3798 mrioc->scan_started = 0;
3799 if (drv_cmd->state & MPI3MR_CMD_RESET)
3800 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3801 else
3802 mrioc->scan_failed = drv_cmd->ioc_status;
3803 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3804 }
3805
3806 /**
3807 * mpi3mr_issue_port_enable - Issue Port Enable
3808 * @mrioc: Adapter instance reference
3809 * @async: Flag to wait for completion or not
3810 *
3811 * Issue Port Enable MPI request through admin queue and if the
3812 * async flag is not set wait for the completion of the port
3813 * enable or time out.
3814 *
3815 * Return: 0 on success, non-zero on failures.
3816 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is a shared tracker; only one internal command at a time */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* completion will be reported via mpi3mr_port_enable_complete() */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* synchronous mode: caller blocks on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/* async: leave state PENDING; callback resets it to NOTUSED */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* escalate through the reset-handler fault path on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* update scan_started/scan_failed from the completed command */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3868
/*
 * Protocol type to name mapper structure: used by
 * mpi3mr_print_ioc_info() to decode facts.protocol_flags bits.
 */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
3878
/*
 * Capability to name mapper structure: used by
 * mpi3mr_print_ioc_info() to decode IOC capability bits.
 */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3887
3888 /**
3889 * mpi3mr_repost_diag_bufs - repost host diag buffers
3890 * @mrioc: Adapter instance reference
3891 *
3892 * repost firmware and trace diag buffers based on global
3893 * trigger flag from driver page 2
3894 *
3895 * Return: 0 on success, non-zero on failures.
3896 */
mpi3mr_repost_diag_bufs(struct mpi3mr_ioc * mrioc)3897 static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
3898 {
3899 u64 global_trigger;
3900 union mpi3mr_trigger_data prev_trigger_data;
3901 struct diag_buffer_desc *trace_hdb = NULL;
3902 struct diag_buffer_desc *fw_hdb = NULL;
3903 int retval = 0;
3904 bool trace_repost_needed = false;
3905 bool fw_repost_needed = false;
3906 u8 prev_trigger_type;
3907
3908 retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
3909 if (retval)
3910 return -1;
3911
3912 trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
3913 MPI3_DIAG_BUFFER_TYPE_TRACE);
3914
3915 if (trace_hdb &&
3916 trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
3917 trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
3918 trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
3919 trace_repost_needed = true;
3920
3921 fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
3922
3923 if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
3924 fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
3925 fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
3926 fw_repost_needed = true;
3927
3928 if (trace_repost_needed || fw_repost_needed) {
3929 global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
3930 if (global_trigger &
3931 MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
3932 trace_repost_needed = false;
3933 if (global_trigger &
3934 MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
3935 fw_repost_needed = false;
3936 }
3937
3938 if (trace_repost_needed) {
3939 prev_trigger_type = trace_hdb->trigger_type;
3940 memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
3941 sizeof(trace_hdb->trigger_data));
3942 retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
3943 if (!retval) {
3944 dprint_init(mrioc, "trace diag buffer reposted");
3945 mpi3mr_set_trigger_data_in_hdb(trace_hdb,
3946 MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
3947 } else {
3948 trace_hdb->trigger_type = prev_trigger_type;
3949 memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
3950 sizeof(prev_trigger_data));
3951 ioc_err(mrioc, "trace diag buffer repost failed");
3952 return -1;
3953 }
3954 }
3955
3956 if (fw_repost_needed) {
3957 prev_trigger_type = fw_hdb->trigger_type;
3958 memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
3959 sizeof(fw_hdb->trigger_data));
3960 retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
3961 if (!retval) {
3962 dprint_init(mrioc, "firmware diag buffer reposted");
3963 mpi3mr_set_trigger_data_in_hdb(fw_hdb,
3964 MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
3965 } else {
3966 fw_hdb->trigger_type = prev_trigger_type;
3967 memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
3968 sizeof(prev_trigger_data));
3969 ioc_err(mrioc, "firmware diag buffer repost failed");
3970 return -1;
3971 }
3972 }
3973 return retval;
3974 }
3975
3976 /**
3977 * mpi3mr_read_tsu_interval - Update time stamp interval
3978 * @mrioc: Adapter instance reference
3979 *
3980 * Update time stamp interval if its defined in driver page 1,
3981 * otherwise use default value.
3982 *
3983 * Return: Nothing
3984 */
3985 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)3986 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
3987 {
3988 struct mpi3_driver_page1 driver_pg1;
3989 u16 pg_sz = sizeof(driver_pg1);
3990 int retval = 0;
3991
3992 mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
3993
3994 retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
3995 if (!retval && driver_pg1.time_stamp_update)
3996 mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
3997 }
3998
3999 /**
4000 * mpi3mr_print_ioc_info - Display controller information
4001 * @mrioc: Adapter instance reference
4002 *
4003 * Display controller personality, capability, supported
4004 * protocols etc.
4005 *
4006 * Return: Nothing
4007 */
4008 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)4009 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
4010 {
4011 int i = 0, bytes_written = 0;
4012 const char *personality;
4013 char protocol[50] = {0};
4014 char capabilities[100] = {0};
4015 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4016
4017 switch (mrioc->facts.personality) {
4018 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4019 personality = "Enhanced HBA";
4020 break;
4021 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4022 personality = "RAID";
4023 break;
4024 default:
4025 personality = "Unknown";
4026 break;
4027 }
4028
4029 ioc_info(mrioc, "Running in %s Personality", personality);
4030
4031 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4032 fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4033 fwver->ph_minor, fwver->cust_id, fwver->build_num);
4034
4035 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4036 if (mrioc->facts.protocol_flags &
4037 mpi3mr_protocols[i].protocol) {
4038 bytes_written += scnprintf(protocol + bytes_written,
4039 sizeof(protocol) - bytes_written, "%s%s",
4040 bytes_written ? "," : "",
4041 mpi3mr_protocols[i].name);
4042 }
4043 }
4044
4045 bytes_written = 0;
4046 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4047 if (mrioc->facts.protocol_flags &
4048 mpi3mr_capabilities[i].capability) {
4049 bytes_written += scnprintf(capabilities + bytes_written,
4050 sizeof(capabilities) - bytes_written, "%s%s",
4051 bytes_written ? "," : "",
4052 mpi3mr_capabilities[i].name);
4053 }
4054 }
4055
4056 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4057 protocol, capabilities);
4058 }
4059
4060 /**
4061 * mpi3mr_cleanup_resources - Free PCI resources
4062 * @mrioc: Adapter instance reference
4063 *
4064 * Unmap PCI device memory and disable PCI device.
4065 *
4066 * Return: 0 on success and non-zero on failure.
4067 */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4068 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4069 {
4070 struct pci_dev *pdev = mrioc->pdev;
4071
4072 mpi3mr_cleanup_isr(mrioc);
4073
4074 if (mrioc->sysif_regs) {
4075 iounmap((void __iomem *)mrioc->sysif_regs);
4076 mrioc->sysif_regs = NULL;
4077 }
4078
4079 if (pci_is_enabled(pdev)) {
4080 if (mrioc->bars)
4081 pci_release_selected_regions(pdev, mrioc->bars);
4082 pci_disable_device(pdev);
4083 }
4084 }
4085
4086 /**
4087 * mpi3mr_setup_resources - Enable PCI resources
4088 * @mrioc: Adapter instance reference
4089 *
4090 * Enable PCI device memory, MSI-x registers and set DMA mask.
4091 *
4092 * Return: 0 on success and non-zero on failure.
4093 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* reuse a previously negotiated DMA mask (reset path), else prefer 64-bit */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* MSI-X is mandatory for this controller */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* map the first memory BAR; it carries the system interface registers */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		/* fall back once to a 32-bit mask before failing */
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* derive the MSI-X vector count from the message control word */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* keep interrupts masked until the ISRs are installed */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* poll queues are only requested on a normal boot, never for kdump */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
		    mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4183
4184 /**
4185 * mpi3mr_enable_events - Enable required events
4186 * @mrioc: Adapter instance reference
4187 *
4188 * This routine unmasks the events required by the driver by
 * sending appropriate event mask bitmap through an event
4190 * notification request.
4191 *
4192 * Return: 0 on success and non-zero on failure.
4193 */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4194 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4195 {
4196 int retval = 0;
4197 u32 i;
4198
4199 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4200 mrioc->event_masks[i] = -1;
4201
4202 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4203 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4204 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4205 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4206 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4207 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4208 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4209 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4210 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4211 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4212 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4213 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4214 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4215 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4216 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4217
4218 retval = mpi3mr_issue_event_notification(mrioc);
4219 if (retval)
4220 ioc_err(mrioc, "failed to issue event notification %d\n",
4221 retval);
4222 return retval;
4223 }
4224
4225 /**
4226 * mpi3mr_init_ioc - Initialize the controller
4227 * @mrioc: Adapter instance reference
4228 *
4229 * This the controller initialization routine, executed either
4230 * after soft reset or from pci probe callback.
4231 * Setup the required resources, memory map the controller
4232 * registers, create admin and operational reply queue pairs,
4233 * allocate required memory for reply pool, sense buffer pool,
4234 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
4236 * volumes.
4237 *
4238 * Return: 0 on success and non-zero on failure.
4239 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Phase 1: bring the IOC to ready state and set up a single vector */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Phase 2: read IOC facts and size driver limits from them */
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* kdump kernel: keep outstanding I/O count small */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* without multipath support, expose devices via the SAS transport */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	/* Phase 3: allocate diag/ioctl/reply/chain memory (diag is best-effort) */
	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	/* Phase 4: IOC init, full ISR set, operational queues */
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	/* Phase 5: events and triggers */
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* recoverable failure: clear buffers and retry the whole init twice */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* unrecoverable: fault the IOC and mark the adapter dead */
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4416
4417 /**
4418 * mpi3mr_reinit_ioc - Re-Initialize the controller
4419 * @mrioc: Adapter instance reference
4420 * @is_resume: Called from resume or reset path
4421 *
4422 * This the controller re-initialization routine, executed from
4423 * the soft reset handler or resume callback. Creates
4424 * operational reply queue pairs, allocate required memory for
4425 * reply pool, sense buffer pool, issue IOC init request to the
4426 * firmware, unmask the events and issue port enable to discover
4427 * SAS/SATA/NVMe devices and RAID volumes.
4428 *
4429 * Return: 0 on success and non-zero on failure.
4430 */
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 pe_timeout, ioc_status;

retry_init:
	/* number of poll iterations for the port-enable wait loop below */
	pe_timeout =
	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);

	dprint_reset(mrioc, "bringing up the controller to ready state\n");
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to bring to ready state\n");
		goto out_failed_noretry;
	}

	mrioc->io_admin_reset_sync = 0;
	/* resume/PCI-error paths lost their vectors; re-register a single ISR.
	 * The plain reset path kept them and only needs interrupts re-enabled.
	 */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up single ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "failed to setup ISR\n");
			goto out_failed_noretry;
		}
	} else
		mpi3mr_ioc_enable_intr(mrioc);

	dprint_reset(mrioc, "getting ioc_facts\n");
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "failed to get ioc_facts\n");
		goto out_failed;
	}

	/* firmware may have changed across the reset; revalidate limits */
	dprint_reset(mrioc, "validating ioc_facts\n");
	retval = mpi3mr_revalidate_factsdata(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	if (is_resume) {
		dprint_reset(mrioc, "posting host diag buffers\n");
		retval = mpi3mr_post_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to post host diag buffers\n");
	} else {
		retval = mpi3mr_repost_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to re post host diag buffers\n");
	}

	dprint_reset(mrioc, "sending ioc_init\n");
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to send ioc_init\n");
		goto out_failed;
	}

	dprint_reset(mrioc, "getting package version\n");
	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* switch back from the single bring-up vector to the full MSI-X set */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up multiple ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "failed to re-setup ISR\n");
			goto out_failed_noretry;
		}
	}

	dprint_reset(mrioc, "creating operational queue pairs\n");
	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to create operational queue pairs\n");
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	/* fewer queues than blk-mq already exposed cannot be tolerated */
	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot create minimum number of operational queues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		retval = -1;
		goto out_failed_noretry;
	}

	dprint_reset(mrioc, "enabling events\n");
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events\n");
		goto out_failed;
	}

	mrioc->device_refresh_on = 1;
	mpi3mr_add_event_wait_for_device_refresh(mrioc);

	/* issue port enable asynchronously and poll for its completion */
	ioc_info(mrioc, "sending port enable\n");
	retval = mpi3mr_issue_port_enable(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "failed to issue port enable\n");
		goto out_failed;
	}
	do {
		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
		/* NOTUSED means the port-enable callback has completed */
		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
			break;
		if (!pci_device_is_present(mrioc->pdev))
			mrioc->unrecoverable = 1;
		if (mrioc->unrecoverable) {
			retval = -1;
			goto out_failed_noretry;
		}
		/* a fault or reset during port enable aborts the wait */
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(mrioc);
			mrioc->init_cmds.is_waiting = 0;
			mrioc->init_cmds.callback = NULL;
			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
			goto out_failed;
		}
	} while (--pe_timeout);

	if (!pe_timeout) {
		ioc_err(mrioc, "port enable timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		goto out_failed;
	} else if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	ioc_info(mrioc, "controller %s completed successfully\n",
	    (is_resume)?"resume":"re-initialization");
	return retval;
out_failed:
	/* recoverable failure: clear buffers and retry the whole sequence */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
		    (is_resume)?"resume":"re-initialization", retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* unrecoverable: fault the IOC and mark the adapter dead */
	ioc_err(mrioc, "controller %s is failed\n",
	    (is_resume)?"resume":"re-initialization");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4610
4611 /**
4612 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4613 * segments
4614 * @mrioc: Adapter instance reference
4615 * @qidx: Operational reply queue index
4616 *
4617 * Return: Nothing.
4618 */
static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *reply_q = mrioc->op_reply_qinfo + qidx;
	int seg, seg_sz;

	/* Queue was never allocated: nothing to clear */
	if (!reply_q->q_segments)
		return;

	seg_sz = reply_q->segment_qd * mrioc->op_reply_desc_sz;
	for (seg = 0; seg < reply_q->num_segments; seg++)
		memset(reply_q->q_segments[seg].segment, 0, seg_sz);
}
4633
4634 /**
4635 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4636 * segments
4637 * @mrioc: Adapter instance reference
4638 * @qidx: Operational request queue index
4639 *
4640 * Return: Nothing.
4641 */
static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *req_q = mrioc->req_qinfo + qidx;
	int seg, seg_sz;

	/* Queue was never allocated: nothing to clear */
	if (!req_q->q_segments)
		return;

	seg_sz = req_q->segment_qd * mrioc->facts.op_req_sz;
	for (seg = 0; seg < req_q->num_segments; seg++)
		memset(req_q->q_segments[seg].segment, 0, seg_sz);
}
4656
4657 /**
4658 * mpi3mr_memset_buffers - memset memory for a controller
4659 * @mrioc: Adapter instance reference
4660 *
4661 * clear all the memory allocated for a controller, typically
4662 * called post reset to reuse the memory allocated during the
4663 * controller init.
4664 *
4665 * Return: Nothing.
4666 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	/* clear the admin queue memory and its bookkeeping state */
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/* init_cmds.reply doubles as the allocation marker: the other
	 * internal command reply buffers are assumed to have been allocated
	 * alongside it (NOTE(review): verify against the alloc path).
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		bitmap_clear(mrioc->removepend_bitmap, 0,
		    mrioc->dev_handle_bitmap_bits);
		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
		    MPI3MR_NUM_EVTACKCMD);
	}

	/* reset per-queue state for every operational reply/request queue */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	/* reset I/O throttling accounting */
	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}
4741
/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i, j;
	struct mpi3mr_intr_info *intr_info;
	struct diag_buffer_desc *diag_buffer;

	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_free_ioctl_dma_memory(mrioc);

	/*
	 * DMA pools: return the outstanding buffer (if any) to the pool
	 * before destroying it, then NULL the pointers so a repeated
	 * teardown is a no-op.
	 */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Detach reply queues from the interrupt vectors before freeing */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	/* Internal driver-command reply buffers */
	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	bitmap_free(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	bitmap_free(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	bitmap_free(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	bitmap_free(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	kfree(mrioc->transport_cmds.reply);
	mrioc->transport_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGL frames live in a pool; free entries then the pool */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}

	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		/*
		 * Index 0 with segmented trace-buffer support: free the
		 * per-segment pool allocations and the segment table first.
		 * diag_buffer->size is recomputed here, presumably because
		 * diag_buffer->addr then refers to the u64 segment-address
		 * table rather than the full buffer -- TODO confirm against
		 * the allocation path.
		 */
		if ((i == 0) && mrioc->seg_tb_support) {
			if (mrioc->trace_buf_pool) {
				for (j = 0; j < mrioc->num_tb_segs; j++) {
					if (mrioc->trace_buf[j].segment) {
						dma_pool_free(mrioc->trace_buf_pool,
						    mrioc->trace_buf[j].segment,
						    mrioc->trace_buf[j].segment_dma);
						mrioc->trace_buf[j].segment = NULL;
					}

					/* NOTE(review): redundant, already NULLed above */
					mrioc->trace_buf[j].segment = NULL;
				}
				dma_pool_destroy(mrioc->trace_buf_pool);
				mrioc->trace_buf_pool = NULL;
			}

			kfree(mrioc->trace_buf);
			mrioc->trace_buf = NULL;
			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
		}
		if (diag_buffer->addr) {
			dma_free_coherent(&mrioc->pdev->dev,
			    diag_buffer->size, diag_buffer->addr,
			    diag_buffer->dma_addr);
			diag_buffer->addr = NULL;
			diag_buffer->size = 0;
			diag_buffer->type = 0;
			diag_buffer->status = 0;
		}
	}

	kfree(mrioc->throttle_groups);
	mrioc->throttle_groups = NULL;

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}
4925
/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;	/* stays non-zero until shutdown-complete is seen */
	/* Polling interval below is 100ms, so timeout is in 100ms units */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* Don't issue a second notification if one is already running */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal shutdown via the IOC configuration register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-reported shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}
4988
/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/*
	 * Only a healthy, idle controller in READY state is quiesced:
	 * try a message unit reset first, fall back to a soft reset on
	 * MUR failure, then send the shutdown notification.
	 */
	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
	    !mrioc->pci_err_recovery &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}
5019
5020 /**
5021 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5022 * @mrioc: Adapter instance reference
5023 * @cmdptr: Internal command tracker
5024 *
5025 * Complete an internal driver commands with state indicating it
5026 * is completed due to reset.
5027 *
5028 * Return: Nothing.
5029 */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5030 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5031 struct mpi3mr_drv_cmd *cmdptr)
5032 {
5033 if (cmdptr->state & MPI3MR_CMD_PENDING) {
5034 cmdptr->state |= MPI3MR_CMD_RESET;
5035 cmdptr->state &= ~MPI3MR_CMD_PENDING;
5036 if (cmdptr->is_waiting) {
5037 complete(&cmdptr->done);
5038 cmdptr->is_waiting = 0;
5039 } else if (cmdptr->callback)
5040 cmdptr->callback(mrioc, cmdptr);
5041 }
5042 }
5043
5044 /**
5045 * mpi3mr_flush_drv_cmds - Flush internaldriver commands
5046 * @mrioc: Adapter instance reference
5047 *
5048 * Flush all internal driver commands post reset
5049 *
5050 * Return: Nothing.
5051 */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5052 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5053 {
5054 struct mpi3mr_drv_cmd *cmdptr;
5055 u8 i;
5056
5057 cmdptr = &mrioc->init_cmds;
5058 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5059
5060 cmdptr = &mrioc->cfg_cmds;
5061 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5062
5063 cmdptr = &mrioc->bsg_cmds;
5064 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5065 cmdptr = &mrioc->host_tm_cmds;
5066 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5067
5068 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5069 cmdptr = &mrioc->dev_rmhs_cmds[i];
5070 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5071 }
5072
5073 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5074 cmdptr = &mrioc->evtack_cmds[i];
5075 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5076 }
5077
5078 cmdptr = &mrioc->pel_cmds;
5079 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5080
5081 cmdptr = &mrioc->pel_abort_cmd;
5082 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5083
5084 cmdptr = &mrioc->transport_cmds;
5085 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5086 }
5087
5088 /**
5089 * mpi3mr_pel_wait_post - Issue PEL Wait
5090 * @mrioc: Adapter instance reference
5091 * @drv_cmd: Internal command tracker
5092 *
5093 * Issue PEL Wait MPI request through admin queue and return.
5094 *
5095 * Return: Nothing.
5096 */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5097 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5098 struct mpi3mr_drv_cmd *drv_cmd)
5099 {
5100 struct mpi3_pel_req_action_wait pel_wait;
5101
5102 mrioc->pel_abort_requested = false;
5103
5104 memset(&pel_wait, 0, sizeof(pel_wait));
5105 drv_cmd->state = MPI3MR_CMD_PENDING;
5106 drv_cmd->is_waiting = 0;
5107 drv_cmd->callback = mpi3mr_pel_wait_complete;
5108 drv_cmd->ioc_status = 0;
5109 drv_cmd->ioc_loginfo = 0;
5110 pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5111 pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5112 pel_wait.action = MPI3_PEL_ACTION_WAIT;
5113 pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5114 pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5115 pel_wait.class = cpu_to_le16(mrioc->pel_class);
5116 pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5117 dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5118 mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5119
5120 if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5121 dprint_bsg_err(mrioc,
5122 "Issuing PELWait: Admin post failed\n");
5123 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5124 drv_cmd->callback = NULL;
5125 drv_cmd->retry_count = 0;
5126 mrioc->pel_enabled = false;
5127 }
5128 }
5129
5130 /**
5131 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5132 * @mrioc: Adapter instance reference
5133 * @drv_cmd: Internal command tracker
5134 *
5135 * Issue PEL get sequence number MPI request through admin queue
5136 * and return.
5137 *
5138 * Return: 0 on success, non-zero on failure.
5139 */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5140 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5141 struct mpi3mr_drv_cmd *drv_cmd)
5142 {
5143 struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5144 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5145 int retval = 0;
5146
5147 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5148 mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5149 mrioc->pel_cmds.is_waiting = 0;
5150 mrioc->pel_cmds.ioc_status = 0;
5151 mrioc->pel_cmds.ioc_loginfo = 0;
5152 mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5153 pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5154 pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5155 pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5156 mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5157 mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5158
5159 retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5160 sizeof(pel_getseq_req), 0);
5161 if (retval) {
5162 if (drv_cmd) {
5163 drv_cmd->state = MPI3MR_CMD_NOTUSED;
5164 drv_cmd->callback = NULL;
5165 drv_cmd->retry_count = 0;
5166 }
5167 mrioc->pel_enabled = false;
5168 }
5169
5170 return retval;
5171 }
5172
/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PELWait request and
 * firmware completes a PELWait request when it is aborted or a
 * new PEL entry is available. This sends AEN to the application
 * and if the PELwait completion is not due to PELAbort then
 * this will send a request for new PEL Sequence number
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Completed by reset flush: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	/* No reply frame is unretryable: give up on PEL */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected outcome (host issued a PELAbort) */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Signal the application (AEN) that a new PEL entry arrived */
	atomic64_inc(&event_counter);
	/* Unless aborted, fetch the new sequence number and re-arm */
	if (!mrioc->pel_abort_requested) {
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5253
/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* DMA buffer the firmware filled with the sequence numbers */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Completed by reset flush: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	/* No reply frame is unretryable: give up on PEL */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next PELWait starts one past the newest logged entry */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5328
5329 /**
5330 * mpi3mr_check_op_admin_proc -
5331 * @mrioc: Adapter instance reference
5332 *
5333 * Check if any of the operation reply queues
5334 * or the admin reply queue are currently in use.
5335 * If any queue is in use, this function waits for
5336 * a maximum of 10 seconds for them to become available.
5337 *
5338 * Return: 0 on success, non-zero on failure.
5339 */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5340 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5341 {
5342
5343 u16 timeout = 10 * 10;
5344 u16 elapsed_time = 0;
5345 bool op_admin_in_use = false;
5346
5347 do {
5348 op_admin_in_use = false;
5349
5350 /* Check admin_reply queue first to exit early */
5351 if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5352 op_admin_in_use = true;
5353 else {
5354 /* Check op_reply queues */
5355 int i;
5356
5357 for (i = 0; i < mrioc->num_queues; i++) {
5358 if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5359 op_admin_in_use = true;
5360 break;
5361 }
5362 }
5363 }
5364
5365 if (!op_admin_in_use)
5366 break;
5367
5368 msleep(100);
5369
5370 } while (++elapsed_time < timeout);
5371
5372 if (op_admin_in_use)
5373 return 1;
5374
5375 return 0;
5376 }
5377
/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is a handler for recovering controller by issuing soft
 * reset or diag fault reset. This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All BSG requests will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable
 *
 * If snapdump bit is set, the controller is issued with diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Quiesce the host side: no new SCSI or BSG requests */
	mrioc->device_refresh_on = 0;
	scsi_block_requests(mrioc->shost);
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated (non-fault, non-snapdump) resets, release the
	 * diagnostic buffers and mask all events before the reset.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	/*
	 * Snapdump path: diag-fault reset so firmware saves a snap dump,
	 * then poll until the in-firmware diag save completes (or the
	 * MPI3_SYSIF_DIAG_SAVE_TIMEOUT expires) before the soft reset.
	 */
	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
			    MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Ensure no polling thread is still walking the reply queues */
	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
			"thread still processing replies even after a 10 second\n"
			"timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	/* Throttle-group bookkeeping cannot be resized across a reset */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush everything that was in flight when the reset hit */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
	    mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Let the topology settle before resuming I/O */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: unblock requests, re-arm PEL and the watchdog */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure: fault the controller and mark it unrecoverable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5580
/**
 * mpi3mr_post_cfg_req - Issue config requests and wait
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 *
 * A generic function for posting MPI3 configuration request to
 * the firmware. This blocks for the completion of request for
 * timeout seconds and if the request times out this function
 * faults the controller with proper reason code.
 *
 * On successful completion of the request this function returns
 * appropriate ioc status from the firmware back to the caller.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* Serialize: only one config request may be outstanding */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Arm the tracker for a synchronous (completion-based) wait */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller with the reason code */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Request completed: hand the firmware ioc_status to the caller */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5650
5651 /**
5652 * mpi3mr_process_cfg_req - config page request processor
5653 * @mrioc: Adapter instance reference
5654 * @cfg_req: Configuration request
5655 * @cfg_hdr: Configuration page header
5656 * @timeout: Timeout in seconds
5657 * @ioc_status: Pointer to return ioc status
5658 * @cfg_buf: Memory pointer to copy config page or header
5659 * @cfg_buf_sz: Size of the memory to get config page or header
5660 *
5661 * This is handler for config page read, write and config page
5662 * header read operations.
5663 *
5664 * This function expects the cfg_req to be populated with page
5665 * type, page number, action for the header read and with page
5666 * address for all other operations.
5667 *
 * The cfg_hdr can be passed as null for reading the required header
 * details; for read/write page operations the cfg_hdr must point to a
 * valid configuration page header.
5671 *
5672 * This allocates dmaable memory based on the size of the config
5673 * buffer and set the SGE of the cfg_req.
5674 *
5675 * For write actions, the config page data has to be passed in
5676 * the cfg_buf and size of the data has to be mentioned in the
5677 * cfg_buf_sz.
5678 *
5679 * For read/header actions, on successful completion of the
5680 * request with successful ioc_status the data will be copied
5681 * into the cfg_buf limited to a minimum of actual page size and
5682 * cfg_buf_sz
5683 *
5684 *
5685 * Return: 0 on success, non-zero on failure.
5686 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions not permitted by the page's attribute. */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/*
		 * page_length is in 4-byte units, so the byte size can be up
		 * to 0xFFFF * 4 and does not fit in a u16; keep all size
		 * comparisons below in u32.
		 */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}

	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
	    mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);

	if (!mem_desc.addr)
		goto out;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/*
		 * min_t(u32, ...) (not u16): a u16 cast would truncate
		 * mem_desc.size (up to 0x3FFFC) and cfg_buf_sz (u32) and
		 * could copy the wrong length.
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u32, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* Copy read data back only on success and only for read actions. */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u32, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	if (mem_desc.addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
		    mem_desc.addr, mem_desc.dma_addr);
		mem_desc.addr = NULL;
	}

	return retval;
}
5778
5779 /**
5780 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5781 * @mrioc: Adapter instance reference
5782 * @ioc_status: Pointer to return ioc status
5783 * @dev_pg0: Pointer to return device page 0
5784 * @pg_sz: Size of the memory allocated to the page pointer
5785 * @form: The form to be used for addressing the page
5786 * @form_spec: Form specific information like device handle
5787 *
5788 * This is handler for config page read for a specific device
5789 * page0. The ioc_status has the controller returned ioc_status.
5790 * This routine doesn't check ioc_status to decide whether the
5791 * page read is success or not and it is the callers
5792 * responsibility.
5793 *
5794 * Return: 0 on success, non-zero on failure.
5795 */
int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(dev_pg0, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "device page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/form_spec. */
	pg_addr = (form & MPI3_DEVICE_PGAD_FORM_MASK) |
	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
		ioc_err(mrioc, "device page0 read failed\n");
		return -1;
	}
	return 0;
}
5836
5837
5838 /**
5839 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5840 * @mrioc: Adapter instance reference
5841 * @ioc_status: Pointer to return ioc status
5842 * @phy_pg0: Pointer to return SAS Phy page 0
5843 * @pg_sz: Size of the memory allocated to the page pointer
5844 * @form: The form to be used for addressing the page
5845 * @form_spec: Form specific information like phy number
5846 *
5847 * This is handler for config page read for a specific SAS Phy
5848 * page0. The ioc_status has the controller returned ioc_status.
5849 * This routine doesn't check ioc_status to decide whether the
5850 * page read is success or not and it is the callers
5851 * responsibility.
5852 *
5853 * Return: 0 on success, non-zero on failure.
5854 */
int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(phy_pg0, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/phy number. */
	pg_addr = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
		ioc_err(mrioc, "sas phy page0 read failed\n");
		return -1;
	}
	return 0;
}
5896
5897 /**
5898 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5899 * @mrioc: Adapter instance reference
5900 * @ioc_status: Pointer to return ioc status
5901 * @phy_pg1: Pointer to return SAS Phy page 1
5902 * @pg_sz: Size of the memory allocated to the page pointer
5903 * @form: The form to be used for addressing the page
5904 * @form_spec: Form specific information like phy number
5905 *
5906 * This is handler for config page read for a specific SAS Phy
5907 * page1. The ioc_status has the controller returned ioc_status.
5908 * This routine doesn't check ioc_status to decide whether the
5909 * page read is success or not and it is the callers
5910 * responsibility.
5911 *
5912 * Return: 0 on success, non-zero on failure.
5913 */
int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(phy_pg1, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/phy number. */
	pg_addr = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
		ioc_err(mrioc, "sas phy page1 read failed\n");
		return -1;
	}
	return 0;
}
5955
5956
5957 /**
5958 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5959 * @mrioc: Adapter instance reference
5960 * @ioc_status: Pointer to return ioc status
5961 * @exp_pg0: Pointer to return SAS Expander page 0
5962 * @pg_sz: Size of the memory allocated to the page pointer
5963 * @form: The form to be used for addressing the page
5964 * @form_spec: Form specific information like device handle
5965 *
5966 * This is handler for config page read for a specific SAS
5967 * Expander page0. The ioc_status has the controller returned
5968 * ioc_status. This routine doesn't check ioc_status to decide
5969 * whether the page read is success or not and it is the callers
5970 * responsibility.
5971 *
5972 * Return: 0 on success, non-zero on failure.
5973 */
int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(exp_pg0, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/phy num/handle. */
	pg_addr = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
		ioc_err(mrioc, "expander page0 read failed\n");
		return -1;
	}
	return 0;
}
6016
6017 /**
6018 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6019 * @mrioc: Adapter instance reference
6020 * @ioc_status: Pointer to return ioc status
6021 * @exp_pg1: Pointer to return SAS Expander page 1
6022 * @pg_sz: Size of the memory allocated to the page pointer
6023 * @form: The form to be used for addressing the page
6024 * @form_spec: Form specific information like phy number
6025 *
6026 * This is handler for config page read for a specific SAS
6027 * Expander page1. The ioc_status has the controller returned
6028 * ioc_status. This routine doesn't check ioc_status to decide
6029 * whether the page read is success or not and it is the callers
6030 * responsibility.
6031 *
6032 * Return: 0 on success, non-zero on failure.
6033 */
int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(exp_pg1, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/phy num/handle. */
	pg_addr = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
		ioc_err(mrioc, "expander page1 read failed\n");
		return -1;
	}
	return 0;
}
6076
6077 /**
6078 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6079 * @mrioc: Adapter instance reference
6080 * @ioc_status: Pointer to return ioc status
6081 * @encl_pg0: Pointer to return Enclosure page 0
6082 * @pg_sz: Size of the memory allocated to the page pointer
6083 * @form: The form to be used for addressing the page
6084 * @form_spec: Form specific information like device handle
6085 *
6086 * This is handler for config page read for a specific Enclosure
6087 * page0. The ioc_status has the controller returned ioc_status.
6088 * This routine doesn't check ioc_status to decide whether the
6089 * page read is success or not and it is the callers
6090 * responsibility.
6091 *
6092 * Return: 0 on success, non-zero on failure.
6093 */
int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pg_addr;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(encl_pg0, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "enclosure page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the page itself, addressed by form/handle. */
	pg_addr = (form & MPI3_ENCLOS_PGAD_FORM_MASK) |
	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pg_addr);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
		ioc_err(mrioc, "enclosure page0 read failed\n");
		return -1;
	}
	return 0;
}
6135
6136
6137 /**
6138 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6139 * @mrioc: Adapter instance reference
6140 * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6141 * @pg_sz: Size of the memory allocated to the page pointer
6142 *
6143 * This is handler for config page read for the SAS IO Unit
6144 * page0. This routine checks ioc_status to decide whether the
6145 * page read is success or not.
6146 *
6147 * Return: 0 on success, non-zero on failure.
6148 */
int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(sas_io_unit_pg0, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page0 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the page itself; ioc_status is checked locally. */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
		ioc_err(mrioc, "sas io unit page0 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6192
6193 /**
6194 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6195 * @mrioc: Adapter instance reference
6196 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6197 * @pg_sz: Size of the memory allocated to the page pointer
6198 *
6199 * This is handler for config page read for the SAS IO Unit
6200 * page1. This routine checks ioc_status to decide whether the
6201 * page read is success or not.
6202 *
6203 * Return: 0 on success, non-zero on failure.
6204 */
int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(sas_io_unit_pg1, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the page itself; ioc_status is checked locally. */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6248
6249 /**
6250 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6251 * @mrioc: Adapter instance reference
6252 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6253 * @pg_sz: Size of the memory allocated to the page pointer
6254 *
6255 * This is handler for config page write for the SAS IO Unit
6256 * page1. This routine checks ioc_status to decide whether the
6257 * page read is success or not. This will modify both current
6258 * and persistent page.
6259 *
6260 * Return: 0 on success, non-zero on failure.
6261 */
int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: write the current copy of the page. */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write current failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 3: write the persistent copy so the change survives reset. */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6317
6318 /**
6319 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6320 * @mrioc: Adapter instance reference
6321 * @driver_pg1: Pointer to return Driver page 1
6322 * @pg_sz: Size of the memory allocated to the page pointer
6323 *
6324 * This is handler for config page read for the Driver page1.
6325 * This routine checks ioc_status to decide whether the page
6326 * read is success or not.
6327 *
6328 * Return: 0 on success, non-zero on failure.
6329 */
int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(driver_pg1, 0, pg_sz);

	/* Step 1: fetch the page header to learn length and version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "driver page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the page itself; ioc_status is checked locally. */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
		ioc_err(mrioc, "driver page1 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6373
6374 /**
6375 * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6376 * @mrioc: Adapter instance reference
6377 * @driver_pg2: Pointer to return driver page 2
6378 * @pg_sz: Size of the memory allocated to the page pointer
6379 * @page_action: Page action
6380 *
6381 * This is handler for config page read for the driver page2.
6382 * This routine checks ioc_status to decide whether the page
6383 * read is success or not.
6384 *
6385 * Return: 0 on success, non-zero on failure.
6386 */
int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
{
	struct mpi3_config_page_header cfg_hdr;
	struct mpi3_config_request cfg_req;
	u16 ioc_status = 0;

	memset(driver_pg2, 0, pg_sz);
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(&cfg_req, 0, sizeof(cfg_req));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
	cfg_req.page_number = 2;
	cfg_req.page_address = 0;
	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "driver page2 header read failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/*
		 * Keep the diagnostic on one line (no embedded '\n' mid
		 * message), matching the other cfg page helpers.
		 */
		ioc_err(mrioc, "driver page2 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	/* The caller selects read current/persistent/default via page_action. */
	cfg_req.action = page_action;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
		ioc_err(mrioc, "driver page2 read failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page2 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	return 0;
out_failed:
	return -1;
}
6433
6434