1 /*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
34 *
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37 *
38 */
39
40 #include <sys/cdefs.h>
41 #include <dev/mrsas/mrsas.h>
42 #include <dev/mrsas/mrsas_ioctl.h>
43
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46
47 #include <sys/sysctl.h>
48 #include <sys/types.h>
49 #include <sys/sysent.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/smp.h>
53 #include <sys/endian.h>
54
55 /*
56 * Function prototypes
57 */
58 static d_open_t mrsas_open;
59 static d_close_t mrsas_close;
60 static d_ioctl_t mrsas_ioctl;
61 static d_poll_t mrsas_poll;
62
63 static void mrsas_ich_startup(void *arg);
64 static struct mrsas_mgmt_info mrsas_mgmt_info;
65 static struct mrsas_ident *mrsas_find_ident(device_t);
66 static int mrsas_setup_msix(struct mrsas_softc *sc);
67 static int mrsas_allocate_msix(struct mrsas_softc *sc);
68 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
69 static void mrsas_flush_cache(struct mrsas_softc *sc);
70 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
71 static void mrsas_ocr_thread(void *arg);
72 static int mrsas_get_map_info(struct mrsas_softc *sc);
73 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
74 static int mrsas_sync_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_pd_list(struct mrsas_softc *sc);
76 static int mrsas_get_ld_list(struct mrsas_softc *sc);
77 static int mrsas_setup_irq(struct mrsas_softc *sc);
78 static int mrsas_alloc_mem(struct mrsas_softc *sc);
79 static int mrsas_init_fw(struct mrsas_softc *sc);
80 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
81 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
82 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
85 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
86 static int
87 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
88 struct mrsas_mfi_cmd *cmd_to_abort);
89 static void
90 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
91 static struct mrsas_softc *
92 mrsas_get_softc_instance(struct cdev *dev,
93 u_long cmd, caddr_t arg);
94 u_int32_t
95 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99 struct mrsas_mfi_cmd *mfi_cmd);
100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int mrsas_init_adapter(struct mrsas_softc *sc);
103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int mrsas_ioc_init(struct mrsas_softc *sc);
107 int mrsas_bus_scan(struct mrsas_softc *sc);
108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116 struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119 int size);
120 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void mrsas_disable_intr(struct mrsas_softc *sc);
125 void mrsas_enable_intr(struct mrsas_softc *sc);
126 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void mrsas_free_mem(struct mrsas_softc *sc);
128 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void mrsas_isr(void *arg);
130 void mrsas_teardown_intr(struct mrsas_softc *sc);
131 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void mrsas_kill_hba(struct mrsas_softc *sc);
133 void mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136 u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139 u_int32_t req_desc_hi);
140 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143 struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
145
146 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
147 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
148
149 extern int mrsas_cam_attach(struct mrsas_softc *sc);
150 extern void mrsas_cam_detach(struct mrsas_softc *sc);
151 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
152 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
154 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
155 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
156 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
157 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
159 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
160 extern void mrsas_xpt_release(struct mrsas_softc *sc);
161 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
162 mrsas_get_request_desc(struct mrsas_softc *sc,
163 u_int16_t index);
164 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
165 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
166 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
167 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
168
169 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
170 union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
171 u_int32_t data_length, u_int8_t *sense);
172 void
173 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
174 u_int32_t req_desc_hi);
175
176 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
177 "MRSAS Driver Parameters");
178
179 /*
180 * PCI device struct and table
181 *
182 */
/*
 * PCI identification tuple for one supported controller.  In
 * device_table a subvendor/subdevice value of 0xffff acts as a
 * wildcard (see the matching logic in mrsas_find_ident()).
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID; 0xffff = any */
	uint16_t subdevice;	/* PCI subsystem device ID; 0xffff = any */
	const char *desc;	/* human-readable controller description */
} MRSAS_CTLR_ID;
190
/*
 * Supported controller table; mrsas_find_ident() walks it until the
 * all-zero sentinel entry (vendor == 0).
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}	/* sentinel — terminates the table walk */
};
215
216 /*
217 * Character device entry points
218 *
219 */
/*
 * Character device switch for the management node.  Only open, close,
 * ioctl and poll entry points are provided; there is no read/write
 * interface.
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
228
229 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
230
/*
 * mrsas_open:	character-device open entry point.
 *
 * No per-open state is tracked, so open always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
237
/*
 * mrsas_close:	character-device close entry point.
 *
 * Nothing to tear down per open; always succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
244
245 u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc * sc,int offset)246 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
247 {
248 u_int32_t i = 0, ret_val;
249
250 if (sc->is_aero) {
251 do {
252 ret_val = mrsas_read_reg(sc, offset);
253 i++;
254 } while(ret_val == 0 && i < 3);
255 } else
256 ret_val = mrsas_read_reg(sc, offset);
257
258 return ret_val;
259 }
260
261 /*
262 * Register Read/Write Functions
263 *
264 */
265 void
mrsas_write_reg(struct mrsas_softc * sc,int offset,u_int32_t value)266 mrsas_write_reg(struct mrsas_softc *sc, int offset,
267 u_int32_t value)
268 {
269 bus_space_tag_t bus_tag = sc->bus_tag;
270 bus_space_handle_t bus_handle = sc->bus_handle;
271
272 bus_space_write_4(bus_tag, bus_handle, offset, value);
273 }
274
275 u_int32_t
mrsas_read_reg(struct mrsas_softc * sc,int offset)276 mrsas_read_reg(struct mrsas_softc *sc, int offset)
277 {
278 bus_space_tag_t bus_tag = sc->bus_tag;
279 bus_space_handle_t bus_handle = sc->bus_handle;
280
281 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
282 }
283
284 /*
285 * Interrupt Disable/Enable/Clear Functions
286 *
287 */
/*
 * mrsas_disable_intr:	mask all controller interrupts.
 * input:	Adapter soft state
 *
 * Records the masked state in sc->mask_interrupts, then writes an
 * all-ones value to the outbound interrupt mask register.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
298
/*
 * mrsas_enable_intr:	acknowledge pending status and unmask interrupts.
 * input:	Adapter soft state
 *
 * Clears sc->mask_interrupts, acknowledges any pending bits in the
 * outbound interrupt status register, then writes the complement of
 * MFI_FUSION_ENABLE_INTERRUPT_MASK to the mask register.  Each write
 * is followed by a read-back to flush the posted PCI write.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;

	sc->mask_interrupts = 0;
	/* Ack any interrupt status that is already pending. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask only the bits covered by MFI_FUSION_ENABLE_INTERRUPT_MASK. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
311
312 static int
mrsas_clear_intr(struct mrsas_softc * sc)313 mrsas_clear_intr(struct mrsas_softc *sc)
314 {
315 u_int32_t status;
316
317 /* Read received interrupt */
318 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
319
320 /* Not our interrupt, so just return */
321 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
322 return (0);
323
324 /* We got a reply interrupt */
325 return (1);
326 }
327
328 /*
329 * PCI Support Functions
330 *
331 */
332 static struct mrsas_ident *
mrsas_find_ident(device_t dev)333 mrsas_find_ident(device_t dev)
334 {
335 struct mrsas_ident *pci_device;
336
337 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
338 if ((pci_device->vendor == pci_get_vendor(dev)) &&
339 (pci_device->device == pci_get_device(dev)) &&
340 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
341 (pci_device->subvendor == 0xffff)) &&
342 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
343 (pci_device->subdevice == 0xffff)))
344 return (pci_device);
345 }
346 return (NULL);
347 }
348
349 static int
mrsas_probe(device_t dev)350 mrsas_probe(device_t dev)
351 {
352 struct mrsas_ident *id;
353
354 if ((id = mrsas_find_ident(dev)) != NULL) {
355 device_set_desc(dev, id->desc);
356 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
357 return (-30);
358 }
359 return (ENXIO);
360 }
361
362 /*
363 * mrsas_setup_sysctl: setup sysctl values for mrsas
364 * input: Adapter instance soft state
365 *
366 * Setup sysctl entries for mrsas driver.
367 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the context/tree newbus created for this device. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/*
	 * Fall back to a private context rooted under hw.mrsas.<unit>
	 * when the device tree is unavailable.
	 */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
	    &sc->drv_stream_detection, 0,
	    "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
449
450 /*
451 * mrsas_get_tunables: get tunable parameters.
452 * input: Adapter instance soft state
453 *
454 * Get tunable parameters. This will help to debug driver at boot time.
455 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
	    (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balance pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
486
487 /*
488 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
489 * Used to get sequence number at driver load time.
490 * input: Adapter soft state
491 *
492 * Allocates DMAable memory for the event log info internal command.
493 */
494 int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc * sc)495 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
496 {
497 int el_info_size;
498
499 /* Allocate get event log info command */
500 el_info_size = sizeof(struct mrsas_evt_log_info);
501 if (bus_dma_tag_create(sc->mrsas_parent_tag,
502 1, 0,
503 BUS_SPACE_MAXADDR_32BIT,
504 BUS_SPACE_MAXADDR,
505 NULL, NULL,
506 el_info_size,
507 1,
508 el_info_size,
509 BUS_DMA_ALLOCNOW,
510 NULL, NULL,
511 &sc->el_info_tag)) {
512 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
513 return (ENOMEM);
514 }
515 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
516 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
517 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
518 return (ENOMEM);
519 }
520 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
521 sc->el_info_mem, el_info_size, mrsas_addr_cb,
522 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
523 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
524 return (ENOMEM);
525 }
526 memset(sc->el_info_mem, 0, el_info_size);
527 return (0);
528 }
529
530 /*
531 * mrsas_free_evt_info_cmd: Free memory for Event log info command
532 * input: Adapter soft state
533 *
534 * Deallocates memory for the event log info internal command.
535 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	/* Unwind in reverse order of allocation; each step is guarded so
	 * this is safe after a partial allocation. */
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
546
547 /*
548 * mrsas_get_seq_num: Get latest event sequence number
549 * @sc: Adapter soft state
550 * @eli: Firmware event log sequence number information.
551 *
552 * Firmware maintains a log of all events in a non-volatile area.
553 * Driver get the sequence number using DCMD
554 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
555 */
556
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that will receive the event log info from firmware. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build MR_DCMD_CTRL_EVENT_GET_INFO with a single read-direction SGE;
	 * multi-byte fields are little-endian as the firmware expects. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/* On timeout (do_ocr still set): request an online controller reset
	 * and do not release the MFI command here.  On success: release it. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
610
611 /*
612 * mrsas_register_aen: Register for asynchronous event notification
613 * @sc: Adapter soft state
614 * @seq_num: Starting sequence number
615 * @class_locale: Class of the event
616 *
617 * This function subscribes for events beyond the @seq_num
618 * and type @class_locale.
619 *
620 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] of the pending AEN holds its class/locale word. */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, minimum class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);	/* first sequence number wanted */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);	/* class/locale filter */
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/* Re-check: another AEN may have been installed meanwhile; if so,
	 * drop this one. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
727
728 /*
729 * mrsas_start_aen: Subscribes to AEN during driver load time
730 * @instance: Adapter soft state
731 */
732 static int
mrsas_start_aen(struct mrsas_softc * sc)733 mrsas_start_aen(struct mrsas_softc *sc)
734 {
735 struct mrsas_evt_log_info eli;
736 union mrsas_evt_class_locale class_locale;
737
738 /* Get the latest sequence number from FW */
739
740 memset(&eli, 0, sizeof(eli));
741
742 if (mrsas_get_seq_num(sc, &eli))
743 return -1;
744
745 /* Register AEN with FW for latest sequence number plus 1 */
746 class_locale.members.reserved = 0;
747 class_locale.members.locale = MR_EVT_LOCALE_ALL;
748 class_locale.members.class = MR_EVT_CLASS_DEBUG;
749
750 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
751 class_locale.word);
752
753 }
754
755 /*
756 * mrsas_setup_msix: Allocate MSI-x vectors
757 * @sc: adapter soft state
758 */
759 static int
mrsas_setup_msix(struct mrsas_softc * sc)760 mrsas_setup_msix(struct mrsas_softc *sc)
761 {
762 int i;
763
764 for (i = 0; i < sc->msix_vectors; i++) {
765 sc->irq_context[i].sc = sc;
766 sc->irq_context[i].MSIxIndex = i;
767 sc->irq_id[i] = i + 1;
768 sc->mrsas_irq[i] = bus_alloc_resource_any
769 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
770 ,RF_ACTIVE);
771 if (sc->mrsas_irq[i] == NULL) {
772 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
773 goto irq_alloc_failed;
774 }
775 if (bus_setup_intr(sc->mrsas_dev,
776 sc->mrsas_irq[i],
777 INTR_MPSAFE | INTR_TYPE_CAM,
778 NULL, mrsas_isr, &sc->irq_context[i],
779 &sc->intr_handle[i])) {
780 device_printf(sc->mrsas_dev,
781 "Cannot set up MSI-x interrupt handler\n");
782 goto irq_alloc_failed;
783 }
784 }
785 return SUCCESS;
786
787 irq_alloc_failed:
788 mrsas_teardown_intr(sc);
789 return (FAIL);
790 }
791
792 /*
793 * mrsas_allocate_msix: Setup MSI-x vectors
794 * @sc: adapter soft state
795 */
796 static int
mrsas_allocate_msix(struct mrsas_softc * sc)797 mrsas_allocate_msix(struct mrsas_softc *sc)
798 {
799 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
800 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
801 " of vectors\n", sc->msix_vectors);
802 } else {
803 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
804 goto irq_alloc_failed;
805 }
806 return SUCCESS;
807
808 irq_alloc_failed:
809 mrsas_teardown_intr(sc);
810 return (FAIL);
811 }
812
813 /*
814 * mrsas_attach: PCI entry point
815 * input: pointer to device struct
816 *
817 * Performs setup of PCI and registers, initializes mutexes and linked lists,
818 * registers interrupts and CAM, and initializes the adapter/controller to
819 * its proper state.
820 */
821 static int
mrsas_attach(device_t dev)822 mrsas_attach(device_t dev)
823 {
824 struct mrsas_softc *sc = device_get_softc(dev);
825 uint32_t cmd, error;
826
827 memset(sc, 0, sizeof(struct mrsas_softc));
828
829 /* Look up our softc and initialize its fields. */
830 sc->mrsas_dev = dev;
831 sc->device_id = pci_get_device(dev);
832
833 switch (sc->device_id) {
834 case MRSAS_INVADER:
835 case MRSAS_FURY:
836 case MRSAS_INTRUDER:
837 case MRSAS_INTRUDER_24:
838 case MRSAS_CUTLASS_52:
839 case MRSAS_CUTLASS_53:
840 sc->mrsas_gen3_ctrl = 1;
841 break;
842 case MRSAS_VENTURA:
843 case MRSAS_CRUSADER:
844 case MRSAS_HARPOON:
845 case MRSAS_TOMCAT:
846 case MRSAS_VENTURA_4PORT:
847 case MRSAS_CRUSADER_4PORT:
848 sc->is_ventura = true;
849 break;
850 case MRSAS_AERO_10E1:
851 case MRSAS_AERO_10E5:
852 device_printf(dev, "Adapter is in configurable secure mode\n");
853 case MRSAS_AERO_10E2:
854 case MRSAS_AERO_10E6:
855 sc->is_aero = true;
856 break;
857 case MRSAS_AERO_10E0:
858 case MRSAS_AERO_10E3:
859 case MRSAS_AERO_10E4:
860 case MRSAS_AERO_10E7:
861 device_printf(dev, "Adapter is in non-secure mode\n");
862 return SUCCESS;
863 }
864
865 mrsas_get_tunables(sc);
866
867 /*
868 * Set up PCI and registers
869 */
870 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
871 /* Force the busmaster enable bit on. */
872 cmd |= PCIM_CMD_BUSMASTEREN;
873 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
874
875 /* For Ventura/Aero system registers are mapped to BAR0 */
876 if (sc->is_ventura || sc->is_aero)
877 sc->reg_res_id = PCIR_BAR(0); /* BAR0 offset */
878 else
879 sc->reg_res_id = PCIR_BAR(1); /* BAR1 offset */
880
881 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
882 &(sc->reg_res_id), RF_ACTIVE))
883 == NULL) {
884 device_printf(dev, "Cannot allocate PCI registers\n");
885 goto attach_fail;
886 }
887 sc->bus_tag = rman_get_bustag(sc->reg_res);
888 sc->bus_handle = rman_get_bushandle(sc->reg_res);
889
890 /* Intialize mutexes */
891 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
892 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
893 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
894 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
895 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
896 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
897 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
898 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
899 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);
900
	/* Initialize linked list */
902 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
903 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
904
905 mrsas_atomic_set(&sc->fw_outstanding, 0);
906 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
907 mrsas_atomic_set(&sc->prp_count, 0);
908 mrsas_atomic_set(&sc->sge_holes, 0);
909
910 sc->io_cmds_highwater = 0;
911
912 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
913 sc->UnevenSpanSupport = 0;
914
915 sc->msix_enable = 0;
916
917 /* Initialize Firmware */
918 if (mrsas_init_fw(sc) != SUCCESS) {
919 goto attach_fail_fw;
920 }
921 /* Register mrsas to CAM layer */
922 if ((mrsas_cam_attach(sc) != SUCCESS)) {
923 goto attach_fail_cam;
924 }
925 /* Register IRQs */
926 if (mrsas_setup_irq(sc) != SUCCESS) {
927 goto attach_fail_irq;
928 }
929 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
930 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
931 device_get_unit(sc->mrsas_dev));
932 if (error) {
933 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
934 goto attach_fail_ocr_thread;
935 }
936 /*
937 * After FW initialization and OCR thread creation
938 * we will defer the cdev creation, AEN setup on ICH callback
939 */
940 sc->mrsas_ich.ich_func = mrsas_ich_startup;
941 sc->mrsas_ich.ich_arg = sc;
942 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
943 device_printf(sc->mrsas_dev, "Config hook is already established\n");
944 }
945 mrsas_setup_sysctl(sc);
946 return SUCCESS;
947
948 attach_fail_ocr_thread:
949 if (sc->ocr_thread_active)
950 wakeup(&sc->ocr_chan);
951 attach_fail_irq:
952 mrsas_teardown_intr(sc);
953 attach_fail_cam:
954 mrsas_cam_detach(sc);
955 attach_fail_fw:
956 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
957 if (sc->msix_enable == 1)
958 pci_release_msi(sc->mrsas_dev);
959 mrsas_free_mem(sc);
960 mtx_destroy(&sc->sim_lock);
961 mtx_destroy(&sc->aen_lock);
962 mtx_destroy(&sc->pci_lock);
963 mtx_destroy(&sc->io_lock);
964 mtx_destroy(&sc->ioctl_lock);
965 mtx_destroy(&sc->mpt_cmd_pool_lock);
966 mtx_destroy(&sc->mfi_cmd_pool_lock);
967 mtx_destroy(&sc->raidmap_lock);
968 mtx_destroy(&sc->stream_lock);
969 attach_fail:
970 if (sc->reg_res) {
971 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
972 sc->reg_res_id, sc->reg_res);
973 }
974 return (ENXIO);
975 }
976
/*
 * mrsas_ich_startup: Interrupt config hook callback
 * input: softc pointer (registered as the hook argument in mrsas_attach())
 *
 * Deferred part of attach, run once interrupt delivery is available:
 * creates the /dev node(s), registers this controller in the global
 * management table, enables controller interrupts, refreshes PD info and
 * starts AEN.  Finally the config hook is disestablished so boot can
 * proceed.
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore bounding the number of concurrent
	 * IOCTLs (waited on in mrsas_ioctl(), destroyed in mrsas_detach()).
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/*
	 * Unit 0 additionally provides a Linux-compatible alias node for
	 * megaraid_sas management tools.
	 */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications.  The table is zeroed once,
	 * when unit 0 attaches.
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	/*
	 * NOTE(review): max_index is used as the next free slot and is not
	 * range-checked against the sc_ptr[] capacity here — presumably
	 * bounded by the supported controller count; verify against the
	 * array size in the softc/mgmt_info declaration.
	 */
	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		/* 0xffff marks an unused target slot; skip those. */
		if ((sc->target_list[i].target_id != 0xffff) &&
		    sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* All deferred work is done; tear the config hook back down. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
1040
/*
 * mrsas_detach: De-allocates and teardown resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.  The ordering matters: IOCTL
 * entry points are removed first, then the OCR thread is drained, then the
 * firmware is shut down before any DMA memory is released.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Stop new IOCTL/OCR activity from starting. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (unbounded) for any in-flight OCR to finish... */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* ...and then for the OCR thread itself to exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush controller cache and shut the firmware down cleanly. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	/* Free per-LD stream-detection state (allocated on Ventura/Aero). */
	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	/* Release the register window mapped in attach. */
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1136
1137 static int
mrsas_shutdown(device_t dev)1138 mrsas_shutdown(device_t dev)
1139 {
1140 struct mrsas_softc *sc;
1141 int i;
1142
1143 sc = device_get_softc(dev);
1144 sc->remove_in_progress = 1;
1145 if (!KERNEL_PANICKED()) {
1146 if (sc->ocr_thread_active)
1147 wakeup(&sc->ocr_chan);
1148 i = 0;
1149 while (sc->reset_in_progress && i < 15) {
1150 i++;
1151 if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1152 mrsas_dprint(sc, MRSAS_INFO,
1153 "[%2d]waiting for OCR to be finished "
1154 "from %s\n", i, __func__);
1155 }
1156 pause("mr_shutdown", hz);
1157 }
1158 if (sc->reset_in_progress) {
1159 mrsas_dprint(sc, MRSAS_INFO,
1160 "gave up waiting for OCR to be finished\n");
1161 return (0);
1162 }
1163 }
1164
1165 mrsas_flush_cache(sc);
1166 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1167 mrsas_disable_intr(sc);
1168 return (0);
1169 }
1170
/*
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() (and the attach failure
 * path) to free previously allocated memory.  For each DMA object the
 * teardown order is: unload the map (only if it was loaded, i.e. the
 * physical address is non-zero), free the DMA memory, then destroy the
 * tag.  The parent DMA tag is destroyed last, after every child tag.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory (double-buffered: two copies each)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/*
	 * Free JBOD map memory (also double-buffered)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames (the per-command DMA frames, freed before the
	 * command structures themselves below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (must come after all child tags above)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1332
1333 /*
1334 * mrsas_teardown_intr: Teardown interrupt
1335 * input: Adapter instance soft state
1336 *
1337 * This function is called from mrsas_detach() to teardown and release bus
1338 * interrupt resourse.
1339 */
1340 void
mrsas_teardown_intr(struct mrsas_softc * sc)1341 mrsas_teardown_intr(struct mrsas_softc *sc)
1342 {
1343 int i;
1344
1345 if (!sc->msix_enable) {
1346 if (sc->intr_handle[0])
1347 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1348 if (sc->mrsas_irq[0] != NULL)
1349 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1350 sc->irq_id[0], sc->mrsas_irq[0]);
1351 sc->intr_handle[0] = NULL;
1352 } else {
1353 for (i = 0; i < sc->msix_vectors; i++) {
1354 if (sc->intr_handle[i])
1355 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1356 sc->intr_handle[i]);
1357
1358 if (sc->mrsas_irq[i] != NULL)
1359 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1360 sc->irq_id[i], sc->mrsas_irq[i]);
1361
1362 sc->intr_handle[i] = NULL;
1363 }
1364 pci_release_msi(sc->mrsas_dev);
1365 }
1366
1367 }
1368
1369 /*
1370 * mrsas_suspend: Suspend entry point
1371 * input: Device struct pointer
1372 *
1373 * This function is the entry point for system suspend from the OS.
1374 */
1375 static int
mrsas_suspend(device_t dev)1376 mrsas_suspend(device_t dev)
1377 {
1378 /* This will be filled when the driver will have hibernation support */
1379 return (0);
1380 }
1381
1382 /*
1383 * mrsas_resume: Resume entry point
1384 * input: Device struct pointer
1385 *
1386 * This function is the entry point for system resume from the OS.
1387 */
1388 static int
mrsas_resume(device_t dev)1389 mrsas_resume(device_t dev)
1390 {
1391 /* This will be filled when the driver will have hibernation support */
1392 return (0);
1393 }
1394
1395 /**
1396 * mrsas_get_softc_instance: Find softc instance based on cmd type
1397 *
1398 * This function will return softc instance based on cmd type.
1399 * In some case, application fire ioctl on required management instance and
1400 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1401 * case, else get the softc instance from host_no provided by application in
1402 * user data.
1403 */
1404
1405 static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev * dev,u_long cmd,caddr_t arg)1406 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1407 {
1408 struct mrsas_softc *sc = NULL;
1409 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1410
1411 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1412 sc = dev->si_drv1;
1413 } else {
1414 /*
1415 * get the Host number & the softc from data sent by the
1416 * Application
1417 */
1418 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1419 if (sc == NULL)
1420 printf("There is no Controller number %d\n",
1421 user_ioc->host_no);
1422 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1423 mrsas_dprint(sc, MRSAS_FAULT,
1424 "Invalid Controller number %d\n", user_ioc->host_no);
1425 }
1426
1427 return sc;
1428 }
1429
/*
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 * Commands are rejected during driver removal or when the HW is in an
 * unrecoverable error state, and are held off while an OCR (online
 * controller reset) is in progress.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the softc: MFIIO_PASSTHRU always targets this cdev. */
	switch (cmd) {
	case MFIIO_PASSTHRU:
		sc = (struct mrsas_softc *)(dev->si_drv1);
		break;
	default:
		sc = mrsas_get_softc_instance(dev, cmd, arg);
		break;
	}
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
		    "HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/*
	 * reset_in_progress is sampled under the spin lock; if no OCR is
	 * running, proceed directly.  Otherwise poll (unlocked) until the
	 * OCR completes before servicing the command.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command (bounds concurrent passthrough commands to
		 * MRSAS_MAX_IOCTL_CMDS)
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location to the caller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	case MFIIO_PASSTHRU:
		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1525
1526 /*
1527 * mrsas_poll: poll entry point for mrsas driver fd
1528 *
1529 * This function is the entry point for poll from the OS. It waits for some AEN
1530 * events to be triggered from the controller and notifies back.
1531 */
1532 static int
mrsas_poll(struct cdev * dev,int poll_events,struct thread * td)1533 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1534 {
1535 struct mrsas_softc *sc;
1536 int revents = 0;
1537
1538 sc = dev->si_drv1;
1539
1540 if (poll_events & (POLLIN | POLLRDNORM)) {
1541 if (sc->mrsas_aen_triggered) {
1542 revents |= poll_events & (POLLIN | POLLRDNORM);
1543 }
1544 }
1545 if (revents == 0) {
1546 if (poll_events & (POLLIN | POLLRDNORM)) {
1547 mtx_lock(&sc->aen_lock);
1548 sc->mrsas_poll_waiting = 1;
1549 selrecord(td, &sc->mrsas_select);
1550 mtx_unlock(&sc->aen_lock);
1551 }
1552 }
1553 return revents;
1554 }
1555
1556 /*
1557 * mrsas_setup_irq: Set up interrupt
1558 * input: Adapter instance soft state
1559 *
1560 * This function sets up interrupts as a bus resource, with flags indicating
1561 * resource permitting contemporaneous sharing and for resource to activate
1562 * atomically.
1563 */
1564 static int
mrsas_setup_irq(struct mrsas_softc * sc)1565 mrsas_setup_irq(struct mrsas_softc *sc)
1566 {
1567 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1568 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1569
1570 else {
1571 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1572 sc->irq_context[0].sc = sc;
1573 sc->irq_context[0].MSIxIndex = 0;
1574 sc->irq_id[0] = 0;
1575 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1576 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1577 if (sc->mrsas_irq[0] == NULL) {
1578 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1579 "interrupt\n");
1580 return (FAIL);
1581 }
1582 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1583 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1584 &sc->irq_context[0], &sc->intr_handle[0])) {
1585 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1586 "interrupt\n");
1587 return (FAIL);
1588 }
1589 }
1590 return (0);
1591 }
1592
/*
 * mrsas_isr: ISR entry point
 * input: argument pointer (per-vector mrsas_irq_context)
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are deliberately masked (e.g. during init/reset). */
	if (sc->mask_interrupts)
		return;

	/*
	 * Legacy (non-MSI-X) interrupts may be shared: check and clear the
	 * controller's interrupt status first, and bail if it wasn't ours.
	 */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1628
/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state, MSI-X queue index
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 *
 * Returns 0 when at least one reply was consumed, DONE otherwise.
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Start at this queue's next unprocessed reply descriptor. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  A
	 * descriptor of all-ones marks an empty (already-consumed) slot.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMIDs are 1-based; index into the MPT command list. */
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
			/* Task management done: wake the waiter (OCR path). */
			wakeup_one((void *)&sc->ocr_chan);
			break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				/* Ordinary (non-RAID-1-pair) completion. */
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer Raid 1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status by the failed status
				 * and make sure that if any command fails,
				 * driver returns fail status to CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				/*
				 * Only when BOTH halves of the pair have
				 * completed is the CCB finished; the second
				 * completion releases the peer and decrements
				 * fw_outstanding once per command (twice
				 * total, matching the two commands issued).
				 */
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					mtx_lock(&sc->sim_lock);
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner = false;
					}
					mtx_unlock(&sc->sim_lock);
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, le32toh(data_length), sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance the per-queue consumer index (ring wrap-around). */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1825
1826 /*
1827 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1828 * input: Adapter instance soft state
1829 *
1830 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1831 * It checks the command status and maps the appropriate CAM status for the
1832 * CCB.
1833 */
1834 void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd * cmd,union ccb * ccb_ptr,u_int8_t status,u_int8_t extStatus,u_int32_t data_length,u_int8_t * sense)1835 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1836 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1837 {
1838 struct mrsas_softc *sc = cmd->sc;
1839 u_int8_t *sense_data;
1840
1841 switch (status) {
1842 case MFI_STAT_OK:
1843 ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1844 break;
1845 case MFI_STAT_SCSI_IO_FAILED:
1846 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1847 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1848 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1849 if (sense_data) {
1850 /* For now just copy 18 bytes back */
1851 memcpy(sense_data, sense, 18);
1852 ccb_ptr->csio.sense_len = 18;
1853 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1854 }
1855 break;
1856 case MFI_STAT_LD_OFFLINE:
1857 case MFI_STAT_DEVICE_NOT_FOUND:
1858 if (ccb_ptr->ccb_h.target_lun)
1859 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1860 else
1861 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1862 break;
1863 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1864 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1865 break;
1866 default:
1867 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1868 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1869 ccb_ptr->csio.scsi_status = status;
1870 }
1871 return;
1872 }
1873
1874 /*
1875 * mrsas_alloc_mem: Allocate DMAable memory
1876 * input: Adapter instance soft state
1877 *
1878 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1879 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1880 * Kernel virtual address. Callback argument is physical memory address.
1881 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag.  All per-buffer tags below are derived
	 * from this one.  On any failure this function returns ENOMEM and
	 * leaves earlier allocations in place; they are presumably torn down
	 * by the driver's detach/free path — TODO confirm.
	 */
	if (bus_dma_tag_create(
	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
	    1,	/* alignment */
	    0,	/* boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,	/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,	/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,	/* maxsegsize */
	    0,	/* flags */
	    NULL, NULL,	/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer.  The driver version string is written
	 * here and its bus address is handed to FW during IOC INIT (see
	 * mrsas_ioc_init()).  Lowaddr of BUS_SPACE_MAXADDR_32BIT keeps the
	 * buffer below 4 GB.
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	/* mrsas_addr_cb stores the single-segment bus address. */
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames.  Size was computed in
	 * mrsas_init_adapter() from max_fw_cmds; 16-byte alignment —
	 * presumably an MPI2 frame requirement, TODO confirm.
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (for SGEs that overflow the main message;
	 * size computed in mrsas_init_adapter()).
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply queue per MSI-X vector; at least one. */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array (one reply_alloc_sz region per
	 * reply queue, contiguous).
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 * (MRSAS_SENSE_LEN bytes per FW command).
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure (AEN event payload from FW).
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure (physical-drive info DCMD buffer).
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  This tag is only created here — maps
	 * for actual I/O buffers are loaded per-request, serialized by
	 * sc->io_lock via busdma_lock_mutex.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2142
2143 /*
2144 * mrsas_addr_cb: Callback function of bus_dmamap_load()
2145 * input: callback argument, machine dependent type
2146 * that describes DMA segments, number of segments, error code
2147 *
2148 * This function is for the driver to receive mapping information resultant of
2149 * the bus_dmamap_load(). The information is actually not being used, but the
2150 * address is saved anyway.
2151 */
2152 void
mrsas_addr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2153 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2154 {
2155 bus_addr_t *addr;
2156
2157 addr = arg;
2158 *addr = segs[0].ds_addr;
2159 }
2160
2161 /*
2162 * mrsas_setup_raidmap: Set up RAID map.
2163 * input: Adapter instance soft state
2164 *
2165 * Allocate DMA memory for the RAID maps and perform setup.
2166 */
2167 static int
mrsas_setup_raidmap(struct mrsas_softc * sc)2168 mrsas_setup_raidmap(struct mrsas_softc *sc)
2169 {
2170 int i;
2171
2172 for (i = 0; i < 2; i++) {
2173 sc->ld_drv_map[i] =
2174 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2175 /* Do Error handling */
2176 if (!sc->ld_drv_map[i]) {
2177 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2178
2179 if (i == 1)
2180 free(sc->ld_drv_map[0], M_MRSAS);
2181 /* ABORT driver initialization */
2182 goto ABORT;
2183 }
2184 }
2185
2186 for (int i = 0; i < 2; i++) {
2187 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2188 4, 0,
2189 BUS_SPACE_MAXADDR_32BIT,
2190 BUS_SPACE_MAXADDR,
2191 NULL, NULL,
2192 sc->max_map_sz,
2193 1,
2194 sc->max_map_sz,
2195 BUS_DMA_ALLOCNOW,
2196 NULL, NULL,
2197 &sc->raidmap_tag[i])) {
2198 device_printf(sc->mrsas_dev,
2199 "Cannot allocate raid map tag.\n");
2200 return (ENOMEM);
2201 }
2202 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2203 (void **)&sc->raidmap_mem[i],
2204 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2205 device_printf(sc->mrsas_dev,
2206 "Cannot allocate raidmap memory.\n");
2207 return (ENOMEM);
2208 }
2209 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2210
2211 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2212 sc->raidmap_mem[i], sc->max_map_sz,
2213 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2214 BUS_DMA_NOWAIT)) {
2215 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2216 return (ENOMEM);
2217 }
2218 if (!sc->raidmap_mem[i]) {
2219 device_printf(sc->mrsas_dev,
2220 "Cannot allocate memory for raid map.\n");
2221 return (ENOMEM);
2222 }
2223 }
2224
2225 if (!mrsas_get_map_info(sc))
2226 mrsas_sync_map_info(sc);
2227
2228 return (0);
2229
2230 ABORT:
2231 return (1);
2232 }
2233
2234 /**
2235 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2236 * @sc: Adapter soft state
2237 *
 * Allocates and loads the JBOD map buffers, then syncs PD sequence numbers
 * with FW.  On any failure, JBOD fast path (use_seqnum_jbod_fp) is disabled.
2239 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/*
	 * Size of the PD sequence-number sync map: header plus one
	 * MR_PD_CFG_SEQ per physical device (the first entry is embedded in
	 * MR_PD_CFG_SEQ_NUM_SYNC, hence the "- 1").
	 */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* FW must advertise JBOD sequence-number fast-path support. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers already exist (presumably a re-init path) — just re-sync. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	/* Two DMA-able map buffers, same double-buffer scheme as the RAID map. */
	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/*
		 * NOTE(review): this check appears unreachable —
		 * jbodmap_mem[i] was already verified by the
		 * bus_dmamem_alloc() error path above.
		 */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Fast path is enabled only if both sync DCMDs succeed. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
2305
2306 /*
2307 * mrsas_init_fw: Initialize Firmware
2308 * input: Adapter soft state
2309 *
2310 * Calls transition_to_ready() to make sure Firmware is in operational state and
2311 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2312 * issues internal commands to get the controller info after the IOC_INIT
2313 * command response is received by Firmware. Note: code relating to
2314 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2315 * is left here as placeholder.
2316 */
2317 static int
mrsas_init_fw(struct mrsas_softc * sc)2318 mrsas_init_fw(struct mrsas_softc *sc)
2319 {
2320
2321 int ret, loop, ocr = 0;
2322 u_int32_t max_sectors_1;
2323 u_int32_t max_sectors_2;
2324 u_int32_t tmp_sectors;
2325 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2326 int msix_enable = 0;
2327 int fw_msix_count = 0;
2328 int i, j;
2329
2330 /* Make sure Firmware is ready */
2331 ret = mrsas_transition_to_ready(sc, ocr);
2332 if (ret != SUCCESS) {
2333 return (ret);
2334 }
2335 if (sc->is_ventura || sc->is_aero) {
2336 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2337 #if VD_EXT_DEBUG
2338 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2339 #endif
2340 sc->maxRaidMapSize = ((scratch_pad_3 >>
2341 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2342 MR_MAX_RAID_MAP_SIZE_MASK);
2343 }
2344 /* MSI-x index 0- reply post host index register */
2345 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2346 /* Check if MSI-X is supported while in ready state */
2347 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2348
2349 if (msix_enable) {
2350 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2351 outbound_scratch_pad_2));
2352
2353 /* Check max MSI-X vectors */
2354 if (sc->device_id == MRSAS_TBOLT) {
2355 sc->msix_vectors = (scratch_pad_2
2356 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2357 fw_msix_count = sc->msix_vectors;
2358 } else {
2359 /* Invader/Fury supports 96 MSI-X vectors */
2360 sc->msix_vectors = ((scratch_pad_2
2361 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2362 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2363 fw_msix_count = sc->msix_vectors;
2364
2365 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2366 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2367 sc->msix_combined = true;
2368 /*
2369 * Save 1-15 reply post index
2370 * address to local memory Index 0
2371 * is already saved from reg offset
2372 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2373 */
2374 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2375 loop++) {
2376 sc->msix_reg_offset[loop] =
2377 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2378 (loop * 0x10);
2379 }
2380 }
2381
2382 /* Don't bother allocating more MSI-X vectors than cpus */
2383 sc->msix_vectors = min(sc->msix_vectors,
2384 mp_ncpus);
2385
2386 /* Allocate MSI-x vectors */
2387 if (mrsas_allocate_msix(sc) == SUCCESS)
2388 sc->msix_enable = 1;
2389 else
2390 sc->msix_enable = 0;
2391
2392 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2393 "Online CPU %d Current MSIX <%d>\n",
2394 fw_msix_count, mp_ncpus, sc->msix_vectors);
2395 }
2396 /*
2397 * MSI-X host index 0 is common for all adapter.
2398 * It is used for all MPT based Adapters.
2399 */
2400 if (sc->msix_combined) {
2401 sc->msix_reg_offset[0] =
2402 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2403 }
2404 if (mrsas_init_adapter(sc) != SUCCESS) {
2405 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2406 return (1);
2407 }
2408
2409 if (sc->is_ventura || sc->is_aero) {
2410 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2411 outbound_scratch_pad_4));
2412 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2413 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2414
2415 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2416 }
2417
2418 /* Allocate internal commands for pass-thru */
2419 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2420 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2421 return (1);
2422 }
2423 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2424 if (!sc->ctrl_info) {
2425 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2426 return (1);
2427 }
2428 /*
2429 * Get the controller info from FW, so that the MAX VD support
2430 * availability can be decided.
2431 */
2432 if (mrsas_get_ctrl_info(sc)) {
2433 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2434 return (1);
2435 }
2436 sc->secure_jbod_support =
2437 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2438
2439 if (sc->secure_jbod_support)
2440 device_printf(sc->mrsas_dev, "FW supports SED \n");
2441
2442 if (sc->use_seqnum_jbod_fp)
2443 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2444
2445 if (sc->support_morethan256jbod)
2446 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2447
2448 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2449 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2450 "There seems to be some problem in the controller\n"
2451 "Please contact to the SUPPORT TEAM if the problem persists\n");
2452 }
2453 megasas_setup_jbod_map(sc);
2454
2455 memset(sc->target_list, 0,
2456 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2457 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2458 sc->target_list[i].target_id = 0xffff;
2459
2460 /* For pass-thru, get PD/LD list and controller info */
2461 memset(sc->pd_list, 0,
2462 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2463 if (mrsas_get_pd_list(sc) != SUCCESS) {
2464 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2465 return (1);
2466 }
2467 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2468 if (mrsas_get_ld_list(sc) != SUCCESS) {
2469 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2470 return (1);
2471 }
2472
2473 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2474 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2475 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2476 if (!sc->streamDetectByLD) {
2477 device_printf(sc->mrsas_dev,
2478 "unable to allocate stream detection for pool of LDs\n");
2479 return (1);
2480 }
2481 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2482 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2483 if (!sc->streamDetectByLD[i]) {
2484 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2485 for (j = 0; j < i; ++j)
2486 free(sc->streamDetectByLD[j], M_MRSAS);
2487 free(sc->streamDetectByLD, M_MRSAS);
2488 sc->streamDetectByLD = NULL;
2489 return (1);
2490 }
2491 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2492 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2493 }
2494 }
2495
2496 /*
2497 * Compute the max allowed sectors per IO: The controller info has
2498 * two limits on max sectors. Driver should use the minimum of these
2499 * two.
2500 *
2501 * 1 << stripe_sz_ops.min = max sectors per strip
2502 *
2503 * Note that older firmwares ( < FW ver 30) didn't report information to
2504 * calculate max_sectors_1. So the number ended up as zero always.
2505 */
2506 tmp_sectors = 0;
2507 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2508 sc->ctrl_info->max_strips_per_io;
2509 max_sectors_2 = sc->ctrl_info->max_request_size;
2510 tmp_sectors = min(max_sectors_1, max_sectors_2);
2511 sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;
2512
2513 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2514 sc->max_sectors_per_req = tmp_sectors;
2515
2516 sc->disableOnlineCtrlReset =
2517 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2518 sc->UnevenSpanSupport =
2519 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2520 if (sc->UnevenSpanSupport) {
2521 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2522 sc->UnevenSpanSupport);
2523
2524 if (MR_ValidateMapInfo(sc))
2525 sc->fast_path_io = 1;
2526 else
2527 sc->fast_path_io = 0;
2528 }
2529
2530 device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n",
2531 sc->max_fw_cmds, sc->max_scsi_cmds);
2532 return (0);
2533 }
2534
2535 /*
2536 * mrsas_init_adapter: Initializes the adapter/controller
2537 * input: Adapter soft state
2538 *
2539 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2540 * ROC/controller. The FW register is read to determined the number of
2541 * commands that is supported. All memory allocations for IO is based on
2542 * max_cmd. Appropriate calculations are performed in this function.
2543 */
2544 int
mrsas_init_adapter(struct mrsas_softc * sc)2545 mrsas_init_adapter(struct mrsas_softc *sc)
2546 {
2547 uint32_t status;
2548 u_int32_t scratch_pad_2;
2549 int ret;
2550 int i = 0;
2551
2552 /* Read FW status register */
2553 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2554
2555 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2556
2557 /* Decrement the max supported by 1, to correlate with FW */
2558 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2559 sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2560
2561 /* Determine allocation size of command frames */
2562 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2563 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2564 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2565 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2566 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2567 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2568 outbound_scratch_pad_2));
2569
2570 mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
2571 "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
2572 "sc->io_frames_alloc_sz 0x%x\n", __func__,
2573 sc->reply_q_depth, sc->request_alloc_sz,
2574 sc->reply_alloc_sz, sc->io_frames_alloc_sz);
2575
2576 /*
2577 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2578 * Firmware support extended IO chain frame which is 4 time more
2579 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2580 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2581 */
2582 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2583 sc->max_chain_frame_sz =
2584 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2585 * MEGASAS_1MB_IO;
2586 else
2587 sc->max_chain_frame_sz =
2588 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2589 * MEGASAS_256K_IO;
2590
2591 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
2592 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2593 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2594
2595 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2596 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2597
2598 mrsas_dprint(sc, MRSAS_INFO,
2599 "max sge: 0x%x, max chain frame size: 0x%x, "
2600 "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
2601 sc->max_num_sge,
2602 sc->max_chain_frame_sz, sc->max_fw_cmds,
2603 sc->chain_frames_alloc_sz);
2604
2605 /* Used for pass thru MFI frame (DCMD) */
2606 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2607
2608 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2609 sizeof(MPI2_SGE_IO_UNION)) / 16;
2610
2611 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2612
2613 for (i = 0; i < count; i++)
2614 sc->last_reply_idx[i] = 0;
2615
2616 ret = mrsas_alloc_mem(sc);
2617 if (ret != SUCCESS)
2618 return (ret);
2619
2620 ret = mrsas_alloc_mpt_cmds(sc);
2621 if (ret != SUCCESS)
2622 return (ret);
2623
2624 ret = mrsas_ioc_init(sc);
2625 if (ret != SUCCESS)
2626 return (ret);
2627
2628 return (0);
2629 }
2630
2631 /*
2632 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2633 * input: Adapter soft state
2634 *
2635 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2636 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command.  The extra 1024 bytes at the front hold
	 * the MFI init frame; the MPI2 IOC INIT request itself is placed at
	 * offset 1024 (see mrsas_ioc_init()).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* byte alignment, no boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* keep buffer below 4 GB */
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,				/* single contiguous segment */
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	/* mrsas_addr_cb stores the bus address in sc->ioc_init_phys_mem. */
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2672
2673 /*
 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
2675 * input: Adapter soft state
2676 *
2677 * Deallocates memory of the IOC Init cmd.
2678 */
2679 void
mrsas_free_ioc_cmd(struct mrsas_softc * sc)2680 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2681 {
2682 if (sc->ioc_init_phys_mem)
2683 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2684 if (sc->ioc_init_mem != NULL)
2685 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2686 if (sc->ioc_init_tag != NULL)
2687 bus_dma_tag_destroy(sc->ioc_init_tag);
2688 }
2689
2690 /*
2691 * mrsas_ioc_init: Sends IOC Init command to FW
2692 * input: Adapter soft state
2693 *
2694 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2695 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/* Latch FW sync-cache capability from scratch pad 2 unless disabled. */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/*
	 * The MPI2 IOC INIT request sits at offset 1024 of the DMA buffer;
	 * the MFI init frame that wraps it occupies the first 1024 bytes
	 * (layout established in mrsas_alloc_ioc_cmd()).
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* MFI wrapper frame at the start of the buffer. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF is the "pending" sentinel; FW overwrites it on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string (in the verbuf DMA buffer). */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	/* Point the MFI frame at the MPI2 request (offset 1024). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/* Interrupts stay off; completion is detected by polling below. */
	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		/* 0xFF still pending => timeout; anything else is a FW error. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	if (sc->is_aero) {
		/* Aero: check for atomic request-descriptor support. */
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
		    MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
		    sc->atomic_desc_support ? "Yes" : "No");
	}

	/* IOC INIT buffer is single-use; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2804
2805 /*
2806 * mrsas_alloc_mpt_cmds: Allocates the command packets
2807 * input: Adapter instance soft state
2808 *
2809 * This function allocates the internal commands for IOs. Each command that is
2810 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2811 * array is allocated with mrsas_mpt_cmd context. The free commands are
2812 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2813 * max_fw_cmds.
2814 */
2815 int
mrsas_alloc_mpt_cmds(struct mrsas_softc * sc)2816 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2817 {
2818 int i, j;
2819 u_int32_t max_fw_cmds, count;
2820 struct mrsas_mpt_cmd *cmd;
2821 pMpi2ReplyDescriptorsUnion_t reply_desc;
2822 u_int32_t offset, chain_offset, sense_offset;
2823 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2824 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2825
2826 max_fw_cmds = sc->max_fw_cmds;
2827
2828 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2829 if (!sc->req_desc) {
2830 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2831 return (ENOMEM);
2832 }
2833 memset(sc->req_desc, 0, sc->request_alloc_sz);
2834
2835 /*
2836 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2837 * Allocate the dynamic array first and then allocate individual
2838 * commands.
2839 */
2840 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2841 M_MRSAS, M_NOWAIT);
2842 if (!sc->mpt_cmd_list) {
2843 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2844 return (ENOMEM);
2845 }
2846 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2847 for (i = 0; i < max_fw_cmds; i++) {
2848 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2849 M_MRSAS, M_NOWAIT);
2850 if (!sc->mpt_cmd_list[i]) {
2851 for (j = 0; j < i; j++)
2852 free(sc->mpt_cmd_list[j], M_MRSAS);
2853 free(sc->mpt_cmd_list, M_MRSAS);
2854 sc->mpt_cmd_list = NULL;
2855 return (ENOMEM);
2856 }
2857 }
2858
2859 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2860 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2861 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2862 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2863 sense_base = (u_int8_t *)sc->sense_mem;
2864 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2865 for (i = 0; i < max_fw_cmds; i++) {
2866 cmd = sc->mpt_cmd_list[i];
2867 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2868 chain_offset = sc->max_chain_frame_sz * i;
2869 sense_offset = MRSAS_SENSE_LEN * i;
2870 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2871 cmd->index = i + 1;
2872 cmd->ccb_ptr = NULL;
2873 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2874 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2875 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2876 cmd->sc = sc;
2877 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2878 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2879 cmd->io_request_phys_addr = io_req_base_phys + offset;
2880 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2881 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2882 cmd->sense = sense_base + sense_offset;
2883 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2884 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2885 return (FAIL);
2886 }
2887 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2888 }
2889
2890 /* Initialize reply descriptor array to 0xFFFFFFFF */
2891 reply_desc = sc->reply_desc_mem;
2892 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2893 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2894 reply_desc->Words = MRSAS_ULONG_MAX;
2895 }
2896 return (0);
2897 }
2898
2899 /*
 * mrsas_write_64bit_req_desc: Writes 64 bit request descriptor to FW
2901 * input: Adapter softstate
2902 * request descriptor address low
2903 * request descriptor address high
2904 */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The descriptor is delivered as two 32-bit register writes that
	 * the controller consumes as a pair.  pci_lock serializes writers
	 * so another descriptor's halves cannot interleave between the
	 * low- and high-port writes.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    le32toh(req_desc_lo));
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    le32toh(req_desc_hi));
	mtx_unlock(&sc->pci_lock);
}
2916
2917 /*
2918 * mrsas_fire_cmd: Sends command to FW
2919 * input: Adapter softstate
2920 * request descriptor address low
2921 * request descriptor address high
2922 *
2923 * This functions fires the command to Firmware by writing to the
2924 * inbound_low_queue_port and inbound_high_queue_port.
2925 */
2926 void
mrsas_fire_cmd(struct mrsas_softc * sc,u_int32_t req_desc_lo,u_int32_t req_desc_hi)2927 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2928 u_int32_t req_desc_hi)
2929 {
2930 if (sc->atomic_desc_support)
2931 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2932 le32toh(req_desc_lo));
2933 else
2934 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2935 }
2936
2937 /*
2938 * mrsas_transition_to_ready: Move FW to Ready state input:
2939 * Adapter instance soft state
2940 *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake
 * state, the driver must take steps to bring it to the ready state.
 * Otherwise, it has to wait for the ready state.
2945 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state __unused;	/* kept for tracing/debug builds */
	u_int32_t abs_state, curr_abs_state;

	/* Low nibble of the scratch pad register encodes the FW state. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/*
		 * Snapshot the full (unmasked) register so that ANY change,
		 * not just a state-field change, ends the wait loop below.
		 */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a fault is expected; otherwise give up. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll until the FW acks the reset (doorbell bit 0 clears). */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3051
3052 /*
3053 * mrsas_get_mfi_cmd: Get a cmd from free command pool
3054 * input: Adapter soft state
3055 *
3056 * This function removes an MFI command from the command list.
3057 */
3058 struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc * sc)3059 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3060 {
3061 struct mrsas_mfi_cmd *cmd = NULL;
3062
3063 mtx_lock(&sc->mfi_cmd_pool_lock);
3064 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3065 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3066 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3067 }
3068 mtx_unlock(&sc->mfi_cmd_pool_lock);
3069
3070 return cmd;
3071 }
3072
3073 /*
3074 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
3075 * input: Adapter Context.
3076 *
3077 * This function will check FW status register and flag do_timeout_reset flag.
3078 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3079 * trigger reset.
3080 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/*
		 * Sleep for mrsas_fw_fault_check_delay seconds (or until
		 * woken via ocr_chan), then check the FW status.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit the thread on detach or an unrecoverable HW error. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
		    mrsas_atomic_read(&sc->target_reset_outstanding)) {
			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				    "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
					    "because of %s!\n", sc->do_timedout_reset ?
					    "DCMD IO Timeout" : "FW fault");

				/* ioctl_lock is a spin mutex; guard the flag flip only. */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 * sim_lock must be dropped around the drain to avoid
				 * deadlocking against the task itself.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				/* Keep new AEN work queued (not running) during the reset. */
				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				/* Clear all reset bookkeeping for the next cycle. */
				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
3173
3174 /*
3175 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
3176 * input: Adapter Context.
3177 *
 * This function clears the reply descriptors so that, after OCR, neither the
 * driver nor the FW is confused by stale completion history.
3180 */
3181 void
mrsas_reset_reply_desc(struct mrsas_softc * sc)3182 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3183 {
3184 int i, count;
3185 pMpi2ReplyDescriptorsUnion_t reply_desc;
3186
3187 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3188 for (i = 0; i < count; i++)
3189 sc->last_reply_idx[i] = 0;
3190
3191 reply_desc = sc->reply_desc_mem;
3192 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3193 reply_desc->Words = MRSAS_ULONG_MAX;
3194 }
3195 }
3196
3197 /*
3198 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
3199 * input: Adapter Context.
3200 *
3201 * This function will run from thread context so that it can sleep. 1. Do not
3202 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3203 * to complete for 180 seconds. 3. If #2 does not find any outstanding
3204 * command Controller is in working state, so skip OCR. Otherwise, do
3205 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3206 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3207 * OCR, Re-fire Management command and move Controller to Operation state.
3208 */
3209 int
mrsas_reset_ctrl(struct mrsas_softc * sc,u_int8_t reset_reason)3210 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3211 {
3212 int retval = SUCCESS, i, j, retry = 0;
3213 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3214 union ccb *ccb;
3215 struct mrsas_mfi_cmd *mfi_cmd;
3216 struct mrsas_mpt_cmd *mpt_cmd;
3217 union mrsas_evt_class_locale class_locale;
3218 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3219
3220 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3221 device_printf(sc->mrsas_dev,
3222 "mrsas: Hardware critical error, returning FAIL.\n");
3223 return FAIL;
3224 }
3225 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3226 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3227 mrsas_disable_intr(sc);
3228 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3229 sc->mrsas_fw_fault_check_delay * hz);
3230
3231 /* First try waiting for commands to complete */
3232 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3233 mrsas_dprint(sc, MRSAS_OCR,
3234 "resetting adapter from %s.\n",
3235 __func__);
3236 /* Now return commands back to the CAM layer */
3237 mtx_unlock(&sc->sim_lock);
3238 for (i = 0; i < sc->max_fw_cmds; i++) {
3239 mpt_cmd = sc->mpt_cmd_list[i];
3240
3241 if (mpt_cmd->peer_cmd) {
3242 mrsas_dprint(sc, MRSAS_OCR,
3243 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3244 i, mpt_cmd, mpt_cmd->peer_cmd);
3245 }
3246
3247 if (mpt_cmd->ccb_ptr) {
3248 if (mpt_cmd->callout_owner) {
3249 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3250 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3251 mrsas_cmd_done(sc, mpt_cmd);
3252 } else {
3253 mpt_cmd->ccb_ptr = NULL;
3254 mrsas_release_mpt_cmd(mpt_cmd);
3255 }
3256 }
3257 }
3258
3259 mrsas_atomic_set(&sc->fw_outstanding, 0);
3260
3261 mtx_lock(&sc->sim_lock);
3262
3263 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3264 outbound_scratch_pad));
3265 abs_state = status_reg & MFI_STATE_MASK;
3266 reset_adapter = status_reg & MFI_RESET_ADAPTER;
3267 if (sc->disableOnlineCtrlReset ||
3268 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3269 /* Reset not supported, kill adapter */
3270 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3271 mrsas_kill_hba(sc);
3272 retval = FAIL;
3273 goto out;
3274 }
3275 /* Now try to reset the chip */
3276 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3277 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3278 MPI2_WRSEQ_FLUSH_KEY_VALUE);
3279 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3280 MPI2_WRSEQ_1ST_KEY_VALUE);
3281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3282 MPI2_WRSEQ_2ND_KEY_VALUE);
3283 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3284 MPI2_WRSEQ_3RD_KEY_VALUE);
3285 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3286 MPI2_WRSEQ_4TH_KEY_VALUE);
3287 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3288 MPI2_WRSEQ_5TH_KEY_VALUE);
3289 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3290 MPI2_WRSEQ_6TH_KEY_VALUE);
3291
3292 /* Check that the diag write enable (DRWE) bit is on */
3293 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3294 fusion_host_diag));
3295 retry = 0;
3296 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3297 DELAY(100 * 1000);
3298 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3299 fusion_host_diag));
3300 if (retry++ == 100) {
3301 mrsas_dprint(sc, MRSAS_OCR,
3302 "Host diag unlock failed!\n");
3303 break;
3304 }
3305 }
3306 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3307 continue;
3308
3309 /* Send chip reset command */
3310 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3311 host_diag | HOST_DIAG_RESET_ADAPTER);
3312 DELAY(3000 * 1000);
3313
3314 /* Make sure reset adapter bit is cleared */
3315 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3316 fusion_host_diag));
3317 retry = 0;
3318 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3319 DELAY(100 * 1000);
3320 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3321 fusion_host_diag));
3322 if (retry++ == 1000) {
3323 mrsas_dprint(sc, MRSAS_OCR,
3324 "Diag reset adapter never cleared!\n");
3325 break;
3326 }
3327 }
3328 if (host_diag & HOST_DIAG_RESET_ADAPTER)
3329 continue;
3330
3331 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3332 outbound_scratch_pad)) & MFI_STATE_MASK;
3333 retry = 0;
3334
3335 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3336 DELAY(100 * 1000);
3337 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3338 outbound_scratch_pad)) & MFI_STATE_MASK;
3339 }
3340 if (abs_state <= MFI_STATE_FW_INIT) {
3341 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3342 " state = 0x%x\n", abs_state);
3343 continue;
3344 }
3345 /* Wait for FW to become ready */
3346 if (mrsas_transition_to_ready(sc, 1)) {
3347 mrsas_dprint(sc, MRSAS_OCR,
3348 "mrsas: Failed to transition controller to ready.\n");
3349 continue;
3350 }
3351 mrsas_reset_reply_desc(sc);
3352 if (mrsas_ioc_init(sc)) {
3353 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3354 continue;
3355 }
3356 for (j = 0; j < sc->max_fw_cmds; j++) {
3357 mpt_cmd = sc->mpt_cmd_list[j];
3358 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3359 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3360 /* If not an IOCTL then release the command else re-fire */
3361 if (!mfi_cmd->sync_cmd) {
3362 mrsas_release_mfi_cmd(mfi_cmd);
3363 } else {
3364 req_desc = mrsas_get_request_desc(sc,
3365 mfi_cmd->cmd_id.context.smid - 1);
3366 mrsas_dprint(sc, MRSAS_OCR,
3367 "Re-fire command DCMD opcode 0x%x index %d\n ",
3368 mfi_cmd->frame->dcmd.opcode, j);
3369 if (!req_desc)
3370 device_printf(sc->mrsas_dev,
3371 "Cannot build MPT cmd.\n");
3372 else
3373 mrsas_fire_cmd(sc, req_desc->addr.u.low,
3374 req_desc->addr.u.high);
3375 }
3376 }
3377 }
3378
3379 /* Reset load balance info */
3380 memset(sc->load_balance_info, 0,
3381 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3382
3383 if (mrsas_get_ctrl_info(sc)) {
3384 mrsas_kill_hba(sc);
3385 retval = FAIL;
3386 goto out;
3387 }
3388 if (!mrsas_get_map_info(sc))
3389 mrsas_sync_map_info(sc);
3390
3391 megasas_setup_jbod_map(sc);
3392
3393 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3394 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3395 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3396 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3397 }
3398 }
3399
3400 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3401 mrsas_enable_intr(sc);
3402 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3403
3404 /* Register AEN with FW for last sequence number */
3405 class_locale.members.reserved = 0;
3406 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3407 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3408
3409 mtx_unlock(&sc->sim_lock);
3410 if (mrsas_register_aen(sc, sc->last_seq_num,
3411 class_locale.word)) {
3412 device_printf(sc->mrsas_dev,
3413 "ERROR: AEN registration FAILED from OCR !!! "
3414 "Further events from the controller cannot be notified."
3415 "Either there is some problem in the controller"
3416 "or the controller does not support AEN.\n"
3417 "Please contact to the SUPPORT TEAM if the problem persists\n");
3418 }
3419 mtx_lock(&sc->sim_lock);
3420
3421 /* Adapter reset completed successfully */
3422 device_printf(sc->mrsas_dev, "Reset successful\n");
3423 retval = SUCCESS;
3424 goto out;
3425 }
3426 /* Reset failed, kill the adapter */
3427 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3428 mrsas_kill_hba(sc);
3429 retval = FAIL;
3430 } else {
3431 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3432 mrsas_enable_intr(sc);
3433 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3434 }
3435 out:
3436 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3437 mrsas_dprint(sc, MRSAS_OCR,
3438 "Reset Exit with %d.\n", retval);
3439 return retval;
3440 }
3441
3442 /*
3443 * mrsas_kill_hba: Kill HBA when OCR is not supported
3444 * input: Adapter Context.
3445 *
3446 * This function will kill HBA when OCR is not supported.
3447 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter unrecoverable before touching the hardware. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* 1-second settle delay before stopping the FW — TODO confirm why. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the now-dead firmware. */
	mrsas_complete_outstanding_ioctls(sc);
}
3460
3461 /**
3462 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3463 * input: Controller softc
3464 *
3465 * Returns void
3466 */
3467 void
mrsas_complete_outstanding_ioctls(struct mrsas_softc * sc)3468 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3469 {
3470 int i;
3471 struct mrsas_mpt_cmd *cmd_mpt;
3472 struct mrsas_mfi_cmd *cmd_mfi;
3473 u_int32_t count, MSIxIndex;
3474
3475 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3476 for (i = 0; i < sc->max_fw_cmds; i++) {
3477 cmd_mpt = sc->mpt_cmd_list[i];
3478
3479 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3480 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3481 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3482 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3483 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3484 cmd_mpt->io_request->RaidContext.raid_context.status);
3485 }
3486 }
3487 }
3488 }
3489
3490 /*
3491 * mrsas_wait_for_outstanding: Wait for outstanding commands
3492 * input: Adapter Context.
3493 *
3494 * This function will wait for 180 seconds for outstanding commands to be
3495 * completed.
3496 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/*
			 * sim_lock must be dropped while draining the reply
			 * queues; mrsas_complete_cmd takes its own locks.
			 */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout always forces a reset; no point waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically log progress and sweep the reply queues. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Commands still pending after the full wait: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3555
3556 /*
3557 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3558 * input: Command packet for return to free cmd pool
3559 *
3560 * This function returns the MFI & MPT command to the command list.
3561 */
3562 void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd * cmd_mfi)3563 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3564 {
3565 struct mrsas_softc *sc = cmd_mfi->sc;
3566 struct mrsas_mpt_cmd *cmd_mpt;
3567
3568 mtx_lock(&sc->mfi_cmd_pool_lock);
3569 /*
3570 * Release the mpt command (if at all it is allocated
3571 * associated with the mfi command
3572 */
3573 if (cmd_mfi->cmd_id.context.smid) {
3574 mtx_lock(&sc->mpt_cmd_pool_lock);
3575 /* Get the mpt cmd from mfi cmd frame's smid value */
3576 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
3577 cmd_mpt->flags = 0;
3578 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3579 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3580 mtx_unlock(&sc->mpt_cmd_pool_lock);
3581 }
3582 /* Release the mfi command */
3583 cmd_mfi->ccb_ptr = NULL;
3584 cmd_mfi->cmd_id.frame_count = 0;
3585 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3586 mtx_unlock(&sc->mfi_cmd_pool_lock);
3587
3588 return;
3589 }
3590
3591 /*
3592 * mrsas_get_controller_info: Returns FW's controller structure
3593 * input: Adapter soft state
3594 * Controller information structure
3595 *
3596 * Issues an internal command (DCMD) to get the FW's controller structure. This
3597 * information is mainly used to find out the maximum IO transfer per command
3598 * supported by the FW.
3599 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	/* Assume an OCR is needed until the DCMD provably succeeds. */
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that receives the controller info from FW. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_GET_INFO frame (single read SGE). */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* 0xFF = "no status yet"; FW overwrites */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));

	/* With interrupts masked (e.g. during OCR) we must poll instead. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else {
		/* Copy out and byte-swap the fields the driver consumes. */
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
	}

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	/* Cache capability bits the IO path consults. */
	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
	    sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* A timed-out DCMD leaves do_ocr set, scheduling a reset. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/*
	 * On the polled (masked-interrupt) path the timed-out frame may
	 * still be owned by FW, so the command is not returned here.
	 */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3671
3672 /*
3673 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3674 * input:
3675 * sc - Controller's softc
3676 */
3677 static void
mrsas_update_ext_vd_details(struct mrsas_softc * sc)3678 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3679 {
3680 u_int32_t ventura_map_sz = 0;
3681 sc->max256vdSupport =
3682 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3683
3684 /* Below is additional check to address future FW enhancement */
3685 if (sc->ctrl_info->max_lds > 64)
3686 sc->max256vdSupport = 1;
3687
3688 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3689 * MRSAS_MAX_DEV_PER_CHANNEL;
3690 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3691 * MRSAS_MAX_DEV_PER_CHANNEL;
3692 if (sc->max256vdSupport) {
3693 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3694 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3695 } else {
3696 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3697 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3698 }
3699
3700 if (sc->maxRaidMapSize) {
3701 ventura_map_sz = sc->maxRaidMapSize *
3702 MR_MIN_MAP_SIZE;
3703 sc->current_map_sz = ventura_map_sz;
3704 sc->max_map_sz = ventura_map_sz;
3705 } else {
3706 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3707 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3708 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3709 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3710 if (sc->max256vdSupport)
3711 sc->current_map_sz = sc->new_map_sz;
3712 else
3713 sc->current_map_sz = sc->old_map_sz;
3714 }
3715
3716 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3717 #if VD_EXT_DEBUG
3718 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3719 sc->maxRaidMapSize);
3720 device_printf(sc->mrsas_dev,
3721 "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3722 "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3723 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3724 sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3725 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3726 #endif
3727 }
3728
3729 /*
3730 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3731 * input: Adapter soft state
3732 *
3733 * Allocates DMAable memory for the controller info internal command.
3734 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	/* 32-bit lowaddr: the DCMD SGE only carries a 32-bit address. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ctlr_info_size,		/* maxsize */
	    1,				/* nsegments */
	    ctlr_info_size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	/* mrsas_addr_cb stores the single segment's bus address. */
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	/*
	 * NOTE(review): partial allocations on the error paths above are
	 * not torn down here — presumably mrsas_free_ctlr_info_cmd() is
	 * called by the caller on failure; verify.
	 */
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}
3770
3771 /*
3772 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3773 * input: Adapter soft state
3774 *
3775 * Deallocates memory of the get controller info cmd.
3776 */
3777 void
mrsas_free_ctlr_info_cmd(struct mrsas_softc * sc)3778 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3779 {
3780 if (sc->ctlr_info_phys_addr)
3781 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3782 if (sc->ctlr_info_mem != NULL)
3783 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3784 if (sc->ctlr_info_tag != NULL)
3785 bus_dma_tag_destroy(sc->ctlr_info_tag);
3786 }
3787
3788 /*
3789 * mrsas_issue_polled: Issues a polling command
3790 * inputs: Adapter soft state
3791 * Command packet to be issued
3792 *
3793 * This function is for posting of internal commands to Firmware. MFI requires
3794 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3795 * the poll response timer is 180 seconds.
3796 */
3797 int
mrsas_issue_polled(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3798 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3799 {
3800 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3801 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3802 int i, retcode = SUCCESS;
3803
3804 frame_hdr->cmd_status = 0xFF;
3805 frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3806
3807 /* Issue the frame using inbound queue port */
3808 if (mrsas_issue_dcmd(sc, cmd)) {
3809 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3810 return (1);
3811 }
3812 /*
3813 * Poll response timer to wait for Firmware response. While this
3814 * timer with the DELAY call could block CPU, the time interval for
3815 * this is only 1 millisecond.
3816 */
3817 if (frame_hdr->cmd_status == 0xFF) {
3818 for (i = 0; i < (max_wait * 1000); i++) {
3819 if (frame_hdr->cmd_status == 0xFF)
3820 DELAY(1000);
3821 else
3822 break;
3823 }
3824 }
3825 if (frame_hdr->cmd_status == 0xFF) {
3826 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3827 "seconds from %s\n", max_wait, __func__);
3828 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3829 cmd->frame->dcmd.opcode);
3830 retcode = ETIMEDOUT;
3831 }
3832 return (retcode);
3833 }
3834
3835 /*
3836 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3837 * input: Adapter soft state mfi cmd pointer
3838 *
3839 * This function is called by mrsas_issued_blocked_cmd() and
3840 * mrsas_issued_polled(), to build the MPT command and then fire the command
3841 * to Firmware.
3842 */
3843 int
mrsas_issue_dcmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3844 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3845 {
3846 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3847
3848 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3849 if (!req_desc) {
3850 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3851 return (1);
3852 }
3853 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3854
3855 return (0);
3856 }
3857
3858 /*
3859 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3860 * input: Adapter soft state mfi cmd to build
3861 *
3862 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3863 * command and prepares the MPT command to send to Firmware.
3864 */
3865 MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3866 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3867 {
3868 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3869 u_int16_t index;
3870
3871 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3872 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3873 return NULL;
3874 }
3875 index = cmd->cmd_id.context.smid;
3876
3877 req_desc = mrsas_get_request_desc(sc, index - 1);
3878 if (!req_desc)
3879 return NULL;
3880
3881 req_desc->addr.Words = 0;
3882 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3883
3884 req_desc->SCSIIO.SMID = htole16(index);
3885
3886 return (req_desc);
3887 }
3888
3889 /*
3890 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3891 * input: Adapter soft state mfi cmd pointer
3892 *
3893 * The MPT command and the io_request are setup as a passthru command. The SGE
3894 * chain address is set to frame_phys_addr of the MFI command.
3895 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* Borrow an MPT command to carry the MFI frame to the firmware. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so completion can find the owning MFI command. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		/* Clear the flags of the last SGE in the main message. */
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* The request itself is a pass-through; the payload is the MFI frame. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA address of the MFI frame. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3945
3946 /*
3947 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3948 * input: Adapter soft state Command to be issued
3949 *
3950 * This function waits on an event for the command to be returned from the ISR.
3951 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3952 * internal and ioctl commands.
3953 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the value stored in sc->chan (the address of the
	 * local 'cmd' variable) is never dereferenced anywhere visible
	 * here; the actual sleep/wakeup channel is &sc->chan itself (see
	 * the tsleep() below and mrsas_wakeup()).
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second (hz) slices until the ISR sets cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			/* Internal (non-IOCTL) cmds give up after max_wait s. */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}
	sc->chan = NULL;

	/* Status still 0xFF: firmware never completed the command. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3999
4000 /*
4001 * mrsas_complete_mptmfi_passthru: Completes a command
4002 * input: @sc: Adapter soft state
4003 * @cmd: Command to be completed
4004 * @status: cmd completion status
4005 *
4006 * This function is called from mrsas_complete_cmd() after an interrupt is
4007 * received from Firmware, and io_request->Function is
4008 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4009 */
4010 void
mrsas_complete_mptmfi_passthru(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd,u_int8_t status)4011 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
4012 u_int8_t status)
4013 {
4014 struct mrsas_header *hdr = &cmd->frame->hdr;
4015 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
4016
4017 /* Reset the retry counter for future re-tries */
4018 cmd->retry_for_fw_reset = 0;
4019
4020 if (cmd->ccb_ptr)
4021 cmd->ccb_ptr = NULL;
4022
4023 switch (hdr->cmd) {
4024 case MFI_CMD_INVALID:
4025 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
4026 break;
4027 case MFI_CMD_PD_SCSI_IO:
4028 case MFI_CMD_LD_SCSI_IO:
4029 /*
4030 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
4031 * issued either through an IO path or an IOCTL path. If it
4032 * was via IOCTL, we will send it to internal completion.
4033 */
4034 if (cmd->sync_cmd) {
4035 cmd->sync_cmd = 0;
4036 mrsas_wakeup(sc, cmd);
4037 break;
4038 }
4039 case MFI_CMD_SMP:
4040 case MFI_CMD_STP:
4041 case MFI_CMD_DCMD:
4042 /* Check for LD map update */
4043 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
4044 (cmd->frame->dcmd.mbox.b[1] == 1)) {
4045 sc->fast_path_io = 0;
4046 mtx_lock(&sc->raidmap_lock);
4047 sc->map_update_cmd = NULL;
4048 if (cmd_status != 0) {
4049 if (cmd_status != MFI_STAT_NOT_FOUND)
4050 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
4051 else {
4052 mrsas_release_mfi_cmd(cmd);
4053 mtx_unlock(&sc->raidmap_lock);
4054 break;
4055 }
4056 } else
4057 sc->map_id++;
4058 mrsas_release_mfi_cmd(cmd);
4059 if (MR_ValidateMapInfo(sc))
4060 sc->fast_path_io = 0;
4061 else
4062 sc->fast_path_io = 1;
4063 mrsas_sync_map_info(sc);
4064 mtx_unlock(&sc->raidmap_lock);
4065 break;
4066 }
4067 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
4068 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
4069 sc->mrsas_aen_triggered = 0;
4070 }
4071 /* FW has an updated PD sequence */
4072 if ((cmd->frame->dcmd.opcode ==
4073 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4074 (cmd->frame->dcmd.mbox.b[0] == 1)) {
4075 mtx_lock(&sc->raidmap_lock);
4076 sc->jbod_seq_cmd = NULL;
4077 mrsas_release_mfi_cmd(cmd);
4078
4079 if (cmd_status == MFI_STAT_OK) {
4080 sc->pd_seq_map_id++;
4081 /* Re-register a pd sync seq num cmd */
4082 if (megasas_sync_pd_seq_num(sc, true))
4083 sc->use_seqnum_jbod_fp = 0;
4084 } else {
4085 sc->use_seqnum_jbod_fp = 0;
4086 device_printf(sc->mrsas_dev,
4087 "Jbod map sync failed, status=%x\n", cmd_status);
4088 }
4089 mtx_unlock(&sc->raidmap_lock);
4090 break;
4091 }
4092 /* See if got an event notification */
4093 if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
4094 mrsas_complete_aen(sc, cmd);
4095 else
4096 mrsas_wakeup(sc, cmd);
4097 break;
4098 case MFI_CMD_ABORT:
4099 /* Command issued to abort another cmd return */
4100 mrsas_complete_abort(sc, cmd);
4101 break;
4102 default:
4103 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
4104 break;
4105 }
4106 }
4107
4108 /*
4109 * mrsas_wakeup: Completes an internal command
4110 * input: Adapter soft state
4111 * Command to be completed
4112 *
4113 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4114 * timer is started. This function is called from
4115 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4116 * from the command wait.
4117 */
4118 void
mrsas_wakeup(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)4119 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4120 {
4121 cmd->cmd_status = cmd->frame->io.cmd_status;
4122
4123 if (cmd->cmd_status == 0xFF)
4124 cmd->cmd_status = 0;
4125
4126 sc->chan = (void *)&cmd;
4127 wakeup_one((void *)&sc->chan);
4128 return;
4129 }
4130
4131 /*
4132 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
4133 * Adapter soft state Shutdown/Hibernate
4134 *
4135 * This function issues a DCMD internal command to Firmware to initiate shutdown
4136 * of the controller.
4137 */
4138 static void
mrsas_shutdown_ctlr(struct mrsas_softc * sc,u_int32_t opcode)4139 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4140 {
4141 struct mrsas_mfi_cmd *cmd;
4142 struct mrsas_dcmd_frame *dcmd;
4143
4144 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4145 return;
4146
4147 cmd = mrsas_get_mfi_cmd(sc);
4148 if (!cmd) {
4149 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
4150 return;
4151 }
4152 if (sc->aen_cmd)
4153 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4154 if (sc->map_update_cmd)
4155 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4156 if (sc->jbod_seq_cmd)
4157 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4158
4159 dcmd = &cmd->frame->dcmd;
4160 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4161
4162 dcmd->cmd = MFI_CMD_DCMD;
4163 dcmd->cmd_status = 0x0;
4164 dcmd->sge_count = 0;
4165 dcmd->flags = MFI_FRAME_DIR_NONE;
4166 dcmd->timeout = 0;
4167 dcmd->pad_0 = 0;
4168 dcmd->data_xfer_len = 0;
4169 dcmd->opcode = opcode;
4170
4171 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4172
4173 mrsas_issue_blocked_cmd(sc, cmd);
4174 mrsas_release_mfi_cmd(cmd);
4175
4176 return;
4177 }
4178
4179 /*
4180 * mrsas_flush_cache: Requests FW to flush all its caches input:
4181 * Adapter soft state
4182 *
4183 * This function is issues a DCMD internal command to Firmware to initiate
4184 * flushing of all caches.
4185 */
4186 static void
mrsas_flush_cache(struct mrsas_softc * sc)4187 mrsas_flush_cache(struct mrsas_softc *sc)
4188 {
4189 struct mrsas_mfi_cmd *cmd;
4190 struct mrsas_dcmd_frame *dcmd;
4191
4192 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4193 return;
4194
4195 cmd = mrsas_get_mfi_cmd(sc);
4196 if (!cmd) {
4197 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4198 return;
4199 }
4200 dcmd = &cmd->frame->dcmd;
4201 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4202
4203 dcmd->cmd = MFI_CMD_DCMD;
4204 dcmd->cmd_status = 0x0;
4205 dcmd->sge_count = 0;
4206 dcmd->flags = MFI_FRAME_DIR_NONE;
4207 dcmd->timeout = 0;
4208 dcmd->pad_0 = 0;
4209 dcmd->data_xfer_len = 0;
4210 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4211 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4212
4213 mrsas_issue_blocked_cmd(sc, cmd);
4214 mrsas_release_mfi_cmd(cmd);
4215
4216 return;
4217 }
4218
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Buffer size: sync header plus one entry per possible PD. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* The JBOD map buffers are double-buffered on (pd_seq_map_id & 1). */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/*
		 * Async registration: the FW completes this DCMD only when
		 * the PD sequence numbers change; the command is parked in
		 * sc->jbod_seq_cmd and finished from the completion path.
		 * NOTE(review): on mrsas_issue_dcmd() failure the MFI
		 * command is not released here and sc->jbod_seq_cmd still
		 * points at it — confirm whether this leaks the command.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	/* Synchronous (polled) fetch of the current PD sequence map. */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* NOTE(review): pd_sync->count is printed raw (no le32toh) below. */
	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4295
4296 /*
4297 * mrsas_get_map_info: Load and validate RAID map input:
4298 * Adapter instance soft state
4299 *
4300 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4301 * and validate RAID map. It returns 0 if successful, 1 other- wise.
4302 */
4303 static int
mrsas_get_map_info(struct mrsas_softc * sc)4304 mrsas_get_map_info(struct mrsas_softc *sc)
4305 {
4306 uint8_t retcode = 0;
4307
4308 sc->fast_path_io = 0;
4309 if (!mrsas_get_ld_map_info(sc)) {
4310 retcode = MR_ValidateMapInfo(sc);
4311 if (retcode == 0) {
4312 sc->fast_path_io = 1;
4313 return 0;
4314 }
4315 }
4316 return 1;
4317 }
4318
4319 /*
4320 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
4321 * Adapter instance soft state
4322 *
4323 * Issues an internal command (DCMD) to get the FW's controller PD list
4324 * structure.
4325 */
4326 static int
mrsas_get_ld_map_info(struct mrsas_softc * sc)4327 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4328 {
4329 int retcode = 0;
4330 struct mrsas_mfi_cmd *cmd;
4331 struct mrsas_dcmd_frame *dcmd;
4332 void *map;
4333 bus_addr_t map_phys_addr = 0;
4334
4335 cmd = mrsas_get_mfi_cmd(sc);
4336 if (!cmd) {
4337 device_printf(sc->mrsas_dev,
4338 "Cannot alloc for ld map info cmd.\n");
4339 return 1;
4340 }
4341 dcmd = &cmd->frame->dcmd;
4342
4343 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4344 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4345 if (!map) {
4346 device_printf(sc->mrsas_dev,
4347 "Failed to alloc mem for ld map info.\n");
4348 mrsas_release_mfi_cmd(cmd);
4349 return (ENOMEM);
4350 }
4351 memset(map, 0, sizeof(sc->max_map_sz));
4352 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4353
4354 dcmd->cmd = MFI_CMD_DCMD;
4355 dcmd->cmd_status = 0xFF;
4356 dcmd->sge_count = 1;
4357 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4358 dcmd->timeout = 0;
4359 dcmd->pad_0 = 0;
4360 dcmd->data_xfer_len = htole32(sc->current_map_sz);
4361 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4362 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4363 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4364
4365 retcode = mrsas_issue_polled(sc, cmd);
4366 if (retcode == ETIMEDOUT)
4367 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4368
4369 return (retcode);
4370 }
4371
4372 /*
4373 * mrsas_sync_map_info: Get FW's ld_map structure input:
4374 * Adapter instance soft state
4375 *
4376 * Issues an internal command (DCMD) to get the FW's controller PD list
4377 * structure.
4378 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* The current driver-format map supplies targetId/seqNum pairs. */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* The inactive raid-map buffer carries the sync data to the FW. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One targetId/seqNum entry per logical drive. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	/*
	 * NOTE(review): num_lds is truncated to one byte here — confirm
	 * the FW mailbox interface caps the LD count at 255.
	 */
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	/*
	 * Pending DCMD: the FW completes it only when the LD map changes;
	 * completion is handled in mrsas_complete_mptmfi_passthru().
	 * NOTE(review): on mrsas_issue_dcmd() failure the command is not
	 * released and sc->map_update_cmd still points at it — confirm.
	 */
	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4437
4438 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO
4439 * dcmd.mbox.s[0] - deviceId for this physical drive
4440 * dcmd.sge IN - ptr to returned MR_PD_INFO structure
4441 * Desc: Firmware return the physical drive info structure
4442 *
4443 */
4444 static void
mrsas_get_pd_info(struct mrsas_softc * sc,u_int16_t device_id)4445 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4446 {
4447 int retcode;
4448 u_int8_t do_ocr = 1;
4449 struct mrsas_mfi_cmd *cmd;
4450 struct mrsas_dcmd_frame *dcmd;
4451
4452 cmd = mrsas_get_mfi_cmd(sc);
4453
4454 if (!cmd) {
4455 device_printf(sc->mrsas_dev,
4456 "Cannot alloc for get PD info cmd\n");
4457 return;
4458 }
4459 dcmd = &cmd->frame->dcmd;
4460
4461 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4462 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4463
4464 dcmd->mbox.s[0] = htole16(device_id);
4465 dcmd->cmd = MFI_CMD_DCMD;
4466 dcmd->cmd_status = 0xFF;
4467 dcmd->sge_count = 1;
4468 dcmd->flags = MFI_FRAME_DIR_READ;
4469 dcmd->timeout = 0;
4470 dcmd->pad_0 = 0;
4471 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4472 dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4473 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4474 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
4475
4476 if (!sc->mask_interrupts)
4477 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4478 else
4479 retcode = mrsas_issue_polled(sc, cmd);
4480
4481 if (retcode == ETIMEDOUT)
4482 goto dcmd_timeout;
4483
4484 sc->target_list[device_id].interface_type =
4485 le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
4486
4487 do_ocr = 0;
4488
4489 dcmd_timeout:
4490
4491 if (do_ocr)
4492 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4493
4494 if (!sc->mask_interrupts)
4495 mrsas_release_mfi_cmd(cmd);
4496 }
4497
4498 /*
4499 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure.
4500 * sc: Adapter's soft state
4501 * target_id: Unique target id per controller(managed by driver)
4502 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4503 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4504 * return: void
4505 * Descripton: This function will be called whenever system PD or VD is created.
4506 */
static void mrsas_add_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	int is_pd = (target_id < MRSAS_MAX_PD);

	sc->target_list[target_id].target_id = target_id;

	device_printf(sc->mrsas_dev,
	    "%s created target ID: 0x%x\n",
	    is_pd ? "System PD" : "VD",
	    is_pd ? target_id : (target_id - MRSAS_MAX_PD));
	/*
	 * Query pd_info only for system PDs, and only when interrupts are
	 * enabled and the pd_info buffer exists.
	 */
	if (!sc->mask_interrupts && sc->pd_info_mem && is_pd)
		mrsas_get_pd_info(sc, target_id);

}
4525
4526 /*
4527 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure.
4528 * sc: Adapter's soft state
4529 * target_id: Unique target id per controller(managed by driver)
4530 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4531 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4532 * return: void
4533 * Descripton: This function will be called whenever system PD or VD is deleted
4534 */
static void mrsas_remove_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	int is_pd = (target_id < MRSAS_MAX_PD);

	/* 0xffff marks the slot as unused. */
	sc->target_list[target_id].target_id = 0xffff;
	device_printf(sc->mrsas_dev,
	    "%s deleted target ID: 0x%x\n",
	    is_pd ? "System PD" : "VD",
	    is_pd ? target_id : (target_id - MRSAS_MAX_PD));
}
4544
4545 /*
4546 * mrsas_get_pd_list: Returns FW's PD list structure input:
4547 * Adapter soft state
4548 *
4549 * Issues an internal command (DCMD) to get the FW's controller PD list
4550 * structure. This information is mainly used to find out about system
4551 * supported by Firmware.
4552 */
4553 static int
mrsas_get_pd_list(struct mrsas_softc * sc)4554 mrsas_get_pd_list(struct mrsas_softc *sc)
4555 {
4556 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4557 u_int8_t do_ocr = 1;
4558 struct mrsas_mfi_cmd *cmd;
4559 struct mrsas_dcmd_frame *dcmd;
4560 struct MR_PD_LIST *pd_list_mem;
4561 struct MR_PD_ADDRESS *pd_addr;
4562 bus_addr_t pd_list_phys_addr = 0;
4563 struct mrsas_tmp_dcmd *tcmd;
4564 u_int16_t dev_id;
4565
4566 cmd = mrsas_get_mfi_cmd(sc);
4567 if (!cmd) {
4568 device_printf(sc->mrsas_dev,
4569 "Cannot alloc for get PD list cmd\n");
4570 return 1;
4571 }
4572 dcmd = &cmd->frame->dcmd;
4573
4574 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4575 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4576 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4577 device_printf(sc->mrsas_dev,
4578 "Cannot alloc dmamap for get PD list cmd\n");
4579 mrsas_release_mfi_cmd(cmd);
4580 mrsas_free_tmp_dcmd(tcmd);
4581 free(tcmd, M_MRSAS);
4582 return (ENOMEM);
4583 } else {
4584 pd_list_mem = tcmd->tmp_dcmd_mem;
4585 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4586 }
4587 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4588
4589 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4590 dcmd->mbox.b[1] = 0;
4591 dcmd->cmd = MFI_CMD_DCMD;
4592 dcmd->cmd_status = 0xFF;
4593 dcmd->sge_count = 1;
4594 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4595 dcmd->timeout = 0;
4596 dcmd->pad_0 = 0;
4597 dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4598 dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4599 dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4600 dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4601
4602 if (!sc->mask_interrupts)
4603 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4604 else
4605 retcode = mrsas_issue_polled(sc, cmd);
4606
4607 if (retcode == ETIMEDOUT)
4608 goto dcmd_timeout;
4609
4610 /* Get the instance PD list */
4611 pd_count = MRSAS_MAX_PD;
4612 pd_addr = pd_list_mem->addr;
4613 if (le32toh(pd_list_mem->count) < pd_count) {
4614 memset(sc->local_pd_list, 0,
4615 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4616 for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4617 dev_id = le16toh(pd_addr->deviceId);
4618 sc->local_pd_list[dev_id].tid = dev_id;
4619 sc->local_pd_list[dev_id].driveType =
4620 le16toh(pd_addr->scsiDevType);
4621 sc->local_pd_list[dev_id].driveState =
4622 MR_PD_STATE_SYSTEM;
4623 if (sc->target_list[dev_id].target_id == 0xffff)
4624 mrsas_add_target(sc, dev_id);
4625 pd_addr++;
4626 }
4627 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4628 if ((sc->local_pd_list[pd_index].driveState !=
4629 MR_PD_STATE_SYSTEM) &&
4630 (sc->target_list[pd_index].target_id !=
4631 0xffff)) {
4632 mrsas_remove_target(sc, pd_index);
4633 }
4634 }
4635 /*
4636 * Use mutext/spinlock if pd_list component size increase more than
4637 * 32 bit.
4638 */
4639 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4640 do_ocr = 0;
4641 }
4642 dcmd_timeout:
4643 mrsas_free_tmp_dcmd(tcmd);
4644 free(tcmd, M_MRSAS);
4645
4646 if (do_ocr)
4647 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4648
4649 if (!sc->mask_interrupts)
4650 mrsas_release_mfi_cmd(cmd);
4651
4652 return (retcode);
4653 }
4654
4655 /*
4656 * mrsas_get_ld_list: Returns FW's LD list structure input:
4657 * Adapter soft state
4658 *
4659 * Issues an internal command (DCMD) to get the FW's controller PD list
4660 * structure. This information is mainly used to find out about supported by
4661 * the FW.
4662 */
4663 static int
mrsas_get_ld_list(struct mrsas_softc * sc)4664 mrsas_get_ld_list(struct mrsas_softc *sc)
4665 {
4666 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4667 u_int8_t do_ocr = 1;
4668 struct mrsas_mfi_cmd *cmd;
4669 struct mrsas_dcmd_frame *dcmd;
4670 struct MR_LD_LIST *ld_list_mem;
4671 bus_addr_t ld_list_phys_addr = 0;
4672 struct mrsas_tmp_dcmd *tcmd;
4673
4674 cmd = mrsas_get_mfi_cmd(sc);
4675 if (!cmd) {
4676 device_printf(sc->mrsas_dev,
4677 "Cannot alloc for get LD list cmd\n");
4678 return 1;
4679 }
4680 dcmd = &cmd->frame->dcmd;
4681
4682 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4683 ld_list_size = sizeof(struct MR_LD_LIST);
4684 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4685 device_printf(sc->mrsas_dev,
4686 "Cannot alloc dmamap for get LD list cmd\n");
4687 mrsas_release_mfi_cmd(cmd);
4688 mrsas_free_tmp_dcmd(tcmd);
4689 free(tcmd, M_MRSAS);
4690 return (ENOMEM);
4691 } else {
4692 ld_list_mem = tcmd->tmp_dcmd_mem;
4693 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4694 }
4695 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4696
4697 if (sc->max256vdSupport)
4698 dcmd->mbox.b[0] = 1;
4699
4700 dcmd->cmd = MFI_CMD_DCMD;
4701 dcmd->cmd_status = 0xFF;
4702 dcmd->sge_count = 1;
4703 dcmd->flags = MFI_FRAME_DIR_READ;
4704 dcmd->timeout = 0;
4705 dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4706 dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4707 dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4708 dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4709 dcmd->pad_0 = 0;
4710
4711 if (!sc->mask_interrupts)
4712 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4713 else
4714 retcode = mrsas_issue_polled(sc, cmd);
4715
4716 if (retcode == ETIMEDOUT)
4717 goto dcmd_timeout;
4718
4719 #if VD_EXT_DEBUG
4720 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4721 #endif
4722
4723 /* Get the instance LD list */
4724 if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4725 sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4726 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4727 for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4728 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4729 drv_tgt_id = ids + MRSAS_MAX_PD;
4730 if (ld_list_mem->ldList[ld_index].state != 0) {
4731 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4732 if (sc->target_list[drv_tgt_id].target_id ==
4733 0xffff)
4734 mrsas_add_target(sc, drv_tgt_id);
4735 } else {
4736 if (sc->target_list[drv_tgt_id].target_id !=
4737 0xffff)
4738 mrsas_remove_target(sc,
4739 drv_tgt_id);
4740 }
4741 }
4742
4743 do_ocr = 0;
4744 }
4745 dcmd_timeout:
4746 mrsas_free_tmp_dcmd(tcmd);
4747 free(tcmd, M_MRSAS);
4748
4749 if (do_ocr)
4750 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4751 if (!sc->mask_interrupts)
4752 mrsas_release_mfi_cmd(cmd);
4753
4754 return (retcode);
4755 }
4756
/*
 * mrsas_alloc_tmp_dcmd:	Allocate memory for a temporary command.
 * input:			Adapter soft state
 * 				Temp command
 * 				Size of allocation
 *
 * Allocates DMAable memory for a temporary internal command. The allocated
 * memory is initialized to all zeros upon successful loading of the dma
 * mapped memory.
 */
4765 int
mrsas_alloc_tmp_dcmd(struct mrsas_softc * sc,struct mrsas_tmp_dcmd * tcmd,int size)4766 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4767 struct mrsas_tmp_dcmd *tcmd, int size)
4768 {
4769 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4770 1, 0,
4771 BUS_SPACE_MAXADDR_32BIT,
4772 BUS_SPACE_MAXADDR,
4773 NULL, NULL,
4774 size,
4775 1,
4776 size,
4777 BUS_DMA_ALLOCNOW,
4778 NULL, NULL,
4779 &tcmd->tmp_dcmd_tag)) {
4780 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4781 return (ENOMEM);
4782 }
4783 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4784 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4785 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4786 return (ENOMEM);
4787 }
4788 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4789 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4790 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4791 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4792 return (ENOMEM);
4793 }
4794 memset(tcmd->tmp_dcmd_mem, 0, size);
4795 return (0);
4796 }
4797
/*
 * mrsas_free_tmp_dcmd:	Free memory for a temporary command.
 * input:		temporary dcmd pointer
 *
 * Deallocates the DMA resources of the temporary command used in the
 * construction of the internal DCMD.
 */
4805 void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd * tmp)4806 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4807 {
4808 if (tmp->tmp_dcmd_phys_addr)
4809 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4810 if (tmp->tmp_dcmd_mem != NULL)
4811 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4812 if (tmp->tmp_dcmd_tag != NULL)
4813 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4814 }
4815
/*
 * mrsas_issue_blocked_abort_cmd:	Abort a previously issued command.
 * input:				Adapter soft state
 * 					Previously issued cmd to be aborted
 *
 * This function is used to abort previously issued commands, such as AEN and
 * RAID map sync map commands. The abort command is sent as a DCMD internal
 * command and subsequently the driver will wait for a return status. The
 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
 */
4825 static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd_to_abort)4826 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4827 struct mrsas_mfi_cmd *cmd_to_abort)
4828 {
4829 struct mrsas_mfi_cmd *cmd;
4830 struct mrsas_abort_frame *abort_fr;
4831 u_int8_t retcode = 0;
4832 unsigned long total_time = 0;
4833 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4834
4835 cmd = mrsas_get_mfi_cmd(sc);
4836 if (!cmd) {
4837 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4838 return (1);
4839 }
4840 abort_fr = &cmd->frame->abort;
4841
4842 /* Prepare and issue the abort frame */
4843 abort_fr->cmd = MFI_CMD_ABORT;
4844 abort_fr->cmd_status = 0xFF;
4845 abort_fr->flags = 0;
4846 abort_fr->abort_context = cmd_to_abort->index;
4847 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4848 abort_fr->abort_mfi_phys_addr_hi = 0;
4849
4850 cmd->sync_cmd = 1;
4851 cmd->cmd_status = 0xFF;
4852
4853 if (mrsas_issue_dcmd(sc, cmd)) {
4854 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4855 return (1);
4856 }
4857 /* Wait for this cmd to complete */
4858 sc->chan = (void *)&cmd;
4859 while (1) {
4860 if (cmd->cmd_status == 0xFF) {
4861 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4862 } else
4863 break;
4864 total_time++;
4865 if (total_time >= max_wait) {
4866 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4867 retcode = 1;
4868 break;
4869 }
4870 }
4871
4872 cmd->sync_cmd = 0;
4873 mrsas_release_mfi_cmd(cmd);
4874 return (retcode);
4875 }
4876
/*
 * mrsas_complete_abort:	Complete an abort command.
 * input:			Adapter soft state
 * 				Cmd that was issued to abort another cmd
 *
 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
 * change after sending the command. This function is called from
 * mrsas_complete_mptmfi_passthru() to wake up the sleeping thread.
 */
4885 void
mrsas_complete_abort(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)4886 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4887 {
4888 if (cmd->sync_cmd) {
4889 cmd->sync_cmd = 0;
4890 cmd->cmd_status = 0;
4891 sc->chan = (void *)&cmd;
4892 wakeup_one((void *)&sc->chan);
4893 }
4894 return;
4895 }
4896
4897 /*
4898 * mrsas_aen_handler: AEN processing callback function from thread context
4899 * input: Adapter soft state
4900 *
4901 * Asynchronous event handler
4902 */
4903 void
mrsas_aen_handler(struct mrsas_softc * sc)4904 mrsas_aen_handler(struct mrsas_softc *sc)
4905 {
4906 union mrsas_evt_class_locale class_locale;
4907 int doscan = 0;
4908 u_int32_t seq_num;
4909 int error, fail_aen = 0;
4910
4911 if (sc == NULL) {
4912 printf("invalid instance!\n");
4913 return;
4914 }
4915 if (sc->remove_in_progress || sc->reset_in_progress) {
4916 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4917 __func__, __LINE__);
4918 return;
4919 }
4920 if (sc->evt_detail_mem) {
4921 switch (sc->evt_detail_mem->code) {
4922 case MR_EVT_PD_INSERTED:
4923 fail_aen = mrsas_get_pd_list(sc);
4924 if (!fail_aen)
4925 mrsas_bus_scan_sim(sc, sc->sim_1);
4926 else
4927 goto skip_register_aen;
4928 break;
4929 case MR_EVT_PD_REMOVED:
4930 fail_aen = mrsas_get_pd_list(sc);
4931 if (!fail_aen)
4932 mrsas_bus_scan_sim(sc, sc->sim_1);
4933 else
4934 goto skip_register_aen;
4935 break;
4936 case MR_EVT_LD_OFFLINE:
4937 case MR_EVT_CFG_CLEARED:
4938 case MR_EVT_LD_DELETED:
4939 mrsas_bus_scan_sim(sc, sc->sim_0);
4940 break;
4941 case MR_EVT_LD_CREATED:
4942 fail_aen = mrsas_get_ld_list(sc);
4943 if (!fail_aen)
4944 mrsas_bus_scan_sim(sc, sc->sim_0);
4945 else
4946 goto skip_register_aen;
4947 break;
4948 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4949 case MR_EVT_FOREIGN_CFG_IMPORTED:
4950 case MR_EVT_LD_STATE_CHANGE:
4951 doscan = 1;
4952 break;
4953 case MR_EVT_CTRL_PROP_CHANGED:
4954 fail_aen = mrsas_get_ctrl_info(sc);
4955 if (fail_aen)
4956 goto skip_register_aen;
4957 break;
4958 default:
4959 break;
4960 }
4961 } else {
4962 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4963 return;
4964 }
4965 if (doscan) {
4966 fail_aen = mrsas_get_pd_list(sc);
4967 if (!fail_aen) {
4968 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4969 mrsas_bus_scan_sim(sc, sc->sim_1);
4970 } else
4971 goto skip_register_aen;
4972
4973 fail_aen = mrsas_get_ld_list(sc);
4974 if (!fail_aen) {
4975 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4976 mrsas_bus_scan_sim(sc, sc->sim_0);
4977 } else
4978 goto skip_register_aen;
4979 }
4980 seq_num = sc->evt_detail_mem->seq_num + 1;
4981
4982 /* Register AEN with FW for latest sequence number plus 1 */
4983 class_locale.members.reserved = 0;
4984 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4985 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4986
4987 if (sc->aen_cmd != NULL)
4988 return;
4989
4990 mtx_lock(&sc->aen_lock);
4991 error = mrsas_register_aen(sc, seq_num,
4992 class_locale.word);
4993 mtx_unlock(&sc->aen_lock);
4994
4995 if (error)
4996 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4997
4998 skip_register_aen:
4999 return;
5000
5001 }
5002
/*
 * mrsas_complete_aen:	Completes the AEN command
 * input:		Adapter soft state
 * 			The completed AEN command
 *
 * This function will be called from ISR and will continue event processing from
 * thread context by enqueuing task in ev_tq (callback function
 * "mrsas_aen_handler").
 */
5012 void
mrsas_complete_aen(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)5013 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5014 {
5015 /*
5016 * Don't signal app if it is just an aborted previously registered
5017 * aen
5018 */
5019 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5020 sc->mrsas_aen_triggered = 1;
5021 mtx_lock(&sc->aen_lock);
5022 if (sc->mrsas_poll_waiting) {
5023 sc->mrsas_poll_waiting = 0;
5024 selwakeup(&sc->mrsas_select);
5025 }
5026 mtx_unlock(&sc->aen_lock);
5027 } else
5028 cmd->abort_aen = 0;
5029
5030 sc->aen_cmd = NULL;
5031 mrsas_release_mfi_cmd(cmd);
5032
5033 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5034
5035 return;
5036 }
5037
5038 static device_method_t mrsas_methods[] = {
5039 DEVMETHOD(device_probe, mrsas_probe),
5040 DEVMETHOD(device_attach, mrsas_attach),
5041 DEVMETHOD(device_detach, mrsas_detach),
5042 DEVMETHOD(device_shutdown, mrsas_shutdown),
5043 DEVMETHOD(device_suspend, mrsas_suspend),
5044 DEVMETHOD(device_resume, mrsas_resume),
5045 DEVMETHOD(bus_print_child, bus_generic_print_child),
5046 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
5047 {0, 0}
5048 };
5049
5050 static driver_t mrsas_driver = {
5051 "mrsas",
5052 mrsas_methods,
5053 sizeof(struct mrsas_softc)
5054 };
5055
5056 DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
5057 MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5058