1 /*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30 *
31 */
32
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT3 */
35
36 /* TODO Move headers to mprvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/smp.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/endian.h>
56 #include <sys/eventhandler.h>
57 #include <sys/sbuf.h>
58 #include <sys/priv.h>
59
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 #include <sys/rman.h>
63 #include <sys/proc.h>
64
65 #include <dev/pci/pcivar.h>
66
67 #include <cam/cam.h>
68 #include <cam/cam_ccb.h>
69 #include <cam/scsi/scsi_all.h>
70
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_pci.h>
76 #include <dev/mpr/mpi/mpi2_cnfg.h>
77 #include <dev/mpr/mpi/mpi2_init.h>
78 #include <dev/mpr/mpi/mpi2_tool.h>
79 #include <dev/mpr/mpr_ioctl.h>
80 #include <dev/mpr/mprvar.h>
81 #include <dev/mpr/mpr_table.h>
82 #include <dev/mpr/mpr_sas.h>
83
84 static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
85 static int mpr_init_queues(struct mpr_softc *sc);
86 static void mpr_resize_queues(struct mpr_softc *sc);
87 static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
88 static int mpr_transition_operational(struct mpr_softc *sc);
89 static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
90 static void mpr_iocfacts_free(struct mpr_softc *sc);
91 static void mpr_startup(void *arg);
92 static int mpr_send_iocinit(struct mpr_softc *sc);
93 static int mpr_alloc_queues(struct mpr_softc *sc);
94 static int mpr_alloc_hw_queues(struct mpr_softc *sc);
95 static int mpr_alloc_replies(struct mpr_softc *sc);
96 static int mpr_alloc_requests(struct mpr_softc *sc);
97 static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
98 static int mpr_attach_log(struct mpr_softc *sc);
99 static __inline void mpr_complete_command(struct mpr_softc *sc,
100 struct mpr_command *cm);
101 static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
102 MPI2_EVENT_NOTIFICATION_REPLY *reply);
103 static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
104 static void mpr_periodic(void *);
105 static int mpr_reregister_events(struct mpr_softc *sc);
106 static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
107 static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
108 static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
109 static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
110 static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);
111 static void mpr_parse_debug(struct mpr_softc *sc, char *list);
112 static void adjust_iocfacts_endianness(MPI2_IOC_FACTS_REPLY *facts);
113
114 SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
115 "MPR Driver Parameters");
116
117 MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");
118
119 /*
120 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
121 * any state and back to its initialization state machine.
122 */
123 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
124
125 /*
126  * This union is used to smoothly convert cm->cm_desc.Words with le64toh.
127  * The compiler only accepts a uint64_t argument; passing the aggregate
128  * directly throws this error:
129 * "aggregate value used where an integer was expected"
130 */
131 typedef union {
132 u64 word;
133 struct {
134 u32 low;
135 u32 high;
136 } u;
137 } request_descriptor_t;
138
139 /* Rate limit chain-fail messages to 1 per minute */
140 static struct timeval mpr_chainfail_interval = { 60, 0 };
141
142 /*
143 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
144  * If this function is called from process context, it is safe to sleep.
145  * If it is called from an interrupt handler, we cannot sleep and the
146  * NO_SLEEP flag must be set.
147  * Based on the sleep flag the driver will call either msleep, pause or
148  * DELAY.  msleep and pause are variants of the same primitive, but pause
149  * is used when the driver does not hold mpr_mtx.
150 */
151 static int
152 mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
153 {
154 uint32_t reg;
155 int i, error, tries = 0;
156 uint8_t first_wait_done = FALSE;
157
158 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
159
160 /* Clear any pending interrupts */
161 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
162
163 /*
164  * Force NO_SLEEP for threads that are prohibited from sleeping,
165  * e.g. threads running in an interrupt handler.
166 */
167 if (curthread->td_no_sleeping)
168 sleep_flag = NO_SLEEP;
169
170 mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
171 /* Push the magic sequence */
172 error = ETIMEDOUT;
173 while (tries++ < 20) {
174 for (i = 0; i < sizeof(mpt2_reset_magic); i++)
175 mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
176 mpt2_reset_magic[i]);
177
178 /* wait 100 msec */
179 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
180 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
181 "mprdiag", hz/10);
182 else if (sleep_flag == CAN_SLEEP)
183 pause("mprdiag", hz/10);
184 else
185 DELAY(100 * 1000);
186
187 reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
188 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
189 error = 0;
190 break;
191 }
192 }
193 if (error) {
194 mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
195 error);
196 return (error);
197 }
198
199 /* Send the actual reset. XXX need to refresh the reg? */
200 reg |= MPI2_DIAG_RESET_ADAPTER;
201 mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
202 reg);
203 mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);
204
205 /* Wait up to 300 seconds in 50ms intervals */
206 error = ETIMEDOUT;
207 for (i = 0; i < 6000; i++) {
208 /*
209 * Wait 50 msec. If this is the first time through, wait 256
210 * msec to satisfy Diag Reset timing requirements.
211 */
212 if (first_wait_done) {
213 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
214 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
215 "mprdiag", hz/20);
216 else if (sleep_flag == CAN_SLEEP)
217 pause("mprdiag", hz/20);
218 else
219 DELAY(50 * 1000);
220 } else {
221 DELAY(256 * 1000);
222 first_wait_done = TRUE;
223 }
224 /*
225 * Check for the RESET_ADAPTER bit to be cleared first, then
226 * wait for the RESET state to be cleared, which takes a little
227 * longer.
228 */
229 reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
230 if (reg & MPI2_DIAG_RESET_ADAPTER) {
231 continue;
232 }
233 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
234 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
235 error = 0;
236 break;
237 }
238 }
239 if (error) {
240 mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
241 error);
242 return (error);
243 }
244
245 mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
246 mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");
247
248 return (0);
249 }
250
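/*
 * Ask the IOC for a Message Unit Reset by writing the reset function to the
 * doorbell register and waiting for the doorbell handshake to be acked.
 */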
251 static int
252 mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
253 {
254 int error;
255
256 MPR_FUNCTRACE(sc);
257
258 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
259
260 error = 0;
261 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
262 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
263 MPI2_DOORBELL_FUNCTION_SHIFT);
264
265 if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
266 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
267 "Doorbell handshake failed\n");
268 error = ETIMEDOUT;
269 }
270
271 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
272 return (error);
273 }
274
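/*
 * Walk the IOC from whatever state it is in to the READY state, issuing a
 * diag reset or a message unit reset as needed along the way.
 */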
275 static int
276 mpr_transition_ready(struct mpr_softc *sc)
277 {
278 uint32_t reg, state;
279 int error, tries = 0;
280 int sleep_flags;
281
282 MPR_FUNCTRACE(sc);
283 /* If we are in attach call, do not sleep */
284 sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
285 ? CAN_SLEEP : NO_SLEEP;
286
287 error = 0;
288
289 mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
290 __func__, sleep_flags);
291
292 while (tries++ < 1200) {
293 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
294 mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);
295
296 /*
297 * Ensure the IOC is ready to talk. If it's not, try
298 * resetting it.
299 */
300 if (reg & MPI2_DOORBELL_USED) {
301 mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
302 "reset\n");
303 mpr_diag_reset(sc, sleep_flags);
304 DELAY(50000);
305 continue;
306 }
307
308 /* Is the adapter owned by another peer? */
309 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
310 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
311 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
312 "control of another peer host, aborting "
313 "initialization.\n");
314 error = ENXIO;
315 break;
316 }
317
318 state = reg & MPI2_IOC_STATE_MASK;
319 if (state == MPI2_IOC_STATE_READY) {
320 /* Ready to go! */
321 error = 0;
322 break;
323 } else if (state == MPI2_IOC_STATE_FAULT) {
324 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
325 "state 0x%x, resetting\n",
326 state & MPI2_DOORBELL_FAULT_CODE_MASK);
327 mpr_diag_reset(sc, sleep_flags);
328 } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
329 /* Need to take ownership */
330 mpr_message_unit_reset(sc, sleep_flags);
331 } else if (state == MPI2_IOC_STATE_RESET) {
332 /* Wait a bit, IOC might be in transition */
333 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
334 "IOC in unexpected reset state\n");
335 } else {
336 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
337 "IOC in unknown state 0x%x\n", state);
338 error = EINVAL;
339 break;
340 }
341
342 /* Wait 50ms for things to settle down. */
343 DELAY(50000);
344 }
345
346 if (error)
347 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
348 "Cannot transition IOC to ready\n");
349 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
350 return (error);
351 }
352
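/*
 * Bring the IOC to the OPERATIONAL state: transition to READY first if
 * necessary, then send the IOCInit request.
 */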
353 static int
354 mpr_transition_operational(struct mpr_softc *sc)
355 {
356 uint32_t reg, state;
357 int error;
358
359 MPR_FUNCTRACE(sc);
360
361 error = 0;
362 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
363 mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);
364
365 state = reg & MPI2_IOC_STATE_MASK;
366 if (state != MPI2_IOC_STATE_READY) {
367 mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
368 if ((error = mpr_transition_ready(sc)) != 0) {
369 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
370 "failed to transition ready, exit\n");
371 return (error);
372 }
373 }
374
375 error = mpr_send_iocinit(sc);
376 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
377
378 return (error);
379 }
380
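/*
 * Size the request, reply and chain frame pools from the credits reported
 * in IOC Facts and the driver tunables, and pick the number of MSI-X queues.
 */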
381 static void
382 mpr_resize_queues(struct mpr_softc *sc)
383 {
384 u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;
385
386 /*
387 * Size the queues. Since the reply queues always need one free
388 * entry, we'll deduct one reply message here. The LSI documents
389  * suggest adding a count to the request queue instead, but I think
390  * that it's better to deduct from the reply queue.
391 */
392 prireqcr = MAX(1, sc->max_prireqframes);
393 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);
394
395 reqcr = MAX(2, sc->max_reqframes);
396 reqcr = MIN(reqcr, sc->facts->RequestCredit);
397
398 sc->num_reqs = prireqcr + reqcr;
399 sc->num_prireqs = prireqcr;
400 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
401 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
402
403 /* Store the request frame size in bytes rather than as 32bit words */
404 sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;
405
406 /*
407 * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
408  * get the size of a Chain Frame.  Previous versions use the Request
409  * Frame size as the Chain Frame size.  If IOCMaxChainSegmentSize
410  * is 0, use the default value.  IOCMaxChainSegmentSize is the
411  * number of 16-byte elements that can fit in a Chain Frame, where
412  * 16 bytes is the size of an IEEE Simple SGE.
413 */
414 if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
415 chain_seg_size = sc->facts->IOCMaxChainSegmentSize;
416 if (chain_seg_size == 0)
417 chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
418 sc->chain_frame_size = chain_seg_size *
419 MPR_MAX_CHAIN_ELEMENT_SIZE;
420 } else {
421 sc->chain_frame_size = sc->reqframesz;
422 }
423
424 /*
425 * Max IO Size is Page Size * the following:
426 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
427 * + 1 for no chain needed in last frame
428 *
429 * If user suggests a Max IO size to use, use the smaller of the
430 * user's value and the calculated value as long as the user's
431 * value is larger than 0. The user's value is in pages.
432 */
433 sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
434 maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
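/*
 * For example, a 128-byte chain frame holds 8 16-byte IEEE Simple SGEs;
 * one of those is reserved for the chain element, leaving 7 data SGEs per
 * frame, so a MaxChainDepth of 128 would allow roughly
 * (7 * 128 + 1) * PAGE_SIZE per I/O.  (Illustrative numbers only.)
 */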
435
436 /*
437 * If I/O size limitation requested then use it and pass up to CAM.
438 * If not, use maxphys as an optimization hint, but report HW limit.
439 */
440 if (sc->max_io_pages > 0) {
441 maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
442 sc->maxio = maxio;
443 } else {
444 sc->maxio = maxio;
445 maxio = min(maxio, maxphys);
446 }
447
448 sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
449 sges_per_frame * reqcr;
450 if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
451 sc->num_chains = sc->max_chains;
452
453 /*
454 * Figure out the number of MSIx-based queues. If the firmware or
455 * user has done something crazy and not allowed enough credit for
456 * the queues to be useful then don't enable multi-queue.
457 */
458 if (sc->facts->MaxMSIxVectors < 2)
459 sc->msi_msgs = 1;
460
461 if (sc->msi_msgs > 1) {
462 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
463 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
464 if (sc->num_reqs / sc->msi_msgs < 2)
465 sc->msi_msgs = 1;
466 }
467
468 mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
469 sc->msi_msgs, sc->num_reqs, sc->num_replies);
470 }
471
472 /*
473 * This is called during attach and when re-initializing due to a Diag Reset.
474 * IOC Facts is used to allocate many of the structures needed by the driver.
475 * If called from attach, de-allocation is not required because the driver has
476 * not allocated any structures yet, but if called from a Diag Reset, previously
477 * allocated structures based on IOC Facts will need to be freed and re-
478  * allocated based on the latest IOC Facts.
479 */
480 static int
481 mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
482 {
483 int error;
484 Mpi2IOCFactsReply_t saved_facts;
485 uint8_t saved_mode, reallocating;
486
487 mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);
488
489 /* Save old IOC Facts and then only reallocate if Facts have changed */
490 if (!attaching) {
491 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
492 }
493
494 /*
495 * Get IOC Facts. In all cases throughout this function, panic if doing
496 * a re-initialization and only return the error if attaching so the OS
497 * can handle it.
498 */
499 if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
500 if (attaching) {
501 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
502 "IOC Facts with error %d, exit\n", error);
503 return (error);
504 } else {
505 panic("%s failed to get IOC Facts with error %d\n",
506 __func__, error);
507 }
508 }
509
510 MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);
511
512 snprintf(sc->fw_version, sizeof(sc->fw_version),
513 "%02d.%02d.%02d.%02d",
514 sc->facts->FWVersion.Struct.Major,
515 sc->facts->FWVersion.Struct.Minor,
516 sc->facts->FWVersion.Struct.Unit,
517 sc->facts->FWVersion.Struct.Dev);
518
519 snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d",
520 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
521 MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT,
522 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >>
523 MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT);
524
525 mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
526 MPR_DRIVER_VERSION);
527 mpr_dprint(sc, MPR_INFO,
528 "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
529 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
530 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
531 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
532 "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");
533
534 /*
535 * If the chip doesn't support event replay then a hard reset will be
536 * required to trigger a full discovery. Do the reset here then
537 * retransition to Ready. A hard reset might have already been done,
538 * but it doesn't hurt to do it again. Only do this if attaching, not
539 * for a Diag Reset.
540 */
541 if (attaching && ((sc->facts->IOCCapabilities &
542 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
543 mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
544 mpr_diag_reset(sc, NO_SLEEP);
545 if ((error = mpr_transition_ready(sc)) != 0) {
546 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
547 "transition to ready with error %d, exit\n",
548 error);
549 return (error);
550 }
551 }
552
553 /*
554 * Set flag if IR Firmware is loaded. If the RAID Capability has
555 * changed from the previous IOC Facts, log a warning, but only if
556 * checking this after a Diag Reset and not during attach.
557 */
558 saved_mode = sc->ir_firmware;
559 if (sc->facts->IOCCapabilities &
560 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
561 sc->ir_firmware = 1;
562 if (!attaching) {
563 if (sc->ir_firmware != saved_mode) {
564 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
565 "in IOC Facts does not match previous mode\n");
566 }
567 }
568
569 /* Only deallocate and reallocate if relevant IOC Facts have changed */
570 reallocating = FALSE;
571 sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;
572
573 if ((!attaching) &&
574 ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
575 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
576 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
577 (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
578 (saved_facts.ProductID != sc->facts->ProductID) ||
579 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
580 (saved_facts.IOCRequestFrameSize !=
581 sc->facts->IOCRequestFrameSize) ||
582 (saved_facts.IOCMaxChainSegmentSize !=
583 sc->facts->IOCMaxChainSegmentSize) ||
584 (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
585 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
586 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
587 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
588 (saved_facts.MaxReplyDescriptorPostQueueDepth !=
589 sc->facts->MaxReplyDescriptorPostQueueDepth) ||
590 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
591 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
592 (saved_facts.MaxPersistentEntries !=
593 sc->facts->MaxPersistentEntries))) {
594 reallocating = TRUE;
595
596 /* Record that we reallocated everything */
597 sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
598 }
599
600 /*
601 * Some things should be done if attaching or re-allocating after a Diag
602 * Reset, but are not needed after a Diag Reset if the FW has not
603 * changed.
604 */
605 if (attaching || reallocating) {
606 /*
607 * Check if controller supports FW diag buffers and set flag to
608 * enable each type.
609 */
610 if (sc->facts->IOCCapabilities &
611 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
612 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
613 enabled = TRUE;
614 if (sc->facts->IOCCapabilities &
615 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
616 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
617 enabled = TRUE;
618 if (sc->facts->IOCCapabilities &
619 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
620 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
621 enabled = TRUE;
622
623 /*
624 * Set flags for some supported items.
625 */
626 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
627 sc->eedp_enabled = TRUE;
628 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
629 sc->control_TLR = TRUE;
630 if ((sc->facts->IOCCapabilities &
631 MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) &&
632 (sc->mpr_flags & MPR_FLAGS_SEA_IOC))
633 sc->atomic_desc_capable = TRUE;
634
635 mpr_resize_queues(sc);
636
637 /*
638 * Initialize all Tail Queues
639 */
640 TAILQ_INIT(&sc->req_list);
641 TAILQ_INIT(&sc->high_priority_req_list);
642 TAILQ_INIT(&sc->chain_list);
643 TAILQ_INIT(&sc->prp_page_list);
644 TAILQ_INIT(&sc->tm_list);
645 }
646
647 /*
648 * If doing a Diag Reset and the FW is significantly different
649 * (reallocating will be set above in IOC Facts comparison), then all
650 * buffers based on the IOC Facts will need to be freed before they are
651 * reallocated.
652 */
653 if (reallocating) {
654 mpr_iocfacts_free(sc);
655 mprsas_realloc_targets(sc, saved_facts.MaxTargets +
656 saved_facts.MaxVolumes);
657 }
658
659 /*
660 * Any deallocation has been completed. Now start reallocating
661 * if needed. Will only need to reallocate if attaching or if the new
662 * IOC Facts are different from the previous IOC Facts after a Diag
663 * Reset. Targets have already been allocated above if needed.
664 */
665 error = 0;
666 while (attaching || reallocating) {
667 if ((error = mpr_alloc_hw_queues(sc)) != 0)
668 break;
669 if ((error = mpr_alloc_replies(sc)) != 0)
670 break;
671 if ((error = mpr_alloc_requests(sc)) != 0)
672 break;
673 if ((error = mpr_alloc_queues(sc)) != 0)
674 break;
675 break;
676 }
677 if (error) {
678 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
679 "Failed to alloc queues with error %d\n", error);
680 mpr_free(sc);
681 return (error);
682 }
683
684 /* Always initialize the queues */
685 bzero(sc->free_queue, sc->fqdepth * 4);
686 mpr_init_queues(sc);
687
688 /*
689 * Always get the chip out of the reset state, but only panic if not
690 * attaching. If attaching and there is an error, that is handled by
691 * the OS.
692 */
693 error = mpr_transition_operational(sc);
694 if (error != 0) {
695 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
696 "transition to operational with error %d\n", error);
697 mpr_free(sc);
698 return (error);
699 }
700
701 /*
702 * Finish the queue initialization.
703 * These are set here instead of in mpr_init_queues() because the
704 * IOC resets these values during the state transition in
705 * mpr_transition_operational(). The free index is set to 1
706 * because the corresponding index in the IOC is set to 0, and the
707 * IOC treats the queues as full if both are set to the same value.
708 * Hence the reason that the queue can't hold all of the possible
709 * replies.
710 */
711 sc->replypostindex = 0;
712 mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
713 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
714
715 /*
716 * Attach the subsystems so they can prepare their event masks.
717 * XXX Should be dynamic so that IM/IR and user modules can attach
718 */
719 error = 0;
720 while (attaching) {
721 mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
722 if ((error = mpr_attach_log(sc)) != 0)
723 break;
724 if ((error = mpr_attach_sas(sc)) != 0)
725 break;
726 if ((error = mpr_attach_user(sc)) != 0)
727 break;
728 break;
729 }
730 if (error) {
731 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
732 "Failed to attach all subsystems: error %d\n", error);
733 mpr_free(sc);
734 return (error);
735 }
736
737 /*
738 * XXX If the number of MSI-X vectors changes during re-init, this
739 * won't see it and adjust.
740 */
741 if ((attaching || reallocating) && (error = mpr_pci_setup_interrupts(sc)) != 0) {
742 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
743 "Failed to setup interrupts\n");
744 mpr_free(sc);
745 return (error);
746 }
747
748 return (error);
749 }
750
751 /*
752  * This is called when memory is being freed (during detach, for example) and when
753 * buffers need to be reallocated due to a Diag Reset.
754 */
755 static void
756 mpr_iocfacts_free(struct mpr_softc *sc)
757 {
758 struct mpr_command *cm;
759 int i;
760
761 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
762
763 if (sc->free_busaddr != 0)
764 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
765 if (sc->free_queue != NULL)
766 bus_dmamem_free(sc->queues_dmat, sc->free_queue,
767 sc->queues_map);
768 if (sc->queues_dmat != NULL)
769 bus_dma_tag_destroy(sc->queues_dmat);
770
771 if (sc->chain_frames != NULL) {
772 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
773 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
774 sc->chain_map);
775 }
776 if (sc->chain_dmat != NULL)
777 bus_dma_tag_destroy(sc->chain_dmat);
778
779 if (sc->sense_busaddr != 0)
780 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
781 if (sc->sense_frames != NULL)
782 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
783 sc->sense_map);
784 if (sc->sense_dmat != NULL)
785 bus_dma_tag_destroy(sc->sense_dmat);
786
787 if (sc->prp_page_busaddr != 0)
788 bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
789 if (sc->prp_pages != NULL)
790 bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
791 sc->prp_page_map);
792 if (sc->prp_page_dmat != NULL)
793 bus_dma_tag_destroy(sc->prp_page_dmat);
794
795 if (sc->reply_busaddr != 0)
796 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
797 if (sc->reply_frames != NULL)
798 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
799 sc->reply_map);
800 if (sc->reply_dmat != NULL)
801 bus_dma_tag_destroy(sc->reply_dmat);
802
803 if (sc->req_busaddr != 0)
804 bus_dmamap_unload(sc->req_dmat, sc->req_map);
805 if (sc->req_frames != NULL)
806 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
807 if (sc->req_dmat != NULL)
808 bus_dma_tag_destroy(sc->req_dmat);
809
810 if (sc->chains != NULL)
811 free(sc->chains, M_MPR);
812 if (sc->prps != NULL)
813 free(sc->prps, M_MPR);
814 if (sc->commands != NULL) {
815 for (i = 1; i < sc->num_reqs; i++) {
816 cm = &sc->commands[i];
817 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
818 }
819 free(sc->commands, M_MPR);
820 }
821 if (sc->buffer_dmat != NULL)
822 bus_dma_tag_destroy(sc->buffer_dmat);
823
824 mpr_pci_free_interrupts(sc);
825 free(sc->queues, M_MPR);
826 sc->queues = NULL;
827 }
828
829 /*
830 * The terms diag reset and hard reset are used interchangeably in the MPI
831 * docs to mean resetting the controller chip. In this code diag reset
832 * cleans everything up, and the hard reset function just sends the reset
833 * sequence to the chip. This should probably be refactored so that every
834 * subsystem gets a reset notification of some sort, and can clean up
835 * appropriately.
836 */
837 int
838 mpr_reinit(struct mpr_softc *sc)
839 {
840 int error;
841 struct mprsas_softc *sassc;
842
843 sassc = sc->sassc;
844
845 MPR_FUNCTRACE(sc);
846
847 mtx_assert(&sc->mpr_mtx, MA_OWNED);
848
849 mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
850 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
851 mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
852 return 0;
853 }
854
855 /*
856 * Make sure the completion callbacks can recognize they're getting
857 * a NULL cm_reply due to a reset.
858 */
859 sc->mpr_flags |= MPR_FLAGS_DIAGRESET;
860
861 /*
862 * Mask interrupts here.
863 */
864 mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
865 mpr_mask_intr(sc);
866
867 error = mpr_diag_reset(sc, CAN_SLEEP);
868 if (error != 0) {
869 panic("%s hard reset failed with error %d\n", __func__, error);
870 }
871
872 /* Restore the PCI state, including the MSI-X registers */
873 mpr_pci_restore(sc);
874
875 /* Give the I/O subsystem special priority to get itself prepared */
876 mprsas_handle_reinit(sc);
877
878 /*
879 * Get IOC Facts and allocate all structures based on this information.
880 * The attach function will also call mpr_iocfacts_allocate at startup.
881 * If relevant values have changed in IOC Facts, this function will free
882 * all of the memory based on IOC Facts and reallocate that memory.
883 */
884 if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
885 panic("%s IOC Facts based allocation failed with error %d\n",
886 __func__, error);
887 }
888
889 /*
890 * Mapping structures will be re-allocated after getting IOC Page8, so
891 * free these structures here.
892 */
893 mpr_mapping_exit(sc);
894
895 /*
896  * The only static config page currently read is IOC Page8.  Others can
897  * be added in the future.  It's possible that the values in IOC Page8 have
898 * changed after a Diag Reset due to user modification, so always read
899 * these. Interrupts are masked, so unmask them before getting config
900 * pages.
901 */
902 mpr_unmask_intr(sc);
903 sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
904 mpr_base_static_config_pages(sc);
905
906 /*
907 * Some mapping info is based in IOC Page8 data, so re-initialize the
908 * mapping tables.
909 */
910 mpr_mapping_initialize(sc);
911
912 /*
913 * Restart will reload the event masks clobbered by the reset, and
914 * then enable the port.
915 */
916 mpr_reregister_events(sc);
917
918 /* the end of discovery will release the simq, so we're done. */
919 mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
920 sc, sc->replypostindex, sc->replyfreeindex);
921 mprsas_release_simq_reinit(sassc);
922 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
923
924 return 0;
925 }
926
927 /* Wait for the chip to ACK a word that we've put into its FIFO.
928  * Wait for up to <timeout> seconds.  In the non-sleeping case each pass
929  * through the loop busy-waits for 500 microseconds, so the total wait is
930  * [ 0.5 * (2000 * <timeout>) ] milliseconds.
931  */
932 static int
933 mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
934 {
935 u32 cntdn, count;
936 u32 int_status;
937 u32 doorbell;
938
939 count = 0;
940 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
941 do {
942 int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
943 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
944 mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
945 "timeout(%d)\n", __func__, count, timeout);
946 return 0;
947 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
948 doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
949 if ((doorbell & MPI2_IOC_STATE_MASK) ==
950 MPI2_IOC_STATE_FAULT) {
951 mpr_dprint(sc, MPR_FAULT,
952 "fault_state(0x%04x)!\n", doorbell);
953 return (EFAULT);
954 }
955 } else if (int_status == 0xFFFFFFFF)
956 goto out;
957
958 /*
959  * If it can sleep, sleep for 1 millisecond, else busy-wait for
960  * 0.5 milliseconds.
961 */
962 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
963 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
964 hz/1000);
965 else if (sleep_flag == CAN_SLEEP)
966 pause("mprdba", hz/1000);
967 else
968 DELAY(500);
969 count++;
970 } while (--cntdn);
971
972 out:
973 mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
974 "int_status(%x)!\n", __func__, count, int_status);
975 return (ETIMEDOUT);
976 }
977
978 /* Wait for the chip to signal that the next word in its FIFO can be fetched */
979 static int
980 mpr_wait_db_int(struct mpr_softc *sc)
981 {
982 int retry;
983
984 for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
985 if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
986 MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
987 return (0);
988 DELAY(2000);
989 }
990 return (ETIMEDOUT);
991 }
992
993 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
994 static int
995 mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
996 int req_sz, int reply_sz, int timeout)
997 {
998 uint32_t *data32;
999 uint16_t *data16;
1000 int i, count, ioc_sz, residual;
1001 int sleep_flags = CAN_SLEEP;
1002
1003 if (curthread->td_no_sleeping)
1004 sleep_flags = NO_SLEEP;
1005
1006 /* Step 1 */
1007 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1008
1009 /* Step 2 */
1010 if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
1011 return (EBUSY);
1012
1013 /* Step 3
1014  * Announce that a message is coming through the doorbell.  Messages
1015  * are pushed in 32-bit words, so round up if needed.
1016 */
1017 count = (req_sz + 3) / 4;
1018 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
1019 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
1020 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
1021
1022 /* Step 4 */
1023 if (mpr_wait_db_int(sc) ||
1024 (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
1025 mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
1026 return (ENXIO);
1027 }
1028 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1029 if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
1030 mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
1031 return (ENXIO);
1032 }
1033
1034 /* Step 5 */
1035 /* Clock out the message data synchronously in 32-bit dwords */
1036 data32 = (uint32_t *)req;
1037 for (i = 0; i < count; i++) {
1038 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
1039 if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
1040 mpr_dprint(sc, MPR_FAULT,
1041 "Timeout while writing doorbell\n");
1042 return (ENXIO);
1043 }
1044 }
1045
1046 /* Step 6 */
1047 /* Clock in the reply in 16-bit words. The total length of the
1048  * message is always in the 4th byte, so clock in the first 2 words
1049  * manually, then loop the rest.
1050 */
1051 data16 = (uint16_t *)reply;
1052 if (mpr_wait_db_int(sc) != 0) {
1053 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
1054 return (ENXIO);
1055 }
1056
1057 /*
1058  * On a big-endian platform, swap bytes using le16toh so as not to
1059  * disturb the neighboring 8-bit fields in the destination structure
1060  * pointed to by data16.
1061 */
1062 data16[0] =
1063 le16toh(mpr_regread(sc, MPI2_DOORBELL_OFFSET)) & MPI2_DOORBELL_DATA_MASK;
1064 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1065 if (mpr_wait_db_int(sc) != 0) {
1066 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
1067 return (ENXIO);
1068 }
1069 data16[1] =
1070 le16toh(mpr_regread(sc, MPI2_DOORBELL_OFFSET)) & MPI2_DOORBELL_DATA_MASK;
1071 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1072
1073 /* Number of 32bit words in the message */
1074 ioc_sz = reply->MsgLength;
1075
1076 /*
1077 * Figure out how many 16bit words to clock in without overrunning.
1078 * The precision loss with dividing reply_sz can safely be
1079 * ignored because the messages can only be multiples of 32bits.
1080 */
1081 residual = 0;
1082 count = MIN((reply_sz / 4), ioc_sz) * 2;
1083 if (count < ioc_sz * 2) {
1084 residual = ioc_sz * 2 - count;
1085 mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
1086 "residual message words\n", residual);
1087 }
1088
1089 for (i = 2; i < count; i++) {
1090 if (mpr_wait_db_int(sc) != 0) {
1091 mpr_dprint(sc, MPR_FAULT,
1092 "Timeout reading doorbell %d\n", i);
1093 return (ENXIO);
1094 }
1095 data16[i] = le16toh(mpr_regread(sc, MPI2_DOORBELL_OFFSET)) &
1096 MPI2_DOORBELL_DATA_MASK;
1097 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1098 }
1099
1100 /*
1101 * Pull out residual words that won't fit into the provided buffer.
1102 * This keeps the chip from hanging due to a driver programming
1103 * error.
1104 */
1105 while (residual--) {
1106 if (mpr_wait_db_int(sc) != 0) {
1107 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
1108 return (ENXIO);
1109 }
1110 (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
1111 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1112 }
1113
1114 /* Step 7 */
1115 if (mpr_wait_db_int(sc) != 0) {
1116 mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
1117 return (ENXIO);
1118 }
1119 if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
1120 mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
1121 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1122
1123 return (0);
1124 }
1125
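/*
 * Post a request descriptor to the IOC, using the single atomic descriptor
 * register when the controller supports it and the low/high register pair
 * otherwise.
 */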
1126 static void
1127 mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
1128 {
1129 request_descriptor_t rd;
1130
1131 MPR_FUNCTRACE(sc);
1132 mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
1133 cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
1134
1135 if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
1136 MPR_FLAGS_SHUTDOWN))
1137 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1138
1139 if (++sc->io_cmds_active > sc->io_cmds_highwater)
1140 sc->io_cmds_highwater++;
1141
1142 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
1143 ("command not busy, state = %u\n", cm->cm_state));
1144 cm->cm_state = MPR_CM_STATE_INQUEUE;
1145
1146 if (sc->atomic_desc_capable) {
1147 rd.u.low = cm->cm_desc.Words.Low;
1148 mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
1149 rd.u.low);
1150 } else {
1151 rd.u.low = htole32(cm->cm_desc.Words.Low);
1152 rd.u.high = htole32(cm->cm_desc.Words.High);
1153 mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
1154 rd.u.low);
1155 mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
1156 rd.u.high);
1157 }
1158 }
1159
1160 /*
1161  * IOC Facts are read in 16-bit words and stored with le16toh, which
1162  * takes care of the endianness of the U8 fields in
1163  * MPI2_IOC_FACTS_REPLY, but we still need to swap back the wider fields.
1164 */
1165 static void
1166 adjust_iocfacts_endianness(MPI2_IOC_FACTS_REPLY *facts)
1167 {
1168 facts->HeaderVersion = le16toh(facts->HeaderVersion);
1169 facts->Reserved1 = le16toh(facts->Reserved1);
1170 facts->IOCExceptions = le16toh(facts->IOCExceptions);
1171 facts->IOCStatus = le16toh(facts->IOCStatus);
1172 facts->IOCLogInfo = le32toh(facts->IOCLogInfo);
1173 facts->RequestCredit = le16toh(facts->RequestCredit);
1174 facts->ProductID = le16toh(facts->ProductID);
1175 facts->IOCCapabilities = le32toh(facts->IOCCapabilities);
1176 facts->IOCRequestFrameSize = le16toh(facts->IOCRequestFrameSize);
1177 facts->IOCMaxChainSegmentSize = le16toh(facts->IOCMaxChainSegmentSize);
1178 facts->MaxInitiators = le16toh(facts->MaxInitiators);
1179 facts->MaxTargets = le16toh(facts->MaxTargets);
1180 facts->MaxSasExpanders = le16toh(facts->MaxSasExpanders);
1181 facts->MaxEnclosures = le16toh(facts->MaxEnclosures);
1182 facts->ProtocolFlags = le16toh(facts->ProtocolFlags);
1183 facts->HighPriorityCredit = le16toh(facts->HighPriorityCredit);
1184 facts->MaxReplyDescriptorPostQueueDepth = le16toh(facts->MaxReplyDescriptorPostQueueDepth);
1185 facts->MaxDevHandle = le16toh(facts->MaxDevHandle);
1186 facts->MaxPersistentEntries = le16toh(facts->MaxPersistentEntries);
1187 facts->MinDevHandle = le16toh(facts->MinDevHandle);
1188 }
1189
1190 /*
1191 * Just the FACTS, ma'am.
1192 */
1193 static int
1194 mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
1195 {
1196 MPI2_DEFAULT_REPLY *reply;
1197 MPI2_IOC_FACTS_REQUEST request;
1198 int error, req_sz, reply_sz, retry = 0;
1199
1200 MPR_FUNCTRACE(sc);
1201 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
1202
1203 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
1204 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
1205 reply = (MPI2_DEFAULT_REPLY *)facts;
1206
1207 /*
1208 * Retry sending the initialization sequence. Sometimes, especially with
1209 * older firmware, the initialization process fails. Retrying allows the
1210 * error to clear in the firmware.
1211 */
1212 bzero(&request, req_sz);
1213 request.Function = MPI2_FUNCTION_IOC_FACTS;
1214 while (retry < 5) {
1215 error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
1216 if (error == 0)
1217 break;
1218 mpr_dprint(sc, MPR_FAULT, "%s failed retry %d\n", __func__, retry);
1219 DELAY(1000);
1220 retry++;
1221 }
1222
1223 if (error == 0) {
1224 adjust_iocfacts_endianness(facts);
1225 mpr_dprint(sc, MPR_TRACE, "facts->IOCCapabilities 0x%x\n", facts->IOCCapabilities);
1226 }
1227 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
1228 return (error);
1229 }
1230
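/*
 * Build and send the IOCInit request over the doorbell, telling the IOC
 * where the request frames, reply frames, and reply queues live in memory.
 */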
1231 static int
1232 mpr_send_iocinit(struct mpr_softc *sc)
1233 {
1234 MPI2_IOC_INIT_REQUEST init;
1235 MPI2_DEFAULT_REPLY reply;
1236 int req_sz, reply_sz, error;
1237 struct timeval now;
1238 uint64_t time_in_msec;
1239
1240 MPR_FUNCTRACE(sc);
1241 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
1242
1243 /* Do a quick sanity check on proper initialization */
1244 if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
1245 || (sc->replyframesz == 0)) {
1246 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
1247 "Driver not fully initialized for IOCInit\n");
1248 return (EINVAL);
1249 }
1250
1251 req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
1252 reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
1253 bzero(&init, req_sz);
1254 bzero(&reply, reply_sz);
1255
1256 /*
1257 * Fill in the init block. Note that most addresses are
1258 * deliberately in the lower 32bits of memory. This is a micro-
1259  * optimization for PCI/PCI-X, though it's not clear if it helps PCIe.
1260 */
1261 init.Function = MPI2_FUNCTION_IOC_INIT;
1262 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1263 init.MsgVersion = htole16(MPI2_VERSION);
1264 init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
1265 init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
1266 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
1267 init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
1268 init.SenseBufferAddressHigh = 0;
1269 init.SystemReplyAddressHigh = 0;
1270 init.SystemRequestFrameBaseAddress.High = 0;
1271 init.SystemRequestFrameBaseAddress.Low =
1272 htole32((uint32_t)sc->req_busaddr);
1273 init.ReplyDescriptorPostQueueAddress.High = 0;
1274 init.ReplyDescriptorPostQueueAddress.Low =
1275 htole32((uint32_t)sc->post_busaddr);
1276 init.ReplyFreeQueueAddress.High = 0;
1277 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
1278 getmicrotime(&now);
1279 time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
1280 init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
1281 init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
1282 init.HostPageSize = HOST_PAGE_SIZE_4K;
1283
1284 error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
1285 if ((le16toh(reply.IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
1286 error = ENXIO;
1287
1288 mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", le16toh(reply.IOCStatus));
1289 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
1290 return (error);
1291 }
1292
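/*
 * Simple busdma callback: store the physical address of the single loaded
 * segment in the caller-supplied bus_addr_t.
 */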
1293 void
1294 mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1295 {
1296 bus_addr_t *addr;
1297
1298 addr = arg;
1299 *addr = segs[0].ds_addr;
1300 }
1301
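/*
 * busdma callback for loads that may be waited on: record the result, wake
 * the waiter if it is still around, and clean up if the load failed or the
 * waiter abandoned the request.
 */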
1302 void
1303 mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1304 {
1305 struct mpr_busdma_context *ctx;
1306 int need_unload, need_free;
1307
1308 ctx = (struct mpr_busdma_context *)arg;
1309 need_unload = 0;
1310 need_free = 0;
1311
1312 mpr_lock(ctx->softc);
1313 ctx->error = error;
1314 ctx->completed = 1;
1315 if ((error == 0) && (ctx->abandoned == 0)) {
1316 *ctx->addr = segs[0].ds_addr;
1317 } else {
1318 if (nsegs != 0)
1319 need_unload = 1;
1320 if (ctx->abandoned != 0)
1321 need_free = 1;
1322 }
1323 if (need_free == 0)
1324 wakeup(ctx);
1325
1326 mpr_unlock(ctx->softc);
1327
1328 if (need_unload != 0) {
1329 bus_dmamap_unload(ctx->buffer_dmat,
1330 ctx->buffer_dmamap);
1331 *ctx->addr = 0;
1332 }
1333
1334 if (need_free != 0)
1335 free(ctx, M_MPR);
1336 }
1337
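/*
 * Allocate the per-interrupt queue bookkeeping structures, one for each
 * MSI-X vector in use.
 */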
1338 static int
1339 mpr_alloc_queues(struct mpr_softc *sc)
1340 {
1341 struct mpr_queue *q;
1342 int nq, i;
1343
1344 nq = sc->msi_msgs;
1345 mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);
1346
1347 sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR,
1348 M_NOWAIT|M_ZERO);
1349 if (sc->queues == NULL)
1350 return (ENOMEM);
1351
1352 for (i = 0; i < nq; i++) {
1353 q = &sc->queues[i];
1354 mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
1355 q->sc = sc;
1356 q->qnum = i;
1357 }
1358 return (0);
1359 }
1360
1361 static int
1362 mpr_alloc_hw_queues(struct mpr_softc *sc)
1363 {
1364 bus_dma_template_t t;
1365 bus_addr_t queues_busaddr;
1366 uint8_t *queues;
1367 int qsize, fqsize, pqsize;
1368
1369 /*
1370 * The reply free queue contains 4 byte entries in multiples of 16 and
1371 * aligned on a 16 byte boundary. There must always be an unused entry.
1372 * This queue supplies fresh reply frames for the firmware to use.
1373 *
1374 * The reply descriptor post queue contains 8 byte entries in
1375 * multiples of 16 and aligned on a 16 byte boundary. This queue
1376 * contains filled-in reply frames sent from the firmware to the host.
1377 *
1378 * These two queues are allocated together for simplicity.
1379 */
1380 sc->fqdepth = roundup2(sc->num_replies + 1, 16);
1381 sc->pqdepth = roundup2(sc->num_replies + 1, 16);
1382 fqsize= sc->fqdepth * 4;
1383 pqsize = sc->pqdepth * 8;
1384 qsize = fqsize + pqsize;
1385
1386 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1387 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(16), BD_MAXSIZE(qsize),
1388 BD_MAXSEGSIZE(qsize), BD_NSEGMENTS(1),
1389 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1390 if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
1391 mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
1392 return (ENOMEM);
1393 }
1394 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
1395 &sc->queues_map)) {
1396 mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
1397 return (ENOMEM);
1398 }
1399 bzero(queues, qsize);
1400 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
1401 mpr_memaddr_cb, &queues_busaddr, 0);
1402
1403 sc->free_queue = (uint32_t *)queues;
1404 sc->free_busaddr = queues_busaddr;
1405 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
1406 sc->post_busaddr = queues_busaddr + fqsize;
1407 mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
1408 (uintmax_t)sc->free_busaddr, fqsize);
1409 mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
1410 (uintmax_t)sc->post_busaddr, pqsize);
1411
1412 return (0);
1413 }
1414
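/*
 * Allocate DMA-able memory for the reply frames that are handed to the
 * firmware through the reply free queue.
 */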
1415 static int
1416 mpr_alloc_replies(struct mpr_softc *sc)
1417 {
1418 bus_dma_template_t t;
1419 int rsize, num_replies;
1420
1421 /* Store the reply frame size in bytes rather than as 32bit words */
1422 sc->replyframesz = sc->facts->ReplyFrameSize * 4;
1423
1424 /*
1425 * sc->num_replies should be one less than sc->fqdepth. We need to
1426 * allocate space for sc->fqdepth replies, but only sc->num_replies
1427 * replies can be used at once.
1428 */
1429 num_replies = max(sc->fqdepth, sc->num_replies);
1430
1431 rsize = sc->replyframesz * num_replies;
1432 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1433 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(4), BD_MAXSIZE(rsize),
1434 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS(1),
1435 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1436 if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
1437 mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
1438 return (ENOMEM);
1439 }
1440 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
1441 BUS_DMA_NOWAIT, &sc->reply_map)) {
1442 mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
1443 return (ENOMEM);
1444 }
1445 bzero(sc->reply_frames, rsize);
1446 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
1447 mpr_memaddr_cb, &sc->reply_busaddr, 0);
1448 mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
1449 (uintmax_t)sc->reply_busaddr, rsize);
1450
1451 return (0);
1452 }
1453
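/*
 * busdma callback for the chain frame allocation: carve the loaded segments
 * into chain_frame_size pieces and put each piece on the free chain list.
 */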
1454 static void
1455 mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1456 {
1457 struct mpr_softc *sc = arg;
1458 struct mpr_chain *chain;
1459 bus_size_t bo;
1460 int i, o, s;
1461
1462 if (error != 0)
1463 return;
1464
1465 for (i = 0, o = 0, s = 0; s < nsegs; s++) {
1466 for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len;
1467 bo += sc->chain_frame_size) {
1468 chain = &sc->chains[i++];
1469 chain->chain =(MPI2_SGE_IO_UNION *)(sc->chain_frames+o);
1470 chain->chain_busaddr = segs[s].ds_addr + bo;
1471 o += sc->chain_frame_size;
1472 mpr_free_chain(sc, chain);
1473 }
1474 if (bo != segs[s].ds_len)
1475 o += segs[s].ds_len - bo;
1476 }
1477 sc->chain_free_lowwater = i;
1478 }
1479
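/*
 * Allocate DMA-able memory for request frames, chain frames, and sense
 * buffers, then build the per-SMID command array that ties them together.
 */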
1480 static int
1481 mpr_alloc_requests(struct mpr_softc *sc)
1482 {
1483 bus_dma_template_t t;
1484 struct mpr_command *cm;
1485 int i, rsize, nsegs;
1486
1487 rsize = sc->reqframesz * sc->num_reqs;
1488 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1489 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(16), BD_MAXSIZE(rsize),
1490 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS(1),
1491 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1492 if (bus_dma_template_tag(&t, &sc->req_dmat)) {
1493 mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
1494 return (ENOMEM);
1495 }
1496 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
1497 BUS_DMA_NOWAIT, &sc->req_map)) {
1498 mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
1499 return (ENOMEM);
1500 }
1501 bzero(sc->req_frames, rsize);
1502 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
1503 mpr_memaddr_cb, &sc->req_busaddr, 0);
1504 mpr_dprint(sc, MPR_INIT, "request frames busaddr= %#016jx size= %d\n",
1505 (uintmax_t)sc->req_busaddr, rsize);
1506
1507 sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR,
1508 M_NOWAIT | M_ZERO);
1509 if (!sc->chains) {
1510 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
1511 return (ENOMEM);
1512 }
1513 rsize = sc->chain_frame_size * sc->num_chains;
1514 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1515 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(16), BD_MAXSIZE(rsize),
1516 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS((howmany(rsize, PAGE_SIZE))),
1517 BD_BOUNDARY(BUS_SPACE_MAXSIZE_32BIT+1));
1518 if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
1519 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
1520 return (ENOMEM);
1521 }
1522 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
1523 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
1524 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
1525 return (ENOMEM);
1526 }
1527 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
1528 rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
1529 mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n");
1530 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
1531 sc->chain_map);
1532 return (ENOMEM);
1533 }
1534
1535 rsize = MPR_SENSE_LEN * sc->num_reqs;
1536 bus_dma_template_clone(&t, sc->req_dmat);
1537 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(1), BD_MAXSIZE(rsize),
1538 BD_MAXSEGSIZE(rsize));
1539 if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
1540 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
1541 return (ENOMEM);
1542 }
1543 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
1544 BUS_DMA_NOWAIT, &sc->sense_map)) {
1545 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
1546 return (ENOMEM);
1547 }
1548 bzero(sc->sense_frames, rsize);
1549 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
1550 mpr_memaddr_cb, &sc->sense_busaddr, 0);
1551 mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n",
1552 (uintmax_t)sc->sense_busaddr, rsize);
1553
1554 /*
1555 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
1556 * these devices.
1557 */
1558 if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
1559 (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
1560 if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
1561 return (ENOMEM);
1562 }
1563
1564 nsegs = (sc->maxio / PAGE_SIZE) + 1;
1565 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1566 BUS_DMA_TEMPLATE_FILL(&t, BD_MAXSIZE(BUS_SPACE_MAXSIZE_32BIT),
1567 BD_NSEGMENTS(nsegs), BD_MAXSEGSIZE(BUS_SPACE_MAXSIZE_32BIT),
1568 BD_FLAGS(BUS_DMA_ALLOCNOW), BD_LOCKFUNC(busdma_lock_mutex),
1569 BD_LOCKFUNCARG(&sc->mpr_mtx),
1570 BD_BOUNDARY(BUS_SPACE_MAXSIZE_32BIT+1));
1571 if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
1572 mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
1573 return (ENOMEM);
1574 }
1575
1576 /*
1577 * SMID 0 cannot be used as a free command per the firmware spec.
1578 * Just drop that command instead of risking accounting bugs.
1579 */
1580 sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
1581 M_MPR, M_WAITOK | M_ZERO);
1582 for (i = 1; i < sc->num_reqs; i++) {
1583 cm = &sc->commands[i];
1584 cm->cm_req = sc->req_frames + i * sc->reqframesz;
1585 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
1586 cm->cm_sense = &sc->sense_frames[i];
1587 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
1588 cm->cm_desc.Default.SMID = htole16(i);
1589 cm->cm_sc = sc;
1590 cm->cm_state = MPR_CM_STATE_BUSY;
1591 TAILQ_INIT(&cm->cm_chain_list);
1592 TAILQ_INIT(&cm->cm_prp_page_list);
1593 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);
1594
1595 /* XXX Is a failure here a critical problem? */
1596 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
1597 == 0) {
1598 if (i <= sc->num_prireqs)
1599 mpr_free_high_priority_command(sc, cm);
1600 else
1601 mpr_free_command(sc, cm);
1602 } else {
1603 panic("failed to allocate command %d\n", i);
1604 sc->num_reqs = i;
1605 break;
1606 }
1607 }
1608
1609 return (0);
1610 }
1611
1612 /*
1613 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
1614 * which are scatter/gather lists for NVMe devices.
1615 *
1616 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
1617 * and translated by FW.
1618 *
1619 * returns ENOMEM if memory could not be allocated, otherwise returns 0.
1620 */
1621 static int
1622 mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
1623 {
1624 bus_dma_template_t t;
1625 struct mpr_prp_page *prp_page;
1626 int PRPs_per_page, PRPs_required, pages_required;
1627 int rsize, i;
1628
1629 /*
1630 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
1631 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
1632 * MAX_IO_SIZE / PAGE_SIZE = 256
1633 *
1634 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
1635 * required for the remainder of the 1MB I/O. 512 PRPs can fit into one
1636 * page (4096 / 8 = 512), so only one page is required for each I/O.
1637 *
1638 * Each of these buffers will need to be contiguous. For simplicity,
1639 * only one buffer is allocated here, which has all of the space
1640 * required for the NVMe Queue Depth. If there are problems allocating
1641 * this one buffer, this function will need to change to allocate
1642 * individual, contiguous NVME_QDEPTH buffers.
1643 *
1644 * The real calculation will use the real max io size. Above is just an
1645 * example.
1646 *
1647 */
1648 PRPs_required = sc->maxio / PAGE_SIZE;
1649 PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
1650 pages_required = (PRPs_required / PRPs_per_page) + 1;
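/*
* Worked example (illustrative values only): with maxio = 1 MiB and
* PAGE_SIZE = 4 KiB, PRPs_required = 256. One entry per page is reserved
* for the PRP list pointer, so PRPs_per_page = 511 and
* pages_required = (256 / 511) + 1 = 1, i.e. a single 4 KiB PRP page
* per I/O.
*/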
1651
1652 sc->prp_buffer_size = PAGE_SIZE * pages_required;
1653 rsize = sc->prp_buffer_size * NVME_QDEPTH;
1654 bus_dma_template_init(&t, sc->mpr_parent_dmat);
1655 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(4), BD_MAXSIZE(rsize),
1656 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS(1),
1657 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1658 if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) {
1659 mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
1660 "tag\n");
1661 return (ENOMEM);
1662 }
1663 if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
1664 BUS_DMA_NOWAIT, &sc->prp_page_map)) {
1665 mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
1666 return (ENOMEM);
1667 }
1668 bzero(sc->prp_pages, rsize);
1669 bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
1670 rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);
1671
1672 sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
1673 M_WAITOK | M_ZERO);
1674 for (i = 0; i < NVME_QDEPTH; i++) {
1675 prp_page = &sc->prps[i];
1676 prp_page->prp_page = (uint64_t *)(sc->prp_pages +
1677 i * sc->prp_buffer_size);
1678 prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
1679 i * sc->prp_buffer_size);
1680 mpr_free_prp_page(sc, prp_page);
1681 sc->prp_pages_free_lowwater++;
1682 }
1683
1684 return (0);
1685 }
1686
1687 static int
1688 mpr_init_queues(struct mpr_softc *sc)
1689 {
1690 int i;
1691
1692 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
1693
1694 /*
1695 * According to the spec, we need to use one less reply than we
1696 * have space for on the queue. So sc->num_replies (the number we
1697 * use) should be less than sc->fqdepth (allocated size).
1698 */
1699 if (sc->num_replies >= sc->fqdepth)
1700 return (EINVAL);
1701
1702 /*
1703 * Initialize all of the free queue entries.
1704 */
1705 for (i = 0; i < sc->fqdepth; i++) {
1706 sc->free_queue[i] = htole32(sc->reply_busaddr + (i * sc->replyframesz));
1707 }
1708 sc->replyfreeindex = sc->num_replies;
1709
1710 return (0);
1711 }
1712
1713 /* Get the driver parameter tunables. Lowest priority are the driver defaults.
1714 * Next are the global settings, if they exist. Highest are the per-unit
1715 * settings, if they exist.
1716 */
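/*
* Example (illustrative): in /boot/loader.conf a global setting and a
* per-unit override might look like
*   hw.mpr.max_chains="4096"
*   dev.mpr.0.debug_level="info,fault,error"
* The dev.mpr.0 value wins for unit 0; other units fall back to the
* hw.mpr value and finally to the driver defaults set below.
*/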
1717 void
1718 mpr_get_tunables(struct mpr_softc *sc)
1719 {
1720 char tmpstr[80], mpr_debug[80];
1721
1722 /* XXX default to some debugging for now */
1723 sc->mpr_debug = MPR_INFO | MPR_FAULT;
1724 sc->disable_msix = 0;
1725 sc->disable_msi = 0;
1726 sc->max_msix = MPR_MSIX_MAX;
1727 sc->max_chains = MPR_CHAIN_FRAMES;
1728 sc->max_io_pages = MPR_MAXIO_PAGES;
1729 sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
1730 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
1731 sc->use_phynum = 1;
1732 sc->encl_min_slots = 0;
1733 sc->max_reqframes = MPR_REQ_FRAMES;
1734 sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
1735 sc->max_replyframes = MPR_REPLY_FRAMES;
1736 sc->max_evtframes = MPR_EVT_REPLY_FRAMES;
1737
1738 /*
1739 * Grab the global variables.
1740 */
1741 bzero(mpr_debug, 80);
1742 if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0)
1743 mpr_parse_debug(sc, mpr_debug);
1744 TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
1745 TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
1746 TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix);
1747 TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
1748 TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
1749 TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
1750 TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
1751 TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
1752 TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots);
1753 TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
1754 TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
1755 TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
1756 TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);
1757
1758 /* Grab the unit-instance variables */
1759 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
1760 device_get_unit(sc->mpr_dev));
1761 bzero(mpr_debug, 80);
1762 if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0)
1763 mpr_parse_debug(sc, mpr_debug);
1764
1765 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
1766 device_get_unit(sc->mpr_dev));
1767 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
1768
1769 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
1770 device_get_unit(sc->mpr_dev));
1771 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
1772
1773 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix",
1774 device_get_unit(sc->mpr_dev));
1775 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);
1776
1777 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
1778 device_get_unit(sc->mpr_dev));
1779 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
1780
1781 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
1782 device_get_unit(sc->mpr_dev));
1783 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
1784
1785 bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
1786 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
1787 device_get_unit(sc->mpr_dev));
1788 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
1789
1790 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
1791 device_get_unit(sc->mpr_dev));
1792 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
1793
1794 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
1795 device_get_unit(sc->mpr_dev));
1796 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
1797
1798 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
1799 device_get_unit(sc->mpr_dev));
1800 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
1801
1802 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots",
1803 device_get_unit(sc->mpr_dev));
1804 TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots);
1805
1806 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
1807 device_get_unit(sc->mpr_dev));
1808 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
1809
1810 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
1811 device_get_unit(sc->mpr_dev));
1812 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
1813
1814 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
1815 device_get_unit(sc->mpr_dev));
1816 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
1817
1818 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
1819 device_get_unit(sc->mpr_dev));
1820 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
1821 }
1822
1823 static void
1824 mpr_setup_sysctl(struct mpr_softc *sc)
1825 {
1826 struct sysctl_ctx_list *sysctl_ctx = NULL;
1827 struct sysctl_oid *sysctl_tree = NULL;
1828 char tmpstr[80], tmpstr2[80];
1829
1830 /*
1831 * Setup the sysctl variable so the user can change the debug level
1832 * on the fly.
1833 */
1834 snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
1835 device_get_unit(sc->mpr_dev));
1836 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));
1837
1838 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
1839 if (sysctl_ctx != NULL)
1840 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);
1841
1842 if (sysctl_tree == NULL) {
1843 sysctl_ctx_init(&sc->sysctl_ctx);
1844 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1845 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
1846 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
1847 if (sc->sysctl_tree == NULL)
1848 return;
1849 sysctl_ctx = &sc->sysctl_ctx;
1850 sysctl_tree = sc->sysctl_tree;
1851 }
1852
1853 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1854 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1855 sc, 0, mpr_debug_sysctl, "A", "mpr debug level");
1856
1857 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1858 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
1859 "Disable the use of MSI-X interrupts");
1860
1861 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1862 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
1863 "User-defined maximum number of MSIX queues");
1864
1865 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1866 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
1867 "Negotiated number of MSIX queues");
1868
1869 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1870 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
1871 "Total number of allocated request frames");
1872
1873 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1874 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
1875 "Total number of allocated high priority request frames");
1876
1877 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1878 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
1879 "Total number of allocated reply frames");
1880
1881 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1882 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
1883 "Total number of event frames allocated");
1884
1885 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1886 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version,
1887 strlen(sc->fw_version), "firmware version");
1888
1889 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1890 OID_AUTO, "driver_version", CTLFLAG_RD, MPR_DRIVER_VERSION,
1891 strlen(MPR_DRIVER_VERSION), "driver version");
1892
1893 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1894 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version,
1895 strlen(sc->msg_version), "message interface version (deprecated)");
1896
1897 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1898 OID_AUTO, "io_cmds_active", CTLFLAG_RD,
1899 &sc->io_cmds_active, 0, "number of currently active commands");
1900
1901 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1902 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
1903 &sc->io_cmds_highwater, 0, "maximum active commands seen");
1904
1905 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1906 OID_AUTO, "chain_free", CTLFLAG_RD,
1907 &sc->chain_free, 0, "number of free chain elements");
1908
1909 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1910 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
1911 &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
1912
1913 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1914 OID_AUTO, "max_chains", CTLFLAG_RD,
1915 &sc->max_chains, 0,"maximum chain frames that will be allocated");
1916
1917 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1918 OID_AUTO, "max_io_pages", CTLFLAG_RD,
1919 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use "
1920 "IOCFacts)");
1921
1922 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1923 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
1924 "enable SSU to SATA SSD/HDD at shutdown");
1925
1926 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1927 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
1928 &sc->chain_alloc_fail, "chain allocation failures");
1929
1930 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1931 OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
1932 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
1933 "spinup after SATA ID error");
1934
1935 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1936 OID_AUTO, "dump_reqs",
1937 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
1938 sc, 0, mpr_dump_reqs, "I", "Dump Active Requests");
1939
1940 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1941 OID_AUTO, "dump_reqs_alltypes", CTLFLAG_RW,
1942 &sc->dump_reqs_alltypes, 0,
1943 "dump all request types not just inqueue");
1944
1945 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1946 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
1947 "Use the phy number for enumeration");
1948
1949 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1950 OID_AUTO, "prp_pages_free", CTLFLAG_RD,
1951 &sc->prp_pages_free, 0, "number of free PRP pages");
1952
1953 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1954 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
1955 &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages");
1956
1957 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1958 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
1959 &sc->prp_page_alloc_fail, "PRP page allocation failures");
1960
1961 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1962 OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0,
1963 "force enclosure minimum slots");
1964 }
1965
1966 static struct mpr_debug_string {
1967 char *name;
1968 int flag;
1969 } mpr_debug_strings[] = {
1970 {"info", MPR_INFO},
1971 {"fault", MPR_FAULT},
1972 {"event", MPR_EVENT},
1973 {"log", MPR_LOG},
1974 {"recovery", MPR_RECOVERY},
1975 {"error", MPR_ERROR},
1976 {"init", MPR_INIT},
1977 {"xinfo", MPR_XINFO},
1978 {"user", MPR_USER},
1979 {"mapping", MPR_MAPPING},
1980 {"trace", MPR_TRACE}
1981 };
1982
1983 enum mpr_debug_level_combiner {
1984 COMB_NONE,
1985 COMB_ADD,
1986 COMB_SUB
1987 };
1988
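/*
* Usage example (illustrative) for the debug_level sysctl handled below:
*   sysctl dev.mpr.0.debug_level=info,fault,error   (replace the mask)
*   sysctl dev.mpr.0.debug_level=+trace             (add a flag)
*   sysctl dev.mpr.0.debug_level=-trace             (remove a flag)
*   sysctl dev.mpr.0.debug_level=0x3                (raw hex mask)
* Tokens may be separated by ',' or ':' and are parsed by
* mpr_parse_debug().
*/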
1989 static int
1990 mpr_debug_sysctl(SYSCTL_HANDLER_ARGS)
1991 {
1992 struct mpr_softc *sc;
1993 struct mpr_debug_string *string;
1994 struct sbuf *sbuf;
1995 char *buffer;
1996 size_t sz;
1997 int i, len, debug, error;
1998
1999 sc = (struct mpr_softc *)arg1;
2000
2001 error = sysctl_wire_old_buffer(req, 0);
2002 if (error != 0)
2003 return (error);
2004
2005 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2006 debug = sc->mpr_debug;
2007
2008 sbuf_printf(sbuf, "%#x", debug);
2009
2010 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
2011 for (i = 0; i < sz; i++) {
2012 string = &mpr_debug_strings[i];
2013 if (debug & string->flag)
2014 sbuf_printf(sbuf, ",%s", string->name);
2015 }
2016
2017 error = sbuf_finish(sbuf);
2018 sbuf_delete(sbuf);
2019
2020 if (error || req->newptr == NULL)
2021 return (error);
2022
2023 len = req->newlen - req->newidx;
2024 if (len == 0)
2025 return (0);
2026
2027 buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK);
2028 error = SYSCTL_IN(req, buffer, len);
2029
2030 mpr_parse_debug(sc, buffer);
2031
2032 free(buffer, M_MPR);
2033 return (error);
2034 }
2035
2036 static void
2037 mpr_parse_debug(struct mpr_softc *sc, char *list)
2038 {
2039 struct mpr_debug_string *string;
2040 enum mpr_debug_level_combiner op;
2041 char *token, *endtoken;
2042 size_t sz;
2043 int flags, i;
2044
2045 if (list == NULL || *list == '\0')
2046 return;
2047
2048 if (*list == '+') {
2049 op = COMB_ADD;
2050 list++;
2051 } else if (*list == '-') {
2052 op = COMB_SUB;
2053 list++;
2054 } else
2055 op = COMB_NONE;
2056 if (*list == '\0')
2057 return;
2058
2059 flags = 0;
2060 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
2061 while ((token = strsep(&list, ":,")) != NULL) {
2062 /* Handle integer flags */
2063 flags |= strtol(token, &endtoken, 0);
2064 if (token != endtoken)
2065 continue;
2066
2067 /* Handle text flags */
2068 for (i = 0; i < sz; i++) {
2069 string = &mpr_debug_strings[i];
2070 if (strcasecmp(token, string->name) == 0) {
2071 flags |= string->flag;
2072 break;
2073 }
2074 }
2075 }
2076
2077 switch (op) {
2078 case COMB_NONE:
2079 sc->mpr_debug = flags;
2080 break;
2081 case COMB_ADD:
2082 sc->mpr_debug |= flags;
2083 break;
2084 case COMB_SUB:
2085 sc->mpr_debug &= (~flags);
2086 break;
2087 }
2088 return;
2089 }
2090
2091 struct mpr_dumpreq_hdr {
2092 uint32_t smid;
2093 uint32_t state;
2094 uint32_t numframes;
2095 uint32_t deschi;
2096 uint32_t desclo;
2097 };
2098
2099 static int
2100 mpr_dump_reqs(SYSCTL_HANDLER_ARGS)
2101 {
2102 struct mpr_softc *sc;
2103 struct mpr_chain *chain, *chain1;
2104 struct mpr_command *cm;
2105 struct mpr_dumpreq_hdr hdr;
2106 struct sbuf *sb;
2107 uint32_t smid, state;
2108 int i, numreqs, error = 0;
2109
2110 sc = (struct mpr_softc *)arg1;
2111
2112 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) {
2113 printf("priv check error %d\n", error);
2114 return (error);
2115 }
2116
2117 state = MPR_CM_STATE_INQUEUE;
2118 smid = 1;
2119 numreqs = sc->num_reqs;
2120
2121 if (req->newptr != NULL)
2122 return (EINVAL);
2123
2124 if (smid == 0 || smid > sc->num_reqs)
2125 return (EINVAL);
2126 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs))
2127 numreqs = sc->num_reqs;
2128 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
2129
2130 /* Best effort, no locking */
2131 for (i = smid; i < numreqs; i++) {
2132 cm = &sc->commands[i];
2133 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state))
2134 continue;
2135 hdr.smid = i;
2136 hdr.state = cm->cm_state;
2137 hdr.numframes = 1;
2138 hdr.deschi = cm->cm_desc.Words.High;
2139 hdr.desclo = cm->cm_desc.Words.Low;
2140 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2141 chain1)
2142 hdr.numframes++;
2143 sbuf_bcat(sb, &hdr, sizeof(hdr));
2144 sbuf_bcat(sb, cm->cm_req, 128);
2145 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2146 chain1)
2147 sbuf_bcat(sb, chain->chain, 128);
2148 }
2149
2150 error = sbuf_finish(sb);
2151 sbuf_delete(sb);
2152 return (error);
2153 }
2154
2155 int
2156 mpr_attach(struct mpr_softc *sc)
2157 {
2158 int error;
2159
2160 MPR_FUNCTRACE(sc);
2161 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2162
2163 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
2164 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
2165 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0);
2166 TAILQ_INIT(&sc->event_list);
2167 timevalclear(&sc->lastfail);
2168
2169 if ((error = mpr_transition_ready(sc)) != 0) {
2170 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
2171 "Failed to transition ready\n");
2172 return (error);
2173 }
2174
2175 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
2176 M_ZERO|M_NOWAIT);
2177 if (!sc->facts) {
2178 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
2179 "Cannot allocate memory, exit\n");
2180 return (ENOMEM);
2181 }
2182
2183 /*
2184 * Get IOC Facts and allocate all structures based on this information.
2185 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
2186 * Facts. If relevant values have changed in IOC Facts, this function
2187 * will free all of the memory based on IOC Facts and reallocate that
2188 * memory. If this fails, any allocated memory should already be freed.
2189 */
2190 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
2191 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation "
2192 "failed with error %d\n", error);
2193 return (error);
2194 }
2195
2196 /* Start the periodic watchdog check on the IOC Doorbell */
2197 mpr_periodic(sc);
2198
2199 /*
2200 * The portenable will kick off discovery events that will drive the
2201 * rest of the initialization process. The CAM/SAS module will
2202 * hold up the boot sequence until discovery is complete.
2203 */
2204 sc->mpr_ich.ich_func = mpr_startup;
2205 sc->mpr_ich.ich_arg = sc;
2206 if (config_intrhook_establish(&sc->mpr_ich) != 0) {
2207 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
2208 "Cannot establish MPR config hook\n");
2209 error = EINVAL;
2210 }
2211
2212 /*
2213 * Allow IR to shutdown gracefully when shutdown occurs.
2214 */
2215 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
2216 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
2217
2218 if (sc->shutdown_eh == NULL)
2219 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
2220 "shutdown event registration failed\n");
2221
2222 mpr_setup_sysctl(sc);
2223
2224 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;
2225 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
2226
2227 return (error);
2228 }
2229
2230 /* Run through any late-start handlers. */
2231 static void
2232 mpr_startup(void *arg)
2233 {
2234 struct mpr_softc *sc;
2235
2236 sc = (struct mpr_softc *)arg;
2237 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2238
2239 mpr_lock(sc);
2240 mpr_unmask_intr(sc);
2241
2242 /* initialize device mapping tables */
2243 mpr_base_static_config_pages(sc);
2244 mpr_mapping_initialize(sc);
2245 mprsas_startup(sc);
2246 mpr_unlock(sc);
2247
2248 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n");
2249 config_intrhook_disestablish(&sc->mpr_ich);
2250 sc->mpr_ich.ich_arg = NULL;
2251
2252 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2253 }
2254
2255 /* Periodic watchdog. Is called with the driver lock already held. */
2256 static void
2257 mpr_periodic(void *arg)
2258 {
2259 struct mpr_softc *sc;
2260 uint32_t db;
2261
2262 sc = (struct mpr_softc *)arg;
2263 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
2264 return;
2265
2266 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
2267 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2268 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
2269 IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
2270 panic("TEMPERATURE FAULT: STOPPING.");
2271 }
2272 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
2273 mpr_reinit(sc);
2274 }
2275
2276 callout_reset_sbt(&sc->periodic, MPR_PERIODIC_DELAY * SBT_1S, 0,
2277 mpr_periodic, sc, C_PREL(1));
2278 }
2279
2280 static void
2281 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
2282 MPI2_EVENT_NOTIFICATION_REPLY *event)
2283 {
2284 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
2285
2286 MPR_DPRINT_EVENT(sc, generic, event);
2287
2288 switch (event->Event) {
2289 case MPI2_EVENT_LOG_DATA:
2290 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
2291 if (sc->mpr_debug & MPR_EVENT)
2292 hexdump(event->EventData, event->EventDataLength, NULL,
2293 0);
2294 break;
2295 case MPI2_EVENT_LOG_ENTRY_ADDED:
2296 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
2297 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
2298 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
2299 entry->LogSequence);
2300 break;
2301 default:
2302 break;
2303 }
2304 return;
2305 }
2306
2307 static int
2308 mpr_attach_log(struct mpr_softc *sc)
2309 {
2310 uint8_t events[16];
2311
2312 bzero(events, 16);
2313 setbit(events, MPI2_EVENT_LOG_DATA);
2314 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
2315
2316 mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
2317 &sc->mpr_log_eh);
2318
2319 return (0);
2320 }
2321
2322 static int
2323 mpr_detach_log(struct mpr_softc *sc)
2324 {
2325
2326 if (sc->mpr_log_eh != NULL)
2327 mpr_deregister_events(sc, sc->mpr_log_eh);
2328 return (0);
2329 }
2330
2331 /*
2332 * Free all of the driver resources and detach submodules. Should be called
2333 * without the lock held.
2334 */
2335 int
2336 mpr_free(struct mpr_softc *sc)
2337 {
2338 int error;
2339
2340 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2341 /* Turn off the watchdog */
2342 mpr_lock(sc);
2343 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
2344 mpr_unlock(sc);
2345 /* Lock must not be held for this */
2346 callout_drain(&sc->periodic);
2347 callout_drain(&sc->device_check_callout);
2348
2349 if (((error = mpr_detach_log(sc)) != 0) ||
2350 ((error = mpr_detach_sas(sc)) != 0)) {
2351 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach "
2352 "subsystems, error= %d, exit\n", error);
2353 return (error);
2354 }
2355
2356 mpr_detach_user(sc);
2357
2358 /* Put the IOC back in the READY state. */
2359 mpr_lock(sc);
2360 if ((error = mpr_transition_ready(sc)) != 0) {
2361 mpr_unlock(sc);
2362 return (error);
2363 }
2364 mpr_unlock(sc);
2365
2366 if (sc->facts != NULL)
2367 free(sc->facts, M_MPR);
2368
2369 /*
2370 * Free all buffers that are based on IOC Facts. A Diag Reset may need
2371 * to free these buffers too.
2372 */
2373 mpr_iocfacts_free(sc);
2374
2375 if (sc->sysctl_tree != NULL)
2376 sysctl_ctx_free(&sc->sysctl_ctx);
2377
2378 /* Deregister the shutdown function */
2379 if (sc->shutdown_eh != NULL)
2380 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
2381
2382 mtx_destroy(&sc->mpr_mtx);
2383 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2384
2385 return (0);
2386 }
2387
2388 static __inline void
2389 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
2390 {
2391 MPR_FUNCTRACE(sc);
2392
2393 if (cm == NULL) {
2394 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
2395 return;
2396 }
2397
2398 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE,
2399 ("command not inqueue, state = %u\n", cm->cm_state));
2400 cm->cm_state = MPR_CM_STATE_BUSY;
2401 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
2402 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
2403
2404 if (cm->cm_complete != NULL) {
2405 mpr_dprint(sc, MPR_TRACE,
2406 "%s cm %p calling cm_complete %p data %p reply %p\n",
2407 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2408 cm->cm_reply);
2409 cm->cm_complete(sc, cm);
2410 }
2411
2412 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
2413 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
2414 wakeup(cm);
2415 }
2416
2417 if (sc->io_cmds_active != 0) {
2418 sc->io_cmds_active--;
2419 } else {
2420 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
2421 "out of sync - resynching to 0\n");
2422 }
2423 }
2424
2425 static void
2426 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info)
2427 {
2428 union loginfo_type {
2429 u32 loginfo;
2430 struct {
2431 u32 subcode:16;
2432 u32 code:8;
2433 u32 originator:4;
2434 u32 bus_type:4;
2435 } dw;
2436 };
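/*
* Decoding example (illustrative): log_info 0x31120101 has
* bus_type 3 (SAS), originator 1 (PL), code 0x12 and subcode 0x0101.
*/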
2437 union loginfo_type sas_loginfo;
2438 char *originator_str = NULL;
2439
2440 sas_loginfo.loginfo = log_info;
2441 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
2442 return;
2443
2444 /* eat the nexus loss loginfo */
2445 if (log_info == 0x31170000)
2446 return;
2447
2448 /* eat the loginfos associated with task aborts */
2449 if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
2450 (log_info == 0x31130000))
2451 return;
2452
2453 switch (sas_loginfo.dw.originator) {
2454 case 0:
2455 originator_str = "IOP";
2456 break;
2457 case 1:
2458 originator_str = "PL";
2459 break;
2460 case 2:
2461 originator_str = "IR";
2462 break;
2463 }
2464
2465 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), "
2466 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str,
2467 sas_loginfo.dw.code, sas_loginfo.dw.subcode);
2468 }
2469
2470 static void
2471 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
2472 {
2473 MPI2DefaultReply_t *mpi_reply;
2474 u16 sc_status;
2475
2476 mpi_reply = (MPI2DefaultReply_t*)reply;
2477 sc_status = le16toh(mpi_reply->IOCStatus);
2478 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
2479 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
2480 }
2481
2482 void
2483 mpr_intr(void *data)
2484 {
2485 struct mpr_softc *sc;
2486 uint32_t status;
2487
2488 sc = (struct mpr_softc *)data;
2489 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2490
2491 /*
2492 * Check interrupt status register to flush the bus. This is
2493 * needed for both INTx interrupts and driver-driven polling
2494 */
2495 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
2496 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
2497 return;
2498
2499 mpr_lock(sc);
2500 mpr_intr_locked(data);
2501 mpr_unlock(sc);
2502 return;
2503 }
2504
2505 /*
2506 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
2507 * chip. Hopefully this theory is correct.
2508 */
2509 void
2510 mpr_intr_msi(void *data)
2511 {
2512 struct mpr_softc *sc;
2513
2514 sc = (struct mpr_softc *)data;
2515 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2516 mpr_lock(sc);
2517 mpr_intr_locked(data);
2518 mpr_unlock(sc);
2519 return;
2520 }
2521
2522 /*
2523 * The locking is overly broad and simplistic, but easy to deal with for now.
2524 */
2525 void
2526 mpr_intr_locked(void *data)
2527 {
2528 MPI2_REPLY_DESCRIPTORS_UNION *desc;
2529 MPI2_DIAG_RELEASE_REPLY *rel_rep;
2530 mpr_fw_diagnostic_buffer_t *pBuffer;
2531 struct mpr_softc *sc;
2532 uint64_t tdesc;
2533 struct mpr_command *cm = NULL;
2534 uint8_t flags;
2535 u_int pq;
2536
2537 sc = (struct mpr_softc *)data;
2538
2539 pq = sc->replypostindex;
2540 mpr_dprint(sc, MPR_TRACE,
2541 "%s sc %p starting with replypostindex %u\n",
2542 __func__, sc, sc->replypostindex);
2543
2544 for ( ;; ) {
2545 cm = NULL;
2546 desc = &sc->post_queue[sc->replypostindex];
2547
2548 /*
2549 * Copy and clear out the descriptor so that any reentry will
2550 * immediately know that this descriptor has already been
2551 * looked at. There is unfortunate casting magic because the
2552 * MPI API doesn't have a cardinal 64bit type.
2553 */
2554 tdesc = 0xffffffffffffffff;
2555 tdesc = atomic_swap_64((uint64_t *)desc, tdesc);
2556 desc = (MPI2_REPLY_DESCRIPTORS_UNION *)&tdesc;
2557
2558 flags = desc->Default.ReplyFlags &
2559 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2560 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
2561 (le32toh(desc->Words.High) == 0xffffffff))
2562 break;
2563
2564 /* increment the replypostindex now, so that event handlers
2565 * and cm completion handlers which decide to do a diag
2566 * reset can zero it without it getting incremented again
2567 * afterwards, and we break out of this loop on the next
2568 * iteration since the reply post queue has been cleared to
2569 * 0xFF and all descriptors look unused (which they are).
2570 */
2571 if (++sc->replypostindex >= sc->pqdepth)
2572 sc->replypostindex = 0;
2573
2574 switch (flags) {
2575 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
2576 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
2577 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS:
2578 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2579 cm->cm_reply = NULL;
2580 break;
2581 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
2582 {
2583 uint32_t baddr;
2584 uint8_t *reply;
2585
2586 /*
2587 * Re-compose the reply address from the address
2588 * sent back from the chip. The ReplyFrameAddress
2589 * is the lower 32 bits of the physical address of
2590 * particular reply frame. Convert that address to
2591 * host format, and then use that to provide the
2592 * offset against the virtual address base
2593 * (sc->reply_frames).
2594 */
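/*
* E.g. (illustrative values): if reply_busaddr is 0x80000000 and
* the chip returns ReplyFrameAddress 0x80000080, the reply lives
* at sc->reply_frames + 0x80.
*/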
2595 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2596 reply = sc->reply_frames +
2597 (baddr - ((uint32_t)sc->reply_busaddr));
2598 /*
2599 * Make sure the reply we got back is in a valid
2600 * range. If not, go ahead and panic here, since
2601 * we'll probably panic as soon as we dereference the
2602 * reply pointer anyway.
2603 */
2604 if ((reply < sc->reply_frames)
2605 || (reply > (sc->reply_frames +
2606 (sc->fqdepth * sc->replyframesz)))) {
2607 printf("%s: WARNING: reply %p out of range!\n",
2608 __func__, reply);
2609 printf("%s: reply_frames %p, fqdepth %d, "
2610 "frame size %d\n", __func__,
2611 sc->reply_frames, sc->fqdepth,
2612 sc->replyframesz);
2613 printf("%s: baddr %#x,\n", __func__, baddr);
2614 /* LSI-TODO. See Linux Code for Graceful exit */
2615 panic("Reply address out of range");
2616 }
2617 if (le16toh(desc->AddressReply.SMID) == 0) {
2618 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2619 MPI2_FUNCTION_DIAG_BUFFER_POST) {
2620 /*
2621 * If SMID is 0 for Diag Buffer Post,
2622 * this implies that the reply is due to
2623 * a release function with a status that
2624 * the buffer has been released. Set
2625 * the buffer flags accordingly.
2626 */
2627 rel_rep =
2628 (MPI2_DIAG_RELEASE_REPLY *)reply;
2629 if ((le16toh(rel_rep->IOCStatus) &
2630 MPI2_IOCSTATUS_MASK) ==
2631 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
2632 {
2633 pBuffer =
2634 &sc->fw_diag_buffer_list[
2635 rel_rep->BufferType];
2636 pBuffer->valid_data = TRUE;
2637 pBuffer->owned_by_firmware =
2638 FALSE;
2639 pBuffer->immediate = FALSE;
2640 }
2641 } else
2642 mpr_dispatch_event(sc, baddr,
2643 (MPI2_EVENT_NOTIFICATION_REPLY *)
2644 reply);
2645 } else {
2646 cm = &sc->commands[
2647 le16toh(desc->AddressReply.SMID)];
2648 if (cm->cm_state == MPR_CM_STATE_INQUEUE) {
2649 cm->cm_reply = reply;
2650 cm->cm_reply_data =
2651 le32toh(desc->AddressReply.
2652 ReplyFrameAddress);
2653 } else {
2654 mpr_dprint(sc, MPR_RECOVERY,
2655 "Bad state for ADDRESS_REPLY status,"
2656 " ignoring state %d cm %p\n",
2657 cm->cm_state, cm);
2658 }
2659 }
2660 break;
2661 }
2662 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
2663 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
2664 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
2665 default:
2666 /* Unhandled */
2667 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
2668 desc->Default.ReplyFlags);
2669 cm = NULL;
2670 break;
2671 }
2672
2673 if (cm != NULL) {
2674 // Print Error reply frame
2675 if (cm->cm_reply)
2676 mpr_display_reply_info(sc,cm->cm_reply);
2677 mpr_complete_command(sc, cm);
2678 }
2679 }
2680
2681 if (pq != sc->replypostindex) {
2682 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n",
2683 __func__, sc, sc->replypostindex);
2684 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
2685 sc->replypostindex);
2686 }
2687
2688 return;
2689 }
2690
2691 static void
2692 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
2693 MPI2_EVENT_NOTIFICATION_REPLY *reply)
2694 {
2695 struct mpr_event_handle *eh;
2696 int event, handled = 0;
2697
2698 event = le16toh(reply->Event);
2699 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2700 if (isset(eh->mask, event)) {
2701 eh->callback(sc, data, reply);
2702 handled++;
2703 }
2704 }
2705
2706 if (handled == 0)
2707 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n",
2708 event);
2709
2710 /*
2711 * This is the only place that the event/reply should be freed.
2712 * Anything wanting to hold onto the event data should have
2713 * already copied it into their own storage.
2714 */
2715 mpr_free_reply(sc, data);
2716 }
2717
2718 static void
2719 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
2720 {
2721 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2722
2723 if (cm->cm_reply)
2724 MPR_DPRINT_EVENT(sc, generic,
2725 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2726
2727 mpr_free_command(sc, cm);
2728
2729 /* next, send a port enable */
2730 mprsas_startup(sc);
2731 }
2732
2733 /*
2734 * For both register_events and update_events, the caller supplies a bitmap
2735 * of events that it _wants_. These functions then turn that into a bitmask
2736 * suitable for the controller.
2737 */
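/*
* Example (illustrative): if one handler asks for MPI2_EVENT_LOG_DATA and
* another for MPI2_EVENT_SAS_DISCOVERY, only those two bits are cleared in
* sc->event_mask (all other events stay masked at 0xff) before the mask is
* copied into the EventMasks field of the notification request.
*/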
2738 int
2739 mpr_register_events(struct mpr_softc *sc, uint8_t *mask,
2740 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle)
2741 {
2742 struct mpr_event_handle *eh;
2743 int error = 0;
2744
2745 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO);
2746 eh->callback = cb;
2747 eh->data = data;
2748 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
2749 if (mask != NULL)
2750 error = mpr_update_events(sc, eh, mask);
2751 *handle = eh;
2752
2753 return (error);
2754 }
2755
2756 int
2757 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle,
2758 uint8_t *mask)
2759 {
2760 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2761 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
2762 struct mpr_command *cm = NULL;
2763 struct mpr_event_handle *eh;
2764 int error, i;
2765
2766 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2767
2768 if ((mask != NULL) && (handle != NULL))
2769 bcopy(mask, &handle->mask[0], 16);
2770 memset(sc->event_mask, 0xff, 16);
2771
2772 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2773 for (i = 0; i < 16; i++)
2774 sc->event_mask[i] &= ~eh->mask[i];
2775 }
2776
2777 if ((cm = mpr_alloc_command(sc)) == NULL)
2778 return (EBUSY);
2779 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2780 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2781 evtreq->MsgFlags = 0;
2782 evtreq->SASBroadcastPrimitiveMasks = 0;
2783 #ifdef MPR_DEBUG_ALL_EVENTS
2784 {
2785 u_char fullmask[sizeof(evtreq->EventMasks)];
2786 memset(fullmask, 0x00, sizeof(fullmask));
2787 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask));
2788 }
2789 #else
2790 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask));
2791 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2792 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]);
2793 #endif
2794 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2795 cm->cm_data = NULL;
2796
2797 error = mpr_request_polled(sc, &cm);
2798 if (cm != NULL)
2799 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2800 if ((reply == NULL) ||
2801 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
2802 error = ENXIO;
2803
2804 if (reply)
2805 MPR_DPRINT_EVENT(sc, generic, reply);
2806
2807 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error);
2808
2809 if (cm != NULL)
2810 mpr_free_command(sc, cm);
2811 return (error);
2812 }
2813
2814 static int
2815 mpr_reregister_events(struct mpr_softc *sc)
2816 {
2817 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2818 struct mpr_command *cm;
2819 struct mpr_event_handle *eh;
2820 int error, i;
2821
2822 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2823
2824 /* first, reregister events */
2825
2826 memset(sc->event_mask, 0xff, 16);
2827
2828 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2829 for (i = 0; i < 16; i++)
2830 sc->event_mask[i] &= ~eh->mask[i];
2831 }
2832
2833 if ((cm = mpr_alloc_command(sc)) == NULL)
2834 return (EBUSY);
2835 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2836 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2837 evtreq->MsgFlags = 0;
2838 evtreq->SASBroadcastPrimitiveMasks = 0;
2839 #ifdef MPR_DEBUG_ALL_EVENTS
2840 {
2841 u_char fullmask[sizeof(evtreq->EventMasks)];
2842 memset(fullmask, 0x00, sizeof(fullmask));
2843 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask));
2844 }
2845 #else
2846 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask));
2847 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2848 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]);
2849 #endif
2850 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2851 cm->cm_data = NULL;
2852 cm->cm_complete = mpr_reregister_events_complete;
2853
2854 error = mpr_map_command(sc, cm);
2855
2856 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__,
2857 error);
2858 return (error);
2859 }
2860
2861 int
2862 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
2863 {
2864
2865 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2866 free(handle, M_MPR);
2867 return (mpr_update_events(sc, NULL, NULL));
2868 }
2869
2870 /**
2871 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2872 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry
2873 * of the NVMe message (PRP1). If the data buffer is small enough to be described
2874 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to
2875 * describe a larger data buffer. If the data buffer is too large to describe
2876 * using the two PRP entries inside the NVMe message, then PRP1 describes the
2877 * first data memory segment, and PRP2 contains a pointer to a PRP list located
2878 * elsewhere in memory to describe the remaining data memory segments. The PRP
2879 * list will be contiguous.
2880 *
2881 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2882 * consists of a list of PRP entries to describe a number of noncontiguous
2883 * physical memory segments as a single memory buffer, just as an SGL does. Note
2884 * however, that this function is only used by the IOCTL call, so the memory
2885 * given will be guaranteed to be contiguous. There is no need to translate
2886 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2887 * space that is one page size each.
2888 *
2889 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2890 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains
2891 * the second PRP element if the memory being described fits within 2 PRP
2892 * entries, or a PRP list pointer if the PRP spans more than two entries.
2893 *
2894 * A PRP list pointer contains the address of a PRP list, structured as a linear
2895 * array of PRP entries. Each PRP entry in this list describes a segment of
2896 * physical memory.
2897 *
2898 * Each 64-bit PRP entry comprises an address and an offset field. The address
2899 * always points to the beginning of a PAGE_SIZE physical memory page, and the
2900 * offset describes where within that page the memory segment begins. Only the
2901 * first element in a PRP list may contain a non-zero offset, implying that all
2902 * memory segments following the first begin at the start of a PAGE_SIZE page.
2903 *
2904 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
2905 * with exceptions for the first and last elements in the list. If the memory
2906 * being described by the list begins at a non-zero offset within the first page,
2907 * then the first PRP element will contain a non-zero offset indicating where the
2908 * region begins within the page. The last memory segment may end before the end
2909 * of the PAGE_SIZE segment, depending upon the overall size of the memory being
2910 * described by the PRP list.
2911 *
2912 * Since PRP entries lack any indication of size, the overall data buffer length
2913 * is used to determine where the end of the data memory buffer is located, and
2914 * how many PRP entries are required to describe it.
2915 *
2916 * Returns nothing.
2917 */
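/*
* Worked example (illustrative): with PAGE_SIZE = 4 KiB, a page-aligned
* 16 KiB buffer needs four PRP entries. PRP1 holds the address of the
* first page; because more than one page remains after that, PRP2 holds a
* PRP list pointer and the list contains the addresses of the remaining
* three pages. A page-aligned 8 KiB buffer, by contrast, needs only PRP1
* and PRP2 as plain entries and no PRP list.
*/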
2918 void
2919 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
2920 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data,
2921 uint32_t data_in_sz, uint32_t data_out_sz)
2922 {
2923 int prp_size = PRP_ENTRY_SIZE;
2924 uint64_t *prp_entry, *prp1_entry, *prp2_entry;
2925 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys;
2926 uint32_t offset, entry_len, page_mask_result, page_mask;
2927 bus_addr_t paddr;
2928 size_t length;
2929 struct mpr_prp_page *prp_page_info = NULL;
2930
2931 /*
2932 * Not all commands require a data transfer. If no data, just return
2933 * without constructing any PRP.
2934 */
2935 if (!data_in_sz && !data_out_sz)
2936 return;
2937
2938 /*
2939 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is
2940 * located at a 24 byte offset from the start of the NVMe command. Then
2941 * set the current PRP entry pointer to PRP1.
2942 */
2943 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2944 NVME_CMD_PRP1_OFFSET);
2945 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2946 NVME_CMD_PRP2_OFFSET);
2947 prp_entry = prp1_entry;
2948
2949 /*
2950 * For the PRP entries, use the specially allocated buffer of
2951 * contiguous memory. PRP Page allocation failures should not happen
2952 * because there should be enough PRP page buffers to account for the
2953 * possible NVMe QDepth.
2954 */
2955 prp_page_info = mpr_alloc_prp_page(sc);
2956 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2957 "used for building a native NVMe SGL.\n", __func__));
2958 prp_page = (uint64_t *)prp_page_info->prp_page;
2959 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2960
2961 /*
2962 * Insert the allocated PRP page into the command's PRP page list. This
2963 * will be freed when the command is freed.
2964 */
2965 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2966
2967 /*
2968 * Check if we are within 1 entry of a page boundary; we don't want our
2969 * first entry to be a PRP List entry.
2970 */
2971 page_mask = PAGE_SIZE - 1;
2972 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) &
2973 page_mask;
2974 if (!page_mask_result)
2975 {
2976 /* Bump up to next page boundary. */
2977 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size);
2978 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys +
2979 prp_size);
2980 }
2981
2982 /*
2983 * Set PRP physical pointer, which initially points to the current PRP
2984 * DMA memory page.
2985 */
2986 prp_entry_phys = prp_page_phys;
2987
2988 /* Get physical address and length of the data buffer. */
2989 paddr = (bus_addr_t)(uintptr_t)data;
2990 if (data_in_sz)
2991 length = data_in_sz;
2992 else
2993 length = data_out_sz;
2994
2995 /* Loop while the length is not zero. */
2996 while (length)
2997 {
2998 /*
2999 * Check if we need to put a list pointer here if we are at page
3000 * boundary - prp_size (8 bytes).
3001 */
3002 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys +
3003 prp_size) & page_mask;
3004 if (!page_mask_result)
3005 {
3006 /*
3007 * This is the last entry in a PRP List, so we need to
3008 * put a PRP list pointer here. What this does is:
3009 * - bump the current memory pointer to the next
3010 * address, which will be the next full page.
3011 * - set the PRP Entry to point to that page. This is
3012 * now the PRP List pointer.
3013 * - bump the PRP Entry pointer to the start of the next
3014 * page. Since all of this PRP memory is contiguous,
3015 * no need to get a new page - it's just the next
3016 * address.
3017 */
3018 prp_entry_phys++;
3019 *prp_entry =
3020 htole64((uint64_t)(uintptr_t)prp_entry_phys);
3021 prp_entry++;
3022 }
3023
3024 /* Need to handle if entry will be part of a page. */
3025 offset = (uint32_t)paddr & page_mask;
3026 entry_len = PAGE_SIZE - offset;
3027
3028 if (prp_entry == prp1_entry)
3029 {
3030 /*
3031 * Must fill in the first PRP pointer (PRP1) before
3032 * moving on.
3033 */
3034 *prp1_entry = htole64((uint64_t)paddr);
3035
3036 /*
3037 * Now point to the second PRP entry within the
3038 * command (PRP2).
3039 */
3040 prp_entry = prp2_entry;
3041 }
3042 else if (prp_entry == prp2_entry)
3043 {
3044 /*
3045 * Should the PRP2 entry be a PRP List pointer or just a
3046 * regular PRP pointer? If there is more than one more
3047 * page of data, must use a PRP List pointer.
3048 */
3049 if (length > PAGE_SIZE)
3050 {
3051 /*
3052 * PRP2 will contain a PRP List pointer because
3053 * more PRP's are needed with this command. The
3054 * list will start at the beginning of the
3055 * contiguous buffer.
3056 */
3057 *prp2_entry =
3058 htole64(
3059 (uint64_t)(uintptr_t)prp_entry_phys);
3060
3061 /*
3062 * The next PRP Entry will be the start of the
3063 * first PRP List.
3064 */
3065 prp_entry = prp_page;
3066 }
3067 else
3068 {
3069 /*
3070 * After this, the PRP Entries are complete.
3071 * This command uses 2 PRP's and no PRP list.
3072 */
3073 *prp2_entry = htole64((uint64_t)paddr);
3074 }
3075 }
3076 else
3077 {
3078 /*
3079 * Put entry in list and bump the addresses.
3080 *
3081 * After PRP1 and PRP2 are filled in, this will fill in
3082 * all remaining PRP entries in a PRP List, one per each
3083 * time through the loop.
3084 */
3085 *prp_entry = htole64((uint64_t)paddr);
3086 prp_entry++;
3087 prp_entry_phys++;
3088 }
3089
3090 /*
3091 * Bump the phys address of the command's data buffer by the
3092 * entry_len.
3093 */
3094 paddr += entry_len;
3095
3096 /* Decrement length accounting for last partial page. */
3097 if (entry_len > length)
3098 length = 0;
3099 else
3100 length -= entry_len;
3101 }
3102 }
3103
3104 /*
3105 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
3106 * determine if the driver needs to build a native SGL. If so, that native SGL
3107 * is built in the contiguous buffers allocated especially for PCIe SGL
3108 * creation. If the driver will not build a native SGL, return TRUE and a
3109 * normal IEEE SGL will be built. Currently this routine supports NVMe devices
3110 * only.
3111 *
3112 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built.
3113 */
3114 static int
3115 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm,
3116 bus_dma_segment_t *segs, int segs_left)
3117 {
3118 uint32_t i, sge_dwords, length, offset, entry_len;
3119 uint32_t num_entries, buff_len = 0, sges_in_segment;
3120 uint32_t page_mask, page_mask_result, *curr_buff;
3121 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset;
3122 uint32_t first_page_data_size, end_residual;
3123 uint64_t *msg_phys;
3124 bus_addr_t paddr;
3125 int build_native_sgl = 0, first_prp_entry;
3126 int prp_size = PRP_ENTRY_SIZE;
3127 Mpi25IeeeSgeChain64_t *main_chain_element = NULL;
3128 struct mpr_prp_page *prp_page_info = NULL;
3129
3130 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
3131
3132 /*
3133 * Add up the sizes of each segment length to get the total transfer
3134 * size, which will be checked against the Maximum Data Transfer Size.
3135 * If the data transfer length exceeds the MDTS for this device, just
3136 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O
3137 * up into multiple I/O's. [nvme_mdts = 0 means unlimited]
3138 */
3139 for (i = 0; i < segs_left; i++)
3140 buff_len += htole32(segs[i].ds_len);
3141 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS))
3142 return 1;
3143
3144 /* Create page_mask (to get offset within page) */
3145 page_mask = PAGE_SIZE - 1;
3146
3147 /*
3148 * Check if the number of elements exceeds the max number that can be
3149 * put in the main message frame (H/W can only translate an SGL that
3150 * is contained entirely in the main message frame).
3151 */
3152 sges_in_segment = (sc->reqframesz -
3153 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION);
3154 if (segs_left > sges_in_segment)
3155 build_native_sgl = 1;
3156 else
3157 {
3158 /*
3159 * NVMe uses one PRP for each physical page (or part of physical
3160 * page).
3161 * if 4 pages or less then IEEE is OK
3162 * if > 5 pages then we need to build a native SGL
3163 * if > 4 and <= 5 pages, then check the physical address of
3164 * the first SG entry, then if this first size in the page
3165 * is >= the residual beyond 4 pages then use IEEE,
3166 * otherwise use native SGL
3167 */
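/*
* Illustrative example: a 17 KiB transfer in one contiguous segment
* starting on a page boundary has end_residual = 1 KiB and
* first_page_data_size = 4 KiB, so IEEE SGEs are used; the same 17 KiB
* transfer starting 3.5 KiB into a page has first_page_data_size =
* 512 bytes < end_residual = 1 KiB, so a native SGL is built.
*/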
3168 if (buff_len > (PAGE_SIZE * 5))
3169 build_native_sgl = 1;
3170 else if ((buff_len > (PAGE_SIZE * 4)) &&
3171 (buff_len <= (PAGE_SIZE * 5)) )
3172 {
3173 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr;
3174 first_page_offset =
3175 ((uint32_t)(uint64_t)(uintptr_t)msg_phys &
3176 page_mask);
3177 first_page_data_size = PAGE_SIZE - first_page_offset;
3178 end_residual = buff_len % PAGE_SIZE;
3179
3180 /*
3181 * If offset into first page pushes the end of the data
3182 * beyond end of the 5th page, we need the extra PRP
3183 * list.
3184 */
3185 if (first_page_data_size < end_residual)
3186 build_native_sgl = 1;
3187
3188 /*
3189 * Check if first SG entry size is < residual beyond 4
3190 * pages.
3191 */
3192 if (htole32(segs[0].ds_len) <
3193 (buff_len - (PAGE_SIZE * 4)))
3194 build_native_sgl = 1;
3195 }
3196 }
3197
3198 /* check if native SGL is needed */
3199 if (!build_native_sgl)
3200 return 1;
3201
3202 /*
3203 * Native SGL is needed.
3204 * Put a chain element in main message frame that points to the first
3205 * chain buffer.
3206 *
3207 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
3208 * a native SGL.
3209 */
3210
3211 /* Set main message chain element pointer */
3212 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
3213
3214 /*
3215 * For NVMe the chain element needs to be the 2nd SGL entry in the main
3216 * message.
3217 */
3218 main_chain_element = (Mpi25IeeeSgeChain64_t *)
3219 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
3220
3221 /*
3222 * For the PRP entries, use the specially allocated buffer of
3223 * contiguous memory. PRP Page allocation failures should not happen
3224 * because there should be enough PRP page buffers to account for the
3225 * possible NVMe QDepth.
3226 */
3227 prp_page_info = mpr_alloc_prp_page(sc);
3228 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
3229 "used for building a native NVMe SGL.\n", __func__));
3230 curr_buff = (uint32_t *)prp_page_info->prp_page;
3231 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
3232
3233 /*
3234 * Insert the allocated PRP page into the command's PRP page list. This
3235 * will be freed when the command is freed.
3236 */
3237 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
3238
3239 /*
3240 * Check if we are within 1 entry of a page boundary; we don't want our
3241 * first entry to be a PRP List entry.
3242 */
3243 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) &
3244 page_mask;
3245 if (!page_mask_result) {
3246 /* Bump up to next page boundary. */
3247 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size);
3248 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size);
3249 }
3250
3251 /* Fill in the chain element and make it an NVMe segment type. */
3252 main_chain_element->Address.High =
3253 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32));
3254 main_chain_element->Address.Low =
3255 htole32((uint32_t)(uintptr_t)msg_phys);
3256 main_chain_element->NextChainOffset = 0;
3257 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3258 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3259 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
3260
3261 /* Set SGL pointer to start of contiguous PCIe buffer. */
3262 ptr_sgl = curr_buff;
3263 sge_dwords = 2;
3264 num_entries = 0;
3265
3266 /*
3267 * NVMe has a very convoluted PRP format. One PRP is required for each
3268 * page or partial page. We need to split up OS SG entries if they are
3269 * longer than one page or cross a page boundary. We also have to insert
3270 * a PRP list pointer entry as the last entry in each physical page of
3271 * the PRP list.
3272 *
3273 * NOTE: The first PRP "entry" is actually placed in the first SGL entry
3274 * in the main message in IEEE 64 format. The 2nd entry in the main
3275 * message is the chain element, and the rest of the PRP entries are
3276 * built in the contiguous PCIe buffer.
3277 */
3278 first_prp_entry = 1;
3279 ptr_first_sgl = (uint32_t *)cm->cm_sge;
3280
3281 for (i = 0; i < segs_left; i++) {
3282 /* Get physical address and length of this SG entry. */
3283 paddr = segs[i].ds_addr;
3284 length = segs[i].ds_len;
3285
3286 /*
3287 * Check whether a given SGE buffer begins on a non-page
3288 * boundary when it is not the first SGE. If so, this is not
3289 * expected, so fall back to a normal IEEE SGL.
3290 */
3291 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) {
3292 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while "
3293 "building NVMe PRPs, low address is 0x%x\n",
3294 (uint32_t)paddr);
3295 return 1;
3296 }
3297
3298 /* Apart from the last SGE, if any other SGE does not end on a
3299 * page boundary then there is a hole in the buffer. A hole
3300 * would lead to data corruption, so fall back to IEEE SGEs.
3301 */
3302 if (i != (segs_left - 1)) {
3303 if (((uint32_t)paddr + length) & page_mask) {
3304 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
3305 "boundary while building NVMe PRPs, low "
3306 "address: 0x%x and length: %u\n",
3307 (uint32_t)paddr, length);
3308 return 1;
3309 }
3310 }
3311
3312 /* Loop while the length is not zero. */
3313 while (length) {
3314 /*
3315 * Check if we need to put a list pointer here, i.e. if we are
3316 * at (page boundary - prp_size).
3317 */
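/*
 * In other words, the last slot of every page of the PRP list is
 * reserved for a pointer to the next page of the list. Assuming 4 KiB
 * pages and 8-byte PRP entries (prp_size), that leaves 511 data PRPs
 * per page plus the one trailing list pointer.
 */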
3318 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
3319 prp_size) & page_mask;
3320 if (!page_mask_result) {
3321 /*
3322 * Need to put a PRP list pointer here.
3323 */
3324 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3325 prp_size);
3326 *ptr_sgl = htole32((uintptr_t)msg_phys);
3327 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
3328 msg_phys >> 32);
3329 ptr_sgl += sge_dwords;
3330 num_entries++;
3331 }
3332
3333 /* Need to handle if entry will be part of a page. */
3334 offset = (uint32_t)paddr & page_mask;
3335 entry_len = PAGE_SIZE - offset;
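/*
 * Worked example (assuming 4 KiB pages): paddr 0x12345A00 gives offset
 * 0xA00 and entry_len 0x600, so this PRP covers only the remainder of
 * that page; after paddr += entry_len below, paddr is page aligned and
 * later iterations use full-page entries.
 */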
3336 if (first_prp_entry) {
3337 /*
3338 * Put IEEE entry in first SGE in main message.
3339 * (Simple element, System addr, not end of
3340 * list.)
3341 */
3342 *ptr_first_sgl = htole32((uint32_t)paddr);
3343 *(ptr_first_sgl + 1) =
3344 htole32((uint32_t)((uint64_t)paddr >> 32));
3345 *(ptr_first_sgl + 2) = htole32(entry_len);
3346 *(ptr_first_sgl + 3) = 0;
3347
3348 /* No longer the first PRP entry. */
3349 first_prp_entry = 0;
3350 } else {
3351 /* Put entry in list. */
3352 *ptr_sgl = htole32((uint32_t)paddr);
3353 *(ptr_sgl + 1) =
3354 htole32((uint32_t)((uint64_t)paddr >> 32));
3355
3356 /* Bump ptr_sgl, msg_phys, and num_entries. */
3357 ptr_sgl += sge_dwords;
3358 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3359 prp_size);
3360 num_entries++;
3361 }
3362
3363 /* Bump the phys address by the entry_len. */
3364 paddr += entry_len;
3365
3366 /* Decrement length accounting for last partial page. */
3367 if (entry_len > length)
3368 length = 0;
3369 else
3370 length -= entry_len;
3371 }
3372 }
3373
3374 /* Set chain element Length. */
3375 main_chain_element->Length = htole32(num_entries * prp_size);
3376
3377 /* Return 0, indicating we built a native SGL. */
3378 return 0;
3379 }
3380
3381 /*
3382 * Add a chain element as the next SGE for the specified command.
3383 * Reset cm_sge and cm_sglsize to indicate all the available space. Chains are
3384 * only required for IEEE commands. Therefore there is no code for commands
3385 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands
3386 * shouldn't be requesting chains).
3387 */
3388 static int
3389 mpr_add_chain(struct mpr_command *cm, int segsleft)
3390 {
3391 struct mpr_softc *sc = cm->cm_sc;
3392 MPI2_REQUEST_HEADER *req;
3393 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
3394 struct mpr_chain *chain;
3395 int sgc_size, current_segs, rem_segs, segs_per_frame;
3396 uint8_t next_chain_offset = 0;
3397
3398 /*
3399 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3
3400 * only IEEE commands should be requesting chains. Return some error
3401 * code other than 0.
3402 */
3403 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
3404 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
3405 "an MPI SGL.\n");
3406 return(ENOBUFS);
3407 }
3408
3409 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
3410 if (cm->cm_sglsize < sgc_size)
3411 panic("MPR: Need SGE Error Code\n");
3412
3413 chain = mpr_alloc_chain(cm->cm_sc);
3414 if (chain == NULL)
3415 return (ENOBUFS);
3416
3417 /*
3418 * Note: a double-linked list is used to make it easier to walk for
3419 * debugging.
3420 */
3421 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
3422
3423 /*
3424 * Need to know if the number of frames left is more than 1 or not. If
3425 * more than 1 frame is required, NextChainOffset will need to be set;
3426 * it will simply point at the last segment of the frame.
3427 */
3428 rem_segs = 0;
3429 if (cm->cm_sglsize < (sgc_size * segsleft)) {
3430 /*
3431 * rem_segs is the number of segments remaining after the
3432 * segments that will go into the current frame. Since it is
3433 * known that at least one more frame is required, account for
3434 * the chain element. To know if more than one more frame is
3435 * required, just check if there will be a remainder after using
3436 * the current frame (with this chain) and the next frame. If
3437 * so the NextChainOffset must be the last element of the next
3438 * frame.
3439 */
3440 current_segs = (cm->cm_sglsize / sgc_size) - 1;
3441 rem_segs = segsleft - current_segs;
3442 segs_per_frame = sc->chain_frame_size / sgc_size;
3443 if (rem_segs > segs_per_frame) {
3444 next_chain_offset = segs_per_frame - 1;
3445 }
3446 }
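/*
 * Illustrative numbers only: with a 16-byte IEEE element, room for 3
 * more elements in the current frame, 20 segments left, and a 128-byte
 * chain frame, current_segs = 2 (one slot is taken by this chain
 * element), rem_segs = 18 and segs_per_frame = 8, so another chain will
 * be needed and next_chain_offset becomes 7.
 */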
3447 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
3448 ieee_sgc->Length = next_chain_offset ?
3449 htole32((uint32_t)sc->chain_frame_size) :
3450 htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
3451 ieee_sgc->NextChainOffset = next_chain_offset;
3452 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3453 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3454 ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
3455 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
3456 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
3457 req = (MPI2_REQUEST_HEADER *)cm->cm_req;
3458 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4;
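/*
 * (chain_frame_size - sgc_size) is the byte offset of the last element
 * of a chain frame; the shift by 4 converts it into the 16-byte units
 * used by the IEEE-format ChainOffset field.
 */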
3459
3460 cm->cm_sglsize = sc->chain_frame_size;
3461 return (0);
3462 }
3463
3464 /*
3465 * Add one scatter-gather element to the scatter-gather list for a command.
3466 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the
3467 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a
3468 * chain, so don't consider any chain additions.
3469 */
3470 int
3471 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
3472 int segsleft)
3473 {
3474 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3475 u32 sge_flags;
3476
3477 /*
3478 * case 1: >=1 more segment, no room for anything (error)
3479 * case 2: 1 more segment and enough room for it
3480 */
3481
3482 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
3483 mpr_dprint(cm->cm_sc, MPR_ERROR,
3484 "%s: warning: Not enough room for MPI SGL in frame.\n",
3485 __func__);
3486 return(ENOBUFS);
3487 }
3488
3489 KASSERT(segsleft == 1,
3490 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
3491 segsleft));
3492
3493 /*
3494 * There is one more segment left to add for the MPI SGL and there is
3495 * enough room in the frame to add it. This is the normal case because
3496 * MPI SGL's don't have chains, otherwise something is wrong.
3497 *
3498 * If this is a bi-directional request, need to account for that
3499 * here. Save the pre-filled sge values. These will be used
3500 * either for the 2nd SGL or for a single direction SGL. If
3501 * cm_out_len is non-zero, this is a bi-directional request, so
3502 * fill in the OUT SGL first, then the IN SGL, otherwise just
3503 * fill in the IN SGL. Note that at this time, when filling in
3504 * 2 SGL's for a bi-directional request, they both use the same
3505 * DMA buffer (same cm command).
3506 */
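/*
 * FlagsLength packs the SGE flags into the top byte and the transfer
 * length into the low 24 bits (MPI2_SGE_FLAGS_SHIFT is 24), which is
 * why only the low 24 bits are preserved here.
 */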
3507 saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
3508 saved_address_low = sge->Address.Low;
3509 saved_address_high = sge->Address.High;
3510 if (cm->cm_out_len) {
3511 sge->FlagsLength = cm->cm_out_len |
3512 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3513 MPI2_SGE_FLAGS_END_OF_BUFFER |
3514 MPI2_SGE_FLAGS_HOST_TO_IOC |
3515 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3516 MPI2_SGE_FLAGS_SHIFT);
3517 cm->cm_sglsize -= len;
3518 /* Endian Safe code */
3519 sge_flags = sge->FlagsLength;
3520 sge->FlagsLength = htole32(sge_flags);
3521 bcopy(sge, cm->cm_sge, len);
3522 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3523 }
3524 sge->FlagsLength = saved_buf_len |
3525 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3526 MPI2_SGE_FLAGS_END_OF_BUFFER |
3527 MPI2_SGE_FLAGS_LAST_ELEMENT |
3528 MPI2_SGE_FLAGS_END_OF_LIST |
3529 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3530 MPI2_SGE_FLAGS_SHIFT);
3531 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
3532 sge->FlagsLength |=
3533 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
3534 MPI2_SGE_FLAGS_SHIFT);
3535 } else {
3536 sge->FlagsLength |=
3537 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
3538 MPI2_SGE_FLAGS_SHIFT);
3539 }
3540 sge->Address.Low = saved_address_low;
3541 sge->Address.High = saved_address_high;
3542
3543 cm->cm_sglsize -= len;
3544 /* Endian Safe code */
3545 sge_flags = sge->FlagsLength;
3546 sge->FlagsLength = htole32(sge_flags);
3547 bcopy(sge, cm->cm_sge, len);
3548 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3549 return (0);
3550 }
3551
3552 /*
3553 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
3554 * gather list for a command. Maintain cm_sglsize and cm_sge as the
3555 * remaining size and pointer to the next SGE to fill in, respectively.
3556 */
3557 int
3558 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
3559 {
3560 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
3561 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
3562 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3563 uint32_t sge_length;
3564
3565 /*
3566 * case 1: No room for chain or segment (error).
3567 * case 2: Two or more segments left but only room for chain.
3568 * case 3: Last segment and room for it, so set flags.
3569 */
3570
3571 /*
3572 * There should be room for at least one element, or there is a big
3573 * problem.
3574 */
3575 if (cm->cm_sglsize < ieee_sge_size)
3576 panic("MPR: Need SGE Error Code\n");
3577
3578 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
3579 if ((error = mpr_add_chain(cm, segsleft)) != 0)
3580 return (error);
3581 }
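/*
 * If a chain was added above, cm_sge now points into the new chain
 * frame and cm_sglsize was reset to chain_frame_size (see
 * mpr_add_chain()), so the element below is written into that frame.
 */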
3582
3583 if (segsleft == 1) {
3584 /*
3585 * If this is a bi-directional request, need to account for that
3586 * here. Save the pre-filled sge values. These will be used
3587 * either for the 2nd SGL or for a single direction SGL. If
3588 * cm_out_len is non-zero, this is a bi-directional request, so
3589 * fill in the OUT SGL first, then the IN SGL, otherwise just
3590 * fill in the IN SGL. Note that at this time, when filling in
3591 * 2 SGL's for a bi-directional request, they both use the same
3592 * DMA buffer (same cm command).
3593 */
3594 saved_buf_len = sge->Length;
3595 saved_address_low = sge->Address.Low;
3596 saved_address_high = sge->Address.High;
3597 if (cm->cm_out_len) {
3598 sge->Length = cm->cm_out_len;
3599 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3600 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3601 cm->cm_sglsize -= ieee_sge_size;
3602 /* Endian Safe code */
3603 sge_length = sge->Length;
3604 sge->Length = htole32(sge_length);
3605 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3606 cm->cm_sge =
3607 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3608 ieee_sge_size);
3609 }
3610 sge->Length = saved_buf_len;
3611 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3612 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3613 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
3614 sge->Address.Low = saved_address_low;
3615 sge->Address.High = saved_address_high;
3616 }
3617
3618 cm->cm_sglsize -= ieee_sge_size;
3619 /* Endian Safe code */
3620 sge_length = sge->Length;
3621 sge->Length = htole32(sge_length);
3622 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3623 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3624 ieee_sge_size);
3625 return (0);
3626 }
3627
3628 /*
3629 * Add one dma segment to the scatter-gather list for a command.
3630 */
3631 int
3632 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
3633 int segsleft)
3634 {
3635 MPI2_SGE_SIMPLE64 sge;
3636 MPI2_IEEE_SGE_SIMPLE64 ieee_sge;
3637
3638 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
3639 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3640 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3641 ieee_sge.Length = len;
3642 mpr_from_u64(pa, &ieee_sge.Address);
3643
3644 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
3645 } else {
3646 /*
3647 * This driver always uses 64-bit address elements for
3648 * simplicity.
3649 */
3650 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3651 MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3652 /* Endianness conversion is handled in mpr_push_sge(). */
3653 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
3654 mpr_from_u64(pa, &sge.Address);
3655
3656 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
3657 }
3658 }
3659
3660 static void
3661 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3662 {
3663 struct mpr_softc *sc;
3664 struct mpr_command *cm;
3665 u_int i, dir, sflags;
3666
3667 cm = (struct mpr_command *)arg;
3668 sc = cm->cm_sc;
3669
3670 /*
3671 * In this case, just print out a warning and let the chip tell the
3672 * user they did the wrong thing.
3673 */
3674 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
3675 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
3676 "segments, more than the %d allowed\n", __func__, nsegs,
3677 cm->cm_max_segs);
3678 }
3679
3680 /*
3681 * Set up DMA direction flags. Bi-directional requests are also handled
3682 * here. In that case, both direction flags will be set.
3683 */
3684 sflags = 0;
3685 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
3686 /*
3687 * We have to add a special case for SMP passthrough, there
3688 * is no easy way to generically handle it. The first
3689 * S/G element is used for the command (therefore the
3690 * direction bit needs to be set). The second one is used
3691 * for the reply. We'll leave it to the caller to make
3692 * sure we only have two buffers.
3693 */
3694 /*
3695 * Even though the busdma man page says it doesn't make
3696 * sense to have both direction flags, it does in this case.
3697 * We have one s/g element being accessed in each direction.
3698 */
3699 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
3700
3701 /*
3702 * Set the direction flag on the first buffer in the SMP
3703 * passthrough request. We'll clear it for the second one.
3704 */
3705 sflags |= MPI2_SGE_FLAGS_DIRECTION |
3706 MPI2_SGE_FLAGS_END_OF_BUFFER;
3707 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
3708 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
3709 dir = BUS_DMASYNC_PREWRITE;
3710 } else
3711 dir = BUS_DMASYNC_PREREAD;
3712
3713 /* Check if a native SG list is needed for an NVMe PCIe device. */
3714 if (cm->cm_targ && cm->cm_targ->is_nvme &&
3715 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
3716 /* A native SG list was built, skip to end. */
3717 goto out;
3718 }
3719
3720 for (i = 0; i < nsegs; i++) {
3721 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
3722 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
3723 }
3724 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
3725 sflags, nsegs - i);
3726 if (error != 0) {
3727 /* Resource shortage, roll back! */
3728 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
3729 mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
3730 "consider increasing hw.mpr.max_chains.\n");
3731 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
3732 /*
3733 * mpr_complete_command can only be called on commands
3734 * that are in the queue. Since this is an error path
3735 * which gets called before we enqueue, update the state
3736 * to meet this requirement before we complete it.
3737 */
3738 cm->cm_state = MPR_CM_STATE_INQUEUE;
3739 mpr_complete_command(sc, cm);
3740 return;
3741 }
3742 }
3743
3744 out:
3745 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3746 mpr_enqueue_request(sc, cm);
3747
3748 return;
3749 }
3750
3751 static void
3752 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
3753 int error)
3754 {
3755 mpr_data_cb(arg, segs, nsegs, error);
3756 }
3757
3758 /*
3759 * This is the routine to enqueue commands asynchronously.
3760 * Note that the only error path here is from bus_dmamap_load(), which can
3761 * return EINPROGRESS if it is waiting for resources. Other than this, it's
3762 * assumed that if you have a command in-hand, then you have enough credits
3763 * to use it.
3764 */
3765 int
3766 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
3767 {
3768 int error = 0;
3769
3770 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
3771 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3772 &cm->cm_uio, mpr_data_cb2, cm, 0);
3773 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
3774 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3775 cm->cm_data, mpr_data_cb, cm, 0);
3776 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3777 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3778 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
3779 } else {
3780 /* Add a zero-length element as needed */
3781 if (cm->cm_sge != NULL)
3782 mpr_add_dmaseg(cm, 0, 0, 0, 1);
3783 mpr_enqueue_request(sc, cm);
3784 }
3785
3786 return (error);
3787 }
3788
3789 /*
3790 * This is the routine to enqueue commands synchronously. An error of
3791 * EINPROGRESS from mpr_map_command() is ignored since the command will
3792 * be executed and enqueued automatically. Other errors come from msleep().
3793 */
3794 int
3795 mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
3796 int sleep_flag)
3797 {
3798 int error, rc;
3799 struct timeval cur_time, start_time;
3800 struct mpr_command *cm = *cmp;
3801
3802 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
3803 return EBUSY;
3804
3805 cm->cm_complete = NULL;
3806 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED);
3807 error = mpr_map_command(sc, cm);
3808 if ((error != 0) && (error != EINPROGRESS))
3809 return (error);
3810
3811 // Check the thread context and wait 50 msec at a time until the timeout
3812 // has expired or the command has finished. If msleep can't be used, we
3813 // need to poll.
3814 if (curthread->td_no_sleeping)
3815 sleep_flag = NO_SLEEP;
3816 getmicrouptime(&start_time);
3817 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
3818 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
3819 if (error == EWOULDBLOCK) {
3820 /*
3821 * Record the actual elapsed time in the case of a
3822 * timeout for the message below.
3823 */
3824 getmicrouptime(&cur_time);
3825 timevalsub(&cur_time, &start_time);
3826 }
3827 } else {
3828 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3829 mpr_intr_locked(sc);
3830 if (sleep_flag == CAN_SLEEP)
3831 pause("mprwait", hz/20);
3832 else
3833 DELAY(50000);
3834
3835 getmicrouptime(&cur_time);
3836 timevalsub(&cur_time, &start_time);
3837 if (cur_time.tv_sec > timeout) {
3838 error = EWOULDBLOCK;
3839 break;
3840 }
3841 }
3842 }
3843
3844 if (error == EWOULDBLOCK) {
3845 if (cm->cm_timeout_handler == NULL) {
3846 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d,"
3847 " elapsed=%jd\n", __func__, timeout,
3848 (intmax_t)cur_time.tv_sec);
3849 rc = mpr_reinit(sc);
3850 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3851 "failed");
3852 } else
3853 cm->cm_timeout_handler(sc, cm);
3854 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3855 /*
3856 * Tell the caller that we freed the command in a
3857 * reinit.
3858 */
3859 *cmp = NULL;
3860 }
3861 error = ETIMEDOUT;
3862 }
3863 return (error);
3864 }
3865
3866 /*
3867 * This is the routine to enqueue a command synchronously and poll for
3868 * completion. Its use should be rare.
3869 */
3870 int
3871 mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
3872 {
3873 int error, rc;
3874 struct timeval cur_time, start_time;
3875 struct mpr_command *cm = *cmp;
3876
3877 error = 0;
3878
3879 cm->cm_flags |= MPR_CM_FLAGS_POLLED;
3880 cm->cm_complete = NULL;
3881 mpr_map_command(sc, cm);
3882
3883 getmicrouptime(&start_time);
3884 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3885 mpr_intr_locked(sc);
3886
3887 if (mtx_owned(&sc->mpr_mtx))
3888 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
3889 "mprpoll", hz/20);
3890 else
3891 pause("mprpoll", hz/20);
3892
3893 /*
3894 * Check for real-time timeout and fail if more than 60 seconds.
3895 */
3896 getmicrouptime(&cur_time);
3897 timevalsub(&cur_time, &start_time);
3898 if (cur_time.tv_sec > 60) {
3899 mpr_dprint(sc, MPR_FAULT, "polling failed\n");
3900 error = ETIMEDOUT;
3901 break;
3902 }
3903 }
3904 cm->cm_state = MPR_CM_STATE_BUSY;
3905 if (error) {
3906 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
3907 rc = mpr_reinit(sc);
3908 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3909 "failed");
3910
3911 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3912 /*
3913 * Tell the caller that we freed the command in a
3914 * reinit.
3915 */
3916 *cmp = NULL;
3917 }
3918 }
3919 return (error);
3920 }
3921
3922 /*
3923 * The MPT driver had a verbose interface for config pages. In this driver,
3924 * reduce it to much simpler terms, similar to the Linux driver.
3925 */
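/*
 * A minimal synchronous caller might look like the sketch below (the action
 * and page-type values are standard MPI2 config constants; everything else
 * follows the field usage in this routine and is illustrative only):
 *
 *	struct mpr_config_params params;
 *
 *	bzero(&params, sizeof(params));
 *	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
 *	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
 *	params.hdr.Struct.PageNumber = 1;
 *	params.page_address = 0;
 *	params.buffer = NULL;
 *	params.length = 0;
 *	params.callback = NULL;		(NULL means wait for completion)
 *	error = mpr_read_config_page(sc, &params);
 *
 * followed by a second call with MPI2_CONFIG_ACTION_PAGE_READ_CURRENT and a
 * real buffer/length once the header reports the page size.
 */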
3926 int
3927 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3928 {
3929 MPI2_CONFIG_REQUEST *req;
3930 struct mpr_command *cm;
3931 int error;
3932
3933 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
3934 return (EBUSY);
3935 }
3936
3937 cm = mpr_alloc_command(sc);
3938 if (cm == NULL) {
3939 return (EBUSY);
3940 }
3941
3942 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3943 req->Function = MPI2_FUNCTION_CONFIG;
3944 req->Action = params->action;
3945 req->SGLFlags = 0;
3946 req->ChainOffset = 0;
3947 req->PageAddress = params->page_address;
3948 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3949 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
3950
3951 hdr = &params->hdr.Ext;
3952 req->ExtPageType = hdr->ExtPageType;
3953 req->ExtPageLength = hdr->ExtPageLength;
3954 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3955 req->Header.PageLength = 0; /* Must be set to zero */
3956 req->Header.PageNumber = hdr->PageNumber;
3957 req->Header.PageVersion = hdr->PageVersion;
3958 } else {
3959 MPI2_CONFIG_PAGE_HEADER *hdr;
3960
3961 hdr = &params->hdr.Struct;
3962 req->Header.PageType = hdr->PageType;
3963 req->Header.PageNumber = hdr->PageNumber;
3964 req->Header.PageLength = hdr->PageLength;
3965 req->Header.PageVersion = hdr->PageVersion;
3966 }
3967
3968 cm->cm_data = params->buffer;
3969 cm->cm_length = params->length;
3970 if (cm->cm_data != NULL) {
3971 cm->cm_sge = &req->PageBufferSGE;
3972 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3973 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
3974 } else
3975 cm->cm_sge = NULL;
3976 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3977
3978 cm->cm_complete_data = params;
3979 if (params->callback != NULL) {
3980 cm->cm_complete = mpr_config_complete;
3981 return (mpr_map_command(sc, cm));
3982 } else {
3983 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
3984 if (error) {
3985 mpr_dprint(sc, MPR_FAULT,
3986 "Error %d reading config page\n", error);
3987 if (cm != NULL)
3988 mpr_free_command(sc, cm);
3989 return (error);
3990 }
3991 mpr_config_complete(sc, cm);
3992 }
3993
3994 return (0);
3995 }
3996
3997 int
3998 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3999 {
4000 return (EINVAL);
4001 }
4002
4003 static void
4004 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
4005 {
4006 MPI2_CONFIG_REPLY *reply;
4007 struct mpr_config_params *params;
4008
4009 MPR_FUNCTRACE(sc);
4010 params = cm->cm_complete_data;
4011
4012 if (cm->cm_data != NULL) {
4013 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
4014 BUS_DMASYNC_POSTREAD);
4015 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
4016 }
4017
4018 /*
4019 * XXX KDM need to do more error recovery? This results in the
4020 * device in question not getting probed.
4021 */
4022 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
4023 params->status = MPI2_IOCSTATUS_BUSY;
4024 goto done;
4025 }
4026
4027 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
4028 if (reply == NULL) {
4029 params->status = MPI2_IOCSTATUS_BUSY;
4030 goto done;
4031 }
4032 params->status = reply->IOCStatus;
4033 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
4034 params->hdr.Ext.ExtPageType = reply->ExtPageType;
4035 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
4036 params->hdr.Ext.PageType = reply->Header.PageType;
4037 params->hdr.Ext.PageNumber = reply->Header.PageNumber;
4038 params->hdr.Ext.PageVersion = reply->Header.PageVersion;
4039 } else {
4040 params->hdr.Struct.PageType = reply->Header.PageType;
4041 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
4042 params->hdr.Struct.PageLength = reply->Header.PageLength;
4043 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
4044 }
4045
4046 done:
4047 mpr_free_command(sc, cm);
4048 if (params->callback != NULL)
4049 params->callback(sc, params);
4050
4051 return;
4052 }
4053