xref: /freebsd/sys/dev/nvme/nvme_private.h (revision 9c999a259f00b35f0467acd351fea9157ed7e1e4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* Min and max are defined in the Admin Queue Attributes section of the spec. */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O commands
 *  that we will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage of having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 *  when dumping the contents of the submission and completion queues, it will
 *  show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)    /* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	sbintime_t			deadline;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,		/* Normal operations */
	RECOVERY_START,			/* Deadline has passed, start recovering */
	RECOVERY_RESET,			/* This pass, initiate reset of controller */
	RECOVERY_WAITING,		/* Waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	struct callout		timer;
	sbintime_t		deadline;
	bool			timer_armed;
	enum nvme_recovery	recovery_state;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define	QUIRK_DELAY_B4_CHK_RDY	1		/* Can't touch MMIO on disable */
#define	QUIRK_DISABLE_TIMEOUT	2		/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	bool				is_failed;
	bool				is_dying;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int				hmb_nchunks;
	size_t				hmb_chunk;
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), (val) & 0xFFFFFFFF);	       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg) + 4,			       \
		    ((val) & 0xFFFFFFFF00000000ULL) >> 32);		       \
	} while (0)
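
/*
 * Illustrative sketch (not part of the driver API) of how the accessors
 * above are used during controller enable.  It assumes the 'asq' and 'cc'
 * register names in struct nvme_registers and NVME_CC_REG_EN_SHIFT from
 * nvme.h:
 *
 *	uint32_t cc;
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	cc = nvme_mmio_read_4(ctrlr, cc);
 *	cc |= 1 << NVME_CC_REG_EN_SHIFT;
 *	nvme_mmio_write_4(ctrlr, cc, cc);
 */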

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf((ctrlr)->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr is passed as a void * so this can be used directly as a config_intrhook callback. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
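
/*
 * A sketch (illustrative only) of how the void * signature above lets the
 * routine be installed as a config_intrhook callback during attach:
 *
 *	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
 *	ctrlr->config_hook.ich_arg = ctrlr;
 *	config_intrhook_establish(&ctrlr->config_hook);
 */
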
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback, which sets status->done,
 * usually within microseconds.  If not, then after one second the timeout
 * handler should reset the controller and abort all outstanding requests,
 * including this polled one.  If the command still hasn't completed after
 * ten seconds, something is wrong with the driver and panicking is the only
 * way to recover.
 */
static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}
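
/*
 * Typical synchronous pattern built on the helper above (a sketch; it
 * assumes nvme_completion_is_error() from nvme.h and uses the poll
 * callback declared earlier in this file):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify controller failed\n");
 */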

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
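
/*
 * nvme_single_map() is intended as the busdma callback for loads that are
 * known to yield exactly one segment, e.g. (a sketch, with error handling
 * elided):
 *
 *	uint64_t bus_addr;
 *
 *	bus_dmamap_load(qpair->dma_tag, map, vaddr, size,
 *	    nvme_single_map, &bus_addr, 0);
 */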

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}
	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)
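
/*
 * Sketch of the request life cycle using the helpers above; the callback,
 * payload, and opcode choice are illustrative placeholders.  The queue pair
 * logic frees the request once its completion has been processed.
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(payload, payload_size,
 *	    my_done_cb, my_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */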

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */