/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/*
 * The minimum and maximum are defined in the admin queue attributes
 * section of the spec.
 */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O commands
 *  that we will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage of having IO_ENTRIES > IO_TRACKERS is for debugging purposes:
 *  when dumping the contents of the submission and completion queues, it will
 *  show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};
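
/*
 * Note (descriptive, added for clarity): the done field is set nonzero by
 * nvme_completion_poll_cb() after the command's completion has been copied
 * into cpl, and is read with acquire semantics by nvme_completion_poll()
 * below.
 */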

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	struct memdesc			payload;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	bool				payload_valid;
	bool				timeout;
	bool				spare[2];		/* Future use */
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	sbintime_t			deadline;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,		/* Normal operations */
	RECOVERY_WAITING,		/* Waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	struct callout		timer;			/* Protected by recovery lock */
	bool			timer_armed;		/* Protected by recovery lock */
	enum nvme_recovery	recovery_state;		/* Protected by recovery lock */

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;
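	/*
	 * sq_tdbl_off and cq_hdbl_off above cache the byte offsets of this
	 * qpair's doorbell registers.  Per the NVMe spec, the tail doorbell
	 * of submission queue y is at register offset
	 * 0x1000 + (2y) * (4 << CAP.DSTRD), and the head doorbell of
	 * completion queue y is at 0x1000 + (2y + 1) * (4 << CAP.DSTRD).
	 */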

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;
	int64_t			num_recovery_nolock;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx_padalign	lock;
	struct mtx_padalign	recovery;
} __aligned(CACHE_LINE_SIZE);
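
/*
 * Illustrative sketch (not the driver's actual code; the real loop lives in
 * nvme_qpair_process_completions()): a completion entry is new only when its
 * phase tag matches qpair->phase, and the expected phase flips each time
 * cq_head wraps around the queue:
 *
 *	struct nvme_completion cpl = qpair->cpl[qpair->cq_head];
 *	if (NVME_STATUS_GET_P(cpl.status) != qpair->phase)
 *		return;			<- no new completions to consume
 *	if (++qpair->cq_head == qpair->num_entries) {
 *		qpair->cq_head = 0;
 *		qpair->phase = !qpair->phase;
 *	}
 */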

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define	QUIRK_DELAY_B4_CHK_RDY	1		/* Can't touch MMIO on disable */
#define	QUIRK_DISABLE_TIMEOUT	2		/* Disable broken completion timeout feature */
#define	QUIRK_INTEL_ALIGNMENT	4		/* Pre NVMe 1.3 performance alignment */
#define QUIRK_AHCI		8		/* Attached via AHCI redirect */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum I/O size in bytes */
	uint32_t		max_xfer_size;

	/** Low and high 32 bits of the capabilities (CAP) register */
	uint32_t		cap_lo;
	uint32_t		cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t		page_size;
	uint32_t		mps;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	bool				is_failed;
	bool				is_dying;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int				hmb_nchunks;
	size_t				hmb_chunk;
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), val)

/*
 * Note: no trailing semicolon after while (0), so the macro expands cleanly
 * inside if/else statements; callers supply the semicolon.
 */
#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg)+4,				       \
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		       \
	} while (0)
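
/*
 * Usage sketch (illustrative): the `reg' argument names a member of
 * struct nvme_registers, so reading the controller status register and
 * programming the admin submission queue base address look like:
 *
 *	csts = nvme_mmio_read_4(ctrlr, csts);
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 */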

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_qpair_print_command(struct nvme_qpair *qpair,
	    struct nvme_command *cmd);
void	nvme_qpair_print_completion(struct nvme_qpair *qpair,
	    struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete, using nvme_completion_poll_cb as its
 * completion callback.  Used in limited contexts where the caller knows it's
 * OK to block briefly while the command runs.  The ISR will run the callback,
 * which sets status->done to true, usually within microseconds.  If not, then
 * after one second the timeout handler should reset the controller and abort
 * all outstanding requests, including this polled one.  If the command still
 * hasn't completed after ten seconds, then something is wrong with the driver
 * and panicking is the only way to recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media,
 * so they complete within a few microseconds.  Adaptively sleep, starting at
 * a microsecond and backing off to a millisecond, to catch the vast majority
 * of these without waiting for a full tick plus scheduling delays.  Since
 * these commands mostly run during startup, this drastically reduces startup
 * time.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic("NVME polled command failed to complete within 10s.");
		pause_sbt("nvme", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}
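
/*
 * A sketch of the synchronous-command idiom built on these helpers (it
 * mirrors how the driver's initialization code issues admin commands;
 * error handling elided):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		... the command failed ...
 */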

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
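
/*
 * nvme_single_map() is the bus_dma callback used when loading a map that
 * must resolve to exactly one contiguous segment, e.g. (hypothetical
 * sketch; the queuemem names are illustrative):
 *
 *	err = bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
 *	    queuemem, allocsz, nvme_single_map, &queuemem_bus_addr, 0);
 */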

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_vaddr(payload, payload_size);
		req->payload_valid = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_bio(bio);
		req->payload_valid = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_ccb(ccb);
		req->payload_valid = true;
	}

	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)
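
/*
 * Request life cycle sketch (illustrative): internal callers allocate a
 * request, fill in the NVMe command, and submit it; the qpair code frees
 * the request after its completion callback has run, so callers do not
 * call nvme_free_request() themselves on the success path.
 *
 *	req = nvme_allocate_request_vaddr(payload, payload_size,
 *	    nvme_completion_poll_cb, &status);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */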

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */