/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __LIBNVMF_H__
#define	__LIBNVMF_H__

#include <sys/uio.h>
#include <stdbool.h>
#include <stddef.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_proto.h>

struct nvmf_capsule;
struct nvmf_association;
struct nvmf_qpair;

/*
 * Parameters shared by all queue-pairs of an association.  Note that
 * this contains the requested values used to initiate transport
 * negotiation.
 */
struct nvmf_association_params {
	bool sq_flow_control;		/* SQ flow control required. */
	bool dynamic_controller_model;	/* Controller only */
	uint16_t max_admin_qsize;	/* Controller only */
	uint32_t max_io_qsize;		/* Controller only, 0 for discovery */
	union {
		struct {
			uint8_t pda;	/* Tx-side PDA. */
			bool header_digests;
			bool data_digests;
			uint32_t maxr2t;	/* Host only */
			uint32_t maxh2cdata;	/* Controller only */
		} tcp;
	};
};

/* Parameters specific to a single queue pair of an association. */
struct nvmf_qpair_params {
	bool admin;			/* Host only */
	union {
		struct {
			int fd;
		} tcp;
	};
};

/* Transport-independent APIs. */

/*
 * A host should allocate a new association for each association it
 * establishes with a controller.  After the admin queue has been
 * allocated and the controller's data has been fetched, that data
 * should be passed to nvmf_update_association to update internal
 * transport-specific parameters before allocating I/O queues.
 *
 * A controller uses a single association to manage all incoming
 * queues since it is not known until after parsing the CONNECT
 * command which transport queues are admin vs I/O and which
 * controller they are created against.
 */
struct nvmf_association *nvmf_allocate_association(enum nvmf_trtype trtype,
    bool controller, const struct nvmf_association_params *params);
void	nvmf_update_assocation(struct nvmf_association *na,
    const struct nvme_controller_data *cdata);
void	nvmf_free_association(struct nvmf_association *na);

/* The most recent association-wide error message. */
const char *nvmf_association_error(const struct nvmf_association *na);
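
/*
 * Example (illustrative sketch, not part of the libnvmf API): allocate
 * a host-side TCP association.  The function name and parameter values
 * are hypothetical, <err.h> and <string.h> are assumed, and the block
 * is guarded by "#if 0" so it is never compiled.
 */
#if 0
static struct nvmf_association *
example_allocate_host_association(void)
{
	struct nvmf_association_params aparams;
	struct nvmf_association *na;

	memset(&aparams, 0, sizeof(aparams));
	aparams.sq_flow_control = false;	/* Don't require SQ flow control. */
	aparams.tcp.pda = 0;			/* No extra PDU data alignment. */
	aparams.tcp.header_digests = false;
	aparams.tcp.data_digests = false;
	aparams.tcp.maxr2t = 1;			/* Host only. */

	na = nvmf_allocate_association(NVMF_TRTYPE_TCP, false, &aparams);
	if (na == NULL)
		errx(1, "failed to allocate association");

	/*
	 * Allocate the admin queue, fetch the controller data, and pass
	 * it back to the association before creating I/O queues.
	 */
	return (na);
}
#endif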

/*
 * A queue pair represents either an Admin or I/O
 * submission/completion queue pair.
 *
 * Each open qpair holds a reference on its association.  Once queue
 * pairs are allocated, callers can safely free the association to
 * ease bookkeeping.
 *
 * If nvmf_allocate_qpair fails, a detailed error message can be
 * obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params);
void	nvmf_free_qpair(struct nvmf_qpair *qp);

/*
 * Capsules are either commands (host -> controller) or responses
 * (controller -> host).  A single data buffer segment may be
 * associated with a command capsule.  Transmitted data is not copied
 * by this API but instead must be preserved until the capsule is
 * transmitted and freed.
 */
struct nvmf_capsule *nvmf_allocate_command(struct nvmf_qpair *qp,
    const void *sqe);
struct nvmf_capsule *nvmf_allocate_response(struct nvmf_qpair *qp,
    const void *cqe);
void	nvmf_free_capsule(struct nvmf_capsule *nc);
int	nvmf_capsule_append_data(struct nvmf_capsule *nc,
    void *buf, size_t len, bool send);
int	nvmf_transmit_capsule(struct nvmf_capsule *nc);
int	nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp);
const void *nvmf_capsule_sqe(const struct nvmf_capsule *nc);
const void *nvmf_capsule_cqe(const struct nvmf_capsule *nc);
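
/*
 * Example (illustrative sketch): build a command capsule carrying a
 * single data buffer and transmit it.  The function name and arguments
 * are hypothetical and <err.h> is assumed.  A host would typically use
 * nvmf_host_transmit_command() (declared below) rather than the raw
 * nvmf_transmit_capsule().  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static void
example_send_command_with_data(struct nvmf_qpair *qp,
    const struct nvme_command *cmd, void *buf, size_t buflen)
{
	struct nvmf_capsule *nc;

	nc = nvmf_allocate_command(qp, cmd);	/* 'cmd' is the SQE. */
	if (nc == NULL)
		errx(1, "failed to allocate command capsule");

	/* Associate the buffer to be sent along with the command. */
	if (nvmf_capsule_append_data(nc, buf, buflen, true) != 0)
		errx(1, "failed to append data");

	/* 'buf' must remain valid until the capsule is transmitted and freed. */
	if (nvmf_transmit_capsule(nc) != 0)
		errx(1, "failed to transmit capsule");
	nvmf_free_capsule(nc);
}
#endif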

/* Return a string name for a transport type. */
const char *nvmf_transport_type(uint8_t trtype);

/* Validate an NVMe Qualified Name. */
bool	nvmf_nqn_valid(const char *nqn);

/* Controller-specific APIs. */

/*
 * A controller calls this function to check for any
 * transport-specific errors (invalid fields) in a received command
 * capsule.  It returns a generic command status value:
 * NVME_SC_SUCCESS if no error is found.
 */
uint8_t	nvmf_validate_command_capsule(const struct nvmf_capsule *nc);

/*
 * A controller calls this function to query the amount of data
 * associated with a command capsule.
 */
size_t	nvmf_capsule_data_len(const struct nvmf_capsule *cc);

/*
 * A controller calls this function to receive data associated with a
 * command capsule (e.g. the data for a WRITE command).  This can
 * either return in-capsule data or fetch data from the host
 * (e.g. using an R2T PDU over TCP).  The received command capsule
 * should be passed in 'nc'.  The received data is stored in '*buf'.
 */
int	nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len);

/*
 * A controller calls this function to send data in response to a
 * command along with a response capsule.  If the data transfer
 * succeeds, a success response is sent.  If the data transfer fails,
 * an appropriate error status capsule is sent.  Regardless, a
 * response capsule is always sent.
 */
int	nvmf_send_controller_data(const struct nvmf_capsule *nc,
    const void *buf, size_t len);

/*
 * Construct a CQE for a reply to a command capsule in 'nc' with the
 * completion status 'status'.  This is useful when additional CQE
 * info is required beyond the completion status.
 */
void	nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc,
    uint16_t status);

/*
 * Construct and send a response capsule to a command capsule with
 * the supplied CQE.
 */
int	nvmf_send_response(const struct nvmf_capsule *nc, const void *cqe);

/*
 * Wait for a single command capsule and return it in *ncp.  This can
 * fail if an invalid capsule is received or an I/O error occurs.
 */
int	nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp);

/* Send a response capsule from a controller. */
int	nvmf_controller_transmit_response(struct nvmf_capsule *nc);

/* Construct and send an error response capsule. */
int	nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status);

/*
 * Construct and send an error response capsule using a generic status
 * code.
 */
int	nvmf_send_generic_error(const struct nvmf_capsule *nc,
    uint8_t sc_status);

/* Construct and send a simple success response capsule. */
int	nvmf_send_success(const struct nvmf_capsule *nc);
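
/*
 * Example (illustrative sketch): the data phase of a command on the
 * controller side.  The function name, the 'host_is_writing' flag, and
 * the backing buffer are hypothetical; return values of the send
 * helpers are ignored for brevity.  Guarded by "#if 0" so it is never
 * compiled.
 */
#if 0
static void
example_handle_command_data(const struct nvmf_capsule *nc,
    bool host_is_writing, void *buf, size_t buflen)
{
	size_t len;
	uint8_t sc;

	sc = nvmf_validate_command_capsule(nc);
	if (sc != NVME_SC_SUCCESS) {
		nvmf_send_generic_error(nc, sc);
		return;
	}

	len = nvmf_capsule_data_len(nc);
	if (len > buflen)
		len = buflen;
	if (host_is_writing) {
		/* Pull the host's data (in-capsule or e.g. via R2T over TCP). */
		if (nvmf_receive_controller_data(nc, 0, buf, len) != 0) {
			nvmf_send_generic_error(nc, NVME_SC_DATA_TRANSFER_ERROR);
			return;
		}
		nvmf_send_success(nc);
	} else {
		/* Sends the data and a response capsule in one call. */
		nvmf_send_controller_data(nc, buf, len);
	}
}
#endif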

/*
 * Allocate a new queue pair and wait for the CONNECT command capsule.
 * If this fails, a detailed error message can be obtained from
 * nvmf_association_error.  On success, the command capsule is saved
 * in '*ccp' and the connect data is saved in 'data'.  The caller
 * must send an explicit response and free the command capsule.
 */
struct nvmf_qpair *nvmf_accept(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, struct nvmf_capsule **ccp,
    struct nvmf_fabric_connect_data *data);

/*
 * Construct and send a response capsule with the Fabrics CONNECT
 * invalid parameters error status.  If 'data' is true, the offset is
 * relative to the CONNECT data structure; otherwise, the offset is
 * relative to the SQE.
 */
void	nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc,
    bool data, uint16_t offset);

/* Construct and send a response capsule for a successful CONNECT. */
int	nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid);
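
/*
 * Example (illustrative sketch): accept a CONNECT on a freshly
 * accepted TCP socket.  The function name, the expected subsystem NQN
 * check, and the controller ID are hypothetical; <err.h> and
 * <string.h> are assumed.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static struct nvmf_qpair *
example_accept_queue(struct nvmf_association *na, int s,
    const char *my_subnqn, uint16_t cntlid)
{
	struct nvmf_qpair_params qparams;
	struct nvmf_fabric_connect_data data;
	struct nvmf_capsule *cc;
	struct nvmf_qpair *qp;

	memset(&qparams, 0, sizeof(qparams));
	qparams.tcp.fd = s;

	qp = nvmf_accept(na, &qparams, &cc, &data);
	if (qp == NULL)
		errx(1, "accept failed: %s", nvmf_association_error(na));

	if (strcmp((const char *)data.subnqn, my_subnqn) != 0) {
		/* Reject: bad SUBNQN field in the CONNECT data. */
		nvmf_connect_invalid_parameters(cc, true,
		    offsetof(struct nvmf_fabric_connect_data, subnqn));
		nvmf_free_capsule(cc);
		nvmf_free_qpair(qp);
		return (NULL);
	}

	/* Accept the connection and report the assigned controller ID. */
	nvmf_finish_accept(cc, cntlid);
	nvmf_free_capsule(cc);
	return (qp);
}
#endif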

/* Compute the initial state of CAP for a controller. */
uint64_t nvmf_controller_cap(struct nvmf_qpair *qp);

/* Generate a serial number string from a host ID. */
void	nvmf_controller_serial(char *buf, size_t len, u_long hostid);

/*
 * Populate an Identify Controller data structure for a Discovery
 * controller.
 */
void	nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata);

/*
 * Populate an Identify Controller data structure for an I/O
 * controller.
 */
void	nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata);

/*
 * Validate if a new value for CC is legal given the existing values of
 * CAP and CC.
 */
bool	nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc);

/* Return the log page id (LID) of a GET_LOG_PAGE command. */
uint8_t	nvmf_get_log_page_id(const struct nvme_command *cmd);

/* Return the requested data length of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_length(const struct nvme_command *cmd);

/* Return the requested data offset of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_offset(const struct nvme_command *cmd);
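
/*
 * Example (illustrative sketch): decode a GET_LOG_PAGE command with
 * these helpers and answer it from a pre-built log buffer.  The
 * function name, supported log page ID, and buffer are hypothetical;
 * send return values are ignored for brevity.  Guarded by "#if 0" so
 * it is never compiled.
 */
#if 0
static void
example_handle_get_log_page(const struct nvmf_capsule *nc, uint8_t my_lid,
    const void *log_buf, uint64_t log_len)
{
	const struct nvme_command *cmd = nvmf_capsule_sqe(nc);
	uint64_t len, off;

	if (cmd->opc != NVME_OPC_GET_LOG_PAGE ||
	    nvmf_get_log_page_id(cmd) != my_lid) {
		nvmf_send_generic_error(nc, NVME_SC_INVALID_FIELD);
		return;
	}

	len = nvmf_get_log_page_length(cmd);
	off = nvmf_get_log_page_offset(cmd);
	if (off >= log_len || len > log_len - off) {
		nvmf_send_generic_error(nc, NVME_SC_INVALID_FIELD);
		return;
	}
	nvmf_send_controller_data(nc, (const char *)log_buf + off, len);
}
#endif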

/* Prepare to hand off a controller qpair. */
int	nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    struct nvmf_handoff_controller_qpair *h);

/* Host-specific APIs. */

/*
 * Connect to an admin or I/O queue.  If this fails, a detailed error
 * message can be obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato);

/* Return the CNTLID for a queue returned from CONNECT. */
uint16_t nvmf_cntlid(struct nvmf_qpair *qp);
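
/*
 * Example (illustrative sketch): connect the admin queue of a dynamic
 * controller over an already-connected TCP socket.  The function name,
 * queue size, and KATO value are hypothetical; <err.h> and <string.h>
 * are assumed.  nvmf_hostid_from_hostuuid() and
 * nvmf_nqn_from_hostuuid(), declared below, can supply 'hostid' and
 * 'hostnqn'.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static struct nvmf_qpair *
example_connect_admin_queue(struct nvmf_association *na, int s,
    const uint8_t hostid[16], const char *subnqn, const char *hostnqn)
{
	struct nvmf_qpair_params qparams;
	struct nvmf_qpair *admin;

	memset(&qparams, 0, sizeof(qparams));
	qparams.admin = true;
	qparams.tcp.fd = s;

	/* qid 0 is the admin queue; CNTLID 0xffff requests a dynamic controller. */
	admin = nvmf_connect(na, &qparams, 0, 32, hostid, 0xffff, subnqn,
	    hostnqn, 120000 /* KATO in milliseconds */);
	if (admin == NULL)
		errx(1, "CONNECT failed: %s", nvmf_association_error(na));

	/* nvmf_cntlid(admin) now returns the assigned controller ID. */
	return (admin);
}
#endif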

/*
 * Send a command to the controller.  This can fail with EBUSY if the
 * submission queue is full.
 */
int	nvmf_host_transmit_command(struct nvmf_capsule *nc);

/*
 * Wait for a response to a command.  If there are no outstanding
 * commands in the SQ, this fails with EWOULDBLOCK.
 */
int	nvmf_host_receive_response(struct nvmf_qpair *qp,
    struct nvmf_capsule **rcp);

/*
 * Wait for a response to a specific command.  The command must have been
 * successfully sent previously.
 */
int	nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp);
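
/*
 * Example (illustrative sketch): send a Keep Alive command and wait
 * for its completion.  The function name is hypothetical; <err.h>,
 * <string.h>, and <sys/endian.h> are assumed, and the CQE is assumed
 * to still be in wire (little-endian) byte order.  nvmf_keepalive(),
 * declared below, wraps the capsule construction.  Guarded by "#if 0"
 * so it is never compiled.
 */
#if 0
static void
example_keep_alive(struct nvmf_qpair *admin)
{
	struct nvme_command cmd;
	const struct nvme_completion *cqe;
	struct nvmf_capsule *cc, *rc;
	uint16_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = NVME_OPC_KEEP_ALIVE;

	cc = nvmf_allocate_command(admin, &cmd);
	if (cc == NULL)
		errx(1, "failed to allocate command capsule");
	if (nvmf_host_transmit_command(cc) != 0)	/* EBUSY if the SQ is full. */
		errx(1, "failed to transmit command");
	if (nvmf_host_wait_for_response(cc, &rc) != 0)
		errx(1, "failed to receive response");

	cqe = nvmf_capsule_cqe(rc);
	status = le16toh(cqe->status);
	if (NVME_STATUS_GET_SC(status) != NVME_SC_SUCCESS)
		errx(1, "Keep Alive failed with status %#x", status);

	nvmf_free_capsule(rc);
	nvmf_free_capsule(cc);
}
#endif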

/* Build a KeepAlive command. */
struct nvmf_capsule *nvmf_keepalive(struct nvmf_qpair *qp);

/* Read a controller property. */
int	nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value);

/* Write a controller property. */
int	nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset,
    uint8_t size, uint64_t value);
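
/*
 * Example (illustrative sketch): read CAP and set CC.EN using the
 * property offsets and sizes from the NVMe specification (CAP at
 * offset 0x0 is 8 bytes, CC at offset 0x14 is 4 bytes).  The function
 * name is hypothetical, <err.h> is assumed, and the CC value written
 * is deliberately minimal.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static void
example_enable_controller(struct nvmf_qpair *admin)
{
	uint64_t cap, cc;

	/* CAP is typically consulted for timeouts and queue limits. */
	if (nvmf_read_property(admin, 0x0, 8, &cap) != 0)
		errx(1, "failed to read CAP");
	if (nvmf_read_property(admin, 0x14, 4, &cc) != 0)
		errx(1, "failed to read CC");
	cc |= 0x1;			/* CC.EN */
	if (nvmf_write_property(admin, 0x14, 4, cc) != 0)
		errx(1, "failed to write CC");
}
#endif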

/* Construct a 16-byte HostId from kern.hostuuid. */
int	nvmf_hostid_from_hostuuid(uint8_t hostid[16]);

/* Construct an NQN from kern.hostuuid. */
int	nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN]);

/* Fetch controller data via IDENTIFY. */
int	nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *data);

/* Fetch namespace data via IDENTIFY. */
int	nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata);

/*
 * Fetch the discovery log page.  The memory for the log page is
 * allocated by malloc() and returned in *logp.  The caller must free
 * the memory.
 */
int	nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp);
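
/*
 * Example (illustrative sketch): fetch the discovery log page and walk
 * its entries.  The function name is hypothetical; <err.h>, <stdio.h>,
 * <stdlib.h>, and <sys/endian.h> are assumed, and the log page is
 * assumed to still be in wire (little-endian) byte order.  Guarded by
 * "#if 0" so it is never compiled.
 */
#if 0
static void
example_print_discovery_log(struct nvmf_qpair *admin)
{
	struct nvme_discovery_log *log;
	uint64_t i, numrec;

	if (nvmf_host_fetch_discovery_log_page(admin, &log) != 0)
		errx(1, "failed to fetch discovery log page");

	numrec = le64toh(log->numrec);
	for (i = 0; i < numrec; i++)
		printf("subsystem %s at %s\n",
		    (const char *)log->entries[i].subnqn,
		    (const char *)log->entries[i].traddr);

	free(log);	/* The caller owns the returned log page. */
}
#endif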

/*
 * Request a desired number of I/O queues via SET_FEATURES.  The
 * number of actual I/O queues available is returned in *actual on
 * success.
 */
int	nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested,
    u_int *actual);

/*
 * Hand off an active host association to the kernel.  This frees the
 * qpairs (even on error).
 */
int	nvmf_handoff_host(struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata);
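
/*
 * Example (illustrative sketch): request I/O queues, connect them, and
 * hand the association off to the kernel.  The function name, queue
 * count, and the connect_io_queue() helper (a wrapper around
 * nvmf_connect() for qid > 0) are hypothetical; 'cdata' is the
 * controller data previously fetched via IDENTIFY and <err.h> is
 * assumed.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static void
example_handoff(struct nvmf_association *na, struct nvmf_qpair *admin,
    const struct nvme_controller_data *cdata)
{
	struct nvmf_qpair *io[4];
	u_int actual, i;

	if (nvmf_host_request_queues(admin, 4, &actual) != 0)
		errx(1, "failed to request I/O queues");
	if (actual > 4)
		actual = 4;
	for (i = 0; i < actual; i++)
		io[i] = connect_io_queue(na, i + 1);	/* I/O qids start at 1. */

	/* This consumes the queue pairs even on failure. */
	if (nvmf_handoff_host(admin, actual, io, cdata) != 0)
		errx(1, "failed to hand off the association to the kernel");
}
#endif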

/*
 * Disconnect an active host association previously handed off to the
 * kernel.  'host' is either the name of the device (nvmeX) for this
 * association or the remote subsystem NQN.
 */
int	nvmf_disconnect_host(const char *host);

/*
 * Disconnect all active host associations previously handed off to
 * the kernel.
 */
int	nvmf_disconnect_all(void);

/*
 * Fetch reconnect parameters from an existing kernel host to use for
 * establishing a new association.
 */
int	nvmf_reconnect_params(int fd, struct nvmf_reconnect_params *rparams);

/*
 * Hand off an active host association to an existing host in the
 * kernel.  This frees the qpairs (even on error).
 */
int	nvmf_reconnect_host(int fd, struct nvmf_qpair *admin_qp,
    u_int num_queues, struct nvmf_qpair **io_queues,
    const struct nvme_controller_data *cdata);

#endif /* !__LIBNVMF_H__ */