xref: /freebsd/lib/libnvmf/libnvmf.h (revision 5b56413d04e608379c9a306373554a8e4d321bc0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2022-2024 Chelsio Communications, Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  */
7 
8 #ifndef __LIBNVMF_H__
9 #define	__LIBNVMF_H__
10 
11 #include <sys/uio.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <dev/nvme/nvme.h>
15 #include <dev/nvmf/nvmf.h>
16 #include <dev/nvmf/nvmf_proto.h>
17 
/* Opaque handles; definitions are private to the library. */
struct nvmf_capsule;
struct nvmf_association;
struct nvmf_qpair;
21 
/*
 * Parameters shared by all queue-pairs of an association.  Note that
 * this contains the requested values used to initiate transport
 * negotiation.
 */
struct nvmf_association_params {
	bool sq_flow_control;		/* SQ flow control required. */
	bool dynamic_controller_model;	/* Controller only */
	uint16_t max_admin_qsize;	/* Controller only */
	uint32_t max_io_qsize;		/* Controller only, 0 for discovery */
	/* Transport-specific members; select by the trtype passed to
	 * nvmf_allocate_association(). */
	union {
		struct {
			uint8_t pda;	/* Tx-side PDA. */
			/* NOTE(review): presumably these request (not force)
			 * header/data digest use during ICReq/ICResp
			 * negotiation — confirm against the transport code. */
			bool header_digests;
			bool data_digests;
			uint32_t maxr2t;	/* Host only */
			uint32_t maxh2cdata;	/* Controller only */
		} tcp;
	};
};
42 
/* Parameters specific to a single queue pair of an association. */
struct nvmf_qpair_params {
	bool admin;			/* Host only */
	/* Transport-specific members for the association's transport type. */
	union {
		struct {
			/* NOTE(review): presumably an already-connected TCP
			 * socket owned by the caller — confirm ownership and
			 * close semantics against the transport code. */
			int fd;
		} tcp;
	};
};
52 
/* Transport-independent APIs. */

/*
 * A host should allocate a new association for each association with
 * a controller.  After the admin queue has been allocated and the
 * controller's data has been fetched, it should be passed to
 * nvmf_update_association to update internal transport-specific
 * parameters before allocating I/O queues.
 *
 * A controller uses a single association to manage all incoming
 * queues since it is not known until after parsing the CONNECT
 * command which transport queues are admin vs I/O and which
 * controller they are created against.
 */
struct nvmf_association *nvmf_allocate_association(enum nvmf_trtype trtype,
    bool controller, const struct nvmf_association_params *params);
/* NB: "assocation" (sic) matches the symbol name exported by the library;
 * renaming it here would break linkage with existing consumers. */
void	nvmf_update_assocation(struct nvmf_association *na,
    const struct nvme_controller_data *cdata);
void	nvmf_free_association(struct nvmf_association *na);

/* The most recent association-wide error message. */
const char *nvmf_association_error(const struct nvmf_association *na);

/*
 * A queue pair represents either an Admin or I/O
 * submission/completion queue pair.
 *
 * Each open qpair holds a reference on its association.  Once queue
 * pairs are allocated, callers can safely free the association to
 * ease bookkeeping.
 *
 * If nvmf_allocate_qpair fails, a detailed error message can be obtained
 * from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params);
void	nvmf_free_qpair(struct nvmf_qpair *qp);

/*
 * Capsules are either commands (host -> controller) or responses
 * (controller -> host).  A single data buffer segment may be
 * associated with a command capsule.  Transmitted data is not copied
 * by this API but instead must be preserved until the capsule is
 * transmitted and freed.
 */
/* Allocate a command capsule whose SQE is copied from 'sqe'. */
struct nvmf_capsule *nvmf_allocate_command(struct nvmf_qpair *qp,
    const void *sqe);
/* Allocate a response capsule whose CQE is copied from 'cqe'. */
struct nvmf_capsule *nvmf_allocate_response(struct nvmf_qpair *qp,
    const void *cqe);
void	nvmf_free_capsule(struct nvmf_capsule *nc);
/* Attach a data buffer to a command capsule; 'send' selects the
 * transfer direction (true for host -> controller data). */
int	nvmf_capsule_append_data(struct nvmf_capsule *nc,
    void *buf, size_t len, bool send);
int	nvmf_transmit_capsule(struct nvmf_capsule *nc);
int	nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp);
/* Accessors for the SQE/CQE stored in a capsule. */
const void *nvmf_capsule_sqe(const struct nvmf_capsule *nc);
const void *nvmf_capsule_cqe(const struct nvmf_capsule *nc);

/* Return a string name for a transport type. */
const char *nvmf_transport_type(uint8_t trtype);

/* Validate a NVMe Qualified Name. */
bool	nvmf_nqn_valid(const char *nqn);
115 
/* Controller-specific APIs. */

/*
 * A controller calls this function to check for any
 * transport-specific errors (invalid fields) in a received command
 * capsule.  The callback returns a generic command status value:
 * NVME_SC_SUCCESS if no error is found.
 */
uint8_t	nvmf_validate_command_capsule(const struct nvmf_capsule *nc);

/*
 * A controller calls this function to query the amount of data
 * associated with a command capsule.
 */
size_t	nvmf_capsule_data_len(const struct nvmf_capsule *cc);

/*
 * A controller calls this function to receive data associated with a
 * command capsule (e.g. the data for a WRITE command).  This can
 * either return in-capsule data or fetch data from the host
 * (e.g. using a R2T PDU over TCP).  The received command capsule
 * should be passed in 'nc'.  The received data is stored in '*buf'.
 */
int	nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len);

/*
 * A controller calls this function to send data in response to a
 * command along with a response capsule.  If the data transfer
 * succeeds, a success response is sent.  If the data transfer fails,
 * an appropriate error status capsule is sent.  Regardless, a
 * response capsule is always sent.
 */
int	nvmf_send_controller_data(const struct nvmf_capsule *nc,
    const void *buf, size_t len);

/*
 * Construct a CQE for a reply to a command capsule in 'nc' with the
 * completion status 'status'.  This is useful when additional CQE
 * info is required beyond the completion status.
 */
void	nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc,
    uint16_t status);

/*
 * Construct and send a response capsule to a command capsule with
 * the supplied CQE.
 */
int	nvmf_send_response(const struct nvmf_capsule *nc, const void *cqe);

/*
 * Wait for a single command capsule and return it in *ncp.  This can
 * fail if an invalid capsule is received or an I/O error occurs.
 */
int	nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp);

/* Send a response capsule from a controller. */
int	nvmf_controller_transmit_response(struct nvmf_capsule *nc);

/* Construct and send an error response capsule. */
int	nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status);

/*
 * Construct and send an error response capsule using a generic status
 * code.
 */
int	nvmf_send_generic_error(const struct nvmf_capsule *nc,
    uint8_t sc_status);

/* Construct and send a simple success response capsule. */
int	nvmf_send_success(const struct nvmf_capsule *nc);

/*
 * Allocate a new queue pair and wait for the CONNECT command capsule.
 * If this fails, a detailed error message can be obtained from
 * nvmf_association_error.  On success, the command capsule is saved
 * in '*ccp' and the connect data is saved in 'data'.  The caller
 * must send an explicit response and free the command capsule.
 */
struct nvmf_qpair *nvmf_accept(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, struct nvmf_capsule **ccp,
    struct nvmf_fabric_connect_data *data);

/*
 * Construct and send a response capsule with the Fabrics CONNECT
 * invalid parameters error status.  If data is true the offset is
 * relative to the CONNECT data structure, otherwise the offset is
 * relative to the SQE.
 */
void	nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc,
    bool data, uint16_t offset);

/* Construct and send a response capsule for a successful CONNECT. */
int	nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid);

/* Compute the initial state of CAP for a controller. */
uint64_t nvmf_controller_cap(struct nvmf_qpair *qp);

/* Generate a serial number string from a host ID. */
void	nvmf_controller_serial(char *buf, size_t len, u_long hostid);

/*
 * Populate an Identify Controller data structure for a Discovery
 * controller.
 */
void	nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata);

/*
 * Populate an Identify Controller data structure for an I/O
 * controller.
 */
void	nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata);

/*
 * Validate if a new value for CC is legal given the existing values of
 * CAP and CC.
 */
bool	nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc);

/* Return the log page id (LID) of a GET_LOG_PAGE command. */
uint8_t	nvmf_get_log_page_id(const struct nvme_command *cmd);

/* Return the requested data length of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_length(const struct nvme_command *cmd);

/* Return the requested data offset of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_offset(const struct nvme_command *cmd);

/* Prepare to handoff a controller qpair. */
int	nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    struct nvmf_handoff_controller_qpair *h);
253 
/* Host-specific APIs. */

/*
 * Connect to an admin or I/O queue.  If this fails, a detailed error
 * message can be obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato);

/* Return the CNTLID for a queue returned from CONNECT. */
uint16_t nvmf_cntlid(struct nvmf_qpair *qp);

/*
 * Send a command to the controller.  This can fail with EBUSY if the
 * submission queue is full.
 */
int	nvmf_host_transmit_command(struct nvmf_capsule *nc);

/*
 * Wait for a response to a command.  If there are no outstanding
 * commands in the SQ, fails with EWOULDBLOCK.
 */
int	nvmf_host_receive_response(struct nvmf_qpair *qp,
    struct nvmf_capsule **rcp);

/*
 * Wait for a response to a specific command.  The command must have been
 * successfully sent previously.
 */
int	nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp);

/* Build a KeepAlive command. */
struct nvmf_capsule *nvmf_keepalive(struct nvmf_qpair *qp);

/* Read a controller property. */
int	nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value);

/* Write a controller property. */
int	nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset,
    uint8_t size, uint64_t value);

/* Construct a 16-byte HostId from kern.hostuuid. */
int	nvmf_hostid_from_hostuuid(uint8_t hostid[16]);

/* Construct a NQN from kern.hostuuid. */
int	nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN]);

/* Fetch controller data via IDENTIFY. */
int	nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *data);

/* Fetch namespace data via IDENTIFY. */
int	nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata);

/*
 * Fetch discovery log page.  The memory for the log page is allocated
 * by malloc() and returned in *logp.  The caller must free the
 * memory.
 */
int	nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp);

/*
 * Request a desired number of I/O queues via SET_FEATURES.  The
 * number of actual I/O queues available is returned in *actual on
 * success.
 */
int	nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested,
    u_int *actual);

/*
 * Handoff active host association to the kernel.  This frees the
 * qpairs (even on error).
 */
int	nvmf_handoff_host(struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata);

/*
 * Disconnect an active host association previously handed off to the
 * kernel.  'host' is either the name of the device (nvmeX) for this
 * association or the remote subsystem NQN.
 */
int	nvmf_disconnect_host(const char *host);

/*
 * Disconnect all active host associations previously handed off to
 * the kernel.
 */
int	nvmf_disconnect_all(void);

/*
 * Fetch reconnect parameters from an existing kernel host to use for
 * establishing a new association.
 */
int	nvmf_reconnect_params(int fd, struct nvmf_reconnect_params *rparams);

/*
 * Handoff active host association to an existing host in the kernel.
 * This frees the qpairs (even on error).
 */
int	nvmf_reconnect_host(int fd, struct nvmf_qpair *admin_qp,
    u_int num_queues, struct nvmf_qpair **io_queues,
    const struct nvme_controller_data *cdata);
362 
363 #endif /* !__LIBNVMF_H__ */
364