/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __LIBNVMF_H__
#define __LIBNVMF_H__

#include <sys/_nv.h>
#include <sys/uio.h>
#include <stdbool.h>
#include <stddef.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_proto.h>

struct nvmf_capsule;
struct nvmf_association;
struct nvmf_qpair;

/*
 * Parameters shared by all queue-pairs of an association.  Note that
 * this contains the requested values used to initiate transport
 * negotiation.
 */
struct nvmf_association_params {
        bool sq_flow_control;           /* SQ flow control required. */
        bool dynamic_controller_model;  /* Controller only */
        uint16_t max_admin_qsize;       /* Controller only */
        uint32_t max_io_qsize;          /* Controller only, 0 for discovery */
        union {
                struct {
                        uint8_t pda;    /* Tx-side PDA. */
                        bool header_digests;
                        bool data_digests;
                        uint32_t maxr2t;        /* Host only */
                        uint32_t maxh2cdata;    /* Controller only */
                } tcp;
        };
};

/* Parameters specific to a single queue pair of an association. */
struct nvmf_qpair_params {
        bool admin;                     /* Host only */
        union {
                struct {
                        int fd;
                } tcp;
        };
};

__BEGIN_DECLS

/* Transport-independent APIs. */

/*
 * A host should allocate a new association for each association with
 * a controller.  After the admin queue has been allocated and the
 * controller's data has been fetched, it should be passed to
 * nvmf_update_association to update internal transport-specific
 * parameters before allocating I/O queues.
 *
 * A controller uses a single association to manage all incoming
 * queues since it is not known until after parsing the CONNECT
 * command which transport queues are admin vs I/O and which
 * controller they are created against.
 */
struct nvmf_association *nvmf_allocate_association(enum nvmf_trtype trtype,
    bool controller, const struct nvmf_association_params *params);
void nvmf_update_assocation(struct nvmf_association *na,
    const struct nvme_controller_data *cdata);
void nvmf_free_association(struct nvmf_association *na);

/* The most recent association-wide error message. */
const char *nvmf_association_error(const struct nvmf_association *na);

/*
 * A queue pair represents either an Admin or I/O
 * submission/completion queue pair.
 *
 * Each open qpair holds a reference on its association.  Once queue
 * pairs are allocated, callers can safely free the association to
 * ease bookkeeping.
 *
 * If nvmf_allocate_qpair fails, a detailed error message can be
 * obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params);
void nvmf_free_qpair(struct nvmf_qpair *qp);
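
/*
 * Example (illustrative sketch, not part of the API and not compiled):
 * one plausible way for a host to fill in TCP association parameters
 * and allocate an association.  The parameter values are placeholders,
 * not recommendations, and NVMF_TRTYPE_TCP is assumed to come from
 * <dev/nvmf/nvmf_proto.h>.
 */
#if 0
static struct nvmf_association *
example_host_association(void)
{
        struct nvmf_association_params aparams = {
                .sq_flow_control = false,
                /* max_admin_qsize/max_io_qsize are controller-only. */
                .tcp = {
                        .pda = 0,
                        .header_digests = true,
                        .data_digests = false,
                        .maxr2t = 1,    /* Host only */
                },
        };

        /* 'false' selects the host role for this association. */
        return (nvmf_allocate_association(NVMF_TRTYPE_TCP, false, &aparams));
}
#endif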

/*
 * Capsules are either commands (host -> controller) or responses
 * (controller -> host).  A single data buffer segment may be
 * associated with a command capsule.  Transmitted data is not copied
 * by this API but instead must be preserved until the capsule is
 * transmitted and freed.
 */
struct nvmf_capsule *nvmf_allocate_command(struct nvmf_qpair *qp,
    const void *sqe);
struct nvmf_capsule *nvmf_allocate_response(struct nvmf_qpair *qp,
    const void *cqe);
void nvmf_free_capsule(struct nvmf_capsule *nc);
int nvmf_capsule_append_data(struct nvmf_capsule *nc,
    void *buf, size_t len, bool send);
int nvmf_transmit_capsule(struct nvmf_capsule *nc);
int nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp);
const void *nvmf_capsule_sqe(const struct nvmf_capsule *nc);
const void *nvmf_capsule_cqe(const struct nvmf_capsule *nc);

/* Return a string name for a transport type. */
const char *nvmf_transport_type(uint8_t trtype);

/*
 * Validate an NVMe Qualified Name.  The strict version enforces
 * checks in line with the specification, while the first version
 * performs only minimal checks.
 */
bool nvmf_nqn_valid(const char *nqn);
bool nvmf_nqn_valid_strict(const char *nqn);

/* Controller-specific APIs. */

/*
 * A controller calls this function to check for any
 * transport-specific errors (invalid fields) in a received command
 * capsule.  It returns a generic command status value:
 * NVME_SC_SUCCESS if no error is found.
 */
uint8_t nvmf_validate_command_capsule(const struct nvmf_capsule *nc);

/*
 * A controller calls this function to query the amount of data
 * associated with a command capsule.
 */
size_t nvmf_capsule_data_len(const struct nvmf_capsule *cc);

/*
 * A controller calls this function to receive data associated with a
 * command capsule (e.g. the data for a WRITE command).  This can
 * either return in-capsule data or fetch data from the host
 * (e.g. using an R2T PDU over TCP).  The received command capsule
 * should be passed in 'nc'.  The received data is stored in '*buf'.
 */
int nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len);

/*
 * A controller calls this function to send data in response to a
 * command along with a response capsule.  If the data transfer
 * succeeds, a success response is sent.  If the data transfer fails,
 * an appropriate error status capsule is sent.  Regardless, a
 * response capsule is always sent.
 */
int nvmf_send_controller_data(const struct nvmf_capsule *nc,
    const void *buf, size_t len);

/*
 * Construct a CQE for a reply to a command capsule in 'nc' with the
 * completion status 'status'.  This is useful when additional CQE
 * info is required beyond the completion status.
 */
void nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc,
    uint16_t status);

/*
 * Construct and send a response capsule to a command capsule with
 * the supplied CQE.
 */
int nvmf_send_response(const struct nvmf_capsule *nc, const void *cqe);

/*
 * Wait for a single command capsule and return it in *ncp.  This can
 * fail if an invalid capsule is received or an I/O error occurs.
 */
int nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp);

/* Send a response capsule from a controller. */
int nvmf_controller_transmit_response(struct nvmf_capsule *nc);
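
/*
 * Example (illustrative sketch, not compiled): a host issuing a command
 * capsule that carries a data buffer and waiting for the next response
 * on the same queue pair.  The interpretation of the 'send' flag (true
 * meaning the buffer is transmitted to the controller) is an assumption
 * of this sketch; hosts would more typically use the
 * nvmf_host_transmit_command()/nvmf_host_wait_for_response() helpers
 * declared later in this header.
 */
#if 0
static int
example_issue_command(struct nvmf_qpair *qp, const struct nvme_command *sqe,
    void *buf, size_t len)
{
        struct nvmf_capsule *nc, *rc;
        const struct nvme_completion *cqe;
        int error;

        nc = nvmf_allocate_command(qp, sqe);
        if (nc == NULL)
                return (ENOMEM);        /* <errno.h> */

        /* 'buf' must remain valid until 'nc' is transmitted and freed. */
        error = nvmf_capsule_append_data(nc, buf, len, true);
        if (error == 0)
                error = nvmf_transmit_capsule(nc);
        if (error == 0)
                error = nvmf_receive_capsule(qp, &rc);
        if (error != 0) {
                nvmf_free_capsule(nc);
                return (error);
        }

        cqe = nvmf_capsule_cqe(rc);
        /* ... inspect cqe->status (struct nvme_completion) here ... */

        nvmf_free_capsule(rc);
        nvmf_free_capsule(nc);
        return (0);
}
#endif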

/* Construct and send an error response capsule. */
int nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status);

/*
 * Construct and send an error response capsule using a generic status
 * code.
 */
int nvmf_send_generic_error(const struct nvmf_capsule *nc,
    uint8_t sc_status);

/* Construct and send a simple success response capsule. */
int nvmf_send_success(const struct nvmf_capsule *nc);

/*
 * Allocate a new queue pair and wait for the CONNECT command capsule.
 * If this fails, a detailed error message can be obtained from
 * nvmf_association_error.  On success, the command capsule is saved
 * in '*ccp' and the connect data is saved in 'data'.  The caller
 * must send an explicit response and free the command capsule.
 */
struct nvmf_qpair *nvmf_accept(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, struct nvmf_capsule **ccp,
    struct nvmf_fabric_connect_data *data);

/*
 * Construct and send a response capsule with the Fabrics CONNECT
 * invalid parameters error status.  If 'data' is true the offset is
 * relative to the CONNECT data structure, otherwise the offset is
 * relative to the SQE.
 */
void nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc,
    bool data, uint16_t offset);

/* Construct and send a response capsule for a successful CONNECT. */
int nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid);

/* Compute the initial state of CAP for a controller. */
uint64_t nvmf_controller_cap(struct nvmf_qpair *qp);

/* Generate a serial number string from a host ID. */
void nvmf_controller_serial(char *buf, size_t len, u_long hostid);

/*
 * Populate an Identify Controller data structure for a Discovery
 * controller.
 */
void nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata);

/*
 * Populate an Identify Controller data structure for an I/O
 * controller.
 */
void nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata);

/*
 * Validate whether a new value for CC is legal given the existing
 * values of CAP and CC.
 */
bool nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc);

/* Return the log page id (LID) of a GET_LOG_PAGE command. */
uint8_t nvmf_get_log_page_id(const struct nvme_command *cmd);

/* Return the requested data length of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_length(const struct nvme_command *cmd);

/* Return the requested data offset of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_offset(const struct nvme_command *cmd);

/* Prepare to hand off a controller qpair. */
int nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    const struct nvmf_fabric_connect_cmd *cmd,
    const struct nvmf_fabric_connect_data *data, struct nvmf_ioc_nv *nv);
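
/*
 * Example (illustrative sketch, not compiled): a controller accepting a
 * queue pair and completing the CONNECT exchange.  The host NQN check
 * and the controller ID of 1 are placeholders chosen for this sketch;
 * connect-data field names follow <dev/nvmf/nvmf_proto.h>.
 */
#if 0
static struct nvmf_qpair *
example_accept_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *qparams)
{
        struct nvmf_fabric_connect_data data;
        struct nvmf_capsule *cc;
        struct nvmf_qpair *qp;

        qp = nvmf_accept(na, qparams, &cc, &data);
        if (qp == NULL)
                return (NULL);  /* See nvmf_association_error(na). */

        /* Reject a malformed host NQN, pointing at its offset in the data. */
        if (!nvmf_nqn_valid((const char *)data.hostnqn)) {
                nvmf_connect_invalid_parameters(cc, true,
                    offsetof(struct nvmf_fabric_connect_data, hostnqn));
                nvmf_free_capsule(cc);
                nvmf_free_qpair(qp);
                return (NULL);
        }

        /* Report a (placeholder) controller ID of 1 to the host. */
        if (nvmf_finish_accept(cc, 1) != 0) {
                nvmf_free_capsule(cc);
                nvmf_free_qpair(qp);
                return (NULL);
        }

        nvmf_free_capsule(cc);
        return (qp);
}
#endif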

/* Host-specific APIs. */

/*
 * Connect to an admin or I/O queue.  If this fails, a detailed error
 * message can be obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato);

/* Return the CNTLID for a queue returned from CONNECT. */
uint16_t nvmf_cntlid(struct nvmf_qpair *qp);

/*
 * Send a command to the controller.  This can fail with EBUSY if the
 * submission queue is full.
 */
int nvmf_host_transmit_command(struct nvmf_capsule *nc);

/*
 * Wait for a response to a command.  If there are no outstanding
 * commands in the SQ, this fails with EWOULDBLOCK.
 */
int nvmf_host_receive_response(struct nvmf_qpair *qp,
    struct nvmf_capsule **rcp);

/*
 * Wait for a response to a specific command.  The command must have
 * been successfully sent previously.
 */
int nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp);

/* Build a KeepAlive command. */
struct nvmf_capsule *nvmf_keepalive(struct nvmf_qpair *qp);

/* Read a controller property. */
int nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value);

/* Write a controller property. */
int nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset,
    uint8_t size, uint64_t value);

/* Construct a 16-byte HostId from kern.hostuuid. */
int nvmf_hostid_from_hostuuid(uint8_t hostid[16]);

/* Construct an NQN from kern.hostuuid. */
int nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN]);

/* Fetch controller data via IDENTIFY. */
int nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *data);

/* Fetch namespace data via IDENTIFY. */
int nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata);

/*
 * Fetch discovery log page.  The memory for the log page is allocated
 * by malloc() and returned in *logp.  The caller must free the
 * memory.
 */
int nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp);

/*
 * Construct a discovery log page entry that describes the connection
 * used by a host association's admin queue pair.
 */
int nvmf_init_dle_from_admin_qp(struct nvmf_qpair *qp,
    const struct nvme_controller_data *cdata,
    struct nvme_discovery_log_entry *dle);

/*
 * Request a desired number of I/O queues via SET_FEATURES.  The
 * number of I/O queues actually available is returned in *actual on
 * success.
 */
int nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested,
    u_int *actual);

/*
 * Hand off an active host association to the kernel.  This frees the
 * qpairs (even on error).
 */
int nvmf_handoff_host(const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata,
    uint32_t reconnect_delay, uint32_t controller_loss_timeout);

/*
 * Disconnect an active host association previously handed off to the
 * kernel.  'host' is either the name of the device (nvmeX) for this
 * association or the remote subsystem NQN.
 */
int nvmf_disconnect_host(const char *host);
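
/*
 * Example (illustrative sketch, not compiled): a host connecting an
 * admin queue and reading the CAP property.  The queue size of 32, the
 * CNTLID of 0xffff (requesting a dynamic controller), the KATO of 0,
 * and the property offset/size are placeholders based on the NVMe over
 * Fabrics specification, not values mandated by this library.
 */
#if 0
static struct nvmf_qpair *
example_connect_admin(struct nvmf_association *na,
    const struct nvmf_qpair_params *qparams, const char *subnqn)
{
        uint8_t hostid[16];
        char hostnqn[NVMF_NQN_MAX_LEN];
        struct nvmf_qpair *qp;
        uint64_t cap;

        if (nvmf_hostid_from_hostuuid(hostid) != 0 ||
            nvmf_nqn_from_hostuuid(hostnqn) != 0)
                return (NULL);

        /* Queue ID 0 selects the Admin queue. */
        qp = nvmf_connect(na, qparams, 0, 32, hostid, 0xffff, subnqn,
            hostnqn, 0);
        if (qp == NULL)
                return (NULL);  /* See nvmf_association_error(na). */

        /* CAP is the 8-byte property at offset 0. */
        if (nvmf_read_property(qp, 0, 8, &cap) != 0) {
                nvmf_free_qpair(qp);
                return (NULL);
        }

        return (qp);
}
#endif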

/*
 * Disconnect all active host associations previously handed off to
 * the kernel.
 */
int nvmf_disconnect_all(void);

/*
 * Fetch reconnect parameters from an existing kernel host to use for
 * establishing a new association.  The caller must destroy the
 * returned nvlist.
 */
int nvmf_reconnect_params(int fd, nvlist_t **nvlp);

/*
 * Hand off an active host association to an existing host in the
 * kernel.  This frees the qpairs (even on error).
 */
int nvmf_reconnect_host(int fd, const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata,
    uint32_t reconnect_delay, uint32_t controller_loss_timeout);

/*
 * Fetch connection status from an existing kernel host.
 */
int nvmf_connection_status(int fd, nvlist_t **nvlp);

__END_DECLS

#endif /* !__LIBNVMF_H__ */