/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __LIBNVMF_H__
#define __LIBNVMF_H__

#include <sys/_nv.h>
#include <sys/uio.h>
#include <stdbool.h>
#include <stddef.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_proto.h>

struct nvmf_capsule;
struct nvmf_association;
struct nvmf_qpair;

/*
 * Parameters shared by all queue-pairs of an association.  Note that
 * this contains the requested values used to initiate transport
 * negotiation.
 */
struct nvmf_association_params {
	bool sq_flow_control;		/* SQ flow control required. */
	bool dynamic_controller_model;	/* Controller only */
	uint16_t max_admin_qsize;	/* Controller only */
	uint32_t max_io_qsize;		/* Controller only, 0 for discovery */
	union {
		struct {
			uint8_t pda;	/* Tx-side PDA. */
			bool header_digests;
			bool data_digests;
			uint32_t maxr2t;	/* Host only */
			uint32_t maxh2cdata;	/* Controller only */
		} tcp;
	};
};

/* Parameters specific to a single queue pair of an association. */
struct nvmf_qpair_params {
	bool admin;			/* Host only */
	union {
		struct {
			int fd;
		} tcp;
	};
};

/* Transport-independent APIs. */

/*
 * A host should allocate a new association for each association with
 * a controller.  After the admin queue has been allocated and the
 * controller's data has been fetched, it should be passed to
 * nvmf_update_association to update internal transport-specific
 * parameters before allocating I/O queues.
 *
 * A controller uses a single association to manage all incoming
 * queues since it is not known until after parsing the CONNECT
 * command which transport queues are admin vs I/O and which
 * controller they are created against.
 */
struct nvmf_association *nvmf_allocate_association(enum nvmf_trtype trtype,
    bool controller, const struct nvmf_association_params *params);
void nvmf_update_assocation(struct nvmf_association *na,
    const struct nvme_controller_data *cdata);
void nvmf_free_association(struct nvmf_association *na);

/* The most recent association-wide error message. */
const char *nvmf_association_error(const struct nvmf_association *na);

/*
 * A queue pair represents either an Admin or I/O
 * submission/completion queue pair.
 *
 * Each open qpair holds a reference on its association.  Once queue
 * pairs are allocated, callers can safely free the association to
 * ease bookkeeping.
 *
 * If nvmf_allocate_qpair fails, a detailed error message can be obtained
 * from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params);
void nvmf_free_qpair(struct nvmf_qpair *qp);
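
/*
 * Example (illustrative sketch): a host allocating a TCP association
 * before connecting any queue pairs.  The parameter values below are
 * placeholders rather than recommendations, and error handling is
 * abbreviated.
 *
 *	struct nvmf_association_params aparams;
 *	struct nvmf_association *na;
 *
 *	memset(&aparams, 0, sizeof(aparams));
 *	aparams.sq_flow_control = false;
 *	aparams.tcp.pda = 0;
 *	aparams.tcp.header_digests = false;
 *	aparams.tcp.data_digests = false;
 *	aparams.tcp.maxr2t = 1;
 *	na = nvmf_allocate_association(NVMF_TRTYPE_TCP, false, &aparams);
 *	if (na == NULL)
 *		err(1, "failed to allocate association");
 */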

/*
 * Capsules are either commands (host -> controller) or responses
 * (controller -> host).  A single data buffer segment may be
 * associated with a command capsule.  Transmitted data is not copied
 * by this API but instead must be preserved until the capsule is
 * transmitted and freed.
 */
struct nvmf_capsule *nvmf_allocate_command(struct nvmf_qpair *qp,
    const void *sqe);
struct nvmf_capsule *nvmf_allocate_response(struct nvmf_qpair *qp,
    const void *cqe);
void nvmf_free_capsule(struct nvmf_capsule *nc);
int nvmf_capsule_append_data(struct nvmf_capsule *nc,
    void *buf, size_t len, bool send);
int nvmf_transmit_capsule(struct nvmf_capsule *nc);
int nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp);
const void *nvmf_capsule_sqe(const struct nvmf_capsule *nc);
const void *nvmf_capsule_cqe(const struct nvmf_capsule *nc);

/* Return a string name for a transport type. */
const char *nvmf_transport_type(uint8_t trtype);

/*
 * Validate an NVMe Qualified Name.  The second version enforces
 * stricter checks in line with the specification; the first version
 * performs only minimal checks.
 */
bool nvmf_nqn_valid(const char *nqn);
bool nvmf_nqn_valid_strict(const char *nqn);

/* Controller-specific APIs. */

/*
 * A controller calls this function to check for any
 * transport-specific errors (invalid fields) in a received command
 * capsule.  It returns a generic command status value:
 * NVME_SC_SUCCESS if no error is found.
 */
uint8_t nvmf_validate_command_capsule(const struct nvmf_capsule *nc);

/*
 * A controller calls this function to query the amount of data
 * associated with a command capsule.
 */
size_t nvmf_capsule_data_len(const struct nvmf_capsule *cc);

/*
 * A controller calls this function to receive data associated with a
 * command capsule (e.g. the data for a WRITE command).  This can
 * either return in-capsule data or fetch data from the host
 * (e.g. using an R2T PDU over TCP).  The received command capsule
 * should be passed in 'nc'.  The received data is stored in '*buf'.
 */
int nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len);

/*
 * A controller calls this function to send data in response to a
 * command along with a response capsule.  If the data transfer
 * succeeds, a success response is sent.  If the data transfer fails,
 * an appropriate error status capsule is sent.  Regardless, a
 * response capsule is always sent.
 */
int nvmf_send_controller_data(const struct nvmf_capsule *nc,
    const void *buf, size_t len);

/*
 * Construct a CQE for a reply to a command capsule in 'nc' with the
 * completion status 'status'.  This is useful when additional CQE
 * info is required beyond the completion status.
 */
void nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc,
    uint16_t status);

/*
 * Construct and send a response capsule to a command capsule with
 * the supplied CQE.
 */
int nvmf_send_response(const struct nvmf_capsule *nc, const void *cqe);

/*
 * Wait for a single command capsule and return it in *ncp.  This can
 * fail if an invalid capsule is received or an I/O error occurs.
 */
int nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp);

/* Send a response capsule from a controller. */
int nvmf_controller_transmit_response(struct nvmf_capsule *nc);
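
/*
 * Example (illustrative sketch): controller-side handling of a valid
 * command capsule 'nc' that carries host-to-controller data (e.g. a
 * WRITE).  'buf' is assumed to be a suitably sized caller-provided
 * buffer; buffer sizing, error handling, and command dispatch are
 * abbreviated.
 *
 *	struct nvme_completion cqe;
 *	size_t len;
 *
 *	len = nvmf_capsule_data_len(nc);
 *	if (nvmf_receive_controller_data(nc, 0, buf, len) == 0) {
 *		... consume 'buf' ...
 *		nvmf_init_cqe(&cqe, nc, NVME_SC_SUCCESS);
 *		nvmf_send_response(nc, &cqe);
 *	}
 *	nvmf_free_capsule(nc);
 *
 * For a controller-to-host transfer (e.g. a READ),
 * nvmf_send_controller_data() transmits both the data and the
 * matching response capsule in a single call.
 */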

/* Construct and send an error response capsule. */
int nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status);

/*
 * Construct and send an error response capsule using a generic status
 * code.
 */
int nvmf_send_generic_error(const struct nvmf_capsule *nc,
    uint8_t sc_status);

/* Construct and send a simple success response capsule. */
int nvmf_send_success(const struct nvmf_capsule *nc);

/*
 * Allocate a new queue pair and wait for the CONNECT command capsule.
 * If this fails, a detailed error message can be obtained from
 * nvmf_association_error.  On success, the command capsule is saved
 * in '*ccp' and the connect data is saved in 'data'.  The caller
 * must send an explicit response and free the command capsule.
 */
struct nvmf_qpair *nvmf_accept(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, struct nvmf_capsule **ccp,
    struct nvmf_fabric_connect_data *data);

/*
 * Construct and send a response capsule with the Fabrics CONNECT
 * invalid parameters error status.  If data is true the offset is
 * relative to the CONNECT data structure, otherwise the offset is
 * relative to the SQE.
 */
void nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc,
    bool data, uint16_t offset);

/* Construct and send a response capsule for a successful CONNECT. */
int nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid);

/* Compute the initial state of CAP for a controller. */
uint64_t nvmf_controller_cap(struct nvmf_qpair *qp);

/* Generate a serial number string from a host ID. */
void nvmf_controller_serial(char *buf, size_t len, u_long hostid);

/*
 * Populate an Identify Controller data structure for a Discovery
 * controller.
 */
void nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata);

/*
 * Populate an Identify Controller data structure for an I/O
 * controller.
 */
void nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata);

/*
 * Validate whether a new value for CC is legal given the existing
 * values of CAP and CC.
 */
bool nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc);

/* Return the log page id (LID) of a GET_LOG_PAGE command. */
uint8_t nvmf_get_log_page_id(const struct nvme_command *cmd);

/* Return the requested data length of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_length(const struct nvme_command *cmd);

/* Return the requested data offset of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_offset(const struct nvme_command *cmd);

/* Prepare to hand off a controller qpair. */
int nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    const struct nvmf_fabric_connect_cmd *cmd,
    const struct nvmf_fabric_connect_data *data, struct nvmf_ioc_nv *nv);
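
/*
 * Example (illustrative sketch): a controller accepting a new queue
 * pair.  'na' is a controller association and 's' is a socket
 * returned by accept(2); the controller ID (1) is a placeholder, and
 * validation of the received CONNECT data (after which
 * nvmf_connect_invalid_parameters() would be used to reject a bad
 * request) is omitted.
 *
 *	struct nvmf_qpair_params qparams;
 *	struct nvmf_fabric_connect_data data;
 *	struct nvmf_capsule *nc;
 *	struct nvmf_qpair *qp;
 *
 *	memset(&qparams, 0, sizeof(qparams));
 *	qparams.tcp.fd = s;
 *	qp = nvmf_accept(na, &qparams, &nc, &data);
 *	if (qp == NULL)
 *		errx(1, "%s", nvmf_association_error(na));
 *
 *	nvmf_finish_accept(nc, 1);
 *	nvmf_free_capsule(nc);
 */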

/* Host-specific APIs. */

/*
 * Connect to an admin or I/O queue.  If this fails, a detailed error
 * message can be obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato);

/* Return the CNTLID for a queue returned from CONNECT. */
uint16_t nvmf_cntlid(struct nvmf_qpair *qp);

/*
 * Send a command to the controller.  This can fail with EBUSY if the
 * submission queue is full.
 */
int nvmf_host_transmit_command(struct nvmf_capsule *nc);

/*
 * Wait for a response to a command.  If there are no outstanding
 * commands in the SQ, fails with EWOULDBLOCK.
 */
int nvmf_host_receive_response(struct nvmf_qpair *qp,
    struct nvmf_capsule **rcp);

/*
 * Wait for a response to a specific command.  The command must have
 * been successfully sent previously.
 */
int nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp);

/* Build a KeepAlive command. */
struct nvmf_capsule *nvmf_keepalive(struct nvmf_qpair *qp);

/* Read a controller property. */
int nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value);

/* Write a controller property. */
int nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset,
    uint8_t size, uint64_t value);

/* Construct a 16-byte HostId from kern.hostuuid. */
int nvmf_hostid_from_hostuuid(uint8_t hostid[16]);

/* Construct an NQN from kern.hostuuid. */
int nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN]);

/* Fetch controller data via IDENTIFY. */
int nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *data);

/* Fetch namespace data via IDENTIFY. */
int nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata);

/*
 * Fetch discovery log page.  The memory for the log page is allocated
 * by malloc() and returned in *logp.  The caller must free the
 * memory.
 */
int nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp);

/*
 * Construct a discovery log page entry that describes the connection
 * used by a host association's admin queue pair.
 */
int nvmf_init_dle_from_admin_qp(struct nvmf_qpair *qp,
    const struct nvme_controller_data *cdata,
    struct nvme_discovery_log_entry *dle);

/*
 * Request a desired number of I/O queues via SET_FEATURES.  The
 * number of actual I/O queues available is returned in *actual on
 * success.
 */
int nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested,
    u_int *actual);

/*
 * Hand off an active host association to the kernel.  This frees the
 * qpairs (even on error).
 */
int nvmf_handoff_host(const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata,
    uint32_t reconnect_delay, uint32_t controller_loss_timeout);

/*
 * Disconnect an active host association previously handed off to the
 * kernel.  *host is either the name of the device (nvmeX) for this
 * association or the remote subsystem NQN.
 */
int nvmf_disconnect_host(const char *host);
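
/*
 * Example (illustrative sketch): establishing a host admin queue pair
 * over TCP and preparing the association for I/O queues.  'na' is a
 * host association, 's' is a connected socket, and 'subnqn' is the
 * remote subsystem NQN; the queue size (32), CNTLID (0xffff requests
 * a dynamically allocated controller), KATO (120000 ms), and I/O
 * queue count (4) are placeholder values.  Error handling is
 * abbreviated.
 *
 *	struct nvmf_qpair_params qparams;
 *	struct nvme_controller_data cdata;
 *	struct nvmf_qpair *admin;
 *	uint8_t hostid[16];
 *	char hostnqn[NVMF_NQN_MAX_LEN];
 *	u_int actual;
 *
 *	nvmf_hostid_from_hostuuid(hostid);
 *	nvmf_nqn_from_hostuuid(hostnqn);
 *
 *	memset(&qparams, 0, sizeof(qparams));
 *	qparams.admin = true;
 *	qparams.tcp.fd = s;
 *	admin = nvmf_connect(na, &qparams, 0, 32, hostid, 0xffff, subnqn,
 *	    hostnqn, 120000);
 *	if (admin == NULL)
 *		errx(1, "%s", nvmf_association_error(na));
 *
 *	if (nvmf_host_identify_controller(admin, &cdata) == 0) {
 *		nvmf_update_assocation(na, &cdata);
 *		nvmf_host_request_queues(admin, 4, &actual);
 *	}
 */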

/*
 * Disconnect all active host associations previously handed off to
 * the kernel.
 */
int nvmf_disconnect_all(void);

/*
 * Fetch reconnect parameters from an existing kernel host to use for
 * establishing a new association.  The caller must destroy the
 * returned nvlist.
 */
int nvmf_reconnect_params(int fd, nvlist_t **nvlp);

/*
 * Hand off an active host association to an existing host in the
 * kernel.  This frees the qpairs (even on error).
 */
int nvmf_reconnect_host(int fd, const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata,
    uint32_t reconnect_delay, uint32_t controller_loss_timeout);

/*
 * Fetch connection status from an existing kernel host.
 */
int nvmf_connection_status(int fd, nvlist_t **nvlp);

#endif /* !__LIBNVMF_H__ */