/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __LIBNVMF_H__
#define __LIBNVMF_H__

#include <sys/_nv.h>
#include <sys/uio.h>
#include <stdbool.h>
#include <stddef.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_proto.h>

struct nvmf_capsule;
struct nvmf_association;
struct nvmf_qpair;

/*
 * Parameters shared by all queue-pairs of an association.  Note that
 * this contains the requested values used to initiate transport
 * negotiation.
 */
struct nvmf_association_params {
        bool sq_flow_control;           /* SQ flow control required. */
        bool dynamic_controller_model;  /* Controller only */
        uint16_t max_admin_qsize;       /* Controller only */
        uint32_t max_io_qsize;          /* Controller only, 0 for discovery */
        union {
                struct {
                        uint8_t pda;    /* Tx-side PDA. */
                        bool header_digests;
                        bool data_digests;
                        uint32_t maxr2t;        /* Host only */
                        uint32_t maxh2cdata;    /* Controller only */
                } tcp;
        };
};

/* Parameters specific to a single queue pair of an association. */
struct nvmf_qpair_params {
        bool admin;                     /* Host only */
        union {
                struct {
                        int fd;
                } tcp;
        };
};

/* Transport-independent APIs. */

/*
 * A host should allocate a new association for each association with
 * a controller.  After the admin queue has been allocated and the
 * controller's data has been fetched, it should be passed to
 * nvmf_update_association to update internal transport-specific
 * parameters before allocating I/O queues.
 *
 * A controller uses a single association to manage all incoming
 * queues since it is not known until after parsing the CONNECT
 * command which transport queues are admin vs I/O and which
 * controller they are created against.
 */
struct nvmf_association *nvmf_allocate_association(enum nvmf_trtype trtype,
    bool controller, const struct nvmf_association_params *params);
void nvmf_update_association(struct nvmf_association *na,
    const struct nvme_controller_data *cdata);
void nvmf_free_association(struct nvmf_association *na);

/* The most recent association-wide error message. */
const char *nvmf_association_error(const struct nvmf_association *na);

/*
 * A queue pair represents either an Admin or I/O
 * submission/completion queue pair.
 *
 * Each open qpair holds a reference on its association.  Once queue
 * pairs are allocated, callers can safely free the association to
 * ease bookkeeping.
 *
 * If nvmf_allocate_qpair fails, a detailed error message can be
 * obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params);
void nvmf_free_qpair(struct nvmf_qpair *qp);
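/*
 * Informal usage sketch (not part of the API): a host might prepare a
 * TCP association roughly as follows.  The parameter values are
 * illustrative placeholders only and error handling is abbreviated.
 *
 *      struct nvmf_association_params aparams;
 *      struct nvmf_association *na;
 *
 *      memset(&aparams, 0, sizeof(aparams));
 *      aparams.sq_flow_control = false;
 *      aparams.tcp.header_digests = false;
 *      aparams.tcp.data_digests = false;
 *      aparams.tcp.maxr2t = 1;
 *      na = nvmf_allocate_association(NVMF_TRTYPE_TCP, false, &aparams);
 *      if (na == NULL)
 *              err(1, "nvmf_allocate_association");
 *
 * Queue pairs are then created with nvmf_connect (host) or nvmf_accept
 * (controller), declared below; once all queue pairs exist the
 * association may be released with nvmf_free_association.
 */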
/*
 * Capsules are either commands (host -> controller) or responses
 * (controller -> host).  A single data buffer segment may be
 * associated with a command capsule.  Transmitted data is not copied
 * by this API but instead must be preserved until the capsule is
 * transmitted and freed.
 */
struct nvmf_capsule *nvmf_allocate_command(struct nvmf_qpair *qp,
    const void *sqe);
struct nvmf_capsule *nvmf_allocate_response(struct nvmf_qpair *qp,
    const void *cqe);
void nvmf_free_capsule(struct nvmf_capsule *nc);
int nvmf_capsule_append_data(struct nvmf_capsule *nc,
    void *buf, size_t len, bool send);
int nvmf_transmit_capsule(struct nvmf_capsule *nc);
int nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp);
const void *nvmf_capsule_sqe(const struct nvmf_capsule *nc);
const void *nvmf_capsule_cqe(const struct nvmf_capsule *nc);

/* Return a string name for a transport type. */
const char *nvmf_transport_type(uint8_t trtype);

/* Validate an NVMe Qualified Name. */
bool nvmf_nqn_valid(const char *nqn);

/* Controller-specific APIs. */

/*
 * A controller calls this function to check for any
 * transport-specific errors (invalid fields) in a received command
 * capsule.  It returns a generic command status value:
 * NVME_SC_SUCCESS if no error is found.
 */
uint8_t nvmf_validate_command_capsule(const struct nvmf_capsule *nc);

/*
 * A controller calls this function to query the amount of data
 * associated with a command capsule.
 */
size_t nvmf_capsule_data_len(const struct nvmf_capsule *cc);

/*
 * A controller calls this function to receive data associated with a
 * command capsule (e.g. the data for a WRITE command).  This can
 * either return in-capsule data or fetch data from the host
 * (e.g. using an R2T PDU over TCP).  The received command capsule
 * should be passed in 'nc'.  The received data is stored in '*buf'.
 */
int nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len);

/*
 * A controller calls this function to send data in response to a
 * command along with a response capsule.  If the data transfer
 * succeeds, a success response is sent.  If the data transfer fails,
 * an appropriate error status capsule is sent.  Regardless, a
 * response capsule is always sent.
 */
int nvmf_send_controller_data(const struct nvmf_capsule *nc,
    const void *buf, size_t len);

/*
 * Construct a CQE for a reply to a command capsule in 'nc' with the
 * completion status 'status'.  This is useful when additional CQE
 * info is required beyond the completion status.
 */
void nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc,
    uint16_t status);

/*
 * Construct and send a response capsule to a command capsule with
 * the supplied CQE.
 */
int nvmf_send_response(const struct nvmf_capsule *nc, const void *cqe);

/*
 * Wait for a single command capsule and return it in *ncp.  This can
 * fail if an invalid capsule is received or an I/O error occurs.
 */
int nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp);

/* Send a response capsule from a controller. */
int nvmf_controller_transmit_response(struct nvmf_capsule *nc);

/* Construct and send an error response capsule. */
int nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status);

/*
 * Construct and send an error response capsule using a generic status
 * code.
 */
int nvmf_send_generic_error(const struct nvmf_capsule *nc,
    uint8_t sc_status);
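/*
 * Informal sketch (not part of the API) of a controller handling a
 * single command capsule on a queue pair 'qp' obtained from
 * nvmf_accept (declared below).  The opcode dispatch is illustrative
 * only; a real controller handles many more commands.
 *
 *      struct nvmf_capsule *nc;
 *      const struct nvme_command *cmd;
 *      uint8_t sc;
 *
 *      if (nvmf_controller_receive_capsule(qp, &nc) != 0)
 *              return;
 *      sc = nvmf_validate_command_capsule(nc);
 *      if (sc != NVME_SC_SUCCESS) {
 *              nvmf_send_generic_error(nc, sc);
 *              nvmf_free_capsule(nc);
 *              return;
 *      }
 *      cmd = nvmf_capsule_sqe(nc);
 *      switch (cmd->opc) {
 *      case NVME_OPC_KEEP_ALIVE:
 *              nvmf_send_success(nc);
 *              break;
 *      default:
 *              nvmf_send_generic_error(nc, NVME_SC_INVALID_OPCODE);
 *              break;
 *      }
 *      nvmf_free_capsule(nc);
 */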
/* Construct and send a simple success response capsule. */
int nvmf_send_success(const struct nvmf_capsule *nc);

/*
 * Allocate a new queue pair and wait for the CONNECT command capsule.
 * If this fails, a detailed error message can be obtained from
 * nvmf_association_error.  On success, the command capsule is saved
 * in '*ccp' and the connect data is saved in 'data'.  The caller
 * must send an explicit response and free the command capsule.
 */
struct nvmf_qpair *nvmf_accept(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, struct nvmf_capsule **ccp,
    struct nvmf_fabric_connect_data *data);

/*
 * Construct and send a response capsule with the Fabrics CONNECT
 * invalid parameters error status.  If 'data' is true the offset is
 * relative to the CONNECT data structure, otherwise the offset is
 * relative to the SQE.
 */
void nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc,
    bool data, uint16_t offset);

/* Construct and send a response capsule for a successful CONNECT. */
int nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid);

/* Compute the initial state of CAP for a controller. */
uint64_t nvmf_controller_cap(struct nvmf_qpair *qp);

/* Generate a serial number string from a host ID. */
void nvmf_controller_serial(char *buf, size_t len, u_long hostid);

/*
 * Populate an Identify Controller data structure for a Discovery
 * controller.
 */
void nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata);

/*
 * Populate an Identify Controller data structure for an I/O
 * controller.
 */
void nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata);

/*
 * Validate whether a new value for CC is legal given the existing
 * values of CAP and CC.
 */
bool nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc);

/* Return the log page id (LID) of a GET_LOG_PAGE command. */
uint8_t nvmf_get_log_page_id(const struct nvme_command *cmd);

/* Return the requested data length of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_length(const struct nvme_command *cmd);

/* Return the requested data offset of a GET_LOG_PAGE command. */
uint64_t nvmf_get_log_page_offset(const struct nvme_command *cmd);

/* Prepare to hand off a controller qpair. */
int nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    const struct nvmf_fabric_connect_cmd *cmd,
    const struct nvmf_fabric_connect_data *data, struct nvmf_ioc_nv *nv);

/* Host-specific APIs. */

/*
 * Connect to an admin or I/O queue.  If this fails, a detailed error
 * message can be obtained from nvmf_association_error.
 */
struct nvmf_qpair *nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato);

/* Return the CNTLID for a queue returned from CONNECT. */
uint16_t nvmf_cntlid(struct nvmf_qpair *qp);
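/*
 * Informal sketch (not part of the API) of a host bringing up an
 * association and handing it off to the kernel, assuming the TCP
 * association 'na' from the earlier sketch and already-connected
 * sockets 'admin_sock' and 'io_sock'.  Queue sizes, the dynamic
 * controller CNTLID (0xffff), KATO, and 'subnqn' are illustrative
 * placeholders, error handling is abbreviated, and several of the
 * functions used are declared later in this header.
 *
 *      struct nvmf_qpair_params qparams;
 *      struct nvmf_qpair *admin, *io[1];
 *      struct nvme_controller_data cdata;
 *      uint8_t hostid[16];
 *      char hostnqn[NVMF_NQN_MAX_LEN];
 *      u_int actual;
 *
 *      nvmf_hostid_from_hostuuid(hostid);
 *      nvmf_nqn_from_hostuuid(hostnqn);
 *
 *      memset(&qparams, 0, sizeof(qparams));
 *      qparams.admin = true;
 *      qparams.tcp.fd = admin_sock;
 *      admin = nvmf_connect(na, &qparams, 0, 32, hostid, 0xffff, subnqn,
 *          hostnqn, 120000);
 *      if (admin == NULL)
 *              errx(1, "%s", nvmf_association_error(na));
 *
 *      nvmf_host_identify_controller(admin, &cdata);
 *      nvmf_update_association(na, &cdata);
 *      nvmf_host_request_queues(admin, 1, &actual);
 *
 *      memset(&qparams, 0, sizeof(qparams));
 *      qparams.admin = false;
 *      qparams.tcp.fd = io_sock;
 *      io[0] = nvmf_connect(na, &qparams, 1, 64, hostid,
 *          nvmf_cntlid(admin), subnqn, hostnqn, 0);
 *
 *      nvmf_handoff_host(admin, 1, io, &cdata);
 */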
/*
 * Send a command to the controller.  This can fail with EBUSY if the
 * submission queue is full.
 */
int nvmf_host_transmit_command(struct nvmf_capsule *nc);

/*
 * Wait for a response to a command.  If there are no outstanding
 * commands in the SQ, fails with EWOULDBLOCK.
 */
int nvmf_host_receive_response(struct nvmf_qpair *qp,
    struct nvmf_capsule **rcp);

/*
 * Wait for a response to a specific command.  The command must have
 * been successfully sent previously.
 */
int nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp);

/* Build a KeepAlive command. */
struct nvmf_capsule *nvmf_keepalive(struct nvmf_qpair *qp);

/* Read a controller property. */
int nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value);

/* Write a controller property. */
int nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset,
    uint8_t size, uint64_t value);

/* Construct a 16-byte HostId from kern.hostuuid. */
int nvmf_hostid_from_hostuuid(uint8_t hostid[16]);

/* Construct an NQN from kern.hostuuid. */
int nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN]);

/* Fetch controller data via IDENTIFY. */
int nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *data);

/* Fetch namespace data via IDENTIFY. */
int nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata);

/*
 * Fetch discovery log page.  The memory for the log page is allocated
 * by malloc() and returned in *logp.  The caller must free the
 * memory.
 */
int nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp);

/*
 * Request a desired number of I/O queues via SET_FEATURES.  The
 * number of actual I/O queues available is returned in *actual on
 * success.
 */
int nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested,
    u_int *actual);

/*
 * Hand off an active host association to the kernel.  This frees the
 * qpairs (even on error).
 */
int nvmf_handoff_host(struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata);

/*
 * Disconnect an active host association previously handed off to the
 * kernel.  The 'host' argument is either the name of the device
 * (nvmeX) for this association or the remote subsystem NQN.
 */
int nvmf_disconnect_host(const char *host);

/*
 * Disconnect all active host associations previously handed off to
 * the kernel.
 */
int nvmf_disconnect_all(void);

/*
 * Fetch reconnect parameters from an existing kernel host to use for
 * establishing a new association.  The caller must destroy the
 * returned nvlist.
 */
int nvmf_reconnect_params(int fd, nvlist_t **nvlp);

/*
 * Hand off an active host association to an existing host in the
 * kernel.  This frees the qpairs (even on error).
 */
int nvmf_reconnect_host(int fd, struct nvmf_qpair *admin_qp,
    u_int num_queues, struct nvmf_qpair **io_queues,
    const struct nvme_controller_data *cdata);

#endif /* !__LIBNVMF_H__ */