/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/utsname.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

#include "libnvmf.h"
#include "internal.h"
#include "nvmft_subr.h"

void
nvmf_init_cqe(void *cqe, const struct nvmf_capsule *nc, uint16_t status)
{
	struct nvme_completion *cpl = cqe;
	const struct nvme_command *cmd = nvmf_capsule_sqe(nc);

	memset(cpl, 0, sizeof(*cpl));
	cpl->cid = cmd->cid;
	cpl->status = htole16(status);
}

static struct nvmf_capsule *
nvmf_simple_response(const struct nvmf_capsule *nc, uint8_t sc_type,
    uint8_t sc_status)
{
	struct nvme_completion cpl;
	uint16_t status;

	status = NVMEF(NVME_STATUS_SCT, sc_type) |
	    NVMEF(NVME_STATUS_SC, sc_status);
	nvmf_init_cqe(&cpl, nc, status);
	return (nvmf_allocate_response(nc->nc_qpair, &cpl));
}

int
nvmf_controller_receive_capsule(struct nvmf_qpair *qp,
    struct nvmf_capsule **ncp)
{
	struct nvmf_capsule *nc;
	int error;
	uint8_t sc_status;

	*ncp = NULL;
	error = nvmf_receive_capsule(qp, &nc);
	if (error != 0)
		return (error);

	sc_status = nvmf_validate_command_capsule(nc);
	if (sc_status != NVME_SC_SUCCESS) {
		nvmf_send_generic_error(nc, sc_status);
		nvmf_free_capsule(nc);
		return (EPROTO);
	}

	*ncp = nc;
	return (0);
}

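/*
 * Example usage (an illustrative sketch, not part of the library): a
 * minimal capsule-processing loop for a controller queue pair.
 * nvmf_controller_receive_capsule() has already rejected malformed
 * capsules with a generic error, so the loop only sees well-formed
 * commands.  "qp" and the handle_command() dispatcher are assumed to
 * be supplied by the caller.
 *
 *	struct nvmf_capsule *nc;
 *	int error;
 *
 *	for (;;) {
 *		error = nvmf_controller_receive_capsule(qp, &nc);
 *		if (error == EPROTO)
 *			continue;	// invalid capsule, error already sent
 *		if (error != 0)
 *			break;		// transport failure
 *		handle_command(nc);	// hypothetical dispatcher
 *		nvmf_free_capsule(nc);
 *	}
 */
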
int
nvmf_controller_transmit_response(struct nvmf_capsule *nc)
{
	struct nvmf_qpair *qp = nc->nc_qpair;

	/* Set SQHD. */
	if (qp->nq_flow_control) {
		qp->nq_sqhd = (qp->nq_sqhd + 1) % qp->nq_qsize;
		nc->nc_cqe.sqhd = htole16(qp->nq_sqhd);
	} else
		nc->nc_cqe.sqhd = 0;

	return (nvmf_transmit_capsule(nc));
}

int
nvmf_send_response(const struct nvmf_capsule *cc, const void *cqe)
{
	struct nvmf_capsule *rc;
	int error;

	rc = nvmf_allocate_response(cc->nc_qpair, cqe);
	if (rc == NULL)
		return (ENOMEM);
	error = nvmf_controller_transmit_response(rc);
	nvmf_free_capsule(rc);
	return (error);
}

int
nvmf_send_error(const struct nvmf_capsule *cc, uint8_t sc_type,
    uint8_t sc_status)
{
	struct nvmf_capsule *rc;
	int error;

	rc = nvmf_simple_response(cc, sc_type, sc_status);
	if (rc == NULL)
		return (ENOMEM);
	error = nvmf_controller_transmit_response(rc);
	nvmf_free_capsule(rc);
	return (error);
}

int
nvmf_send_generic_error(const struct nvmf_capsule *nc, uint8_t sc_status)
{
	return (nvmf_send_error(nc, NVME_SCT_GENERIC, sc_status));
}

/*
 * SUCCESS is status code 0 in the generic status code type, so a
 * successful completion is just a generic "error" of zero.
 */
int
nvmf_send_success(const struct nvmf_capsule *nc)
{
	return (nvmf_send_generic_error(nc, NVME_SC_SUCCESS));
}

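/*
 * Example usage (sketch): completing a command with the helpers above.
 * A handler replies with nvmf_send_success() on success or picks a
 * suitable status code otherwise; either way the command capsule is
 * freed afterwards.  "nc" is the received command capsule and
 * "handled_ok" is a hypothetical result flag.
 *
 *	if (handled_ok)
 *		error = nvmf_send_success(nc);
 *	else
 *		error = nvmf_send_generic_error(nc, NVME_SC_INVALID_FIELD);
 *	nvmf_free_capsule(nc);
 */
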
void
nvmf_connect_invalid_parameters(const struct nvmf_capsule *cc, bool data,
    uint16_t offset)
{
	struct nvmf_fabric_connect_rsp rsp;
	struct nvmf_capsule *rc;

	nvmf_init_cqe(&rsp, cc,
	    NVMEF(NVME_STATUS_SCT, NVME_SCT_COMMAND_SPECIFIC) |
	    NVMEF(NVME_STATUS_SC, NVMF_FABRIC_SC_INVALID_PARAM));
	rsp.status_code_specific.invalid.ipo = htole16(offset);
	rsp.status_code_specific.invalid.iattr = data ? 1 : 0;
	rc = nvmf_allocate_response(cc->nc_qpair, &rsp);
	if (rc == NULL)
		return;
	nvmf_transmit_capsule(rc);
	nvmf_free_capsule(rc);
}

struct nvmf_qpair *
nvmf_accept(struct nvmf_association *na, const struct nvmf_qpair_params *params,
    struct nvmf_capsule **ccp, struct nvmf_fabric_connect_data *data)
{
	static const char hostid_zero[sizeof(data->hostid)];
	const struct nvmf_fabric_connect_cmd *cmd;
	struct nvmf_qpair *qp;
	struct nvmf_capsule *cc, *rc;
	u_int qsize;
	int error;
	uint16_t cntlid;
	uint8_t sc_status;

	qp = NULL;
	cc = NULL;
	rc = NULL;
	*ccp = NULL;
	na_clear_error(na);
	if (!na->na_controller) {
		na_error(na, "Cannot accept on a host");
		goto error;
	}

	qp = nvmf_allocate_qpair(na, params);
	if (qp == NULL)
		goto error;

	/* Read the CONNECT capsule. */
	error = nvmf_receive_capsule(qp, &cc);
	if (error != 0) {
		na_error(na, "Failed to receive CONNECT: %s", strerror(error));
		goto error;
	}

	sc_status = nvmf_validate_command_capsule(cc);
	if (sc_status != 0) {
		na_error(na, "CONNECT command failed to validate: %u",
		    sc_status);
		rc = nvmf_simple_response(cc, NVME_SCT_GENERIC, sc_status);
		goto error;
	}

	cmd = nvmf_capsule_sqe(cc);
	if (cmd->opcode != NVME_OPC_FABRICS_COMMANDS ||
	    cmd->fctype != NVMF_FABRIC_COMMAND_CONNECT) {
		na_error(na, "Invalid opcode in CONNECT (%u,%u)", cmd->opcode,
		    cmd->fctype);
		rc = nvmf_simple_response(cc, NVME_SCT_GENERIC,
		    NVME_SC_INVALID_OPCODE);
		goto error;
	}

	if (cmd->recfmt != htole16(0)) {
		na_error(na, "Unsupported CONNECT record format %u",
		    le16toh(cmd->recfmt));
		rc = nvmf_simple_response(cc, NVME_SCT_COMMAND_SPECIFIC,
		    NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
		goto error;
	}

	qsize = le16toh(cmd->sqsize) + 1;
	if (cmd->qid == 0) {
		/* Admin queue limits. */
		if (qsize < NVME_MIN_ADMIN_ENTRIES ||
		    qsize > NVME_MAX_ADMIN_ENTRIES ||
		    qsize > na->na_params.max_admin_qsize) {
			na_error(na, "Invalid queue size %u", qsize);
			nvmf_connect_invalid_parameters(cc, false,
			    offsetof(struct nvmf_fabric_connect_cmd, sqsize));
			goto error;
		}
		qp->nq_admin = true;
	} else {
		/* I/O queues are not allowed on a discovery controller. */
		if (na->na_params.max_io_qsize == 0) {
			na_error(na, "I/O queue on discovery controller");
			nvmf_connect_invalid_parameters(cc, false,
			    offsetof(struct nvmf_fabric_connect_cmd, qid));
			goto error;
		}

		/* I/O queue limits. */
		if (qsize < NVME_MIN_IO_ENTRIES ||
		    qsize > NVME_MAX_IO_ENTRIES ||
		    qsize > na->na_params.max_io_qsize) {
			na_error(na, "Invalid queue size %u", qsize);
			nvmf_connect_invalid_parameters(cc, false,
			    offsetof(struct nvmf_fabric_connect_cmd, sqsize));
			goto error;
		}

		/* KATO is reserved (must be zero) for I/O queues. */
		if (cmd->kato != 0) {
			na_error(na,
			    "KeepAlive timeout specified for I/O queue");
			nvmf_connect_invalid_parameters(cc, false,
			    offsetof(struct nvmf_fabric_connect_cmd, kato));
			goto error;
		}
		qp->nq_admin = false;
	}
	qp->nq_qsize = qsize;

	/* Fetch CONNECT data. */
	if (nvmf_capsule_data_len(cc) != sizeof(*data)) {
		na_error(na, "Invalid data payload length for CONNECT: %zu",
		    nvmf_capsule_data_len(cc));
		nvmf_connect_invalid_parameters(cc, false,
		    offsetof(struct nvmf_fabric_connect_cmd, sgl1));
		goto error;
	}

	error = nvmf_receive_controller_data(cc, 0, data, sizeof(*data));
	if (error != 0) {
		na_error(na, "Failed to read data for CONNECT: %s",
		    strerror(error));
		rc = nvmf_simple_response(cc, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR);
		goto error;
	}

	/* The hostid must be non-zero. */
	if (memcmp(data->hostid, hostid_zero, sizeof(hostid_zero)) == 0) {
		na_error(na, "HostID in CONNECT data is zero");
		nvmf_connect_invalid_parameters(cc, true,
		    offsetof(struct nvmf_fabric_connect_data, hostid));
		goto error;
	}

	cntlid = le16toh(data->cntlid);
	if (cmd->qid == 0) {
		if (na->na_params.dynamic_controller_model) {
			if (cntlid != NVMF_CNTLID_DYNAMIC) {
				na_error(na, "Invalid controller ID %#x",
				    cntlid);
				nvmf_connect_invalid_parameters(cc, true,
				    offsetof(struct nvmf_fabric_connect_data,
					cntlid));
				goto error;
			}
		} else {
			if (cntlid > NVMF_CNTLID_STATIC_MAX &&
			    cntlid != NVMF_CNTLID_STATIC_ANY) {
				na_error(na, "Invalid controller ID %#x",
				    cntlid);
				nvmf_connect_invalid_parameters(cc, true,
				    offsetof(struct nvmf_fabric_connect_data,
					cntlid));
				goto error;
			}
		}
	} else {
		/* Wildcard controller IDs are only valid on an admin queue. */
		if (cntlid > NVMF_CNTLID_STATIC_MAX) {
			na_error(na, "Invalid controller ID %#x", cntlid);
			nvmf_connect_invalid_parameters(cc, true,
			    offsetof(struct nvmf_fabric_connect_data, cntlid));
			goto error;
		}
	}

	/* Simple validation of each NQN. */
	if (!nvmf_nqn_valid(data->subnqn)) {
		na_error(na, "Invalid SubNQN %.*s", (int)sizeof(data->subnqn),
		    data->subnqn);
		nvmf_connect_invalid_parameters(cc, true,
		    offsetof(struct nvmf_fabric_connect_data, subnqn));
		goto error;
	}
	if (!nvmf_nqn_valid(data->hostnqn)) {
		na_error(na, "Invalid HostNQN %.*s", (int)sizeof(data->hostnqn),
		    data->hostnqn);
		nvmf_connect_invalid_parameters(cc, true,
		    offsetof(struct nvmf_fabric_connect_data, hostnqn));
		goto error;
	}

	if (na->na_params.sq_flow_control ||
	    (cmd->cattr & NVMF_CONNECT_ATTR_DISABLE_SQ_FC) == 0)
		qp->nq_flow_control = true;
	else
		qp->nq_flow_control = false;
	qp->nq_sqhd = 0;
	qp->nq_kato = le32toh(cmd->kato);
	*ccp = cc;
	return (qp);
error:
	if (rc != NULL) {
		nvmf_transmit_capsule(rc);
		nvmf_free_capsule(rc);
	}
	if (cc != NULL)
		nvmf_free_capsule(cc);
	if (qp != NULL)
		nvmf_free_qpair(qp);
	return (NULL);
}

int
nvmf_finish_accept(const struct nvmf_capsule *cc, uint16_t cntlid)
{
	struct nvmf_fabric_connect_rsp rsp;
	struct nvmf_qpair *qp = cc->nc_qpair;
	struct nvmf_capsule *rc;
	int error;

	nvmf_init_cqe(&rsp, cc, 0);
	if (qp->nq_flow_control)
		rsp.sqhd = htole16(qp->nq_sqhd);
	else
		rsp.sqhd = htole16(0xffff);
	rsp.status_code_specific.success.cntlid = htole16(cntlid);
	rc = nvmf_allocate_response(qp, &rsp);
	if (rc == NULL)
		return (ENOMEM);
	error = nvmf_transmit_capsule(rc);
	nvmf_free_capsule(rc);
	if (error == 0)
		qp->nq_cntlid = cntlid;
	return (error);
}

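/*
 * Example usage (sketch): the controller side of the CONNECT
 * handshake.  nvmf_accept() validates the CONNECT command and returns
 * the new queue pair along with the CONNECT capsule and data; the
 * caller then assigns a controller ID and completes the exchange with
 * nvmf_finish_accept().  "na" and "params" are assumed to have been
 * set up already; nvmf_association_error() is assumed to return the
 * string recorded by na_error().
 *
 *	struct nvmf_fabric_connect_data data;
 *	struct nvmf_capsule *cc;
 *	struct nvmf_qpair *qp;
 *	int error;
 *
 *	qp = nvmf_accept(na, &params, &cc, &data);
 *	if (qp == NULL)
 *		errx(1, "CONNECT failed: %s", nvmf_association_error(na));
 *	error = nvmf_finish_accept(cc, 1);	// cntlid chosen by caller
 *	nvmf_free_capsule(cc);
 */
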
uint64_t
nvmf_controller_cap(struct nvmf_qpair *qp)
{
	const struct nvmf_association *na = qp->nq_association;

	return (_nvmf_controller_cap(na->na_params.max_io_qsize,
	    NVMF_CC_EN_TIMEOUT));
}

bool
nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc,
    uint32_t new_cc)
{
	const struct nvmf_association *na = qp->nq_association;

	return (_nvmf_validate_cc(na->na_params.max_io_qsize, cap, old_cc,
	    new_cc));
}

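/*
 * Example usage (sketch): emulating the CAP and CC properties with the
 * helpers above.  CAP is computed once per admin queue pair; each host
 * Property Set of CC is validated before being applied.
 * reject_property_set() is a hypothetical error path.
 *
 *	uint64_t cap = nvmf_controller_cap(qp);
 *
 *	if (nvmf_validate_cc(qp, cap, cc_old, cc_new))
 *		cc_old = cc_new;		// accept the write
 *	else
 *		reject_property_set(nc);	// hypothetical
 */
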
void
nvmf_init_discovery_controller_data(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata)
{
	const struct nvmf_association *na = qp->nq_association;
	struct utsname utsname;
	char *cp;

	memset(cdata, 0, sizeof(*cdata));

	/*
	 * 5.2 Figure 37 states the model name and serial are reserved,
	 * but Linux includes them.  Don't bother with the serial, but
	 * do set the model name.
	 */
	uname(&utsname);
	nvmf_strpad(cdata->mn, utsname.sysname, sizeof(cdata->mn));
	nvmf_strpad(cdata->fr, utsname.release, sizeof(cdata->fr));
	cp = memchr(cdata->fr, '-', sizeof(cdata->fr));
	if (cp != NULL)
		memset(cp, ' ', sizeof(cdata->fr) - (cp - (char *)cdata->fr));

	cdata->ctrlr_id = htole16(qp->nq_cntlid);
	cdata->ver = htole32(NVME_REV(1, 4));
	cdata->cntrltype = 2;	/* Discovery controller */

	cdata->lpa = NVMEF(NVME_CTRLR_DATA_LPA_EXT_DATA, 1);
	cdata->elpe = 0;

	cdata->maxcmd = htole16(na->na_params.max_admin_qsize);

	/* Transport-specific? */
	cdata->sgls = htole32(
	    NVMEF(NVME_CTRLR_DATA_SGLS_TRANSPORT_DATA_BLOCK, 1) |
	    NVMEF(NVME_CTRLR_DATA_SGLS_ADDRESS_AS_OFFSET, 1) |
	    NVMEF(NVME_CTRLR_DATA_SGLS_NVM_COMMAND_SET, 1));

	strlcpy(cdata->subnqn, NVMF_DISCOVERY_NQN, sizeof(cdata->subnqn));
}

void
nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial,
    const char *subnqn, int nn, uint32_t ioccsz,
    struct nvme_controller_data *cdata)
{
	const struct nvmf_association *na = qp->nq_association;
	struct utsname utsname;

	uname(&utsname);

	memset(cdata, 0, sizeof(*cdata));
	_nvmf_init_io_controller_data(qp->nq_cntlid, na->na_params.max_io_qsize,
	    serial, utsname.sysname, utsname.release, subnqn, nn, ioccsz,
	    sizeof(struct nvme_completion), cdata);
}

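/*
 * Example usage (sketch): answering IDENTIFY (CNS 01h, Identify
 * Controller) on an I/O controller.  "serial", "subnqn", "nn", and
 * "ioccsz" match the parameters above; nvmf_send_controller_data() is
 * assumed to be the libnvmf routine that returns a data buffer for a
 * command capsule "nc".
 *
 *	struct nvme_controller_data cdata;
 *
 *	nvmf_init_io_controller_data(qp, serial, subnqn, nn, ioccsz,
 *	    &cdata);
 *	nvmf_send_controller_data(nc, &cdata, sizeof(cdata));
 *	nvmf_free_capsule(nc);
 */
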
uint8_t
nvmf_get_log_page_id(const struct nvme_command *cmd)
{
	assert(cmd->opc == NVME_OPC_GET_LOG_PAGE);
	return (le32toh(cmd->cdw10) & 0xff);
}

uint64_t
nvmf_get_log_page_length(const struct nvme_command *cmd)
{
	uint32_t numd;

	assert(cmd->opc == NVME_OPC_GET_LOG_PAGE);

	/* NUMD is a zero-based dword count split across cdw10 and cdw11. */
	numd = le32toh(cmd->cdw10) >> 16 | (le32toh(cmd->cdw11) & 0xffff) << 16;
	return (((uint64_t)numd + 1) * 4);
}

uint64_t
nvmf_get_log_page_offset(const struct nvme_command *cmd)
{
	assert(cmd->opc == NVME_OPC_GET_LOG_PAGE);
	return (le32toh(cmd->cdw12) | (uint64_t)le32toh(cmd->cdw13) << 32);
}

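/*
 * Example usage (sketch): decoding a GET LOG PAGE command with the
 * helpers above and returning the requested window of a log buffer.
 * "log" and "log_len" are a hypothetical pre-built log page;
 * nvmf_send_controller_data() is assumed as in the IDENTIFY example.
 *
 *	uint64_t len, off;
 *
 *	switch (nvmf_get_log_page_id(cmd)) {
 *	case NVME_LOG_DISCOVERY:
 *		len = nvmf_get_log_page_length(cmd);
 *		off = nvmf_get_log_page_offset(cmd);
 *		if (off >= log_len || len > log_len - off)
 *			nvmf_send_generic_error(nc, NVME_SC_INVALID_FIELD);
 *		else
 *			nvmf_send_controller_data(nc, log + off, len);
 *		break;
 *	default:
 *		nvmf_send_generic_error(nc, NVME_SC_INVALID_FIELD);
 *		break;
 *	}
 */
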
int
nvmf_handoff_controller_qpair(struct nvmf_qpair *qp,
    struct nvmf_handoff_controller_qpair *h)
{
	h->trtype = qp->nq_association->na_trtype;
	return (nvmf_kernel_handoff_params(qp, &h->params));
}