/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Oxide Computer Company
 */

/*
 * NVMe Vendor Unique Command (VUC) support. The NVMe standard defines a
 * 'standard' format for vendor unique admin and I/O commands. We provide both
 * a discovery mechanism and a way to construct and execute vendor unique
 * commands. Unlike with log page and feature discovery, there is no way to
 * turn the discovery information into a request structure. Rather, our
 * expectation is that more intrinsic library functions would be added based
 * on the specifics of each vendor's unique commands.
 */
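
/*
 * As a purely illustrative sketch (not part of the library), a consumer that
 * has already obtained an nvme_ctrl_t might build and issue a vendor unique
 * command along the following lines. The opcode, namespace ID, timeout, and
 * buffer length are placeholder values and error handling is elided; exactly
 * which setters are mandatory before nvme_vuc_req_exec() is governed by the
 * nvme_vuc_fields table:
 *
 *	nvme_vuc_req_t *req;
 *	uint8_t buf[512];
 *	uint32_t cdw0;
 *
 *	if (!nvme_vuc_req_init(ctrl, &req))
 *		return (false);
 *	if (!nvme_vuc_req_set_opcode(req, 0xc1) ||
 *	    !nvme_vuc_req_set_nsid(req, 1) ||
 *	    !nvme_vuc_req_set_timeout(req, 30) ||
 *	    !nvme_vuc_req_set_cdw12(req, 0) ||
 *	    !nvme_vuc_req_set_cdw13(req, 0) ||
 *	    !nvme_vuc_req_set_cdw14(req, 0) ||
 *	    !nvme_vuc_req_set_cdw15(req, 0) ||
 *	    !nvme_vuc_req_set_output(req, buf, sizeof (buf)) ||
 *	    !nvme_vuc_req_exec(req)) {
 *		nvme_vuc_req_fini(req);
 *		return (false);
 *	}
 *	(void) nvme_vuc_req_get_cdw0(req, &cdw0);
 *	nvme_vuc_req_fini(req);
 */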

#include <strings.h>
#include <unistd.h>

#include "libnvme_impl.h"

void
nvme_vuc_disc_free(nvme_vuc_disc_t *disc)
{
	free(disc);
}

bool
nvme_vuc_disc_dup(nvme_ctrl_t *ctrl, const nvme_vuc_disc_t *src,
    nvme_vuc_disc_t **discp)
{
	nvme_vuc_disc_t *disc;

	if (src == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_disc_t pointer to duplicate: "
		    "%p", src));
	}

	if (discp == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_disc_t output pointer: %p",
		    discp));
	}

	disc = calloc(1, sizeof (nvme_vuc_disc_t));
	if (disc == NULL) {
		int e = errno;
		return (nvme_ctrl_error(ctrl, NVME_ERR_NO_MEM, e, "failed to "
		    "allocate memory for a new nvme_vuc_disc_t: %s",
		    strerror(e)));
	}

	(void) memcpy(disc, src, sizeof (nvme_vuc_disc_t));
	*discp = disc;
	return (nvme_ctrl_success(ctrl));
}

const char *
nvme_vuc_disc_name(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_short);
}

const char *
nvme_vuc_disc_desc(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_desc);
}

uint32_t
nvme_vuc_disc_opcode(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_opc);
}

nvme_vuc_disc_io_t
nvme_vuc_disc_dt(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_dt);
}

nvme_vuc_disc_impact_t
nvme_vuc_disc_impact(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_impact);
}

nvme_vuc_disc_lock_t
nvme_vuc_disc_lock(const nvme_vuc_disc_t *disc)
{
	return (disc->nvd_lock);
}

void
nvme_vuc_discover_fini(nvme_vuc_iter_t *iter)
{
	free(iter);
}

nvme_iter_t
nvme_vuc_discover_step(nvme_vuc_iter_t *iter, const nvme_vuc_disc_t **outp)
{
	nvme_ctrl_t *ctrl = iter->nvi_ctrl;

	if (ctrl->nc_vsd == NULL) {
		return (NVME_ITER_DONE);
	}

	if (iter->nvi_cur_idx >= ctrl->nc_vsd->nvd_nvuc) {
		return (NVME_ITER_DONE);
	}

	*outp = &ctrl->nc_vsd->nvd_vuc[iter->nvi_cur_idx];
	iter->nvi_cur_idx++;
	return (NVME_ITER_VALID);
}

bool
nvme_vuc_discover_init(nvme_ctrl_t *ctrl, uint32_t flags,
    nvme_vuc_iter_t **iterp)
{
	nvme_vuc_iter_t *iter;

	if (flags != 0) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_FLAG, 0,
		    "encountered invalid discovery flags: 0x%x", flags));
	}

	if (iterp == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_iter_t output pointer: %p",
		    iterp));
	}

	iter = calloc(1, sizeof (nvme_vuc_iter_t));
	if (iter == NULL) {
		int e = errno;
		return (nvme_ctrl_error(ctrl, NVME_ERR_NO_MEM, e, "failed to "
		    "allocate memory for a new nvme_vuc_iter_t: %s",
		    strerror(e)));
	}

	iter->nvi_ctrl = ctrl;

	*iterp = iter;
	return (nvme_ctrl_success(ctrl));
}

bool
nvme_vuc_discover(nvme_ctrl_t *ctrl, uint32_t flags, nvme_vuc_disc_f func,
    void *arg)
{
	nvme_vuc_iter_t *iter;
	nvme_iter_t ret;
	const nvme_vuc_disc_t *disc;

	if (func == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_disc_f function pointer: %p",
		    func));
	}

	if (!nvme_vuc_discover_init(ctrl, flags, &iter)) {
		return (false);
	}

	while ((ret = nvme_vuc_discover_step(iter, &disc)) == NVME_ITER_VALID) {
		if (!func(ctrl, disc, arg))
			break;
	}

	nvme_vuc_discover_fini(iter);
	if (ret == NVME_ITER_ERROR) {
		return (false);
	}

	return (nvme_ctrl_success(ctrl));
}

bool
nvme_vuc_discover_by_name(nvme_ctrl_t *ctrl, const char *name, uint32_t flags,
    nvme_vuc_disc_t **discp)
{
	nvme_vuc_iter_t *iter;
	nvme_iter_t ret;
	const nvme_vuc_disc_t *disc;

	if (discp == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_disc_t output pointer: %p",
		    discp));
	}

	if (name == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid pointer for name: %p", name));
	}

	if (!nvme_vuc_discover_init(ctrl, flags, &iter)) {
		return (false);
	}

	*discp = NULL;
	while ((ret = nvme_vuc_discover_step(iter, &disc)) == NVME_ITER_VALID) {
		if (strcmp(name, nvme_vuc_disc_name(disc)) == 0) {
			break;
		}
	}

	if (ret == NVME_ITER_VALID && !nvme_vuc_disc_dup(ctrl, disc, discp)) {
		nvme_err_data_t err;

		nvme_ctrl_err_save(ctrl, &err);
		nvme_vuc_discover_fini(iter);
		nvme_ctrl_err_set(ctrl, &err);
		return (false);
	}

	nvme_vuc_discover_fini(iter);
	if (ret == NVME_ITER_ERROR) {
		return (false);
	}

	if (*discp == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_VUC_UNKNOWN, 0, "failed "
		    "to map %s to a known vendor unique command", name));
	}

	return (nvme_ctrl_success(ctrl));
}

void
nvme_vuc_req_fini(nvme_vuc_req_t *req)
{
	free(req);
}

bool
nvme_vuc_req_init(nvme_ctrl_t *ctrl, nvme_vuc_req_t **reqp)
{
	nvme_vuc_req_t *req;

	if (reqp == NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid nvme_vuc_req_t output pointer: %p",
		    reqp));
	}

	if (ctrl->nc_info.id_nvscc.nv_spec == 0) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_VUC_UNSUP_BY_DEV, 0,
		    "cannot create vuc request because the controller does "
		    "not support the NVMe standard vendor unique command "
		    "interface"));
	}

	req = calloc(1, sizeof (nvme_vuc_req_t));
	if (req == NULL) {
		int e = errno;
		return (nvme_ctrl_error(ctrl, NVME_ERR_NO_MEM, e, "failed to "
		    "allocate memory for a new nvme_vuc_req_t: %s",
		    strerror(e)));
	}

	req->nvr_ctrl = ctrl;

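	/*
	 * Mark every field that the field definitions flag as required by
	 * default so that nvme_vuc_req_exec() can detect anything the caller
	 * never explicitly set.
	 */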
	for (size_t i = 0; i < nvme_vuc_nfields; i++) {
		if (nvme_vuc_fields[i].nlfi_def_req) {
			req->nvr_need |= 1 << i;
		}
	}

	*reqp = req;
	return (nvme_ctrl_success(ctrl));
}

static void
nvme_vuc_req_clear_need(nvme_vuc_req_t *req, nvme_vuc_req_field_t field)
{
	req->nvr_need &= ~(1 << field);
}

/*
 * We have no way to validate any of the cdw1[2-5] values: these commands are
 * all vendor-specific and their semantics are not something we can know.
 * Therefore there are no calls to validate these fields.
 */
bool
nvme_vuc_req_set_cdw12(nvme_vuc_req_t *req, uint32_t cdw12)
{
	req->nvr_cdw12 = cdw12;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_CDW12);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_set_cdw13(nvme_vuc_req_t *req, uint32_t cdw13)
{
	req->nvr_cdw13 = cdw13;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_CDW13);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_set_cdw14(nvme_vuc_req_t *req, uint32_t cdw14)
{
	req->nvr_cdw14 = cdw14;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_CDW14);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_set_cdw15(nvme_vuc_req_t *req, uint32_t cdw15)
{
	req->nvr_cdw15 = cdw15;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_CDW15);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

static const nvme_field_check_t nvme_vuc_check_opcode = {
	nvme_vuc_fields, NVME_VUC_REQ_FIELD_OPC,
	NVME_ERR_VUC_OPCODE_RANGE, 0, 0
};

bool
nvme_vuc_req_set_opcode(nvme_vuc_req_t *req, uint32_t opc)
{
	if (!nvme_field_check_one(req->nvr_ctrl, opc, "vendor unique command",
	    &nvme_vuc_check_opcode, 0)) {
		return (false);
	}

	req->nvr_opcode = opc;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_OPC);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

static const nvme_field_check_t nvme_vuc_check_nsid = {
	nvme_vuc_fields, NVME_VUC_REQ_FIELD_NSID,
	NVME_ERR_NS_RANGE, 0, 0
};

bool
nvme_vuc_req_set_nsid(nvme_vuc_req_t *req, uint32_t nsid)
{
	if (!nvme_field_check_one(req->nvr_ctrl, nsid, "vendor unique command",
	    &nvme_vuc_check_nsid, 0)) {
		return (false);
	}

	req->nvr_nsid = nsid;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_NSID);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

static const nvme_field_check_t nvme_vuc_check_to = {
	nvme_vuc_fields, NVME_VUC_REQ_FIELD_TO,
	NVME_ERR_VUC_TIMEOUT_RANGE, 0, 0
};

bool
nvme_vuc_req_set_timeout(nvme_vuc_req_t *req, uint32_t to)
{
	if (!nvme_field_check_one(req->nvr_ctrl, to, "vendor unique command",
	    &nvme_vuc_check_to, 0)) {
		return (false);
	}

	req->nvr_timeout = to;
	nvme_vuc_req_clear_need(req, NVME_VUC_REQ_FIELD_TO);
	return (nvme_ctrl_success(req->nvr_ctrl));
}

/*
 * Check common parts of a VUC data transfer. While the kernel is going to
 * further constrain our length, we will still check the specified length
 * against the actual specification max.
 */
static const nvme_field_check_t nvme_vuc_check_ndt = {
	nvme_vuc_fields, NVME_VUC_REQ_FIELD_NDT,
	NVME_ERR_VUC_NDT_RANGE, 0, 0
};

static bool
nvme_vuc_req_data_validate(nvme_vuc_req_t *req, const void *buf, size_t len,
    bool in)
{
	nvme_ctrl_t *ctrl = req->nvr_ctrl;
	const char *dir = in ? "input" : "output";
	const char *alt_dir = in ? "output" : "input";
	const void *alt_buf = in ? req->nvr_output : req->nvr_input;

	if (buf == NULL && len > 0) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0, "vendor "
		    "unique command %s buffer cannot be NULL when the length "
		    "is non-zero", dir));
	} else if (buf != NULL && len == 0) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_VUC_NDT_RANGE, 0,
		    "vendor unique command buffer size may not be zero when "
		    "given a non-NULL pointer (%p)", buf));
	}

	if (alt_buf != NULL && buf != NULL) {
		return (nvme_ctrl_error(ctrl, NVME_ERR_VUC_CANNOT_RW, 0,
		    "an %s buffer is already set and therefore an %s buffer "
		    "cannot also be added", alt_dir, dir));
	}

	/*
	 * This takes care of alignment and the upper bound.
	 */
	if (!nvme_field_check_one(req->nvr_ctrl, len, "vendor unique command",
	    &nvme_vuc_check_ndt, 0)) {
		return (false);
	}

	return (true);
}

/*
 * The impact values are a libnvme-specific construct that maps to the
 * kernel's corresponding values. Therefore we don't use the standard field
 * validation routines here.
 */
bool
nvme_vuc_req_set_impact(nvme_vuc_req_t *req, nvme_vuc_disc_impact_t impact)
{
	const nvme_vuc_disc_impact_t all_impact = NVME_VUC_DISC_IMPACT_DATA |
	    NVME_VUC_DISC_IMPACT_NS;

	if ((impact & ~all_impact) != 0) {
		return (nvme_ctrl_error(req->nvr_ctrl,
		    NVME_ERR_VUC_IMPACT_RANGE, 0, "encountered unknown impact "
		    "flags: 0x%x", impact & ~all_impact));
	}

	req->nvr_impact = impact;
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_set_output(nvme_vuc_req_t *req, void *buf, size_t len)
{
	if (!nvme_vuc_req_data_validate(req, buf, len, false)) {
		return (false);
	}

	req->nvr_output = buf;
	req->nvr_outlen = len;
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_clear_output(nvme_vuc_req_t *req)
{
	req->nvr_output = NULL;
	req->nvr_outlen = 0;
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_set_input(nvme_vuc_req_t *req, const void *buf, size_t len)
{
	if (!nvme_vuc_req_data_validate(req, buf, len, true)) {
		return (false);
	}

	req->nvr_input = buf;
	req->nvr_inlen = len;
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_get_cdw0(nvme_vuc_req_t *req, uint32_t *cdw0)
{
	if (cdw0 == NULL) {
		return (nvme_ctrl_error(req->nvr_ctrl, NVME_ERR_BAD_PTR, 0,
		    "encountered invalid cdw0 output pointer: %p", cdw0));
	}

	if (!req->nvr_results_valid) {
		return (nvme_ctrl_error(req->nvr_ctrl, NVME_ERR_VUC_NO_RESULTS,
		    0, "vendor unique command results are not currently valid "
		    "and cannot be returned"));
	}

	*cdw0 = req->nvr_cdw0;
	return (nvme_ctrl_success(req->nvr_ctrl));
}

bool
nvme_vuc_req_exec(nvme_vuc_req_t *req)
{
	nvme_ctrl_t *ctrl = req->nvr_ctrl;
	nvme_ioctl_passthru_t pass;

	/*
	 * Immediately invalidate our stored data.
	 */
	req->nvr_results_valid = false;
	req->nvr_cdw0 = 0;

	if (req->nvr_need != 0) {
		return (nvme_field_miss_err(ctrl, nvme_vuc_fields,
		    nvme_vuc_nfields, NVME_ERR_VUC_REQ_MISSING_FIELDS,
		    "vendor unique command", req->nvr_need));
	}

	(void) memset(&pass, 0, sizeof (nvme_ioctl_passthru_t));
	pass.npc_common.nioc_nsid = req->nvr_nsid;
	pass.npc_opcode = req->nvr_opcode;
	pass.npc_timeout = req->nvr_timeout;
	pass.npc_cdw12 = req->nvr_cdw12;
	pass.npc_cdw13 = req->nvr_cdw13;
	pass.npc_cdw14 = req->nvr_cdw14;
	pass.npc_cdw15 = req->nvr_cdw15;
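
	/*
	 * Only one of the input and output buffers may be set (enforced by
	 * nvme_vuc_req_data_validate()), so the transfer direction follows
	 * from whichever buffer is present.
	 */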
	if (req->nvr_input != NULL) {
		pass.npc_buflen = req->nvr_inlen;
		pass.npc_buf = (uintptr_t)req->nvr_input;
		pass.npc_flags = NVME_PASSTHRU_WRITE;
	} else if (req->nvr_output != NULL) {
		pass.npc_buflen = req->nvr_outlen;
		pass.npc_buf = (uintptr_t)req->nvr_output;
		pass.npc_flags = NVME_PASSTHRU_READ;
	}

	if ((req->nvr_impact & NVME_VUC_DISC_IMPACT_NS) != 0) {
		pass.npc_impact |= NVME_IMPACT_NS;
	}

	if (ioctl(ctrl->nc_fd, NVME_IOC_PASSTHRU, &pass) != 0) {
		int e = errno;
		return (nvme_ioctl_syserror(ctrl, e, "vendor unique command"));
	}

	if (pass.npc_common.nioc_drv_err != NVME_IOCTL_E_OK) {
		return (nvme_ioctl_error(ctrl, &pass.npc_common,
		    "vendor unique command"));
	}

	req->nvr_results_valid = true;
	req->nvr_cdw0 = pass.npc_cdw0;

	return (nvme_ctrl_success(ctrl));
}