xref: /illumos-gate/usr/src/lib/libnvme/common/libnvme_identify.c (revision 533affcbc7fc4d0c8132976ea454aaa715fe2307)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2024 Oxide Computer Company
14  */
15 
16 /*
17  * This implements iterators for the various NVMe identify related features that
18  * return lists of information (rather than the basic data structures). These
19  * are all phrased as iterators to the user so that way we can abstract around
20  * the fact that there may be additional commands required to make this happen
21  * or eventually a number of namespaces that exceeds the basic amount supported
22  * here.
23  */
24 
25 #include <string.h>
26 #include <unistd.h>
27 
28 #include "libnvme_impl.h"
29 
/*
 * Destroy an identify request created by nvme_id_req_init_by_cns(). Accepts
 * NULL (free(NULL) is a no-op). The associated controller handle is not
 * modified and remains owned by the caller.
 */
void
nvme_id_req_fini(nvme_id_req_t *idreq)
{
	free(idreq);
}
35 
36 bool
nvme_id_req_init_by_cns(nvme_ctrl_t * ctrl,nvme_csi_t csi,uint32_t cns,nvme_id_req_t ** idreqp)37 nvme_id_req_init_by_cns(nvme_ctrl_t *ctrl, nvme_csi_t csi, uint32_t cns,
38     nvme_id_req_t **idreqp)
39 {
40 	const nvme_identify_info_t *info = NULL;
41 	nvme_id_req_t *req;
42 	nvme_valid_ctrl_data_t ctrl_data;
43 
44 	if (idreqp == NULL) {
45 		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
46 		    "encountered invalid nvme_id_req_t output pointer: %p",
47 		    idreqp));
48 	}
49 
50 	for (size_t i = 0; i < nvme_identify_ncmds; i++) {
51 		if (nvme_identify_cmds[i].nii_csi == csi &&
52 		    nvme_identify_cmds[i].nii_cns == cns) {
53 			info = &nvme_identify_cmds[i];
54 			break;
55 		}
56 	}
57 
58 	if (info == NULL) {
59 		return (nvme_ctrl_error(ctrl, NVME_ERR_IDENTIFY_UNKNOWN, 0,
60 		    "unknown identify command CSI/CNS 0x%x/0x%x", csi, cns));
61 	}
62 
63 	ctrl_data.vcd_vers = &ctrl->nc_vers;
64 	ctrl_data.vcd_id = &ctrl->nc_info;
65 
66 	if (!nvme_identify_info_supported(info, &ctrl_data)) {
67 		return (nvme_ctrl_error(ctrl, NVME_ERR_IDENTIFY_UNSUP_BY_DEV, 0,
68 		    "device does not support identify command %s (CSI/CNS "
69 		    "0x%x/0x%x)", info->nii_name, info->nii_csi,
70 		    info->nii_cns));
71 	}
72 
73 	req = calloc(1, sizeof (nvme_id_req_t));
74 	if (req == NULL) {
75 		int e = errno;
76 		return (nvme_ctrl_error(ctrl, NVME_ERR_NO_MEM, e,
77 		    "failed to allocate memory for a new nvme_id_req_t: %s",
78 		    strerror(e)));
79 	}
80 
81 	req->nir_ctrl = ctrl;
82 	req->nir_info = info;
83 	req->nir_need = info->nii_fields | (1 << NVME_ID_REQ_F_BUF);
84 	req->nir_allow = info->nii_fields;
85 
86 	*idreqp = req;
87 	return (nvme_ctrl_success(ctrl));
88 }
89 
/*
 * Record that the given request field has been supplied by clearing its bit
 * from the set of fields that must still be set before nvme_id_req_exec()
 * will proceed.
 */
static void
nvme_id_req_clear_need(nvme_id_req_t *req, nvme_identify_req_field_t field)
{
	req->nir_need &= ~(1 << field);
}
95 
/*
 * Field validation descriptor for the NSID argument of an identify request.
 * NOTE(review): the positional members appear to be the field table, the
 * field index, and the range/unsupported/unused error codes respectively
 * (with 0 meaning no unsupported-case error here) -- confirm against the
 * nvme_field_check_t definition in libnvme_impl.h.
 */
static const nvme_field_check_t nvme_identify_check_nsid = {
	nvme_identify_fields, NVME_ID_REQ_F_NSID,
	NVME_ERR_NS_RANGE, 0, NVME_ERR_NS_UNUSE
};
100 
101 bool
nvme_id_req_set_nsid(nvme_id_req_t * req,uint32_t nsid)102 nvme_id_req_set_nsid(nvme_id_req_t *req, uint32_t nsid)
103 {
104 	nvme_ctrl_t *ctrl = req->nir_ctrl;
105 	nvme_identify_info_flags_t flags = req->nir_info->nii_flags;
106 
107 	/*
108 	 * In some contexts the NSID here must refer to an actual valid
109 	 * namespace. In other cases it's referring to a search index and
110 	 * therefore all we care about is the value. Finally, sometimes the
111 	 * broadcast address is used to access things that are common across all
112 	 * namespaces. If we have a list operation, we just pass this through to
113 	 * the kernel. This unfortunately requires a bit more manual checking.
114 	 */
115 	if ((flags & NVME_IDENTIFY_INFO_F_NSID_LIST) == 0 &&
116 	    !nvme_field_check_one(req->nir_ctrl, nsid, "identify",
117 	    &nvme_identify_check_nsid, req->nir_allow)) {
118 		return (false);
119 	}
120 
121 	if ((flags & NVME_IDENTIFY_INFO_F_NSID_LIST) == 0 &&
122 	    (req->nir_allow & (1 << NVME_ID_REQ_F_NSID)) != 0) {
123 		if (nsid == 0) {
124 			return (nvme_ctrl_error(ctrl, NVME_ERR_NS_RANGE, 0,
125 			    "namespaces id 0x%x is invalid, valid namespaces "
126 			    "are [0x%x, 0x%x]", nsid, NVME_NSID_MIN,
127 			    req->nir_ctrl->nc_info.id_nn));
128 		}
129 
130 		if (nsid == NVME_NSID_BCAST &&
131 		    (flags & NVME_IDENTIFY_INFO_F_BCAST) == 0) {
132 			return (nvme_ctrl_error(ctrl, NVME_ERR_NS_RANGE, 0,
133 			    "the all namespaces/controller nsid (0x%x) is not "
134 			    "allowed for this identify command, valid "
135 			    "namespaces are [0x%x, 0x%x]", nsid,
136 			    NVME_NSID_MIN, req->nir_ctrl->nc_info.id_nn));
137 
138 		}
139 	}
140 
141 	req->nir_nsid = nsid;
142 	nvme_id_req_clear_need(req, NVME_ID_REQ_F_NSID);
143 	return (nvme_ctrl_success(req->nir_ctrl));
144 }
145 
/*
 * Field validation descriptor for the controller ID argument of an identify
 * request. NOTE(review): positional members appear to be the field table, the
 * field index, and the range/unsupported/unused error codes respectively --
 * confirm against the nvme_field_check_t definition in libnvme_impl.h.
 */
static const nvme_field_check_t nvme_identify_check_ctrlid = {
	nvme_identify_fields, NVME_ID_REQ_F_CTRLID,
	NVME_ERR_IDENTIFY_CTRLID_RANGE, NVME_ERR_IDENTIFY_CTRLID_UNSUP,
	NVME_ERR_IDENTIFY_CTRLID_UNUSE
};
151 
152 bool
nvme_id_req_set_ctrlid(nvme_id_req_t * req,uint32_t ctrlid)153 nvme_id_req_set_ctrlid(nvme_id_req_t *req, uint32_t ctrlid)
154 {
155 	if (!nvme_field_check_one(req->nir_ctrl, ctrlid, "identify",
156 	    &nvme_identify_check_ctrlid, req->nir_allow)) {
157 		return (false);
158 	}
159 
160 	req->nir_ctrlid = ctrlid;
161 	nvme_id_req_clear_need(req, NVME_ID_REQ_F_CTRLID);
162 	return (nvme_ctrl_success(req->nir_ctrl));
163 }
164 
165 bool
nvme_id_req_set_output(nvme_id_req_t * req,void * buf,size_t len)166 nvme_id_req_set_output(nvme_id_req_t *req, void *buf, size_t len)
167 {
168 	nvme_ctrl_t *ctrl = req->nir_ctrl;
169 
170 	if (buf == NULL) {
171 		return (nvme_ctrl_error(ctrl, NVME_ERR_BAD_PTR, 0,
172 		    "identify output buffer cannot be NULL"));
173 	}
174 
175 	if (len < NVME_IDENTIFY_BUFSIZE) {
176 		return (nvme_ctrl_error(ctrl, NVME_ERR_IDENTIFY_OUTPUT_RANGE, 0,
177 		    "identify buffer size must be at least %u bytes large",
178 		    NVME_IDENTIFY_BUFSIZE));
179 	}
180 
181 	req->nir_buf = buf;
182 	nvme_id_req_clear_need(req, NVME_ID_REQ_F_BUF);
183 	return (nvme_ctrl_success(req->nir_ctrl));
184 }
185 
186 bool
nvme_id_req_exec(nvme_id_req_t * req)187 nvme_id_req_exec(nvme_id_req_t *req)
188 {
189 	nvme_ctrl_t *ctrl = req->nir_ctrl;
190 	nvme_ioctl_identify_t id;
191 
192 	if (req->nir_need != 0) {
193 		return (nvme_field_miss_err(ctrl, nvme_identify_fields,
194 		    nvme_identify_nfields, NVME_ERR_IDENTIFY_REQ_MISSING_FIELDS,
195 		    "identify", req->nir_need));
196 	}
197 
198 	(void) memset(&id, 0, sizeof (nvme_ioctl_identify_t));
199 	id.nid_common.nioc_nsid = req->nir_nsid;
200 	id.nid_cns = req->nir_info->nii_cns;
201 	id.nid_ctrlid = req->nir_ctrlid;
202 	id.nid_data = (uintptr_t)req->nir_buf;
203 
204 	if (ioctl(req->nir_ctrl->nc_fd, NVME_IOC_IDENTIFY, &id) != 0) {
205 		int e = errno;
206 		return (nvme_ioctl_syserror(ctrl, e, "identify"));
207 	}
208 
209 	if (id.nid_common.nioc_drv_err != NVME_IOCTL_E_OK) {
210 		return (nvme_ioctl_error(ctrl, &id.nid_common, "identify"));
211 	}
212 
213 	return (nvme_ctrl_success(ctrl));
214 }
215