1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2025 Oxide Computer Company
14 */
15
16 /*
17 * Perform various validation checks for user and kernel initiated requests.
18 * This file focuses on the validation of NVMe semantic operations. It assumes
19 * that any necessary permission checks (privileges, exclusive access, etc.)
20 * are being taken care of separately.
21 *
22 * Log Pages
23 * ---------
24 *
25 * Log page requests come into the kernel and we have a few different
26 * constraints that we need to consider while performing validation. There are a
27 * few different gotchas:
28 *
29 * 1) The arguments that one can pass to a get log page command have changed
30 * over the different device revisions. While specifying the log page ID (lid)
31 * has always been supported, a log-specific field (lsp) was added in NVMe 1.3,
32 * and the ability to specify a command-set identifier (csi) was added in NVMe
33 * 2.0. Regardless of whether this is a vendor-specific command or not, we need
34 * to be able to validate that we're not going to send parameters to the
35 * controller that will cause the command to be rejected.
36 *
37 * 2) There are going to be log pages that we know about and some that we don't.
38 * At the moment, we constrain non-admin pass-through log pages to be log pages
39 * that the kernel knows about and therefore has an expected size for. This
40 * means that there is a lot more that we can check and enforce, such as whether
41 * or not specific pages support an lsp, lsi, etc. Conversely, for log pages
42 * that are admin pass-through commands, there's not a whole lot that we can
43 * actually do, so we only perform the version-specific checking.
44 *
45 * For any log page request that comes in, we'll try to identify which of the
46 * different types of log pages that it is, and go through and validate it
47 * appropriately.
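 *
 * As a rough sketch (not the actual construction code), the version gating
 * from point (1) looks something like the following, where the cutoffs come
 * from when the lsp and csi fields were introduced; the helper and version
 * constant names here are purely illustrative:
 *
 *	if (log->nigl_lsp != 0 && !nvme_vers_atleast(vers, &nvme_vers_1v3))
 *		return (nvme_ioctl_error(&log->nigl_common,
 *		    NVME_IOCTL_E_LOG_LSP_UNSUP, 0, 0));
 *	if (log->nigl_csi != 0 && !nvme_vers_atleast(vers, &nvme_vers_2v0))
 *		return (nvme_ioctl_error(&log->nigl_common,
 *		    NVME_IOCTL_E_LOG_CSI_UNSUP, 0, 0));
 *
 * In practice this is driven by the field information tables further down
 * rather than open-coded checks like the above.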
48 *
49 * Get Feature
50 * -----------
51 *
52 * Currently, the kernel only allows requests for standard features that it
53 * knows about. This will be loosened to look a little bit more like log pages
54 * when we have support for vendor-unique features.
55 *
56 * Like with log pages, in addition to the set of features having evolved, the
57 * arguments to the get features command have also changed to include additions
58 * like whether you want the default or saved value of a feature rather than its
59 * current value.
60 *
61 * One general complication with features is that for a number of optional
62 * features, there is no good way to know whether or not the device supports
63 * said feature other than asking for it.
64 *
65 * The last bit we need to be cognizant of is the fact that only a handful of
66 * features accept a namespace ID. Those that do may not even support the use
67 * of a broadcast namespace ID. While the controller node may ask for any
68 * feature, those using a namespace node are limited in terms of what they can
69 * actually issue.
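 *
 * As a point of reference, the 'sel' argument to get features (added in NVMe
 * 1.1 alongside the save/select capability) chooses which copy of a feature's
 * value is returned. A sketch of the values follows; only
 * NVME_FEATURE_SEL_SUPPORTED is referenced directly in the code below, the
 * rest are summarized here for illustration:
 *
 *	sel 0b00 -- value currently in effect
 *	sel 0b01 -- controller default value
 *	sel 0b10 -- last saved value
 *	sel 0b11 -- supported capabilities (saveable / changeable / per-ns)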
70 *
71 * Identify
72 * --------
73 *
74 * The kernel currently knows about the various identify data structure commands
75 * that it supports. It does this to enforce checking of the version and
76 * whether certain fields are set. The most complicated form of this relates
77 * to the namespace, as identify commands come in a few forms:
78 *
79 * 1) Identify commands that do not use a namespace ID at all (like identify
80 * controller).
81 * 2) Identify commands that are used to list namespaces. These allow a zero to
82 * be listed in the namespace ID field to ensure all namespaces are captured.
83 * 3) Identify commands that require a valid namespace and allow the broadcast
84 * namespace ID to be specified.
85 * 4) Identify commands that require a valid namespace and do not allow for a
86 * broadcast namespace ID to be specified.
87 *
88 * The cases here are identified based on flags in the nvme_identify_info_t. We
89 * must perform the entire validity check here ourselves.
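 *
 * As a hedged illustration, a few of the standard CNS values map onto the
 * cases above roughly as follows (case numbers refer to the list above; the
 * authoritative behavior is encoded in the nvme_identify_cmds table):
 *
 *	CNS 0x01, identify controller		case 1, no namespace ID
 *	CNS 0x02, active namespace ID list	case 2, nsid 0 lists from the
 *						beginning
 *	CNS 0x00, identify namespace		case 3, broadcast nsid allowed
 *						when namespace management is
 *						supported
 *	CNS 0x11, allocated namespace (1.2+)	case 4, a specific valid nsid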
90 *
91 * Vendor Unique Commands
92 * ----------------------
93 *
94 * When it comes to vendor unique commands, the main things that we try to
95 * validate are limited to what the specification requires of the shape of these
96 * commands and the constraints that we have. While there is discovery
97 * functionality in libnvme, we explicitly do not try to leverage or know
98 * what those commands are here. This makes things fairly different from both
99 * identify commands and log pages.
100 *
101 * Format Requests
102 * ---------------
103 *
104 * There are a few different things that we need to check before we allow a
105 * format request to proceed. Note, some of these are artificial constraints
106 * that we have opted to place in the driver. In particular, right now we
107 * don't support any namespaces with metadata or protection. There is no
108 * way to set this in our ioctl interface. Therefore, these settings are
109 * not verified.
110 *
111 * 1) First we must verify that the controller actually supports the Format NVM
112 * command at all.
113 *
114 * 2) Once that is good, we must validate the secure erase settings and that the
115 * LBA format is valid.
116 *
117 * 3) A controller can limit whether a secure erase or a format must impact the
118 * whole device or not.
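 *
 * As a sketch of how point (3) maps onto data we already have, the relevant
 * bits live in the FNA field of the identify controller data structure and
 * are consumed by nvme_validate_format() below:
 *
 *	id_fna.fn_format	1: format applies to all namespaces, so a
 *				single-namespace format request is rejected
 *	id_fna.fn_sec_erase	1: secure erase applies to all namespaces, so
 *				a single-namespace secure erase is rejected
 *	id_fna.fn_crypt_erase	0: cryptographic erase is not supported at all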
119 *
120 * Firmware Download and Commit
121 * ----------------------------
122 *
123 * Validating a firmware download request is fairly straightforward. Here we're
124 * mostly checking that the requested sizes and offsets have the proper
125 * alignment and aren't beyond the underlying command's maximum sizes. We also
126 * verify whether or not the device actually supports firmware download requests
127 * at all. We don't try to validate the contents of the data, check for other
128 * ongoing downloads, or notice whether we've skipped gaps in the image by
129 * changing offsets.
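 *
 * As a hedged usage sketch (the fwl_buf member is assumed here for
 * illustration; only fwl_len and fwl_off are validated below), a consumer
 * would typically walk an image in aligned, granularity-sized chunks:
 *
 *	for (uint64_t off = 0; off < img_len; off += chunk) {
 *		nvme_ioctl_fw_load_t fw = { 0 };
 *
 *		fw.fwl_off = off;
 *		fw.fwl_len = MIN(chunk, img_len - off);
 *		fw.fwl_buf = (uintptr_t)(img + off);
 *		... issue the firmware download ioctl, check fw.fwl_common ...
 *	}
 *
 * Both fwl_off and fwl_len must maintain the device's required alignment or
 * the field validation below will reject them.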
130 *
131 * When we opt to perform a firmware commit, all we check is that the
132 * command is supported and that we aren't targeting a read-only slot when
133 * saving an image.
134 *
135 * Namespace Management
136 * --------------------
137 *
138 * Namespace management consists of four commands: namespace create, namespace
139 * delete, controller attach, and controller detach. Namespace create is the
140 * most complicated of these. A namespace create must first validate that we
141 * support namespace management. After that, we have to validate all of the
142 * different fields that will be submitted through the identify namespace data
143 * structure.
144 *
145 * We do not attempt to validate whether or not there is sufficient capacity to
146 * create the namespace and leave that to the controller and the backend.
147 * However, we do verify whether the request requires thin provisioning support.
148 * Most other fields are basic range checks against what's supported in the
149 * version. We are looser on the LBA format for a create namespace to allow for
150 * more flexibility and just require that the LBA format is within range for
151 * the device.
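 *
 * For example (the values here are purely illustrative), a create request
 * with nnc_nsze of 0x200000 blocks but nnc_ncap of 0x100000 blocks asks for a
 * namespace whose size exceeds its allocated capacity. That is only legal
 * when the common namespace identify data advertises thin provisioning
 * (id_nsfeat.f_thin); otherwise the request fails with
 * NVME_IOCTL_E_CTRL_THIN_PROV_UNSUP.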
152 *
153 * The most notable piece here is the CSI. Create namespace adds the notion of a
154 * CSI starting in NVMe 2.0. Prior to this, it is implicitly the NVM CSI. Right
155 * now the kernel only supports the NVM command set and therefore restricts
156 * namespace creation to that CSI.
157 *
158 * Namespace delete is straightforward. The only thing that we need to validate
159 * is that the device supports namespace commands as the surrounding kernel code
160 * ensures that the namespace is both valid and in the correct state. Attaching
161 * a controller to a namespace and detaching it are handled the same way, as
162 * we currently only support attaching and detaching the controller that we're
163 * talking through.
164 */
165
166 #include <sys/sysmacros.h>
167 #include <sys/nvme.h>
168
169 #include "nvme_reg.h"
170 #include "nvme_var.h"
171
172 typedef struct nvme_validate_info {
173 const nvme_field_info_t *err_fields;
174 size_t err_index;
175 uint32_t err_unuse_bit;
176 nvme_ioctl_errno_t err_field_range;
177 nvme_ioctl_errno_t err_field_unsup;
178 nvme_ioctl_errno_t err_field_unuse;
179 } nvme_validate_info_t;
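
/*
 * To make the positional initializers in the tables below easier to read,
 * here is one of them (nvme_valid_log_lsp) re-expressed with designated
 * initializers. This is purely illustrative; the actual tables are unchanged.
 *
 *	static const nvme_validate_info_t nvme_valid_log_lsp = {
 *		.err_fields = nvme_log_fields,
 *		.err_index = NVME_LOG_REQ_FIELD_LSP,
 *		.err_unuse_bit = NVME_LOG_DISC_F_NEED_LSP,
 *		.err_field_range = NVME_IOCTL_E_LOG_LSP_RANGE,
 *		.err_field_unsup = NVME_IOCTL_E_LOG_LSP_UNSUP,
 *		.err_field_unuse = NVME_IOCTL_E_LOG_LSP_UNUSE
 *	};
 */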
180
181 static boolean_t
182 nvme_validate_one_field(nvme_ioctl_common_t *com, uint64_t val,
183 const nvme_validate_info_t *info, const nvme_valid_ctrl_data_t *data,
184 uint32_t valid)
185 {
186 const nvme_field_info_t *field = &info->err_fields[info->err_index];
187 nvme_field_error_t err;
188
189 if (val == 0) {
190 return (B_TRUE);
191 }
192
193 if (valid != 0 && info->err_unuse_bit != 0 &&
194 (valid & info->err_unuse_bit) == 0) {
195 VERIFY3U(info->err_field_unuse, !=, 0);
196 return (nvme_ioctl_error(com, info->err_field_unuse, 0, 0));
197 }
198
199 err = nvme_field_validate(field, data, val, NULL, 0);
200 switch (err) {
201 case NVME_FIELD_ERR_UNSUP_VERSION:
202 case NVME_FIELD_ERR_UNSUP_FIELD:
203 VERIFY3U(info->err_field_unsup, !=, 0);
204 return (nvme_ioctl_error(com, info->err_field_unsup, 0, 0));
205 case NVME_FIELD_ERR_BAD_VALUE:
206 VERIFY3U(info->err_field_range, !=, 0);
207 return (nvme_ioctl_error(com, info->err_field_range, 0, 0));
208 case NVME_FIELD_ERR_OK:
209 return (B_TRUE);
210 default:
211 panic("unsupported nvme_field_validate() value: 0x%x", err);
212 }
213 }
214
215 /*
216 * NVMe devices specify log page requests in units of uint32_t's. The original
217 * spec had a zeros-based value that was 12 bits wide, providing up to 16 KiB
218 * for a log page. In NVMe 1.3, this was changed and a device could
219 * optionally support a 32-bit wide length argument. We opt to support a smaller
220 * amount than the NVMe 1.3 maximum: 1 MiB, which is a fairly arbitrary
221 * value.
222 */
223 uint32_t nvme_log_page_max_size = 1 * 1024 * 1024;
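
/*
 * For reference, the length validated in this file is in bytes; when the
 * command is constructed it becomes the spec's zeros-based dword count
 * (NUMD). A minimal sketch of that conversion, assuming a length that has
 * already passed the non-zero, 4-byte-aligned field check:
 *
 *	uint32_t numd = (len / sizeof (uint32_t)) - 1;
 *
 * With the 1 MiB cap above, the result always fits in the wider NUMDL/NUMDU
 * encoding that NVMe 1.3 and later controllers accept.
 */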
224
225 static boolean_t
226 nvme_logpage_is_vendor(nvme_ioctl_get_logpage_t *log)
227 {
228 return (log->nigl_lid >= NVME_LOGPAGE_VEND_MIN &&
229 log->nigl_lid <= NVME_LOGPAGE_VEND_MAX);
230 }
231
232 static const nvme_validate_info_t nvme_valid_log_csi = {
233 nvme_log_fields, NVME_LOG_REQ_FIELD_CSI, 0,
234 NVME_IOCTL_E_LOG_CSI_RANGE, 0, NVME_IOCTL_E_LOG_CSI_UNSUP
235 };
236
237 static const nvme_validate_info_t nvme_valid_log_lid = {
238 nvme_log_fields, NVME_LOG_REQ_FIELD_LID, 0,
239 NVME_IOCTL_E_LOG_LID_RANGE, 0, 0
240 };
241
242 static const nvme_validate_info_t nvme_valid_log_lsp = {
243 nvme_log_fields, NVME_LOG_REQ_FIELD_LSP,
244 NVME_LOG_DISC_F_NEED_LSP, NVME_IOCTL_E_LOG_LSP_RANGE,
245 NVME_IOCTL_E_LOG_LSP_UNSUP, NVME_IOCTL_E_LOG_LSP_UNUSE
246 };
247
248 static const nvme_validate_info_t nvme_valid_log_lsi = {
249 nvme_log_fields, NVME_LOG_REQ_FIELD_LSI,
250 NVME_LOG_DISC_F_NEED_LSI, NVME_IOCTL_E_LOG_LSI_RANGE,
251 NVME_IOCTL_E_LOG_LSI_UNSUP, NVME_IOCTL_E_LOG_LSI_UNUSE
252 };
253
254 static const nvme_validate_info_t nvme_valid_log_rae = {
255 nvme_log_fields, NVME_LOG_REQ_FIELD_RAE,
256 NVME_LOG_DISC_F_NEED_RAE, NVME_IOCTL_E_LOG_RAE_RANGE,
257 NVME_IOCTL_E_LOG_RAE_UNSUP, NVME_IOCTL_E_LOG_RAE_UNUSE
258 };
259
260 static const nvme_validate_info_t nvme_valid_log_size = {
261 nvme_log_fields, NVME_LOG_REQ_FIELD_SIZE, 0,
262 NVME_IOCTL_E_LOG_SIZE_RANGE, 0, 0
263 };
264
265 static const nvme_validate_info_t nvme_valid_log_offset = {
266 nvme_log_fields, NVME_LOG_REQ_FIELD_OFFSET, 0,
267 NVME_IOCTL_E_LOG_OFFSET_RANGE, 0, NVME_IOCTL_E_LOG_OFFSET_UNSUP
268 };
269
270 /*
271 * Validate all of the fields that are present in a log request. The only one we
272 * don't take care of here is the namespace ID, because we have already checked
273 * it prior to this as part of nvme_ioctl_check().
274 */
275 static boolean_t
276 nvme_validate_logpage_fields(nvme_ioctl_get_logpage_t *log,
277 const nvme_valid_ctrl_data_t *ctrl_data, const nvme_log_page_info_t *info)
278 {
279 uint32_t disc = 0;
280
281 if (info != NULL) {
282 disc = info->nlpi_disc;
283 }
284
285 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_csi,
286 &nvme_valid_log_csi, ctrl_data, disc)) {
287 return (B_FALSE);
288 }
289
290 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_lid,
291 &nvme_valid_log_lid, ctrl_data, disc)) {
292 return (B_FALSE);
293 }
294
295 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_lsp,
296 &nvme_valid_log_lsp, ctrl_data, disc)) {
297 return (B_FALSE);
298 }
299
300 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_lsi,
301 &nvme_valid_log_lsi, ctrl_data, disc)) {
302 return (B_FALSE);
303 }
304
305 /*
306 * Just like the LID, we treat the size as having two of the same error
307 * type right now as it's always been supported since NVMe 1.0. The
308 * common check confirms that the value is non-zero and that it is
309 * 4-byte aligned.
310 */
311 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_len,
312 &nvme_valid_log_size, ctrl_data, disc)) {
313 return (B_FALSE);
314 }
315
316 /*
317 * Ensure that the log page does not exceed the kernel's maximum size
318 * that one can get in one request.
319 */
320 if (log->nigl_len > nvme_log_page_max_size) {
321 return (nvme_ioctl_error(&log->nigl_common,
322 NVME_IOCTL_E_LOG_SIZE_RANGE, 0, 0));
323 }
324
325 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_rae,
326 &nvme_valid_log_rae, ctrl_data, disc)) {
327 return (B_FALSE);
328 }
329
330 if (!nvme_validate_one_field(&log->nigl_common, log->nigl_offset,
331 &nvme_valid_log_offset, ctrl_data, disc)) {
332 return (B_FALSE);
333 }
334
335 /*
336 * Log pages may either have a known fixed size, a variable size, or an
337 * unknown size. If we have a log page with a known, fixed size, then we
338 * require that the requested size match that and we do not allow an
339 * offset to be specified at this time. Otherwise, there is nothing to
340 * check for a variable length page as we have constrained everything by
341 * the maximum size above. As we encounter fixed size log pages that
342 * exceed the kernel's maximum value, we will likely have to change this
343 * in the future.
344 */
345 if (info != NULL) {
346 bool var;
347 size_t targ = nvme_log_page_info_size(info, ctrl_data, &var);
348
349 if (!var) {
350 if (targ != 0 && targ != log->nigl_len) {
351 return (nvme_ioctl_error(&log->nigl_common,
352 NVME_IOCTL_E_LOG_SIZE_RANGE, 0, 0));
353 }
354
355 if (log->nigl_offset != 0) {
356 return (nvme_ioctl_error(&log->nigl_common,
357 NVME_IOCTL_E_LOG_OFFSET_RANGE, 0, 0));
358 }
359 }
360 }
361
362 return (B_TRUE);
363 }
364
365 /*
366 * Validating a log page involves a series of steps. Once we
367 * identify that this is a known log page, we first validate that our controller
368 * actually supports the command. Once we know that, then we'll move onto the
369 * question of whether we have an appropriate scope. After that we go through
370 * and make sure all of the fields are set appropriately for the log page.
371 */
372 boolean_t
373 nvme_validate_logpage(nvme_t *nvme, nvme_ioctl_get_logpage_t *log)
374 {
375 const nvme_log_page_info_t *info = NULL;
376 nvme_valid_ctrl_data_t ctrl_data;
377 nvme_log_disc_scope_t scope, req_scope;
378
379 ctrl_data.vcd_vers = &nvme->n_version;
380 ctrl_data.vcd_id = nvme->n_idctl;
381
382 if (nvme_logpage_is_vendor(log)) {
383 return (nvme_validate_logpage_fields(log, &ctrl_data, NULL));
384 }
385
386 for (size_t i = 0; i < nvme_std_log_npages; i++) {
387 if (nvme_std_log_pages[i].nlpi_csi == log->nigl_csi &&
388 nvme_std_log_pages[i].nlpi_lid == log->nigl_lid) {
389 info = &nvme_std_log_pages[i];
390 break;
391 }
392 }
393
394 if (info == NULL) {
395 return (nvme_ioctl_error(&log->nigl_common,
396 NVME_IOCTL_E_UNKNOWN_LOG_PAGE, 0, 0));
397 }
398
399 if (!nvme_log_page_info_supported(info, &ctrl_data)) {
400 return (nvme_ioctl_error(&log->nigl_common,
401 NVME_IOCTL_E_UNSUP_LOG_PAGE, 0, 0));
402 }
403
404 scope = nvme_log_page_info_scope(info, &ctrl_data);
405 if (log->nigl_common.nioc_nsid == NVME_NSID_BCAST) {
406 req_scope = NVME_LOG_SCOPE_CTRL | NVME_LOG_SCOPE_NVM;
407 } else {
408 req_scope = NVME_LOG_SCOPE_NS;
409 }
410
411 if ((scope & req_scope) == 0) {
412 return (nvme_ioctl_error(&log->nigl_common,
413 NVME_IOCTL_E_BAD_LOG_SCOPE, 0, 0));
414 }
415
416 return (nvme_validate_logpage_fields(log, &ctrl_data, info));
417 }
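
/*
 * A hedged usage sketch of the above: a caller requesting the SMART / health
 * log for the controller as a whole would fill in a request along these lines
 * before it is validated (the LID constant and log structure names here are
 * assumed for illustration):
 *
 *	nvme_ioctl_get_logpage_t log = { 0 };
 *
 *	log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
 *	log.nigl_lid = NVME_LOGPAGE_HEALTH;
 *	log.nigl_csi = NVME_CSI_NVM;
 *	log.nigl_len = sizeof (nvme_health_log_t);
 *
 *	if (!nvme_validate_logpage(nvme, &log)) {
 *		... log.nigl_common now holds the specific ioctl error ...
 *	}
 */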
418
419 static const nvme_validate_info_t nvme_valid_get_feat_sel = {
420 nvme_get_feat_fields, NVME_GET_FEAT_REQ_FIELD_SEL, 0,
421 NVME_IOCTL_E_GET_FEAT_SEL_RANGE, NVME_IOCTL_E_GET_FEAT_SEL_UNSUP, 0
422 };
423
424 static const nvme_validate_info_t nvme_valid_get_feat_cdw11 = {
425 nvme_get_feat_fields, NVME_GET_FEAT_REQ_FIELD_CDW11,
426 NVME_GET_FEAT_F_CDW11, NVME_IOCTL_E_GET_FEAT_CDW11_RANGE,
427 0, NVME_IOCTL_E_GET_FEAT_CDW11_UNUSE
428 };
429
430 /*
431 * To validate a feature we take the following high-level steps:
432 *
433 * 1) First, we have to determine that this is a feature that we know about.
434 * 2) Ensure that this feature is actually supported. We may not be able to
435 * confirm that it is, but we can sometimes confirm that it is not. Do not
436 * execute any unsupported features.
437 * 3) We have to determine whether we can actually issue this feature with the
438 * specified namespace or not.
439 * 4) Go through and validate all the remaining fields.
440 */
441 boolean_t
442 nvme_validate_get_feature(nvme_t *nvme, nvme_ioctl_get_feature_t *get)
443 {
444 const nvme_feat_info_t *feat = NULL;
445 const uint32_t nsid = get->nigf_common.nioc_nsid;
446 nvme_valid_ctrl_data_t ctrl_data;
447 nvme_feat_impl_t impl;
448
449 ctrl_data.vcd_vers = &nvme->n_version;
450 ctrl_data.vcd_id = nvme->n_idctl;
451
452 for (size_t i = 0; i < nvme_std_nfeats; i++) {
453 if (nvme_std_feats[i].nfeat_fid == get->nigf_fid) {
454 feat = &nvme_std_feats[i];
455 break;
456 }
457 }
458
459 if (feat == NULL) {
460 return (nvme_ioctl_error(&get->nigf_common,
461 NVME_IOCTL_E_UNKNOWN_FEATURE, 0, 0));
462 }
463
464 /*
465 * Before we do anything else, determine if this is supported. For
466 * things that are unknown, there is naught we can do but try.
467 */
468 impl = nvme_feat_supported(feat, &ctrl_data);
469 if (impl == NVME_FEAT_IMPL_UNSUPPORTED) {
470 return (nvme_ioctl_error(&get->nigf_common,
471 NVME_IOCTL_E_UNSUP_FEATURE, 0, 0));
472 }
473
474 /*
475 * To check the namespace-related information we rely on whether the get
476 * fields indicate a namespace is required or not. We prefer to use
477 * this rather than the scope as we've seen log pages that end up
478 * supporting multiple scopes. If a namespace is specified, but there is
479 * not one required for the feature, then we assume that this is an
480 * attempt to read something from the controller node. After that we
481 * must check if the broadcast namespace is allowed.
482 *
483 * Conversely, if a namespace is required, then we can't be on the
484 * generic controller node with the namespace left as 0.
485 */
486 if ((feat->nfeat_in_get & NVME_GET_FEAT_F_NSID) != 0) {
487 if (nsid == 0 || (nsid == NVME_NSID_BCAST &&
488 (feat->nfeat_flags & NVME_FEAT_F_GET_BCAST_NSID) == 0)) {
489 return (nvme_ioctl_error(&get->nigf_common,
490 NVME_IOCTL_E_NS_RANGE, 0, 0));
491 }
492 } else {
493 if (nsid != 0) {
494 return (nvme_ioctl_error(&get->nigf_common,
495 NVME_IOCTL_E_NS_UNUSE, 0, 0));
496 }
497 }
498
499 /*
500 * The last step is to perform field validation. Note, we've already
501 * validated the nsid above and we skip validating the fid because we've
502 * already taken care of that by selecting for a valid feature. For a
503 * get features, this leaves us with cdw11, a data pointer, and the
504 * 'sel' field. We validate the sel field first. If we find a request
505 * that is asking for the supported capabilities, then we will change
506 * our validation policy and require that the other fields explicitly be
507 * zero to proceed.
508 */
509 if (!nvme_validate_one_field(&get->nigf_common, get->nigf_sel,
510 &nvme_valid_get_feat_sel, &ctrl_data, feat->nfeat_in_get)) {
511 return (B_FALSE);
512 }
513
514 if (get->nigf_sel == NVME_FEATURE_SEL_SUPPORTED) {
515 if (get->nigf_cdw11 != 0) {
516 return (nvme_ioctl_error(&get->nigf_common,
517 NVME_IOCTL_E_GET_FEAT_CDW11_UNUSE, 0, 0));
518 }
519
520 if (get->nigf_data != 0 || get->nigf_len != 0) {
521 return (nvme_ioctl_error(&get->nigf_common,
522 NVME_IOCTL_E_GET_FEAT_DATA_UNUSE, 0, 0));
523 }
524
525 return (B_TRUE);
526 }
527
528 if (!nvme_validate_one_field(&get->nigf_common, get->nigf_cdw11,
529 &nvme_valid_get_feat_cdw11, &ctrl_data, feat->nfeat_in_get)) {
530 return (B_FALSE);
531 }
532
533 /*
534 * The last thing we need to do here is validate the size that we've
535 * been given. There are no size/offset fields in the get feature
536 * request unlike with get log page. Therefore we must be given a data
537 * buffer that matches exactly what the feature requires.
538 */
539 if ((feat->nfeat_in_get & NVME_GET_FEAT_F_DATA) == 0) {
540 if (get->nigf_data != 0 || get->nigf_len != 0) {
541 return (nvme_ioctl_error(&get->nigf_common,
542 NVME_IOCTL_E_GET_FEAT_DATA_UNUSE, 0, 0));
543 }
544 } else {
545 if (get->nigf_data == 0 || get->nigf_len != feat->nfeat_len) {
546 return (nvme_ioctl_error(&get->nigf_common,
547 NVME_IOCTL_E_GET_FEAT_DATA_RANGE, 0, 0));
548 }
549 }
550
551 /*
552 * In the past, the driver also checked a few of the specific values of
553 * cdw11 against information that the kernel had, such as the maximum
554 * number of interrupts that we had configured or the valid temperature
555 * types in the temperature threshold. In the future, if we wanted to add
556 * cdw11-specific validation, this is roughly where we'd want to insert
557 * it.
558 */
559
560 return (B_TRUE);
561 }
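
/*
 * A brief, hedged sketch of what a valid request looks like after the checks
 * above: asking for the current value of a simple controller-scoped feature
 * needs nothing beyond the feature ID itself (the FID constant name is
 * assumed here for illustration):
 *
 *	nvme_ioctl_get_feature_t get = { 0 };
 *
 *	get.nigf_fid = NVME_FEAT_WRITE_CACHE;
 *	get.nigf_sel = 0;
 *
 *	if (!nvme_validate_get_feature(nvme, &get)) {
 *		... get.nigf_common holds the specific error ...
 *	}
 *
 * A namespace-scoped feature, a saved value, or a feature with a data buffer
 * must additionally satisfy the corresponding checks above.
 */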
562
563 static const nvme_validate_info_t nvme_valid_identify_nsid = {
564 nvme_identify_fields, NVME_ID_REQ_F_NSID,
565 1 << NVME_ID_REQ_F_NSID, NVME_IOCTL_E_NS_RANGE, 0,
566 NVME_IOCTL_E_NS_UNUSE
567 };
568
569 static const nvme_validate_info_t nvme_valid_identify_ctrlid = {
570 nvme_identify_fields, NVME_ID_REQ_F_CTRLID,
571 1 << NVME_ID_REQ_F_CTRLID, NVME_IOCTL_E_IDENTIFY_CTRLID_RANGE,
572 NVME_IOCTL_E_IDENTIFY_CTRLID_UNSUP, NVME_IOCTL_E_IDENTIFY_CTRLID_UNUSE
573 };
574
575 boolean_t
576 nvme_validate_identify(nvme_t *nvme, nvme_ioctl_identify_t *id,
577 boolean_t ns_minor)
578 {
579 const nvme_identify_info_t *info = NULL;
580 nvme_valid_ctrl_data_t ctrl_data;
581
582 ctrl_data.vcd_vers = &nvme->n_version;
583 ctrl_data.vcd_id = nvme->n_idctl;
584
585 for (size_t i = 0; i < nvme_identify_ncmds; i++) {
586 if (nvme_identify_cmds[i].nii_csi == NVME_CSI_NVM &&
587 nvme_identify_cmds[i].nii_cns == id->nid_cns) {
588 info = &nvme_identify_cmds[i];
589 break;
590 }
591 }
592
593 if (info == NULL) {
594 return (nvme_ioctl_error(&id->nid_common,
595 NVME_IOCTL_E_UNKNOWN_IDENTIFY, 0, 0));
596 }
597
598 if (!nvme_identify_info_supported(info, &ctrl_data)) {
599 return (nvme_ioctl_error(&id->nid_common,
600 NVME_IOCTL_E_UNSUP_IDENTIFY, 0, 0));
601 }
602
603 /*
604 * Now it's time for our favorite thing, checking the namespace. Unlike
605 * other validation routines, we can't rely on the general ioctl
606 * checking logic due to all the variations of namespace usage in
607 * commands. See the Identify Commands section of the theory statement
608 * for more information.
609 *
610 * Note: we do not explicitly test the CNS field for validity as we do
611 * the others below, because we only allow known CNS values, which are
612 * determined above. In addition, we don't use the full generic field
613 * validation for the nsid because it was valid in NVMe 1.0 and its size
614 * hasn't changed throughout.
615 *
616 * First, check that if we're issuing a command that doesn't allow a
617 * namespace to call it, we've not specified one. In particular, a
618 * namespace minor would already have had its nsid set here, so this is
619 * what would cause us to fail that.
620 */
621 if ((info->nii_flags & NVME_IDENTIFY_INFO_F_NS_OK) == 0 && ns_minor) {
622 return (nvme_ioctl_error(&id->nid_common, NVME_IOCTL_E_NOT_CTRL,
623 0, 0));
624 }
625
626 /*
627 * If we've been told that the broadcast namespace is usable for this
628 * command and no namespace was specified, translate it to the broadcast
629 * namespace ID if the controller supports that. Otherwise we fall back
630 * to a namespace ID that'll hopefully have some information: nsid 1.
631 */
632 if ((info->nii_flags & NVME_IDENTIFY_INFO_F_BCAST) != 0 &&
633 id->nid_common.nioc_nsid == 0) {
634 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
635 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
636 id->nid_common.nioc_nsid = NVME_NSID_BCAST;
637 } else {
638 id->nid_common.nioc_nsid = 1;
639 }
640 }
641
642 /*
643 * Perform namespace ID check. We have three different groups of
644 * commands here that we need to consider and all have different
645 * handling:
646 *
647 * 1) Commands that must not have a namespace specified.
648 * 2) Commands which require a namespace ID, but whether the
649 * broadcast namespace can be used is variable.
650 * 3) Commands which are listing namespaces and therefore can take any
651 * value in the namespace list.
652 *
653 * In addition, because of all the weird semantics above, we have not
654 * leveraged our common ioctl logic for checking whether or not the
655 * namespace is valid. Furthermore, the general field checking logic
656 * allows a zero here. So for cases (1) and (2) we start with the normal
657 * field check. Then, for (2), we perform the non-zero and broadcast
658 * namespace checks. For (3), anything goes. Note, we've already verified
659 * the minor is allowed to use this.
660 */
661 if ((info->nii_flags & NVME_IDENTIFY_INFO_F_NSID_LIST) == 0 &&
662 !nvme_validate_one_field(&id->nid_common, id->nid_common.nioc_nsid,
663 &nvme_valid_identify_nsid, &ctrl_data, info->nii_fields)) {
664 return (B_FALSE);
665 }
666
667 if ((info->nii_fields & (1 << NVME_ID_REQ_F_NSID)) != 0 &&
668 (info->nii_flags & NVME_IDENTIFY_INFO_F_NSID_LIST) == 0) {
669 const uint32_t ns = id->nid_common.nioc_nsid;
670 boolean_t allow_bcast = (info->nii_flags &
671 NVME_IDENTIFY_INFO_F_BCAST) != 0;
672
673 if (ns == 0 || ns > nvme->n_namespace_count) {
674 if (ns != NVME_NSID_BCAST) {
675 return (nvme_ioctl_error(&id->nid_common,
676 NVME_IOCTL_E_NS_RANGE, 0, 0));
677 } else if (!allow_bcast) {
678 return (nvme_ioctl_error(&id->nid_common,
679 NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
680 }
681 }
682 }
683
684 if (!nvme_validate_one_field(&id->nid_common, id->nid_ctrlid,
685 &nvme_valid_identify_ctrlid, &ctrl_data, info->nii_fields)) {
686 return (B_FALSE);
687 }
688
689 return (B_TRUE);
690 }
691
692 static const nvme_validate_info_t nvme_valid_vuc_opcode = {
693 nvme_vuc_fields, NVME_VUC_REQ_FIELD_OPC, 0,
694 NVME_IOCTL_E_VUC_OPCODE_RANGE, 0, 0
695 };
696
697 static const nvme_validate_info_t nvme_valid_vuc_nsid = {
698 nvme_vuc_fields, NVME_VUC_REQ_FIELD_NSID, 0,
699 NVME_IOCTL_E_NS_RANGE, 0, 0
700 };
701
702 static const nvme_validate_info_t nvme_valid_vuc_ndt = {
703 nvme_vuc_fields, NVME_VUC_REQ_FIELD_NDT, 0,
704 NVME_IOCTL_E_VUC_NDT_RANGE, 0, 0
705 };
706
707 boolean_t
708 nvme_validate_vuc(nvme_t *nvme, nvme_ioctl_passthru_t *pass)
709 {
710 nvme_valid_ctrl_data_t ctrl_data;
711 const uint32_t all_flags = NVME_PASSTHRU_READ | NVME_PASSTHRU_WRITE;
712 const uint32_t all_impact = NVME_IMPACT_NS;
713
714 ctrl_data.vcd_vers = &nvme->n_version;
715 ctrl_data.vcd_id = nvme->n_idctl;
716
717 /*
718 * If there's no controller support, there's nothing that we can do.
719 */
720 if (nvme->n_idctl->id_nvscc.nv_spec == 0) {
721 return (nvme_ioctl_error(&pass->npc_common,
722 NVME_IOCTL_E_CTRL_VUC_UNSUP, 0, 0));
723 }
724
725 /*
726 * We don't use the common validation code for the timeout because
727 * there's no way for it to know the kernel's max value right now.
728 */
729 if (pass->npc_timeout == 0 ||
730 pass->npc_timeout > nvme_vendor_specific_admin_cmd_max_timeout) {
731 return (nvme_ioctl_error(&pass->npc_common,
732 NVME_IOCTL_E_VUC_TIMEOUT_RANGE, 0, 0));
733 }
734
735 if (!nvme_validate_one_field(&pass->npc_common, pass->npc_opcode,
736 &nvme_valid_vuc_opcode, &ctrl_data, 0)) {
737 return (B_FALSE);
738 }
739
740 if (!nvme_validate_one_field(&pass->npc_common,
741 pass->npc_common.nioc_nsid, &nvme_valid_vuc_nsid, &ctrl_data, 0)) {
742 return (B_FALSE);
743 }
744
745 /*
746 * Ensure that the flags and impact fields only have known values.
747 */
748 if ((pass->npc_flags & ~all_flags) != 0) {
749 return (nvme_ioctl_error(&pass->npc_common,
750 NVME_IOCTL_E_VUC_FLAGS_RANGE, 0, 0));
751 }
752
753 if ((pass->npc_impact & ~all_impact) != 0) {
754 return (nvme_ioctl_error(&pass->npc_common,
755 NVME_IOCTL_E_VUC_IMPACT_RANGE, 0, 0));
756 }
757
758 /*
759 * We need to validate several different things related to the buffer
760 * and its length:
761 *
762 * - The buffer length must be a multiple of 4 bytes (checked by common
763 * code).
764 * - The buffer length cannot exceed the hardware max (checked by
765 * common code).
766 * - The buffer length cannot exceed our maximum size.
767 * - That if the buffer is present, a length is set.
768 * - That if there is no buffer, the length is zero.
769 * - That if a buffer is set, we have the direction flags set.
770 * - That both direction flags aren't set at the same time.
771 *
772 * We only fall into the normal validation code after all this to make
773 * sure there is nothing else weird here.
774 */
775 if (!nvme_validate_one_field(&pass->npc_common, pass->npc_buflen,
776 &nvme_valid_vuc_ndt, &ctrl_data, 0)) {
777 return (B_FALSE);
778 }
779
780 if (pass->npc_buflen > nvme_vendor_specific_admin_cmd_size) {
781 return (nvme_ioctl_error(&pass->npc_common,
782 NVME_IOCTL_E_VUC_NDT_RANGE, 0, 0));
783 }
784
785 if ((pass->npc_buflen != 0 && pass->npc_buf == 0) ||
786 (pass->npc_buflen == 0 && pass->npc_buf != 0)) {
787 return (nvme_ioctl_error(&pass->npc_common,
788 NVME_IOCTL_E_INCONSIST_VUC_BUF_NDT, 0, 0));
789 }
790
791 if ((pass->npc_buflen != 0 && pass->npc_flags == 0) ||
792 ((pass->npc_buflen == 0 && pass->npc_flags != 0))) {
793 return (nvme_ioctl_error(&pass->npc_common,
794 NVME_IOCTL_E_INCONSIST_VUC_FLAGS_NDT, 0, 0));
795 }
796
797 if ((pass->npc_flags & NVME_PASSTHRU_READ) != 0 &&
798 (pass->npc_flags & NVME_PASSTHRU_WRITE) != 0) {
799 return (nvme_ioctl_error(&pass->npc_common,
800 NVME_IOCTL_E_VUC_FLAGS_RANGE, 0, 0));
801 }
802
803 return (B_TRUE);
804 }
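
/*
 * To summarize the buffer consistency rules enforced above, the accepted
 * combinations are (where R or W means exactly one of NVME_PASSTHRU_READ or
 * NVME_PASSTHRU_WRITE is set):
 *
 *	npc_buf		npc_buflen	npc_flags	result
 *	0		0		none		OK, no data transfer
 *	non-zero	non-zero	R or W		OK
 *	any other combination				rejected as inconsistent
 */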
805
806 static const nvme_validate_info_t nvme_valid_format_lbaf = {
807 nvme_format_fields, NVME_FORMAT_REQ_FIELD_LBAF, 0,
808 NVME_IOCTL_E_FORMAT_LBAF_RANGE, 0, 0
809 };
810
811 static const nvme_validate_info_t nvme_valid_format_ses = {
812 nvme_format_fields, NVME_FORMAT_REQ_FIELD_SES, 0,
813 NVME_IOCTL_E_FORMAT_SES_RANGE, 0, 0
814 };
815
816 boolean_t
817 nvme_validate_format(nvme_t *nvme, nvme_ioctl_format_t *ioc)
818 {
819 nvme_valid_ctrl_data_t ctrl_data;
820 const nvme_identify_nsid_t *idns;
821
822 ctrl_data.vcd_vers = &nvme->n_version;
823 ctrl_data.vcd_id = nvme->n_idctl;
824
825 if (!nvme_format_cmds_supported(&ctrl_data)) {
826 return (nvme_ioctl_error(&ioc->nif_common,
827 NVME_IOCTL_E_CTRL_FORMAT_UNSUP, 0, 0));
828 }
829
830 if (!nvme_validate_one_field(&ioc->nif_common, ioc->nif_lbaf,
831 &nvme_valid_format_lbaf, &ctrl_data, 0)) {
832 return (B_FALSE);
833 }
834
835 if (!nvme_validate_one_field(&ioc->nif_common, ioc->nif_ses,
836 &nvme_valid_format_ses, &ctrl_data, 0)) {
837 return (B_FALSE);
838 }
839
840 /*
841 * Now we need to determine if this LBA format is actually one that is
842 * supported by the controller and by the operating system. Note, the
843 * number of LBA formats is considered a zeros-based value (that is, the
844 * actual count is the value there plus one). In the future we should
845 * consider pulling the id_nlbaf check into the common validation code
846 * and passing the common namespace information there as well.
847 */
848 idns = nvme->n_idcomns;
849 if (ioc->nif_lbaf > idns->id_nlbaf) {
850 return (nvme_ioctl_error(&ioc->nif_common,
851 NVME_IOCTL_E_FORMAT_LBAF_RANGE, 0, 0));
852 }
853
854 if (idns->id_lbaf[ioc->nif_lbaf].lbaf_ms != 0) {
855 return (nvme_ioctl_error(&ioc->nif_common,
856 NVME_IOCTL_E_UNSUP_LBAF_META, 0, 0));
857 }
858
859 if (ioc->nif_ses == NVME_FRMT_SES_CRYPTO &&
860 nvme->n_idctl->id_fna.fn_crypt_erase == 0) {
861 return (nvme_ioctl_error(&ioc->nif_common,
862 NVME_IOCTL_E_CTRL_CRYPTO_SE_UNSUP, 0, 0));
863 }
864
865 /*
866 * The remaining checks only apply to cases where we're targeting a
867 * single namespace.
868 */
869 if (ioc->nif_common.nioc_nsid == NVME_NSID_BCAST) {
870 return (B_TRUE);
871 }
872
873 if (nvme->n_idctl->id_fna.fn_format != 0) {
874 return (nvme_ioctl_error(&ioc->nif_common,
875 NVME_IOCTL_E_CTRL_NS_FORMAT_UNSUP, 0, 0));
876 }
877
878 if (ioc->nif_ses != NVME_FRMT_SES_NONE &&
879 nvme->n_idctl->id_fna.fn_sec_erase != 0) {
880 return (nvme_ioctl_error(&ioc->nif_common,
881 NVME_IOCTL_E_CTRL_NS_SE_UNSUP, 0, 0));
882 }
883
884 return (B_TRUE);
885 }
886
887 static const nvme_validate_info_t nvme_valid_fw_load_numd = {
888 nvme_fw_load_fields, NVME_FW_LOAD_REQ_FIELD_NUMD, 0,
889 NVME_IOCTL_E_FW_LOAD_LEN_RANGE, 0, 0
890 };
891
892 static const nvme_validate_info_t nvme_valid_fw_load_offset = {
893 nvme_fw_load_fields, NVME_FW_LOAD_REQ_FIELD_OFFSET, 0,
894 NVME_IOCTL_E_FW_LOAD_OFFSET_RANGE, 0, 0
895 };
896
897 boolean_t
898 nvme_validate_fw_load(nvme_t *nvme, nvme_ioctl_fw_load_t *fw)
899 {
900 nvme_valid_ctrl_data_t ctrl_data;
901
902 ctrl_data.vcd_vers = &nvme->n_version;
903 ctrl_data.vcd_id = nvme->n_idctl;
904
905 if (!nvme_fw_cmds_supported(&ctrl_data)) {
906 return (nvme_ioctl_error(&fw->fwl_common,
907 NVME_IOCTL_E_CTRL_FW_UNSUP, 0, 0));
908 }
909
910 if (!nvme_validate_one_field(&fw->fwl_common, fw->fwl_len,
911 &nvme_valid_fw_load_numd, &ctrl_data, 0)) {
912 return (B_FALSE);
913 }
914
915 if (!nvme_validate_one_field(&fw->fwl_common, fw->fwl_off,
916 &nvme_valid_fw_load_offset, &ctrl_data, 0)) {
917 return (B_FALSE);
918 }
919
920 return (B_TRUE);
921 }
922
923 static const nvme_validate_info_t nvme_valid_fw_commit_slot = {
924 nvme_fw_commit_fields, NVME_FW_COMMIT_REQ_FIELD_SLOT, 0,
925 NVME_IOCTL_E_FW_COMMIT_SLOT_RANGE, 0, 0
926 };
927
928 static const nvme_validate_info_t nvme_valid_fw_commit_act = {
929 nvme_fw_commit_fields, NVME_FW_COMMIT_REQ_FIELD_ACT, 0,
930 NVME_IOCTL_E_FW_COMMIT_ACTION_RANGE, 0, 0
931 };
932
933 boolean_t
934 nvme_validate_fw_commit(nvme_t *nvme, nvme_ioctl_fw_commit_t *fw)
935 {
936 nvme_valid_ctrl_data_t ctrl_data;
937
938 ctrl_data.vcd_vers = &nvme->n_version;
939 ctrl_data.vcd_id = nvme->n_idctl;
940
941 if (!nvme_fw_cmds_supported(&ctrl_data)) {
942 return (nvme_ioctl_error(&fw->fwc_common,
943 NVME_IOCTL_E_CTRL_FW_UNSUP, 0, 0));
944 }
945
946 if (!nvme_validate_one_field(&fw->fwc_common, fw->fwc_slot,
947 &nvme_valid_fw_commit_slot, &ctrl_data, 0)) {
948 return (B_FALSE);
949 }
950
951 if (!nvme_validate_one_field(&fw->fwc_common, fw->fwc_action,
952 &nvme_valid_fw_commit_act, &ctrl_data, 0)) {
953 return (B_FALSE);
954 }
955
956 /*
957 * Do not allow someone to explicitly save an image to a read-only
958 * firmware slot. The specification only allows slot 1 to be marked
959 * read-only.
960 */
961 if (fw->fwc_slot == 1 && nvme->n_idctl->id_frmw.fw_readonly &&
962 (fw->fwc_action == NVME_FWC_SAVE ||
963 fw->fwc_action == NVME_FWC_SAVE_ACTIVATE)) {
964 return (nvme_ioctl_error(&fw->fwc_common,
965 NVME_IOCTL_E_RO_FW_SLOT, 0, 0));
966 }
967
968 return (B_TRUE);
969 }
970
971 /*
972 * Right now we do not allow a controller list to be specified and will only
973 * ever insert our own local controller's ID into the list.
974 */
975 boolean_t
976 nvme_validate_ctrl_attach_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
977 {
978 nvme_valid_ctrl_data_t ctrl_data;
979
980 ctrl_data.vcd_vers = &nvme->n_version;
981 ctrl_data.vcd_id = nvme->n_idctl;
982
983 if (!nvme_nsmgmt_cmds_supported(&ctrl_data)) {
984 return (nvme_ioctl_error(com, NVME_IOCTL_E_CTRL_NS_MGMT_UNSUP,
985 0, 0));
986 }
987
988 return (B_TRUE);
989 }
990
991 boolean_t
992 nvme_validate_ns_delete(nvme_t *nvme, nvme_ioctl_common_t *com)
993 {
994 nvme_valid_ctrl_data_t ctrl_data;
995
996 ctrl_data.vcd_vers = &nvme->n_version;
997 ctrl_data.vcd_id = nvme->n_idctl;
998
999 if (!nvme_nsmgmt_cmds_supported(&ctrl_data)) {
1000 return (nvme_ioctl_error(com, NVME_IOCTL_E_CTRL_NS_MGMT_UNSUP,
1001 0, 0));
1002 }
1003
1004 return (B_TRUE);
1005 }
1006
1007 static const nvme_validate_info_t nvme_valid_ns_create_nsze = {
1008 nvme_ns_create_fields, NVME_NS_CREATE_REQ_FIELD_NSZE, 0,
1009 NVME_IOCTL_E_NS_CREATE_NSZE_RANGE, 0, 0
1010 };
1011
1012 static const nvme_validate_info_t nvme_valid_ns_create_ncap = {
1013 nvme_ns_create_fields, NVME_NS_CREATE_REQ_FIELD_NCAP, 0,
1014 NVME_IOCTL_E_NS_CREATE_NCAP_RANGE, 0, 0
1015 };
1016
1017 static const nvme_validate_info_t nvme_valid_ns_create_csi = {
1018 nvme_ns_create_fields, NVME_NS_CREATE_REQ_FIELD_CSI, 0,
1019 NVME_IOCTL_E_NS_CREATE_CSI_RANGE, NVME_IOCTL_E_NS_CREATE_CSI_UNSUP, 0
1020 };
1021
1022 static const nvme_validate_info_t nvme_valid_ns_create_nmic = {
1023 nvme_ns_create_fields, NVME_NS_CREATE_REQ_FIELD_NMIC, 0,
1024 NVME_IOCTL_E_NS_CREATE_NMIC_RANGE, 0, 0
1025 };
1026
1027 boolean_t
1028 nvme_validate_ns_create(nvme_t *nvme, nvme_ioctl_ns_create_t *ioc)
1029 {
1030 const nvme_identify_nsid_t *idns = nvme->n_idcomns;
1031 nvme_valid_ctrl_data_t ctrl_data;
1032
1033 ctrl_data.vcd_vers = &nvme->n_version;
1034 ctrl_data.vcd_id = nvme->n_idctl;
1035
1036 if (!nvme_nsmgmt_cmds_supported(&ctrl_data)) {
1037 return (nvme_ioctl_error(&ioc->nnc_common,
1038 NVME_IOCTL_E_CTRL_NS_MGMT_UNSUP, 0, 0));
1039 }
1040
1041 if (!nvme_validate_one_field(&ioc->nnc_common, ioc->nnc_nsze,
1042 &nvme_valid_ns_create_nsze, &ctrl_data, 0)) {
1043 return (B_FALSE);
1044 }
1045
1046 if (!nvme_validate_one_field(&ioc->nnc_common, ioc->nnc_ncap,
1047 &nvme_valid_ns_create_ncap, &ctrl_data, 0)) {
1048 return (B_FALSE);
1049 }
1050
1051 /*
1052 * Verify whether or not thin provisioning is supported. Thin
1053 * provisioning was added in version 1.0. Because we have already
1054 * validated NS management commands are supported, which requires
1055 * version 1.2, we can just check the common namespace identify bit.
1056 */
1057 if (ioc->nnc_nsze > ioc->nnc_ncap && idns->id_nsfeat.f_thin == 0) {
1058 return (nvme_ioctl_error(&ioc->nnc_common,
1059 NVME_IOCTL_E_CTRL_THIN_PROV_UNSUP, 0, 0));
1060 }
1061
1062 /*
1063 * We do CSI validation in two parts. The first is a standard CSI
1064 * validation technique to see if we have a non-zero value, that we have
1065 * a minimum version that we support, etc. The second is then the
1066 * constraint that we have today in the driver that we only support
1067 * creating namespaces whose CSI is of type NVM.
1068 */
1069 if (!nvme_validate_one_field(&ioc->nnc_common, ioc->nnc_csi,
1070 &nvme_valid_ns_create_csi, &ctrl_data, 0)) {
1071 return (B_FALSE);
1072 }
1073
1074 if (ioc->nnc_csi != NVME_CSI_NVM) {
1075 return (nvme_ioctl_error(&ioc->nnc_common,
1076 NVME_IOCTL_E_DRV_CSI_UNSUP, 0, 0));
1077 }
1078
1079 /*
1080 * See our notes around the LBA format in nvme_validate_format(). Unlike
1081 * format, today we don't validate that the driver can actually use it.
1082 * We try to be a little more flexible and just ensure that this is a
1083 * valid choice. However, we treat the field as just
1084 * indicating the LBA format and currently don't support the NVMe 2.0
1085 * host behavior around the extended LBA format size.
1086 */
1087 if (ioc->nnc_flbas > idns->id_nlbaf) {
1088 return (nvme_ioctl_error(&ioc->nnc_common,
1089 NVME_IOCTL_E_NS_CREATE_FLBAS_RANGE, 0, 0));
1090 }
1091
1092 if (!nvme_validate_one_field(&ioc->nnc_common, ioc->nnc_nmic,
1093 &nvme_valid_ns_create_nmic, &ctrl_data, 0)) {
1094 return (B_FALSE);
1095 }
1096
1097 return (B_TRUE);
1098 }
1099