/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Oxide Computer Company
 */

/*
 * Common functions for the various ioctl tests that we're using.
 */

#include <err.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <fcntl.h>
#include <limits.h>
#include <libdevinfo.h>
#include <unistd.h>
#include <libproc.h>

#include "nvme_ioctl_util.h"

/*
 * Cached copies of devinfo nodes to speed up subsequent lookups.
 */
static di_node_t nvme_test_root;
static di_node_t nvme_test_dev;

/*
 * Lock ioctl template structures. These all request non-blocking locks so
 * that a test which fails to take a lock errors out rather than hanging.
 */
const nvme_ioctl_lock_t nvme_test_ctrl_wrlock = {
	.nil_ent = NVME_LOCK_E_CTRL,
	.nil_level = NVME_LOCK_L_WRITE,
	.nil_flags = NVME_LOCK_F_DONT_BLOCK
};

const nvme_ioctl_lock_t nvme_test_ctrl_rdlock = {
	.nil_ent = NVME_LOCK_E_CTRL,
	.nil_level = NVME_LOCK_L_READ,
	.nil_flags = NVME_LOCK_F_DONT_BLOCK
};

const nvme_ioctl_lock_t nvme_test_ns_wrlock = {
	.nil_common = { .nioc_nsid = 1 },
	.nil_ent = NVME_LOCK_E_NS,
	.nil_level = NVME_LOCK_L_WRITE,
	.nil_flags = NVME_LOCK_F_DONT_BLOCK
};

const nvme_ioctl_lock_t nvme_test_ns_rdlock = {
	.nil_common = { .nioc_nsid = 1 },
	.nil_ent = NVME_LOCK_E_NS,
	.nil_level = NVME_LOCK_L_READ,
	.nil_flags = NVME_LOCK_F_DONT_BLOCK
};

const nvme_ioctl_unlock_t nvme_test_ctrl_unlock = {
	.niu_ent = NVME_LOCK_E_CTRL
};

const nvme_ioctl_unlock_t nvme_test_ns_unlock = {
	.niu_common = { .nioc_nsid = 1 },
	.niu_ent = NVME_LOCK_E_NS
};
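
/*
 * As an illustrative sketch (not itself part of this utility file): a test
 * would typically combine these templates with the helpers below roughly as
 * follows, here taking and then dropping the controller write lock. The use
 * of the driver's NVME_IOC_UNLOCK ioctl for the release is an assumption of
 * this sketch; individual tests are free to structure this differently.
 *
 *	int fd = nvme_ioctl_test_get_fd(0);
 *	nvme_ioctl_unlock_t unlock = nvme_test_ctrl_unlock;
 *
 *	nvme_ioctl_test_lock(fd, &nvme_test_ctrl_wrlock);
 *	...
 *	(void) ioctl(fd, NVME_IOC_UNLOCK, &unlock);
 *	(void) close(fd);
 */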
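
/*
 * Find and open the minor node that corresponds to the requested namespace on
 * the given NVMe devinfo node. An nsid of 0 refers to the controller itself
 * and maps to its devctl (nexus) minor; any other value maps to the
 * attachment point minor named after that namespace. Any failure terminates
 * the test.
 */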
static int
nvme_ioctl_test_find_nsid(di_node_t di, uint32_t nsid)
{
	int fd;
	const char *type;
	char name[128], *mpath, path[PATH_MAX];
	di_minor_t minor;

	if (nsid == 0) {
		type = DDI_NT_NVME_NEXUS;
		(void) strlcpy(name, "devctl", sizeof (name));
	} else {
		type = DDI_NT_NVME_ATTACHMENT_POINT;
		(void) snprintf(name, sizeof (name), "%u", nsid);
	}

	minor = DI_MINOR_NIL;
	while ((minor = di_minor_next(di, minor)) != DI_MINOR_NIL) {
		if (strcmp(di_minor_nodetype(minor), type) == 0 &&
		    strcmp(di_minor_name(minor), name) == 0) {
			break;
		}
	}

	if (minor == DI_MINOR_NIL) {
		errx(EXIT_FAILURE, "failed to find minor for nsid %u on %s%d",
		    nsid, di_driver_name(di), di_instance(di));
	}

	mpath = di_devfs_minor_path(minor);
	if (mpath == NULL) {
		err(EXIT_FAILURE, "failed to get minor device path for nsid %u "
		    "on %s%d", nsid, di_driver_name(di), di_instance(di));
	}

	if (snprintf(path, sizeof (path), "/devices%s", mpath) >=
	    sizeof (path)) {
		errx(EXIT_FAILURE, "failed to construct full /devices path for "
		    "%s: snprintf buffer would have overflowed", mpath);
	}
	di_devfs_path_free(mpath);

	fd = open(path, O_RDWR);
	if (fd < 0) {
		err(EXIT_FAILURE, "failed to open minor path %s", path);
	}

	return (fd);
}

/*
 * The ioctl tests expect an NVMe device to be nominated for them to test
 * against. Translate that device into an fd for the requested minor.
 */
int
nvme_ioctl_test_get_fd(uint32_t nsid)
{
	const char *dev, *errstr;
	long long ll;

	if (nvme_test_dev != NULL) {
		return (nvme_ioctl_test_find_nsid(nvme_test_dev, nsid));
	}

	dev = getenv(NVME_TEST_DEV_ENVVAR);
	if (dev == NULL) {
		errx(EXIT_FAILURE, "cannot run test, missing required NVMe "
		    "device, please set the %s environment variable",
		    NVME_TEST_DEV_ENVVAR);
	}

	if (strncmp("nvme", dev, 4) != 0) {
		errx(EXIT_FAILURE, "%s environment variable device %s does "
		    "not begin with 'nvme'", NVME_TEST_DEV_ENVVAR, dev);
	}

	ll = strtonum(dev + 4, 0, INT32_MAX, &errstr);
	if (errstr != NULL) {
		errx(EXIT_FAILURE, "failed to parse %s environment variable "
		    "device %s instance: value is %s", NVME_TEST_DEV_ENVVAR,
		    dev, errstr);
	}

	if (nvme_test_root == NULL) {
		nvme_test_root = di_init("/", DINFOCPYALL);
		if (nvme_test_root == DI_NODE_NIL) {
			err(EXIT_FAILURE, "failed to initialize libdevinfo");
		}
	}

	for (di_node_t di = di_drv_first_node("nvme", nvme_test_root);
	    di != DI_NODE_NIL; di = di_drv_next_node(di)) {
		if (di_instance(di) == (int)ll) {
			nvme_test_dev = di;
			return (nvme_ioctl_test_find_nsid(di, nsid));
		}
	}

	errx(EXIT_FAILURE, "failed to find %s environment variable device %s: "
	    "cannot run test", NVME_TEST_DEV_ENVVAR, dev);
}
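
/*
 * For example (an illustrative sketch only), a test that needs both the
 * controller's devctl minor and the minor for namespace 1 would simply do:
 *
 *	int ctrl_fd = nvme_ioctl_test_get_fd(0);
 *	int ns_fd = nvme_ioctl_test_get_fd(1);
 *
 * Both calls exit the test on failure, so callers do not need to perform any
 * additional error checking.
 */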

/*
 * This is a wrapper around the lock ioctl that requires that the requested
 * lock is successfully obtained; anything else is a fatal test failure.
 */
void
nvme_ioctl_test_lock(int fd, const nvme_ioctl_lock_t *lockp)
{
	nvme_ioctl_lock_t lock = *lockp;
	const char *targ = lockp->nil_ent == NVME_LOCK_E_CTRL ?
	    "controller" : "namespace";
	const char *level = lockp->nil_level == NVME_LOCK_L_READ ?
	    "read" : "write";

	if (ioctl(fd, NVME_IOC_LOCK, &lock) != 0) {
		err(EXIT_FAILURE, "TEST FAILED: cannot proceed with tests due "
		    "to failure to issue %s %s lock ioctl", targ, level);
	} else if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
		errx(EXIT_FAILURE, "TEST FAILED: cannot proceed with tests due "
		    "to failure to obtain %s %s lock, got 0x%x", targ, level,
		    lock.nil_common.nioc_drv_err);
	}
}

/*
 * Determine if a thread is blocked in our locking ioctl. We use proc_sysname()
 * so we can avoid encoding the system call number of the ioctl into the test
 * directly.
 */
bool
nvme_ioctl_test_thr_blocked(thread_t thr)
{
	lwpstatus_t lwp;
	char name[SYS2STR_MAX];

	if (proc_get_lwpstatus(getpid(), (uint_t)thr, &lwp) != 0) {
		err(EXIT_FAILURE, "TEST FAILED: unable to continue test "
		    "execution as we failed to retrieve the lwpstatus_t data "
		    "for thread 0x%x", thr);
	}

	if ((lwp.pr_flags & PR_ASLEEP) == 0)
		return (false);

	if (proc_sysname(lwp.pr_syscall, name, sizeof (name)) == NULL)
		return (false);

	return (strcmp(name, "ioctl") == 0);
}
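
/*
 * As a hypothetical sketch of how the pieces above fit together: a test that
 * wants to observe blocking behavior would take a lock in one thread, have a
 * second thread attempt the same lock without NVME_LOCK_F_DONT_BLOCK set, and
 * then poll nvme_ioctl_test_thr_blocked() until the second thread is seen
 * sleeping in ioctl(2). The use of thr_create(3C), the polling interval, and
 * the lock_thread worker (which would issue the blocking NVME_IOC_LOCK ioctl)
 * are assumptions of this sketch, not requirements of these helpers.
 *
 *	nvme_ioctl_lock_t lock = nvme_test_ctrl_wrlock;
 *	thread_t thr;
 *
 *	lock.nil_flags &= ~NVME_LOCK_F_DONT_BLOCK;
 *	if (thr_create(NULL, 0, lock_thread, &lock, 0, &thr) != 0) {
 *		errx(EXIT_FAILURE, "failed to create lock thread");
 *	}
 *	while (!nvme_ioctl_test_thr_blocked(thr)) {
 *		(void) usleep(1000);
 *	}
 */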