xref: /freebsd/sys/dev/nvme/nvme_test.c (revision 6137b5f7b8c183ee8806d79b3f1d8e5e3ddb3df3)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/unistd.h>

#include <geom/geom.h>

#include "nvme_private.h"

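/*
 * Per-thread state for the driver-level (NVME_IO_TEST) test: the namespace
 * and opcode under test, the data buffer, and this thread's completion count.
 */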
struct nvme_io_test_thread {
	uint32_t		idx;
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	void			*buf;
	uint32_t		size;
	uint32_t		time;
	uint64_t		io_completed;
};

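/*
 * State shared by all worker threads in a single test run, including one
 * result slot per thread and a count of the threads still active.
 */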
struct nvme_io_test_internal {
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	uint32_t		time;
	uint32_t		size;
	uint32_t		td_active;
	uint32_t		td_idx;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

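/*
 * Bio completion callback: wake up the worker thread sleeping on this bio.
 */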
static void
nvme_ns_bio_test_cb(struct bio *bio)
{
	struct mtx *mtx;

	mtx = mtx_pool_find(mtxpool_sleep, bio);
	mtx_lock(mtx);
	wakeup(bio);
	mtx_unlock(mtx);
}

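/*
 * Worker thread for the bio-based test.  Pushes synchronous bios through
 * the namespace's cdev strategy routine and counts completions until the
 * time limit expires or an error is returned.
 */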
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			io_completed = 0, offset;
	uint32_t			idx;
	int				ref;

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;
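	/* Stagger each thread's starting offset by 2048 sectors. */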
	offset = idx * 2048ULL * nvme_ns_get_sector_size(io_test->ns);

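	/*
	 * Issue synchronous bios through the namespace cdev until an error
	 * is seen or the requested test time has elapsed.
	 */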
	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			csw = dev_refthread(dev, &ref);
		} else
			csw = dev->si_devsw;

		if (csw == NULL)
			panic("Unable to retrieve device switch");
		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			dev_relthread(dev, ref);
		}
		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0)) {
			/* Release the bio before bailing out on an error. */
			g_destroy_bio(bio);
			break;
		}

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

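/*
 * Completion callback for the NVME_IO_TEST path.  Each completion either
 * re-submits the next read or write through the driver API or, once the
 * time limit is reached (or an error occurs), wakes the worker thread.
 */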
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread	*tth = arg;
	struct timeval			t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}

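/*
 * Worker thread for the NVME_IO_TEST path.  Sets up per-thread state,
 * kicks off the first I/O, then sleeps until the callback chain signals
 * completion and reports this thread's I/O count.
 */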
static void
nvme_ns_io_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct nvme_io_test_thread	*tth;
	struct nvme_completion		cpl;
	int				error;

	tth = malloc(sizeof(*tth), M_NVME, M_WAITOK | M_ZERO);
	tth->ns = io_test->ns;
	tth->opc = io_test->opc;
	memcpy(&tth->start, &io_test->start, sizeof(tth->start));
	tth->buf = malloc(io_test->size, M_NVME, M_WAITOK);
	tth->size = io_test->size;
	tth->time = io_test->time;
	tth->idx = atomic_fetchadd_int(&io_test->td_idx, 1);

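	/*
	 * Prime the test by calling the completion callback directly with a
	 * zeroed (successful) completion; it submits the first I/O.
	 */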
	memset(&cpl, 0, sizeof(cpl));

	nvme_ns_io_test_cb(tth, &cpl);

	error = tsleep(tth, 0, "test_wait", tth->time*hz*2);

	if (error)
		printf("%s: error = %d\n", __func__, error);

	io_test->io_completed[tth->idx] = tth->io_completed;
	wakeup_one(io_test);

	free(tth->buf, M_NVME);
	free(tth, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

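/*
 * Top-level test dispatcher: validates the request, spawns one kernel
 * thread per requested worker (the driver-API test for NVME_IO_TEST,
 * otherwise the bio-based test), waits for them to finish, and returns
 * the per-thread completion counts to the caller.
 */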
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test		*io_test;
	struct nvme_io_test_internal	*io_test_internal;
	void				(*fn)(void *);
	int				i;

	io_test = (struct nvme_io_test *)arg;

	if ((io_test->opc != NVME_OPC_READ) &&
	    (io_test->opc != NVME_OPC_WRITE))
		return;

	if (io_test->size % nvme_ns_get_sector_size(ns))
		return;

	io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
	    M_WAITOK | M_ZERO);
	io_test_internal->opc = io_test->opc;
	io_test_internal->ns = ns;
	io_test_internal->td_active = io_test->num_threads;
	io_test_internal->time = io_test->time;
	io_test_internal->size = io_test->size;
	io_test_internal->flags = io_test->flags;

	if (cmd == NVME_IO_TEST)
		fn = nvme_ns_io_test;
	else
		fn = nvme_ns_bio_test;

	getmicrouptime(&io_test_internal->start);

	for (i = 0; i < io_test->num_threads; i++)
		kthread_add(fn, io_test_internal,
		    NULL, NULL, 0, 0, "nvme_io_test[%d]", i);

	tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);

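	/*
	 * Wait for every worker thread to drop its reference before copying
	 * out results and freeing the shared test state.
	 */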
	while (io_test_internal->td_active > 0)
		DELAY(10);

	memcpy(io_test->io_completed, io_test_internal->io_completed,
	    sizeof(io_test->io_completed));

	free(io_test_internal, M_NVME);
}