/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static void
nvme_init(void)
{
	uint32_t	i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

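/*
 * Bus-independent shutdown entry point, called from the bus front ends on
 * system shutdown: ask the controller to perform an orderly NVMe shutdown.
 */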
int
nvme_shutdown(device_t dev)
{
	struct nvme_controller	*ctrlr;

	ctrlr = DEVICE2SOFTC(dev);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

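/*
 * Bus-independent attach: construct the controller state and defer the rest
 * of initialization to a config intrhook, which runs later in boot once
 * interrupts are enabled.
 */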
int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int status;

	status = nvme_ctrlr_construct(ctrlr, dev);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	if (config_intrhook_establish(&ctrlr->config_hook) != 0)
		return (ENOMEM);

	return (0);
}

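/*
 * Bus-independent detach: make sure the config intrhook has run (or is
 * removed) before tearing the controller down.
 */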
int
nvme_detach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	config_intrhook_drain(&ctrlr->config_hook);

	nvme_ctrlr_destruct(ctrlr, dev);
	return (0);
}

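/*
 * Notify a single consumer about a controller: invoke its controller
 * callback first, then its namespace callback for every namespace with a
 * non-zero size.  The controller's notification_sent flag ensures the
 * controller is announced at most once.
 */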
static void
nvme_notify(struct nvme_consumer *cons,
    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 * have registered with the kernel, but before the
	 * driver has completed initialization.  In that case,
	 * return here, and when initialization completes, the
	 * controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);
	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = (void *)(uintptr_t)0xdeadc0dedeadc0de;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;

	/* ctrlr_fn has failed.  Nothing to notify here any more. */
	if (ctrlr_cookie == NULL) {
		(void)atomic_cmpset_32(&ctrlr->notification_sent, 1, 0);
		return;
	}

	if (ctrlr->is_failed) {
		ctrlr->cons_cookie[cons->id] = NULL;
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 * failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (ns->data.nsze == 0)
			continue;
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

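/*
 * Called once a controller finishes initialization, to announce it to every
 * registered consumer.
 */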
void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

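/*
 * Announce every nvme(4) controller already attached to the system to a
 * newly registered consumer.
 */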
static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(devclass_find("nvme"), &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

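/*
 * Forward an asynchronous event (and any log page fetched for it) to every
 * consumer that registered an async handler.
 */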
void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
    const struct nvme_completion *async_cpl,
    uint32_t log_page_id, void *log_page_buffer,
    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	void			*ctrlr_cookie;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
			(*cons->async_fn)(ctrlr_cookie, async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
		}
	}
}

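/*
 * Tell every notified consumer that the controller has failed, clearing the
 * stored controller cookies so no further callbacks are delivered.
 */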
void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	void			*ctrlr_cookie;
	uint32_t		i;

	/*
	 * This controller failed during initialization (i.e. IDENTIFY
	 * command failed or timed out).  Do not notify any nvme
	 * consumers of the failure here, since the consumer does not
	 * even know about the controller yet.
	 */
	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
			ctrlr->cons_cookie[i] = NULL;
			if (cons->fail_fn != NULL)
				cons->fail_fn(ctrlr_cookie);
		}
	}
}

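/*
 * Notify all consumers that a single namespace (identified by its 1-based
 * nsid) has appeared or changed.
 */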
void
nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
{
	struct nvme_consumer	*cons;
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	uint32_t		i;

	KASSERT(nsid <= NVME_MAX_NAMESPACES,
	    ("%s: Namespace notification to nsid %d exceeds range\n",
	    device_get_nameunit(ctrlr->dev), nsid));

	if (!ctrlr->is_initialized)
		return;

	ns = &ctrlr->ns[nsid - 1];
	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL &&
		    (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL)
			ns->cons_cookie[i] = (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

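/*
 * Register a new nvme(4) consumer.  Any of the callbacks may be NULL if the
 * consumer is not interested in that class of notification.  All controllers
 * already attached are announced to the consumer before this returns.
 *
 * Typical use from a consumer (sketch only; the callback names below are
 * illustrative, not part of this driver):
 *
 *	consumer = nvme_register_consumer(my_ns_fn, my_ctrlr_fn, NULL, NULL);
 *	if (consumer == NULL)
 *		printf("no free nvme consumer slots\n");
 */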
struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
    nvme_cons_async_fn_t async_fn,
    nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

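/*
 * Unregister a consumer by releasing its slot.  No notifications are sent
 * and any cookies previously stored for it are left untouched.
 */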
void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

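/*
 * Completion callback used for synchronous (polled) requests.  A sketch of
 * the usual pattern on the submitting side (the identify command is just an
 * example):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, payload,
 *	    nvme_completion_poll_cb, &status);
 *	while (!atomic_load_acq_int(&status.done))
 *		pause("nvme", 1);
 *	if (nvme_completion_is_error(&status.cpl))
 *		...handle the failure...
 */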
void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 * the caller can check the status to determine if the request
	 * passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	atomic_store_rel_int(&status->done, 1);
}

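/*
 * Module event handler; there is nothing to do here beyond what the
 * SYSINIT/SYSUNINIT hooks above already handle.
 */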
static int
nvme_modevent(module_t mod __unused, int type __unused, void *argp __unused)
{
	return (0);
}

static moduledata_t nvme_mod = {
	"nvme",
	nvme_modevent,
	0
};

DECLARE_MODULE(nvme, nvme_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(nvme, 1);
MODULE_DEPEND(nvme, cam, 1, 1, 1);