/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 * Copyright (C) 2018-2020 Alexander Motin <mav@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/devicestat.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <dev/nvme/nvme.h>
#include <dev/nvme/nvme_private.h>

#include <dev/pci/pcivar.h>

#include "nvme_if.h"

#define	NVD_STR		"nvd"

struct nvd_disk;
struct nvd_controller;

static disk_ioctl_t nvd_ioctl;
static disk_strategy_t nvd_strategy;
static dumper_t nvd_dump;
static disk_getattr_t nvd_getattr;

static void nvd_done(void *arg, const struct nvme_completion *cpl);
static void nvd_gone(struct nvd_disk *ndisk);

static int nvd_load(void);
static void nvd_unload(void);

MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");

struct nvme_consumer *consumer_handle;

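/*
 * Per-namespace state.  Each NVMe namespace surfaced by nvme(4) is wrapped
 * in one nvd_disk, which owns the GEOM disk, a bio queue used only while
 * ordered bios are in flight, and a single-threaded taskqueue that drains
 * that queue.
 */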
struct nvd_disk {
	struct nvd_controller	*ctrlr;

	struct bio_queue_head	bioq;
	struct task		bioqtask;
	struct mtx		bioqlock;

	struct disk		*disk;
	struct taskqueue	*tq;
	struct nvme_namespace	*ns;

	uint32_t		cur_depth;
#define	NVD_ODEPTH	(1 << 30)	/* cur_depth bias for an ordered bio */
	uint32_t		ordered_in_flight;
	u_int			unit;

	TAILQ_ENTRY(nvd_disk)	global_tailq;
	TAILQ_ENTRY(nvd_disk)	ctrlr_tailq;
};

struct nvd_controller {
	struct nvme_controller		*ctrlr;
	TAILQ_ENTRY(nvd_controller)	tailq;
	TAILQ_HEAD(, nvd_disk)		disk_head;
};

static struct mtx			nvd_lock;
static TAILQ_HEAD(, nvd_controller)	ctrlr_head;
static TAILQ_HEAD(disk_list, nvd_disk)	disk_head;

static SYSCTL_NODE(_hw, OID_AUTO, nvd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "nvd driver parameters");
/*
 * The NVMe specification does not define a maximum or optimal delete size, so
 * technically max delete size is min(full size of the namespace, 2^32 - 1
 * LBAs). A single delete for a multi-TB NVMe namespace though may take much
 * longer to complete than the nvme(4) I/O timeout period. So choose a sensible
 * default here that is still suitably large to minimize the number of overall
 * delete operations.
 */
static uint64_t nvd_delete_max = (1024 * 1024 * 1024);	/* 1GB */
SYSCTL_UQUAD(_hw_nvd, OID_AUTO, delete_max, CTLFLAG_RDTUN, &nvd_delete_max, 0,
    "nvd maximum BIO_DELETE size in bytes");

static int
nvd_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = nvd_load();
		break;
	case MOD_UNLOAD:
		nvd_unload();
		break;
	default:
		break;
	}

	return (error);
}

moduledata_t nvd_mod = {
	NVD_STR,
	(modeventhand_t)nvd_modevent,
	0
};

DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(nvd, 1);
MODULE_DEPEND(nvd, nvme, 1, 1, 1);

static int
nvd_load(void)
{
	if (!nvme_use_nvd)
		return (0);

	mtx_init(&nvd_lock, "nvd_lock", NULL, MTX_DEF);
	TAILQ_INIT(&ctrlr_head);
	TAILQ_INIT(&disk_head);
	return (0);
}

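/*
 * Tear down every controller and disk.  nvd_gone() fails all queued bios
 * and asks GEOM to retire each disk; we then sleep on the per-controller
 * disk list until nvd_gonecb() has removed the last disk from it.
 */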
static void
nvd_unload(void)
{
	struct nvd_controller *ctrlr;
	struct nvd_disk *ndisk;

	if (!nvme_use_nvd)
		return;

	mtx_lock(&nvd_lock);
	while ((ctrlr = TAILQ_FIRST(&ctrlr_head)) != NULL) {
		TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
		TAILQ_FOREACH(ndisk, &ctrlr->disk_head, ctrlr_tailq)
			nvd_gone(ndisk);
		while (!TAILQ_EMPTY(&ctrlr->disk_head))
			msleep(&ctrlr->disk_head, &nvd_lock, 0, "nvd_unload", 0);
		free(ctrlr, M_NVD);
	}
	mtx_unlock(&nvd_lock);

	mtx_destroy(&nvd_lock);
}

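/*
 * Pass a bio straight to nvme(4).  cur_depth counts in-flight bios; an
 * ordered bio is accounted with the large NVD_ODEPTH bias so that
 * nvd_bioq_process() can distinguish "ordered bio in flight" from ordinary
 * queue depth.  On a submission error the accounting is unwound and the
 * bio is failed immediately.
 */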
static void
nvd_bio_submit(struct nvd_disk *ndisk, struct bio *bp)
{
	int err;

	bp->bio_driver1 = NULL;
	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->cur_depth, NVD_ODEPTH);
	else
		atomic_add_int(&ndisk->cur_depth, 1);
	err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
	if (err) {
		if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
			atomic_add_int(&ndisk->cur_depth, -NVD_ODEPTH);
			atomic_add_int(&ndisk->ordered_in_flight, -1);
			wakeup(&ndisk->cur_depth);
		} else {
			if (atomic_fetchadd_int(&ndisk->cur_depth, -1) == 1 &&
			    __predict_false(ndisk->ordered_in_flight != 0))
				wakeup(&ndisk->cur_depth);
		}
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
	}
}

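/*
 * GEOM strategy entry point.  In the common case (no ordered bios pending)
 * bios are submitted directly from the caller's context; otherwise they are
 * queued and the taskqueue serializes them around the BIO_ORDERED barrier.
 */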
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	/*
	 * A bio with the BIO_ORDERED flag set must be executed after all
	 * previous bios in the queue, and before any successive bios.
	 */
	if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
		if (atomic_fetchadd_int(&ndisk->ordered_in_flight, 1) == 0 &&
		    ndisk->cur_depth == 0 && bioq_first(&ndisk->bioq) == NULL) {
			nvd_bio_submit(ndisk, bp);
			return;
		}
	} else if (__predict_true(ndisk->ordered_in_flight == 0)) {
		nvd_bio_submit(ndisk, bp);
		return;
	}

	/*
	 * There are ordered bios in flight, so we need to submit
	 * bios through the task queue to enforce ordering.
	 */
	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}

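/*
 * Called when the underlying namespace or controller goes away: retire the
 * GEOM disk and fail every queued bio with ENXIO.
 */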
static void
nvd_gone(struct nvd_disk *ndisk)
{
	struct bio *bp;

	printf(NVD_STR"%u: detached\n", ndisk->unit);
	mtx_lock(&ndisk->bioqlock);
	disk_gone(ndisk->disk);
	while ((bp = bioq_takefirst(&ndisk->bioq)) != NULL) {
		if (__predict_false(bp->bio_flags & BIO_ORDERED))
			atomic_add_int(&ndisk->ordered_in_flight, -1);
		bp->bio_error = ENXIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
	}
	mtx_unlock(&ndisk->bioqlock);
}

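/*
 * GEOM d_gone callback, invoked once the last consumer of the disk is gone.
 * Frees all per-disk resources and wakes up anyone waiting in nvd_unload()
 * or nvdc_controller_failed() for the controller's disk list to drain.
 */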
static void
nvd_gonecb(struct disk *dp)
{
	struct nvd_disk *ndisk = (struct nvd_disk *)dp->d_drv1;

	disk_destroy(ndisk->disk);
	mtx_lock(&nvd_lock);
	TAILQ_REMOVE(&disk_head, ndisk, global_tailq);
	TAILQ_REMOVE(&ndisk->ctrlr->disk_head, ndisk, ctrlr_tailq);
	if (TAILQ_EMPTY(&ndisk->ctrlr->disk_head))
		wakeup(&ndisk->ctrlr->disk_head);
	mtx_unlock(&nvd_lock);
	taskqueue_free(ndisk->tq);
	mtx_destroy(&ndisk->bioqlock);
	free(ndisk, M_NVD);
}

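/* Forward ioctls to nvme(4) for processing against the namespace. */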
static int
nvd_ioctl(struct disk *dp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct nvd_disk *ndisk = dp->d_drv1;

	return (nvme_ns_ioctl_process(ndisk->ns, cmd, data, fflag, td));
}

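/* Kernel crash dump handler; relies on nvme(4)'s polled nvme_ns_dump(). */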
static int
nvd_dump(void *arg, void *virt, off_t offset, size_t len)
{
	struct disk *dp = arg;
	struct nvd_disk *ndisk = dp->d_drv1;

	return (nvme_ns_dump(ndisk->ns, virt, offset, len));
}

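/*
 * Report the namespace's NGUID (preferred) or EUI64 as the GEOM::lunid
 * attribute, rendered as a hex string.  An all-zero identifier means the
 * field is unpopulated and is skipped.
 */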
static int
nvd_getattr(struct bio *bp)
{
	struct nvd_disk *ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;
	const struct nvme_namespace_data *nsdata;
	u_int i;

	if (!strcmp("GEOM::lunid", bp->bio_attribute)) {
		nsdata = nvme_ns_get_data(ndisk->ns);

		/* Try to return NGUID as lunid. */
		for (i = 0; i < sizeof(nsdata->nguid); i++) {
			if (nsdata->nguid[i] != 0)
				break;
		}
		if (i < sizeof(nsdata->nguid)) {
			if (bp->bio_length < sizeof(nsdata->nguid) * 2 + 1)
				return (EFAULT);
			for (i = 0; i < sizeof(nsdata->nguid); i++) {
				sprintf(&bp->bio_data[i * 2], "%02x",
				    nsdata->nguid[i]);
			}
			bp->bio_completed = bp->bio_length;
			return (0);
		}

		/* Try to return EUI64 as lunid. */
		for (i = 0; i < sizeof(nsdata->eui64); i++) {
			if (nsdata->eui64[i] != 0)
				break;
		}
		if (i < sizeof(nsdata->eui64)) {
			if (bp->bio_length < sizeof(nsdata->eui64) * 2 + 1)
				return (EFAULT);
			for (i = 0; i < sizeof(nsdata->eui64); i++) {
				sprintf(&bp->bio_data[i * 2], "%02x",
				    nsdata->eui64[i]);
			}
			bp->bio_completed = bp->bio_length;
			return (0);
		}
	}
	return (-1);
}

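/*
 * nvme(4) completion callback: undo the depth accounting performed in
 * nvd_bio_submit() and wake up the taskqueue if it may be waiting on an
 * ordering barrier.
 */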
static void
nvd_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp = (struct bio *)arg;
	struct nvd_disk *ndisk = bp->bio_disk->d_drv1;

	if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
		atomic_add_int(&ndisk->cur_depth, -NVD_ODEPTH);
		atomic_add_int(&ndisk->ordered_in_flight, -1);
		wakeup(&ndisk->cur_depth);
	} else {
		if (atomic_fetchadd_int(&ndisk->cur_depth, -1) == 1 &&
		    __predict_false(ndisk->ordered_in_flight != 0))
			wakeup(&ndisk->cur_depth);
	}

	biodone(bp);
}

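/*
 * Taskqueue handler that drains the bio queue in order.  Before submitting
 * an ordered bio it waits for the depth to reach zero; before submitting a
 * regular bio it waits for any in-flight ordered bio (depth >= NVD_ODEPTH)
 * to complete.
 */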
static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

		if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
			/*
			 * A bio with the BIO_ORDERED flag set must be
			 * executed after all previous bios have completed.
			 */
			while (ndisk->cur_depth > 0)
				tsleep(&ndisk->cur_depth, 0, "nvdorb", 1);
		} else {
			/*
			 * A bio with the BIO_ORDERED flag set must complete
			 * before any subsequent bios are started.
			 */
			while (ndisk->cur_depth >= NVD_ODEPTH)
				tsleep(&ndisk->cur_depth, 0, "nvdora", 1);
		}

		nvd_bio_submit(ndisk, bp);
	}
}

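/*
 * Handle controller failure (also used by detach): drop the controller from
 * the global list, retire its disks, and wait for the last gonecb.
 */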
static int
nvdc_controller_failed(device_t dev)
{
	struct nvd_controller *nvd_ctrlr = device_get_softc(dev);
	struct nvd_disk *ndisk;

	mtx_lock(&nvd_lock);
	TAILQ_REMOVE(&ctrlr_head, nvd_ctrlr, tailq);
	TAILQ_FOREACH(ndisk, &nvd_ctrlr->disk_head, ctrlr_tailq)
		nvd_gone(ndisk);
	while (!TAILQ_EMPTY(&nvd_ctrlr->disk_head))
		msleep(&nvd_ctrlr->disk_head, &nvd_lock, 0, "nvd_fail", 0);
	mtx_unlock(&nvd_lock);
	return (0);
}

static int
nvdc_probe(device_t dev)
{
	if (!nvme_use_nvd)
		return (ENXIO);

	device_set_desc(dev, "nvme storage namespace");
	return (BUS_PROBE_DEFAULT);
}

static int
nvdc_attach(device_t dev)
{
	struct nvd_controller *nvd_ctrlr = device_get_softc(dev);
	struct nvme_controller *ctrlr = device_get_ivars(dev);

	nvd_ctrlr->ctrlr = ctrlr;
	TAILQ_INIT(&nvd_ctrlr->disk_head);
	mtx_lock(&nvd_lock);
	TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);
	mtx_unlock(&nvd_lock);

	return (0);
}

static int
nvdc_detach(device_t dev)
{
	return (nvdc_controller_failed(dev));
}

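/* Look up the nvd_disk for a namespace, by ID or by pointer. */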
static struct nvd_disk *
nvd_nsid_to_disk(struct nvd_controller *nvd_ctrlr, uint32_t nsid)
{
	struct nvd_disk *ndisk;

	mtx_lock(&nvd_lock);
	TAILQ_FOREACH(ndisk, &nvd_ctrlr->disk_head, ctrlr_tailq) {
		if (ndisk->ns->id != nsid)
			continue;
		break;
	}
	mtx_unlock(&nvd_lock);
	return (ndisk);
}

static struct nvd_disk *
nvd_ns_to_disk(struct nvd_controller *nvd_ctrlr, struct nvme_namespace *ns)
{
	struct nvd_disk *ndisk;

	mtx_lock(&nvd_lock);
	TAILQ_FOREACH(ndisk, &nvd_ctrlr->disk_head, ctrlr_tailq) {
		if (ndisk->ns != ns)
			continue;
		break;
	}
	mtx_unlock(&nvd_lock);
	return (ndisk);
}

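/*
 * A new namespace appeared: allocate its nvd_disk, pick the lowest free
 * unit number, and create and announce the GEOM disk with geometry and
 * identity taken from the namespace and controller.
 */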
static int
nvdc_ns_added(device_t dev, struct nvme_namespace *ns)
{
	struct nvd_controller *nvd_ctrlr = device_get_softc(dev);
	struct nvd_disk *ndisk;
	uint8_t descr[NVME_MODEL_NUMBER_LENGTH + 1];
	struct nvd_disk *tnd;
	struct disk *disk;
	device_t pdev = nvd_ctrlr->ctrlr->dev;
	int unit;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);
	ndisk->ctrlr = nvd_ctrlr;
	ndisk->ns = ns;
	ndisk->cur_depth = 0;
	ndisk->ordered_in_flight = 0;
	mtx_init(&ndisk->bioqlock, "nvd bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);
	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);

	/*
	 * The global disk list is kept sorted by unit number; scan it for
	 * the lowest unused unit and insert the new disk at that position.
	 */
	mtx_lock(&nvd_lock);
	unit = 0;
	TAILQ_FOREACH(tnd, &disk_head, global_tailq) {
		if (tnd->unit > unit)
			break;
		unit = tnd->unit + 1;
	}
	ndisk->unit = unit;
	if (tnd != NULL)
		TAILQ_INSERT_BEFORE(tnd, ndisk, global_tailq);
	else
		TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&nvd_ctrlr->disk_head, ndisk, ctrlr_tailq);
	mtx_unlock(&nvd_lock);

	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	disk = ndisk->disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_dump = nvd_dump;
	disk->d_getattr = nvd_getattr;
	disk->d_gone = nvd_gonecb;
	disk->d_name = NVD_STR;
	disk->d_unit = ndisk->unit;
	disk->d_drv1 = ndisk;

	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
	if (disk->d_delmaxsize > nvd_delete_max)
		disk->d_delmaxsize = nvd_delete_max;
	disk->d_stripesize = nvme_ns_get_stripesize(ns);
	disk->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;
	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	disk->d_devstat = devstat_new_entry(disk->d_name, disk->d_unit,
	    disk->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_NVME,
	    DEVSTAT_PRIORITY_DISK);

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 * the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);
	strlcpy(disk->d_descr, descr, sizeof(descr));

	/*
	 * For devices that are reported as children of the AHCI controller,
	 * which has no access to the config space for this controller, report
	 * the AHCI controller's data.
	 */
	if (nvd_ctrlr->ctrlr->quirks & QUIRK_AHCI)
		pdev = device_get_parent(pdev);
	disk->d_hba_vendor = pci_get_vendor(pdev);
	disk->d_hba_device = pci_get_device(pdev);
	disk->d_hba_subvendor = pci_get_subvendor(pdev);
	disk->d_hba_subdevice = pci_get_subdevice(pdev);
	disk->d_rotation_rate = DISK_RR_NON_ROTATING;
	strlcpy(disk->d_attachment, device_get_nameunit(pdev),
	    sizeof(disk->d_attachment));

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
	    (uintmax_t)disk->d_mediasize / (1024 * 1024),
	    (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
	    disk->d_sectorsize);

	return (0);
}

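/* A namespace disappeared; retire its disk. */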
static int
nvdc_ns_removed(device_t dev, struct nvme_namespace *ns)
{
	struct nvd_controller *nvd_ctrlr = device_get_softc(dev);
	struct nvd_disk *ndisk = nvd_ns_to_disk(nvd_ctrlr, ns);

	if (ndisk == NULL)
		panic("nvdc: no namespace found for ns %p", ns);
	nvd_gone(ndisk);
	/* gonecb removes it from the list -- no need to wait */
	return (0);
}

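/*
 * A namespace was modified (typically resized): refresh the disk's geometry
 * from the namespace data and notify GEOM.
 */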
static int
nvdc_ns_changed(device_t dev, uint32_t nsid)
{
	struct nvd_controller *nvd_ctrlr = device_get_softc(dev);
	struct nvd_disk *ndisk = nvd_nsid_to_disk(nvd_ctrlr, nsid);
	struct disk *disk;
	struct nvme_namespace *ns;

	if (ndisk == NULL)
		panic("nvdc: no namespace found for %u", nsid);
	disk = ndisk->disk;
	ns = ndisk->ns;

	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
	if (disk->d_delmaxsize > nvd_delete_max)
		disk->d_delmaxsize = nvd_delete_max;

	disk_resize(disk, M_NOWAIT);

	printf(NVD_STR"%u: NVMe namespace resized\n", ndisk->unit);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
	    (uintmax_t)disk->d_mediasize / (1024 * 1024),
	    (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
	    disk->d_sectorsize);
	return (0);
}

static int
nvdc_handle_aen(device_t dev, const struct nvme_completion *cpl,
    uint32_t pg_nr, void *page, uint32_t page_len)
{
	/* Do nothing; nvd has no interest in asynchronous events. */
	return (0);
}

static device_method_t nvdc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nvdc_probe),
	DEVMETHOD(device_attach, nvdc_attach),
	DEVMETHOD(device_detach, nvdc_detach),
	/* NVMe controller messages */
	DEVMETHOD(nvme_ns_added, nvdc_ns_added),
	DEVMETHOD(nvme_ns_removed, nvdc_ns_removed),
	DEVMETHOD(nvme_ns_changed, nvdc_ns_changed),
	DEVMETHOD(nvme_controller_failed, nvdc_controller_failed),
	DEVMETHOD(nvme_handle_aen, nvdc_handle_aen),
	{ 0, 0 }
};

static driver_t nvdc_driver = {
	"nvdc",
	nvdc_methods,
	sizeof(struct nvd_controller),
};

DRIVER_MODULE(nvdc, nvme, nvdc_driver, NULL, NULL);