/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <dev/nvme/nvme.h>

#define NVD_STR		"nvd"

struct nvd_disk;

static disk_ioctl_t nvd_ioctl;
static disk_strategy_t nvd_strategy;
static dumper_t nvd_dump;

static void nvd_done(void *arg, const struct nvme_completion *cpl);

static void *nvd_new_disk(struct nvme_namespace *ns, void *ctrlr);
static void destroy_geom_disk(struct nvd_disk *ndisk);

static void *nvd_new_controller(struct nvme_controller *ctrlr);
static void nvd_controller_fail(void *ctrlr);

static int nvd_load(void);
static void nvd_unload(void);

MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");

struct nvme_consumer *consumer_handle;

struct nvd_disk {

	struct bio_queue_head	bioq;
	struct task		bioqtask;
	struct mtx		bioqlock;

	struct disk		*disk;
	struct taskqueue	*tq;
	struct nvme_namespace	*ns;

	uint32_t		cur_depth;
	uint32_t		ordered_in_flight;

	TAILQ_ENTRY(nvd_disk)	global_tailq;
	TAILQ_ENTRY(nvd_disk)	ctrlr_tailq;
};

struct nvd_controller {

	TAILQ_ENTRY(nvd_controller)	tailq;
	TAILQ_HEAD(, nvd_disk)		disk_head;
};

static TAILQ_HEAD(, nvd_controller)	ctrlr_head;
static TAILQ_HEAD(disk_list, nvd_disk)	disk_head;

static SYSCTL_NODE(_hw, OID_AUTO, nvd, CTLFLAG_RD, 0, "nvd driver parameters");
/*
 * The NVMe specification does not define a maximum or optimal delete size, so
 * technically the maximum delete size is min(full size of the namespace,
 * 2^32 - 1 LBAs).  A single delete for a multi-TB NVMe namespace, though, may
 * take much longer to complete than the nvme(4) I/O timeout period.  So
 * choose a sensible default here that is still suitably large to minimize
 * the number of overall delete operations.
 */
static uint64_t nvd_delete_max = (1024 * 1024 * 1024);  /* 1GB */
SYSCTL_UQUAD(_hw_nvd, OID_AUTO, delete_max, CTLFLAG_RDTUN, &nvd_delete_max, 0,
    "nvd maximum BIO_DELETE size in bytes");
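
/*
 * Usage note (illustrative addition, not in the original sources): because
 * delete_max is CTLFLAG_RDTUN, it is read-only at runtime and is instead set
 * as a boot-time tunable, e.g. in /boot/loader.conf:
 *
 *	hw.nvd.delete_max="536870912"	# cap BIO_DELETE requests at 512MB
 */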

static int
nvd_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = nvd_load();
		break;
	case MOD_UNLOAD:
		nvd_unload();
		break;
	default:
		break;
	}

	return (error);
}

moduledata_t nvd_mod = {
	NVD_STR,
	(modeventhand_t)nvd_modevent,
	0
};

DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(nvd, 1);
MODULE_DEPEND(nvd, nvme, 1, 1, 1);

static int
nvd_load(void)
{
	if (!nvme_use_nvd)
		return (0);

	TAILQ_INIT(&ctrlr_head);
	TAILQ_INIT(&disk_head);

	consumer_handle = nvme_register_consumer(nvd_new_disk,
	    nvd_new_controller, NULL, nvd_controller_fail);

	return (consumer_handle != NULL ? 0 : -1);
}

static void
nvd_unload(void)
{
	struct nvd_controller	*ctrlr;
	struct nvd_disk		*disk;

	if (!nvme_use_nvd)
		return;

	while (!TAILQ_EMPTY(&ctrlr_head)) {
		ctrlr = TAILQ_FIRST(&ctrlr_head);
		TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
		free(ctrlr, M_NVD);
	}

	while (!TAILQ_EMPTY(&disk_head)) {
		disk = TAILQ_FIRST(&disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	nvme_unregister_consumer(consumer_handle);
}

static int
nvd_bio_submit(struct nvd_disk *ndisk, struct bio *bp)
{
	int err;

	bp->bio_driver1 = NULL;
	atomic_add_int(&ndisk->cur_depth, 1);
	err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
	if (err) {
		atomic_add_int(&ndisk->cur_depth, -1);
		if (__predict_false(bp->bio_flags & BIO_ORDERED))
			atomic_add_int(&ndisk->ordered_in_flight, -1);
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return (-1);
	}

	return (0);
}
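
/*
 * GEOM strategy routine.  In the common case the bio is submitted directly
 * to nvme(4); once any BIO_ORDERED bio is in flight, bios are funneled
 * through the per-disk taskqueue so that ordering can be enforced.
 */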
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->ordered_in_flight, 1);

	if (__predict_true(ndisk->ordered_in_flight == 0)) {
		nvd_bio_submit(ndisk, bp);
		return;
	}

	/*
	 * There are ordered bios in flight, so we need to submit
	 * bios through the task queue to enforce ordering.
	 */
	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}

static int
nvd_ioctl(struct disk *ndisk, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	int ret = 0;

	switch (cmd) {
	default:
		ret = EIO;
	}

	return (ret);
}

static int
nvd_dump(void *arg, void *virt, vm_offset_t phys, off_t offset, size_t len)
{
	struct nvd_disk *ndisk;
	struct disk *dp;

	dp = arg;
	ndisk = dp->d_drv1;

	return (nvme_ns_dump(ndisk->ns, virt, offset, len));
}

static void
nvd_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp;
	struct nvd_disk *ndisk;

	bp = (struct bio *)arg;

	ndisk = bp->bio_disk->d_drv1;

	atomic_add_int(&ndisk->cur_depth, -1);
	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->ordered_in_flight, -1);

	biodone(bp);
}

static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

		if (nvd_bio_submit(ndisk, bp) != 0) {
			continue;
		}

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that the bio with BIO_ORDERED
		 * flag set must be completed before proceeding with
		 * additional bios.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif
	}
}
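
/*
 * nvme(4) consumer callbacks, invoked as controllers and namespaces attach.
 * nvd_new_disk() wraps a namespace in a GEOM disk and links it onto both the
 * global and per-controller lists.
 */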
static void *
nvd_new_controller(struct nvme_controller *ctrlr)
{
	struct nvd_controller	*nvd_ctrlr;

	nvd_ctrlr = malloc(sizeof(struct nvd_controller), M_NVD,
	    M_ZERO | M_WAITOK);

	TAILQ_INIT(&nvd_ctrlr->disk_head);
	TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);

	return (nvd_ctrlr);
}

static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t			descr[NVME_MODEL_NUMBER_LENGTH+1];
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_dump = nvd_dump;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
	disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
	if (disk->d_delmaxsize > nvd_delete_max)
		disk->d_delmaxsize = nvd_delete_max;
	disk->d_stripesize = nvme_ns_get_stripesize(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = DISKFLAG_DIRECT_COMPLETION;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

	/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 * the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);
	strlcpy(disk->d_descr, descr, sizeof(descr));

	disk->d_rotation_rate = DISK_RR_NON_ROTATING;

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;
	ndisk->ordered_in_flight = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
	    (uintmax_t)disk->d_mediasize / (1024*1024),
	    (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
	    disk->d_sectorsize);

	return (NULL);
}

static void
destroy_geom_disk(struct nvd_disk *ndisk)
{
	struct bio	*bp;
	struct disk	*disk;
	uint32_t	unit;
	int		cnt = 0;

	disk = ndisk->disk;
	unit = disk->d_unit;
	taskqueue_free(ndisk->tq);

	disk_destroy(ndisk->disk);

	mtx_lock(&ndisk->bioqlock);
	for (;;) {
		bp = bioq_takefirst(&ndisk->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		cnt++;
		biodone(bp);
	}

	printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt);
	printf(NVD_STR"%u: removing device entry\n", unit);

	mtx_unlock(&ndisk->bioqlock);

	mtx_destroy(&ndisk->bioqlock);
}

static void
nvd_controller_fail(void *ctrlr_arg)
{
	struct nvd_controller	*ctrlr = ctrlr_arg;
	struct nvd_disk		*disk;

	while (!TAILQ_EMPTY(&ctrlr->disk_head)) {
		disk = TAILQ_FIRST(&ctrlr->disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		TAILQ_REMOVE(&ctrlr->disk_head, disk, ctrlr_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
	free(ctrlr, M_NVD);
}