/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>

#include <geom/geom.h>

#include "nvme_private.h"

static void	nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void	nvme_bio_child_done(void *arg,
		    const struct nvme_completion *cpl);
static uint32_t	nvme_get_num_segments(uint64_t addr, uint64_t size,
		    uint32_t alignment);
static void	nvme_free_child_bios(int num_bios, struct bio **child_bios);
static struct bio **	nvme_allocate_child_bios(int num_bios);
static struct bio **	nvme_construct_child_bios(struct bio *bp,
			    uint32_t alignment, int *num_bios);
static int	nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
		    uint32_t alignment);

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_namespace *ns;
        struct nvme_controller *ctrlr;
        struct nvme_pt_command *pt;

        ns = cdev->si_drv1;
        ctrlr = ns->ctrlr;

        switch (cmd) {
        case NVME_IO_TEST:
        case NVME_BIO_TEST:
                nvme_ns_test(ns, cmd, arg);
                break;
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
                    1 /* is_user_buffer */, 0 /* is_admin_cmd */));
        case NVME_GET_NSID:
        {
                struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;

                strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
                    sizeof(gnsid->cdev));
                /* strncpy() does not NUL-terminate on truncation. */
                gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
                gnsid->nsid = ns->id;
                break;
        }
        case DIOCGMEDIASIZE:
                *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
                break;
        case DIOCGSECTORSIZE:
                *(u_int *)arg = nvme_ns_get_sector_size(ns);
                break;
        default:
                return (ENOTTY);
        }

        return (0);
}
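/*
 * A minimal userland sketch of the NVME_GET_NSID ioctl handled above
 * (illustrative only; error handling and includes omitted).  It recovers
 * the parent controller name and namespace ID from an open namespace
 * cdev:
 *
 *	int fd = open("/dev/nvme0ns1", O_RDWR);
 *	struct nvme_get_nsid gnsid;
 *
 *	if (ioctl(fd, NVME_GET_NSID, &gnsid) == 0)
 *		printf("%s nsid %u\n", gnsid.cdev, gnsid.nsid);
 */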
static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
        int error = 0;

        if (flags & FWRITE)
                error = securelevel_gt(td->td_ucred, 0);

        return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

        return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio *bp = arg;

        /*
         * TODO: add more extensive translation of NVMe status codes
         * to different bio error codes (i.e. EIO, EINVAL, etc.)
         */
        if (nvme_completion_is_error(cpl)) {
                bp->bio_error = EIO;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
        } else
                bp->bio_resid = 0;

        biodone(bp);
}

static void
nvme_ns_strategy(struct bio *bp)
{
        struct nvme_namespace *ns;
        int err;

        ns = bp->bio_dev->si_drv1;
        err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

        if (err) {
                bp->bio_error = err;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
                biodone(bp);
        }
}

static struct cdevsw nvme_ns_cdevsw = {
        .d_version =	D_VERSION,
        .d_flags =	D_DISK,
        .d_read =	physread,
        .d_write =	physwrite,
        .d_open =	nvme_ns_open,
        .d_close =	nvme_ns_close,
        .d_strategy =	nvme_ns_strategy,
        .d_ioctl =	nvme_ns_ioctl
};

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
        return (ns->ctrlr->max_xfer_size);
}

uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
        uint8_t flbas_fmt, lbads;

        flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
            NVME_NS_DATA_FLBAS_FORMAT_MASK;
        lbads = (ns->data.lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
            NVME_NS_DATA_LBAF_LBADS_MASK;

        return (1 << lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
        return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
        return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
        return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

        return (&ns->data);
}

uint32_t
nvme_ns_get_stripesize(struct nvme_namespace *ns)
{

        if (((ns->data.nsfeat >> NVME_NS_DATA_NSFEAT_NPVALID_SHIFT) &
            NVME_NS_DATA_NSFEAT_NPVALID_MASK) != 0 && ns->data.npwg != 0) {
                return ((ns->data.npwg + 1) * nvme_ns_get_sector_size(ns));
        }
        return (ns->boundary);
}

static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
        struct bio *bp = arg;
        nvme_cb_fn_t bp_cb_fn;

        bp_cb_fn = bp->bio_driver1;

        if (bp->bio_driver2)
                free(bp->bio_driver2, M_NVME);

        if (nvme_completion_is_error(status)) {
                bp->bio_flags |= BIO_ERROR;
                if (bp->bio_error == 0)
                        bp->bio_error = EIO;
        }

        if ((bp->bio_flags & BIO_ERROR) == 0)
                bp->bio_resid = 0;
        else
                bp->bio_resid = bp->bio_bcount;

        bp_cb_fn(bp, status);
}
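/*
 * Bios that cross the namespace boundary (ns->boundary) are split into
 * child bios below.  Each child completion funnels through
 * nvme_bio_child_inbed(); once the number of completed ("inbed")
 * children matches bio_children, a synthetic completion is built for
 * the parent and handed to nvme_ns_bio_done().  An error in any single
 * child fails the entire parent bio.
 */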
static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
        struct nvme_completion parent_cpl;
        int children, inbed;

        if (bio_error != 0) {
                parent->bio_flags |= BIO_ERROR;
                parent->bio_error = bio_error;
        }

        /*
         * atomic_fetchadd will return value before adding 1, so we still
         * must add 1 to get the updated inbed number.  Save bio_children
         * before incrementing to guard against race conditions when
         * two children bios complete on different queues.
         */
        children = atomic_load_acq_int(&parent->bio_children);
        inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
        if (inbed == children) {
                bzero(&parent_cpl, sizeof(parent_cpl));
                if (parent->bio_flags & BIO_ERROR) {
                        parent_cpl.status &= ~(NVME_STATUS_SC_MASK <<
                            NVME_STATUS_SC_SHIFT);
                        parent_cpl.status |= NVME_SC_DATA_TRANSFER_ERROR <<
                            NVME_STATUS_SC_SHIFT;
                }
                nvme_ns_bio_done(parent, &parent_cpl);
        }
}

static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio *child = arg;
        struct bio *parent;
        int bio_error;

        parent = child->bio_parent;
        g_destroy_bio(child);
        bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
        nvme_bio_child_inbed(parent, bio_error);
}

static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
        uint32_t num_segs, offset, remainder;

        if (align == 0)
                return (1);

        KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));

        num_segs = size / align;
        remainder = size & (align - 1);
        offset = addr & (align - 1);
        if (remainder > 0 || offset > 0)
                num_segs += 1 + (remainder + offset - 1) / align;
        return (num_segs);
}

static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
        int i;

        for (i = 0; i < num_bios; i++) {
                if (child_bios[i] != NULL)
                        g_destroy_bio(child_bios[i]);
        }

        free(child_bios, M_NVME);
}

static struct bio **
nvme_allocate_child_bios(int num_bios)
{
        struct bio **child_bios;
        int err = 0, i;

        child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
        if (child_bios == NULL)
                return (NULL);

        for (i = 0; i < num_bios; i++) {
                child_bios[i] = g_new_bio();
                if (child_bios[i] == NULL)
                        err = ENOMEM;
        }

        if (err == ENOMEM) {
                nvme_free_child_bios(num_bios, child_bios);
                return (NULL);
        }

        return (child_bios);
}
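/*
 * Worked example for nvme_get_num_segments(): a 16 KB transfer starting
 * at offset 4 KB against an 8 KB boundary touches three boundary-sized
 * segments, [4K,8K), [8K,16K) and [16K,20K):
 *
 *	num_segs  = 16384 / 8192 = 2
 *	remainder = 16384 & 8191 = 0
 *	offset    =  4096 & 8191 = 4096
 *	num_segs += 1 + (0 + 4096 - 1) / 8192   -->  3
 *
 * nvme_construct_child_bios() below carves the parent bio into exactly
 * that many children, each clipped so it never crosses a boundary.
 */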
static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
        struct bio **child_bios;
        struct bio *child;
        uint64_t cur_offset;
        caddr_t data;
        uint32_t rem_bcount;
        int i;
        struct vm_page **ma;
        uint32_t ma_offset;

        *num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
            alignment);
        child_bios = nvme_allocate_child_bios(*num_bios);
        if (child_bios == NULL)
                return (NULL);

        bp->bio_children = *num_bios;
        bp->bio_inbed = 0;
        cur_offset = bp->bio_offset;
        rem_bcount = bp->bio_bcount;
        data = bp->bio_data;
        ma_offset = bp->bio_ma_offset;
        ma = bp->bio_ma;

        for (i = 0; i < *num_bios; i++) {
                child = child_bios[i];
                child->bio_parent = bp;
                child->bio_cmd = bp->bio_cmd;
                child->bio_offset = cur_offset;
                child->bio_bcount = min(rem_bcount,
                    alignment - (cur_offset & (alignment - 1)));
                child->bio_flags = bp->bio_flags;
                if (bp->bio_flags & BIO_UNMAPPED) {
                        child->bio_ma_offset = ma_offset;
                        child->bio_ma = ma;
                        child->bio_ma_n =
                            nvme_get_num_segments(child->bio_ma_offset,
                                child->bio_bcount, PAGE_SIZE);
                        ma_offset = (ma_offset + child->bio_bcount) &
                            PAGE_MASK;
                        ma += child->bio_ma_n;
                        /*
                         * If this child ends mid-page, the next child
                         * shares that page, so step back one entry.
                         */
                        if (ma_offset != 0)
                                ma -= 1;
                } else {
                        child->bio_data = data;
                        data += child->bio_bcount;
                }
                cur_offset += child->bio_bcount;
                rem_bcount -= child->bio_bcount;
        }

        return (child_bios);
}

static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment)
{
        struct bio *child;
        struct bio **child_bios;
        int err, i, num_bios;

        child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
        if (child_bios == NULL)
                return (ENOMEM);

        for (i = 0; i < num_bios; i++) {
                child = child_bios[i];
                err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
                if (err != 0) {
                        nvme_bio_child_inbed(bp, err);
                        g_destroy_bio(child);
                }
        }

        free(child_bios, M_NVME);
        return (0);
}

int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn)
{
        struct nvme_dsm_range *dsm_range;
        uint32_t num_bios;
        int err;

        bp->bio_driver1 = cb_fn;

        if (ns->boundary > 0 &&
            (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
                num_bios = nvme_get_num_segments(bp->bio_offset,
                    bp->bio_bcount, ns->boundary);
                if (num_bios > 1)
                        return (nvme_ns_split_bio(ns, bp, ns->boundary));
        }

        switch (bp->bio_cmd) {
        case BIO_READ:
                err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_WRITE:
                err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_FLUSH:
                err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
                break;
        case BIO_DELETE:
                /*
                 * M_WAITOK allocations cannot return NULL, so no NULL
                 * check is needed.  Freed in nvme_ns_bio_done() via
                 * bio_driver2.
                 */
                dsm_range = malloc(sizeof(struct nvme_dsm_range), M_NVME,
                    M_ZERO | M_WAITOK);
                dsm_range->length =
                    htole32(bp->bio_bcount / nvme_ns_get_sector_size(ns));
                dsm_range->starting_lba =
                    htole64(bp->bio_offset / nvme_ns_get_sector_size(ns));
                bp->bio_driver2 = dsm_range;
                err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
                    nvme_ns_bio_done, bp);
                if (err != 0)
                        free(dsm_range, M_NVME);
                break;
        default:
                err = EOPNOTSUPP;
                break;
        }

        return (err);
}

int
nvme_ns_ioctl_process(struct nvme_namespace *ns, u_long cmd, caddr_t arg,
    int flag, struct thread *td)
{
        return (nvme_ns_ioctl(ns->cdev, cmd, arg, flag, td));
}
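/*
 * nvme_ns_construct() runs both at initial attach and again after every
 * controller-level reset, so it must be idempotent for state that
 * survives a reset: the namespace lock is initialized only once, and an
 * existing cdev is reused rather than recreated.
 */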
int
nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
    struct nvme_controller *ctrlr)
{
        struct make_dev_args md_args;
        struct nvme_completion_poll_status status;
        int res;
        int unit;
        uint8_t flbas_fmt;
        uint8_t vwc_present;

        ns->ctrlr = ctrlr;
        ns->id = id;

        /*
         * Namespaces are reconstructed after a controller reset, so check
         * to make sure we only call mtx_init once on each mtx.
         *
         * TODO: Move this somewhere where it gets called at controller
         * construction time, which is not invoked as part of each
         * controller reset.
         */
        if (!mtx_initialized(&ns->lock))
                mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

        status.done = 0;
        nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
            nvme_completion_poll_cb, &status);
        nvme_completion_poll(&status);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
                return (ENXIO);
        }

        /* Convert data to host endian */
        nvme_namespace_data_swapbytes(&ns->data);

        /*
         * If the size is zero, chances are this isn't a valid
         * namespace (e.g. one that's not been configured yet).  The
         * standard says the entire id will be zeros, so this is a
         * cheap way to test for that.
         */
        if (ns->data.nsze == 0)
                return (ENXIO);

        flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
            NVME_NS_DATA_FLBAS_FORMAT_MASK;
        /*
         * Note: format is a 0-based value, so > is appropriate here,
         * not >=.
         */
        if (flbas_fmt > ns->data.nlbaf) {
                printf("lba format %d exceeds number supported (%d)\n",
                    flbas_fmt, ns->data.nlbaf + 1);
                return (ENXIO);
        }

        /*
         * Older Intel devices advertise in vendor specific space an
         * alignment that improves performance.  If present, use it for
         * the stripe size.  NVMe 1.3 standardized this as NOIOB, and
         * newer Intel drives use that.
         */
        switch (pci_get_devid(ctrlr->dev)) {
        case 0x09538086:		/* Intel DC PC3500 */
        case 0x0a538086:		/* Intel DC PC3520 */
        case 0x0a548086:		/* Intel DC PC4500 */
        case 0x0a558086:		/* Dell Intel P4600 */
                if (ctrlr->cdata.vs[3] != 0)
                        ns->boundary =
                            (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;
                else
                        ns->boundary = 0;
                break;
        default:
                ns->boundary = ns->data.noiob * nvme_ns_get_sector_size(ns);
                break;
        }

        if (nvme_ctrlr_has_dataset_mgmt(&ctrlr->cdata))
                ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

        vwc_present = (ctrlr->cdata.vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) &
            NVME_CTRLR_DATA_VWC_PRESENT_MASK;
        if (vwc_present)
                ns->flags |= NVME_NS_FLUSH_SUPPORTED;

        /*
         * cdev may have already been created, if we are reconstructing the
         * namespace after a controller-level reset.
         */
        if (ns->cdev != NULL)
                return (0);

        /*
         * Namespace IDs start at 1, so we need to subtract 1 to create a
         * correct unit number.
         */
        unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;

        make_dev_args_init(&md_args);
        md_args.mda_devsw = &nvme_ns_cdevsw;
        md_args.mda_unit = unit;
        md_args.mda_mode = 0600;
        md_args.mda_si_drv1 = ns;
        res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
        if (res != 0)
                return (ENXIO);

        ns->cdev->si_flags |= SI_UNMAPPED;

        return (0);
}

void
nvme_ns_destruct(struct nvme_namespace *ns)
{

        if (ns->cdev != NULL)
                destroy_dev(ns->cdev);
}
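/*
 * Caller sketch (illustrative only; the actual loop lives in the
 * controller attach path, outside this file): each namespace the
 * controller reports is constructed with a 1-based namespace ID, and
 * destructed again at detach.
 *
 *	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++)
 *		nvme_ns_construct(&ctrlr->ns[i], i + 1, ctrlr);
 */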