/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/nv.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <uuid.h>

#include "libnvmf.h"
#include "internal.h"

static void
nvmf_init_sqe(void *sqe, uint8_t opcode)
{
	struct nvme_command *cmd = sqe;

	memset(cmd, 0, sizeof(*cmd));
	cmd->opc = opcode;
}

static void
nvmf_init_fabrics_sqe(void *sqe, uint8_t fctype)
{
	struct nvmf_capsule_cmd *cmd = sqe;

	nvmf_init_sqe(sqe, NVME_OPC_FABRICS_COMMANDS);
	cmd->fctype = fctype;
}

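/*
 * Example (sketch, not compiled): the helpers above give every
 * Fabrics command the common shape: the SQE is zeroed, OPC is set to
 * the Fabrics opcode, and FCTYPE selects the specific Fabrics
 * command.  The hypothetical function below builds a PROPERTY GET
 * header the same way nvmf_get_property() does later in this file.
 */
#if 0
static void
example_build_fabrics_sqe(void)
{
	struct nvmf_fabric_prop_get_cmd cmd;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_PROPERTY_GET);
	cmd.ofst = htole32(0x0);	/* CAP, as an example offset */
	cmd.attrib.size = NVMF_PROP_SIZE_8;
}
#endif
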
struct nvmf_qpair *
nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato)
{
	struct nvmf_fabric_connect_cmd cmd;
	struct nvmf_fabric_connect_data data;
	const struct nvmf_fabric_connect_rsp *rsp;
	struct nvmf_qpair *qp;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t sqhd, status;

	qp = NULL;
	cc = NULL;
	rc = NULL;
	na_clear_error(na);
	if (na->na_controller) {
		na_error(na, "Cannot connect on a controller");
		goto error;
	}

	if (params->admin != (qid == 0)) {
		na_error(na, "Admin queue must use Queue ID 0");
		goto error;
	}

	if (qid == 0) {
		if (queue_size < NVME_MIN_ADMIN_ENTRIES ||
		    queue_size > NVME_MAX_ADMIN_ENTRIES) {
			na_error(na, "Invalid queue size %u", queue_size);
			goto error;
		}
	} else {
		if (queue_size < NVME_MIN_IO_ENTRIES ||
		    queue_size > NVME_MAX_IO_ENTRIES) {
			na_error(na, "Invalid queue size %u", queue_size);
			goto error;
		}

		/* KATO is only for Admin queues. */
		if (kato != 0) {
			na_error(na, "Cannot set KATO on I/O queues");
			goto error;
		}
	}

	qp = nvmf_allocate_qpair(na, params);
	if (qp == NULL)
		goto error;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_CONNECT);
	cmd.recfmt = 0;
	cmd.qid = htole16(qid);

	/* N.B. sqsize is 0's based. */
	cmd.sqsize = htole16(queue_size - 1);
	if (!na->na_params.sq_flow_control)
		cmd.cattr |= NVMF_CONNECT_ATTR_DISABLE_SQ_FC;
	cmd.kato = htole32(kato);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL) {
		na_error(na, "Failed to allocate command capsule: %s",
		    strerror(errno));
		goto error;
	}

	memset(&data, 0, sizeof(data));
	memcpy(data.hostid, hostid, sizeof(data.hostid));
	data.cntlid = htole16(cntlid);
	strlcpy(data.subnqn, subnqn, sizeof(data.subnqn));
	strlcpy(data.hostnqn, hostnqn, sizeof(data.hostnqn));

	error = nvmf_capsule_append_data(cc, &data, sizeof(data), true);
	if (error != 0) {
		na_error(na, "Failed to append data to CONNECT capsule: %s",
		    strerror(error));
		goto error;
	}

	error = nvmf_transmit_capsule(cc);
	if (error != 0) {
		na_error(na, "Failed to transmit CONNECT capsule: %s",
		    strerror(error));
		goto error;
	}

	error = nvmf_receive_capsule(qp, &rc);
	if (error != 0) {
		na_error(na, "Failed to receive CONNECT response: %s",
		    strerror(error));
		goto error;
	}

	rsp = (const struct nvmf_fabric_connect_rsp *)&rc->nc_cqe;
	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		if (NVME_STATUS_GET_SC(status) == NVMF_FABRIC_SC_INVALID_PARAM)
			na_error(na,
			    "CONNECT invalid parameter IATTR: %#x IPO: %#x",
			    rsp->status_code_specific.invalid.iattr,
			    rsp->status_code_specific.invalid.ipo);
		else
			na_error(na, "CONNECT failed, status %#x", status);
		goto error;
	}

	if (rc->nc_cqe.cid != cmd.cid) {
		na_error(na, "Mismatched CID in CONNECT response");
		goto error;
	}

	if (!rc->nc_sqhd_valid) {
		na_error(na, "CONNECT response without valid SQHD");
		goto error;
	}

	sqhd = le16toh(rsp->sqhd);
	if (sqhd == 0xffff) {
		if (na->na_params.sq_flow_control) {
			na_error(na, "Controller disabled SQ flow control");
			goto error;
		}
		qp->nq_flow_control = false;
	} else {
		qp->nq_flow_control = true;
		qp->nq_sqhd = sqhd;
		qp->nq_sqtail = sqhd;
	}

	if (rsp->status_code_specific.success.authreq) {
		na_error(na, "CONNECT response requests authentication");
		goto error;
	}

	qp->nq_qsize = queue_size;
	qp->nq_cntlid = le16toh(rsp->status_code_specific.success.cntlid);
	qp->nq_kato = kato;
	/* XXX: Save qid in qp? */
	nvmf_free_capsule(rc);
	nvmf_free_capsule(cc);
	return (qp);

error:
	if (rc != NULL)
		nvmf_free_capsule(rc);
	if (cc != NULL)
		nvmf_free_capsule(cc);
	if (qp != NULL)
		nvmf_free_qpair(qp);
	return (NULL);
}

uint16_t
nvmf_cntlid(struct nvmf_qpair *qp)
{
	return (qp->nq_cntlid);
}

int
nvmf_host_transmit_command(struct nvmf_capsule *nc)
{
	struct nvmf_qpair *qp = nc->nc_qpair;
	uint16_t new_sqtail;
	int error;

	/* Fail if the queue is full. */
	new_sqtail = (qp->nq_sqtail + 1) % qp->nq_qsize;
	if (new_sqtail == qp->nq_sqhd)
		return (EBUSY);

	nc->nc_sqe.cid = htole16(qp->nq_cid);

	/* 4.2 Skip CID of 0xFFFF. */
	qp->nq_cid++;
	if (qp->nq_cid == 0xFFFF)
		qp->nq_cid = 0;

	error = nvmf_transmit_capsule(nc);
	if (error != 0)
		return (error);

	qp->nq_sqtail = new_sqtail;
	return (0);
}

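/*
 * Example (sketch, not compiled): connecting the Admin queue.  The
 * association "na" and the transport-specific parameters "qparams"
 * are assumed to have been set up by the caller; the function name
 * and the two-minute KATO are hypothetical choices.
 */
#if 0
static struct nvmf_qpair *
example_connect_admin(struct nvmf_association *na,
    const struct nvmf_qpair_params *qparams, const char *subnqn)
{
	char hostnqn[NVMF_NQN_MAX_LEN];
	uint8_t hostid[16];

	if (nvmf_hostid_from_hostuuid(hostid) != 0 ||
	    nvmf_nqn_from_hostuuid(hostnqn) != 0)
		return (NULL);

	/* Queue ID 0 selects the Admin queue; KATO is in milliseconds. */
	return (nvmf_connect(na, qparams, 0, NVME_MIN_ADMIN_ENTRIES, hostid,
	    NVMF_CNTLID_DYNAMIC, subnqn, hostnqn, 120000));
}
#endif
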
/* Receive a single capsule and update SQ FC accounting. */
static int
nvmf_host_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp)
{
	struct nvmf_capsule *nc;
	int error;

	/* If the SQ is empty, there is no response to wait for. */
	if (qp->nq_sqhd == qp->nq_sqtail)
		return (EWOULDBLOCK);

	error = nvmf_receive_capsule(qp, &nc);
	if (error != 0)
		return (error);

	if (qp->nq_flow_control) {
		if (nc->nc_sqhd_valid)
			qp->nq_sqhd = le16toh(nc->nc_cqe.sqhd);
	} else {
		/*
		 * If SQ FC is disabled, just advance the head for
		 * each response capsule received so that we track the
		 * number of outstanding commands.
		 */
		qp->nq_sqhd = (qp->nq_sqhd + 1) % qp->nq_qsize;
	}
	*ncp = nc;
	return (0);
}

int
nvmf_host_receive_response(struct nvmf_qpair *qp, struct nvmf_capsule **ncp)
{
	struct nvmf_capsule *nc;

	/* Return the oldest previously received response. */
	if (!TAILQ_EMPTY(&qp->nq_rx_capsules)) {
		nc = TAILQ_FIRST(&qp->nq_rx_capsules);
		TAILQ_REMOVE(&qp->nq_rx_capsules, nc, nc_link);
		*ncp = nc;
		return (0);
	}

	return (nvmf_host_receive_capsule(qp, ncp));
}

int
nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp)
{
	struct nvmf_qpair *qp = cc->nc_qpair;
	struct nvmf_capsule *rc;
	int error;

	/* Check if a response was already received. */
	TAILQ_FOREACH(rc, &qp->nq_rx_capsules, nc_link) {
		if (rc->nc_cqe.cid == cc->nc_sqe.cid) {
			TAILQ_REMOVE(&qp->nq_rx_capsules, rc, nc_link);
			*rcp = rc;
			return (0);
		}
	}

	/* Wait for a response. */
	for (;;) {
		error = nvmf_host_receive_capsule(qp, &rc);
		if (error != 0)
			return (error);

		if (rc->nc_cqe.cid != cc->nc_sqe.cid) {
			TAILQ_INSERT_TAIL(&qp->nq_rx_capsules, rc, nc_link);
			continue;
		}

		*rcp = rc;
		return (0);
	}
}

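/*
 * Example (sketch, not compiled): the submit/wait round trip that the
 * helpers below all follow, shown as a hypothetical stand-alone
 * function.  The capsule ownership rules are the point: the command
 * capsule is freed once a response has been received (or submission
 * failed), and the response capsule is freed after its CQE has been
 * examined.
 */
#if 0
static int
example_admin_roundtrip(struct nvmf_qpair *qp, struct nvme_command *sqe)
{
	struct nvmf_capsule *cc, *rc;
	uint16_t status;
	int error;

	cc = nvmf_allocate_command(qp, sqe);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	nvmf_free_capsule(rc);
	return (status == 0 ? 0 : EIO);
}
#endif
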
struct nvmf_capsule *
nvmf_keepalive(struct nvmf_qpair *qp)
{
	struct nvme_command cmd;

	if (!qp->nq_admin) {
		errno = EINVAL;
		return (NULL);
	}

	nvmf_init_sqe(&cmd, NVME_OPC_KEEP_ALIVE);

	return (nvmf_allocate_command(qp, &cmd));
}

static struct nvmf_capsule *
nvmf_get_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size)
{
	struct nvmf_fabric_prop_get_cmd cmd;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_PROPERTY_GET);
	switch (size) {
	case 4:
		cmd.attrib.size = NVMF_PROP_SIZE_4;
		break;
	case 8:
		cmd.attrib.size = NVMF_PROP_SIZE_8;
		break;
	default:
		errno = EINVAL;
		return (NULL);
	}
	cmd.ofst = htole32(offset);

	return (nvmf_allocate_command(qp, &cmd));
}

int
nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value)
{
	struct nvmf_capsule *cc, *rc;
	const struct nvmf_fabric_prop_get_rsp *rsp;
	uint16_t status;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	cc = nvmf_get_property(qp, offset, size);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	rsp = (const struct nvmf_fabric_prop_get_rsp *)&rc->nc_cqe;
	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: PROPERTY_GET failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	if (size == 8)
		*value = le64toh(rsp->value.u64);
	else
		*value = le32toh(rsp->value.u32.low);
	nvmf_free_capsule(rc);
	return (0);
}

static struct nvmf_capsule *
nvmf_set_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t value)
{
	struct nvmf_fabric_prop_set_cmd cmd;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_PROPERTY_SET);
	switch (size) {
	case 4:
		cmd.attrib.size = NVMF_PROP_SIZE_4;
		cmd.value.u32.low = htole32(value);
		break;
	case 8:
		cmd.attrib.size = NVMF_PROP_SIZE_8;
		cmd.value.u64 = htole64(value);
		break;
	default:
		errno = EINVAL;
		return (NULL);
	}
	cmd.ofst = htole32(offset);

	return (nvmf_allocate_command(qp, &cmd));
}

int
nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t value)
{
	struct nvmf_capsule *cc, *rc;
	uint16_t status;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	cc = nvmf_set_property(qp, offset, size, value);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: PROPERTY_SET failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

int
nvmf_hostid_from_hostuuid(uint8_t hostid[16])
{
	char hostuuid_str[64];
	uuid_t hostuuid;
	size_t len;
	uint32_t status;

	len = sizeof(hostuuid_str);
	if (sysctlbyname("kern.hostuuid", hostuuid_str, &len, NULL, 0) != 0)
		return (errno);

	uuid_from_string(hostuuid_str, &hostuuid, &status);
	switch (status) {
	case uuid_s_ok:
		break;
	case uuid_s_no_memory:
		return (ENOMEM);
	default:
		return (EINVAL);
	}

	uuid_enc_le(hostid, &hostuuid);
	return (0);
}

int
nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN])
{
	char hostuuid_str[64];
	size_t len;

	len = sizeof(hostuuid_str);
	if (sysctlbyname("kern.hostuuid", hostuuid_str, &len, NULL, 0) != 0)
		return (errno);

	strlcpy(nqn, NVMF_NQN_UUID_PRE, NVMF_NQN_MAX_LEN);
	strlcat(nqn, hostuuid_str, NVMF_NQN_MAX_LEN);
	return (0);
}

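/*
 * Example (sketch, not compiled): using the property helpers to
 * enable the controller.  NVMe-oF properties mirror the controller
 * registers, so the raw offsets below are the standard register
 * offsets (CC at 0x14, CSTS at 0x1c); a real caller would also rate
 * limit the CSTS.RDY poll instead of spinning.
 */
#if 0
static int
example_enable_controller(struct nvmf_qpair *qp)
{
	uint64_t cc, csts;
	int error;

	error = nvmf_read_property(qp, 0x14, 4, &cc);
	if (error != 0)
		return (error);

	error = nvmf_write_property(qp, 0x14, 4, cc | 0x1);	/* CC.EN */
	if (error != 0)
		return (error);

	/* Wait for CSTS.RDY. */
	do {
		error = nvmf_read_property(qp, 0x1c, 4, &csts);
		if (error != 0)
			return (error);
	} while ((csts & 0x1) == 0);

	return (0);
}
#endif
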
int
nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin)
		return (EINVAL);

	nvmf_init_sqe(&cmd, NVME_OPC_IDENTIFY);

	/* 5.15.1 Use CNS of 0x01 for controller data. */
	cmd.cdw10 = htole32(1);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, cdata, sizeof(*cdata), false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: IDENTIFY failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

int
nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin)
		return (EINVAL);

	nvmf_init_sqe(&cmd, NVME_OPC_IDENTIFY);

	/* 5.15.1 Use CNS of 0x00 for namespace data. */
	cmd.cdw10 = htole32(0);
	cmd.nsid = htole32(nsid);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, nsdata, sizeof(*nsdata), false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: IDENTIFY failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

static int
nvmf_get_discovery_log_page(struct nvmf_qpair *qp, uint64_t offset, void *buf,
    size_t len)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	size_t numd;
	int error;
	uint16_t status;

	if (len % 4 != 0 || len == 0 || offset % 4 != 0)
		return (EINVAL);

	/* NUMD is a 0's based count of dwords, split across CDW10/CDW11. */
	numd = (len / 4) - 1;
	nvmf_init_sqe(&cmd, NVME_OPC_GET_LOG_PAGE);
	cmd.cdw10 = htole32(numd << 16 | NVME_LOG_DISCOVERY);
	cmd.cdw11 = htole32(numd >> 16);
	cmd.cdw12 = htole32(offset);
	cmd.cdw13 = htole32(offset >> 32);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, buf, len, false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (NVMEV(NVME_STATUS_SC, status) ==
	    NVMF_FABRIC_SC_LOG_RESTART_DISCOVERY) {
		nvmf_free_capsule(rc);
		return (EAGAIN);
	}
	if (status != 0) {
		printf("NVMF: GET_LOG_PAGE failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

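/*
 * Example (sketch, not compiled): fetching the controller data over
 * the Admin queue.  The model number field (mn) is a fixed-size,
 * space-padded byte array, hence the precision in the format string.
 */
#if 0
static void
example_print_model(struct nvmf_qpair *qp)
{
	struct nvme_controller_data cdata;

	if (nvmf_host_identify_controller(qp, &cdata) == 0)
		printf("Model: %.*s\n", (int)sizeof(cdata.mn),
		    (const char *)cdata.mn);
}
#endif
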
int
nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp)
{
	struct nvme_discovery_log hdr, *log;
	size_t payload_len;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	log = NULL;
	for (;;) {
		error = nvmf_get_discovery_log_page(qp, 0, &hdr, sizeof(hdr));
		if (error != 0) {
			free(log);
			return (error);
		}
		nvme_discovery_log_swapbytes(&hdr);

		if (hdr.recfmt != 0) {
			printf("NVMF: Unsupported discovery log format: %d\n",
			    hdr.recfmt);
			free(log);
			return (EINVAL);
		}

		if (hdr.numrec > 1024) {
			printf("NVMF: Too many discovery log entries: %ju\n",
			    (uintmax_t)hdr.numrec);
			free(log);
			return (EFBIG);
		}

		payload_len = sizeof(log->entries[0]) * hdr.numrec;
		/* reallocf() frees the old buffer on failure. */
		log = reallocf(log, sizeof(*log) + payload_len);
		if (log == NULL)
			return (ENOMEM);
		*log = hdr;
		if (hdr.numrec == 0)
			break;

		error = nvmf_get_discovery_log_page(qp, sizeof(hdr),
		    log->entries, payload_len);
		if (error == EAGAIN)
			continue;
		if (error != 0) {
			free(log);
			return (error);
		}

		/* Re-read the header and check the generation count. */
		error = nvmf_get_discovery_log_page(qp, 0, &hdr, sizeof(hdr));
		if (error != 0) {
			free(log);
			return (error);
		}
		nvme_discovery_log_swapbytes(&hdr);

		if (log->genctr != hdr.genctr)
			continue;

		for (u_int i = 0; i < log->numrec; i++)
			nvme_discovery_log_entry_swapbytes(&log->entries[i]);
		break;
	}
	*logp = log;
	return (0);
}

int
nvmf_init_dle_from_admin_qp(struct nvmf_qpair *qp,
    const struct nvme_controller_data *cdata,
    struct nvme_discovery_log_entry *dle)
{
	int error;
	uint16_t cntlid;

	memset(dle, 0, sizeof(*dle));
	error = nvmf_populate_dle(qp, dle);
	if (error != 0)
		return (error);
	if ((cdata->fcatt & 1) == 0)
		cntlid = NVMF_CNTLID_DYNAMIC;
	else
		cntlid = cdata->ctrlr_id;
	dle->cntlid = htole16(cntlid);
	memcpy(dle->subnqn, cdata->subnqn, sizeof(dle->subnqn));
	return (0);
}

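/*
 * Example (sketch, not compiled): walking a fetched discovery log.
 * The caller owns the returned buffer and must free() it; the entry
 * fields follow struct nvme_discovery_log_entry.
 */
#if 0
static void
example_walk_discovery_log(struct nvmf_qpair *qp)
{
	struct nvme_discovery_log *log;

	if (nvmf_host_fetch_discovery_log_page(qp, &log) != 0)
		return;
	for (u_int i = 0; i < log->numrec; i++)
		printf("%u: %.*s\n", i, (int)sizeof(log->entries[i].subnqn),
		    (const char *)log->entries[i].subnqn);
	free(log);
}
#endif
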
int
nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested, u_int *actual)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin || requested < 1 || requested > 65535)
		return (EINVAL);

	/* The number of queues is 0's based. */
	requested--;

	nvmf_init_sqe(&cmd, NVME_OPC_SET_FEATURES);
	cmd.cdw10 = htole32(NVME_FEAT_NUMBER_OF_QUEUES);

	/* Same number of completion and submission queues. */
	cmd.cdw11 = htole32((requested << 16) | requested);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: SET_FEATURES failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	*actual = (le32toh(rc->nc_cqe.cdw0) & 0xffff) + 1;
	nvmf_free_capsule(rc);
	return (0);
}

static bool
is_queue_pair_idle(struct nvmf_qpair *qp)
{
	if (qp->nq_sqhd != qp->nq_sqtail)
		return (false);
	if (!TAILQ_EMPTY(&qp->nq_rx_capsules))
		return (false);
	return (true);
}

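/*
 * Example (sketch, not compiled): negotiating I/O queues once the
 * Admin queue is up.  The controller may grant more or fewer queue
 * pairs than requested, so the result is clamped to what the caller
 * can actually use.
 */
#if 0
static u_int
example_negotiate_io_queues(struct nvmf_qpair *admin_qp, u_int wanted)
{
	u_int actual;

	if (nvmf_host_request_queues(admin_qp, wanted, &actual) != 0)
		return (0);
	return (actual < wanted ? actual : wanted);
}
#endif
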
static int
prepare_queues_for_handoff(struct nvmf_ioc_nv *nv,
    const struct nvme_discovery_log_entry *dle, const char *hostnqn,
    struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	const struct nvmf_association *na = admin_qp->nq_association;
	nvlist_t *nvl, *nvl_qp, *nvl_rparams;
	u_int i;
	int error;

	if (num_queues == 0)
		return (EINVAL);

	/* Ensure trtype matches. */
	if (dle->trtype != na->na_trtype)
		return (EINVAL);

	/* All queue pairs must be idle. */
	if (!is_queue_pair_idle(admin_qp))
		return (EBUSY);
	for (i = 0; i < num_queues; i++) {
		if (!is_queue_pair_idle(io_queues[i]))
			return (EBUSY);
	}

	/* Fill out reconnect parameters. */
	nvl_rparams = nvlist_create(0);
	nvlist_add_binary(nvl_rparams, "dle", dle, sizeof(*dle));
	nvlist_add_string(nvl_rparams, "hostnqn", hostnqn);
	nvlist_add_number(nvl_rparams, "num_io_queues", num_queues);
	nvlist_add_number(nvl_rparams, "kato", admin_qp->nq_kato);
	nvlist_add_number(nvl_rparams, "io_qsize", io_queues[0]->nq_qsize);
	nvlist_add_bool(nvl_rparams, "sq_flow_control",
	    na->na_params.sq_flow_control);
	switch (na->na_trtype) {
	case NVMF_TRTYPE_TCP:
		nvlist_add_bool(nvl_rparams, "header_digests",
		    na->na_params.tcp.header_digests);
		nvlist_add_bool(nvl_rparams, "data_digests",
		    na->na_params.tcp.data_digests);
		break;
	default:
		__unreachable();
	}
	error = nvlist_error(nvl_rparams);
	if (error != 0) {
		nvlist_destroy(nvl_rparams);
		return (error);
	}

	nvl = nvlist_create(0);
	nvlist_add_number(nvl, "trtype", na->na_trtype);
	nvlist_add_number(nvl, "kato", admin_qp->nq_kato);
	nvlist_move_nvlist(nvl, "rparams", nvl_rparams);

	/* First, the admin queue. */
	error = nvmf_kernel_handoff_params(admin_qp, &nvl_qp);
	if (error) {
		nvlist_destroy(nvl);
		return (error);
	}
	nvlist_move_nvlist(nvl, "admin", nvl_qp);

	/* Next, the I/O queues. */
	for (i = 0; i < num_queues; i++) {
		error = nvmf_kernel_handoff_params(io_queues[i], &nvl_qp);
		if (error) {
			nvlist_destroy(nvl);
			return (error);
		}
		nvlist_append_nvlist_array(nvl, "io", nvl_qp);
	}

	nvlist_add_binary(nvl, "cdata", cdata, sizeof(*cdata));

	error = nvmf_pack_ioc_nvlist(nv, nvl);
	nvlist_destroy(nvl);
	return (error);
}

int
nvmf_handoff_host(const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	struct nvmf_ioc_nv nv;
	u_int i;
	int error, fd;

	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	error = prepare_queues_for_handoff(&nv, dle, hostnqn, admin_qp,
	    num_queues, io_queues, cdata);
	if (error != 0)
		goto out;

	if (ioctl(fd, NVMF_HANDOFF_HOST, &nv) == -1)
		error = errno;
	free(nv.data);

out:
	if (fd >= 0)
		close(fd);
	for (i = 0; i < num_queues; i++)
		(void)nvmf_free_qpair(io_queues[i]);
	(void)nvmf_free_qpair(admin_qp);
	return (error);
}

int
nvmf_disconnect_host(const char *host)
{
	int error, fd;

	error = 0;
	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	if (ioctl(fd, NVMF_DISCONNECT_HOST, &host) == -1)
		error = errno;

out:
	if (fd >= 0)
		close(fd);
	return (error);
}

int
nvmf_disconnect_all(void)
{
	int error, fd;

	error = 0;
	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	if (ioctl(fd, NVMF_DISCONNECT_ALL) == -1)
		error = errno;

out:
	if (fd >= 0)
		close(fd);
	return (error);
}

static int
nvmf_read_ioc_nv(int fd, u_long com, nvlist_t **nvlp)
{
	struct nvmf_ioc_nv nv;
	nvlist_t *nvl;
	int error;

	/* The first ioctl learns the required buffer length. */
	memset(&nv, 0, sizeof(nv));
	if (ioctl(fd, com, &nv) == -1)
		return (errno);

	nv.data = malloc(nv.len);
	if (nv.data == NULL)
		return (ENOMEM);
	nv.size = nv.len;
	if (ioctl(fd, com, &nv) == -1) {
		error = errno;
		free(nv.data);
		return (error);
	}

	nvl = nvlist_unpack(nv.data, nv.len, 0);
	free(nv.data);
	if (nvl == NULL)
		return (EINVAL);

	*nvlp = nvl;
	return (0);
}

int
nvmf_reconnect_params(int fd, nvlist_t **nvlp)
{
	return (nvmf_read_ioc_nv(fd, NVMF_RECONNECT_PARAMS, nvlp));
}

int
nvmf_reconnect_host(int fd, const struct nvme_discovery_log_entry *dle,
    const char *hostnqn, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	struct nvmf_ioc_nv nv;
	u_int i;
	int error;

	error = prepare_queues_for_handoff(&nv, dle, hostnqn, admin_qp,
	    num_queues, io_queues, cdata);
	if (error != 0)
		goto out;

	if (ioctl(fd, NVMF_RECONNECT_HOST, &nv) == -1)
		error = errno;
	free(nv.data);

out:
	for (i = 0; i < num_queues; i++)
		(void)nvmf_free_qpair(io_queues[i]);
	(void)nvmf_free_qpair(admin_qp);
	return (error);
}

int
nvmf_connection_status(int fd, nvlist_t **nvlp)
{
	return (nvmf_read_ioc_nv(fd, NVMF_CONNECTION_STATUS, nvlp));
}

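/*
 * Example (sketch, not compiled): a hypothetical end-to-end bring-up
 * tying the pieces in this file together: connect the Admin queue,
 * identify the controller, negotiate I/O queues, connect them, and
 * hand the connection off to the kernel.  Note that on both success
 * and failure nvmf_handoff_host() consumes and frees all of the
 * queue pairs, so no cleanup of them is needed afterwards.
 */
#if 0
static int
example_bringup(struct nvmf_association *na,
    const struct nvmf_qpair_params *aparams,
    const struct nvmf_qpair_params *ioparams,
    const struct nvme_discovery_log_entry *dle)
{
	struct nvme_controller_data cdata;
	struct nvmf_qpair *admin, *io[4];
	char hostnqn[NVMF_NQN_MAX_LEN];
	uint8_t hostid[16];
	u_int i, nq;
	int error;

	if (nvmf_hostid_from_hostuuid(hostid) != 0 ||
	    nvmf_nqn_from_hostuuid(hostnqn) != 0)
		return (ENXIO);

	admin = nvmf_connect(na, aparams, 0, NVME_MIN_ADMIN_ENTRIES, hostid,
	    NVMF_CNTLID_DYNAMIC, (const char *)dle->subnqn, hostnqn, 120000);
	if (admin == NULL)
		return (ENXIO);

	error = nvmf_host_identify_controller(admin, &cdata);
	if (error == 0)
		error = nvmf_host_request_queues(admin, 4, &nq);
	if (error != 0) {
		nvmf_free_qpair(admin);
		return (error);
	}
	if (nq > 4)
		nq = 4;

	/* I/O queue IDs start at 1; KATO must be 0 on I/O queues. */
	for (i = 0; i < nq; i++) {
		io[i] = nvmf_connect(na, ioparams, i + 1, NVME_MIN_IO_ENTRIES,
		    hostid, nvmf_cntlid(admin), (const char *)dle->subnqn,
		    hostnqn, 0);
		if (io[i] == NULL) {
			while (i-- > 0)
				nvmf_free_qpair(io[i]);
			nvmf_free_qpair(admin);
			return (ENXIO);
		}
	}

	return (nvmf_handoff_host(dle, hostnqn, admin, nq, io, &cdata));
}
#endif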