Lines Matching +full:reset +full:- +full:n +full:- +full:io
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
6 * Copyright 2020-2021 Joyent, Inc.
74 #define VTBLK_F_RO (1 << 5) /* Disk is read-only */
77 #define VTBLK_F_FLUSH (1 << 9) /* Writeback mode enabled after reset */
81 #define VTBLK_F_MQ (1 << 12) /* Multi-Queue */
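These VTBLK_F_* bits live in the 64-bit feature word the guest negotiates against the device. A minimal sketch of testing a negotiated bit follows; the helper name is hypothetical, and bhyve's full capability mask includes feature bits not matched by this search.

#include <stdint.h>

/* Hypothetical helper: non-zero if the given virtio-blk feature bit is set
 * in the negotiated feature word. */
static inline int
vtblk_feature_negotiated(uint64_t negotiated_features, uint64_t bit)
{
	return ((negotiated_features & bit) != 0);
}

/* e.g. vtblk_feature_negotiated(feats, VTBLK_F_RO) => disk is read-only;
 *      vtblk_feature_negotiated(feats, VTBLK_F_MQ) => multi-queue in use. */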
139 * Fixed-size block header
182 * Per-device softc
227 DPRINTF(("vtblk: device reset requested !")); in pci_vtblk_reset()
228 vi_reset_dev(&sc->vbsc_vs); in pci_vtblk_reset()
232 pci_vtblk_done_locked(struct pci_vtblk_ioreq *io, int err) in pci_vtblk_done_locked() argument
234 struct pci_vtblk_softc *sc = io->io_sc; in pci_vtblk_done_locked()
238 *io->io_status = VTBLK_S_UNSUPP; in pci_vtblk_done_locked()
240 *io->io_status = VTBLK_S_IOERR; in pci_vtblk_done_locked()
242 *io->io_status = VTBLK_S_OK; in pci_vtblk_done_locked()
248 vq_relchain(&sc->vbsc_vq, io->io_idx, 1); in pci_vtblk_done_locked()
249 vq_endchains(&sc->vbsc_vq, 0); in pci_vtblk_done_locked()
258 DPRINTF(("vtblk: device pause requested !\n")); in pci_vtblk_pause()
259 blockif_pause(sc->bc); in pci_vtblk_pause()
267 DPRINTF(("vtblk: device resume requested !\n")); in pci_vtblk_resume()
268 blockif_resume(sc->bc); in pci_vtblk_resume()
277 SNAPSHOT_VAR_OR_LEAVE(sc->vbsc_cfg, meta, ret, done); in pci_vtblk_snapshot()
278 SNAPSHOT_BUF_OR_LEAVE(sc->vbsc_ident, sizeof(sc->vbsc_ident), in pci_vtblk_snapshot()
289 struct pci_vtblk_ioreq *io = br->br_param; in pci_vtblk_done() local
290 struct pci_vtblk_softc *sc = io->io_sc; in pci_vtblk_done()
292 pthread_mutex_lock(&sc->vsc_mtx); in pci_vtblk_done()
293 pci_vtblk_done_locked(io, err); in pci_vtblk_done()
294 pthread_mutex_unlock(&sc->vsc_mtx); in pci_vtblk_done()
301 struct pci_vtblk_ioreq *io; in pci_vtblk_proc() local
302 int i, n; in pci_vtblk_proc() local
310 n = vq_getchain(vq, iov, BLOCKIF_IOV_MAX + 2, &req); in pci_vtblk_proc()
313 * The first descriptor will be the read-only fixed header, in pci_vtblk_proc()
317 * XXX - note - this fails on crash dump, which does a in pci_vtblk_proc()
320 assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2); in pci_vtblk_proc()
322 io = &sc->vbsc_ios[req.idx]; in pci_vtblk_proc()
326 memcpy(&io->io_req.br_iov, &iov[1], sizeof(struct iovec) * (n - 2)); in pci_vtblk_proc()
327 io->io_req.br_iovcnt = n - 2; in pci_vtblk_proc()
328 io->io_req.br_offset = vbh->vbh_sector * VTBLK_BSIZE; in pci_vtblk_proc()
329 io->io_status = (uint8_t *)iov[--n].iov_base; in pci_vtblk_proc()
331 assert(iov[n].iov_len == 1); in pci_vtblk_proc()
338 type = vbh->vbh_type & ~VBH_FLAG_BARRIER; in pci_vtblk_proc()
341 * - Write op implies read-only descriptor in pci_vtblk_proc()
342 * - Read/ident op implies write-only descriptor in pci_vtblk_proc()
344 * By taking away either the read-only fixed header or the write-only in pci_vtblk_proc()
347 assert(n == (writeop ? req.readable : req.writable)); in pci_vtblk_proc()
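Lines 313-347 above describe the descriptor-chain layout pci_vtblk_proc() depends on: the first descriptor holds the fixed header, the middle descriptors carry the data, and the final one-byte descriptor receives the status. A sketch of that layout, with header fields inferred from the vbh_type/vbh_sector accesses (struct and field names here are assumptions, not necessarily bhyve's exact definitions):

#include <stdint.h>

/* Sketch of the fixed-size header carried in descriptor 0. */
struct vtblk_hdr_sketch {
	uint32_t	vbh_type;	/* request type; VBH_FLAG_BARRIER may be OR'ed in */
	uint32_t	vbh_ioprio;	/* priority, unused here (assumed field) */
	uint64_t	vbh_sector;	/* starting sector, in 512-byte units */
} __attribute__((packed));

/*
 * Chain layout consumed above:
 *
 *   iov[0]        read-only   fixed header (struct above)
 *   iov[1..n-2]   data        guest buffers for the read/write/discard payload
 *   iov[n-1]      write-only  1-byte status (VTBLK_S_OK / _IOERR / _UNSUPP)
 *
 * Hence the "+ 2" in BLOCKIF_IOV_MAX + 2 and the n - 2 data-iov count, and
 * why dropping the header (write op) or the status byte (read/ident op)
 * makes n equal to req.readable or req.writable respectively.
 */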
350 for (i = 1; i < n; i++) { in pci_vtblk_proc()
353 io->io_req.br_resid = iolen; in pci_vtblk_proc()
355 DPRINTF(("virtio-block: %s op, %zd bytes, %d segs, offset %ld", in pci_vtblk_proc()
356 writeop ? "write/discard" : "read/ident", iolen, i - 1, in pci_vtblk_proc()
357 io->io_req.br_offset)); in pci_vtblk_proc()
361 err = blockif_read(sc->bc, &io->io_req); in pci_vtblk_proc()
364 err = blockif_write(sc->bc, &io->io_req); in pci_vtblk_proc()
373 pci_vtblk_done_locked(io, EINVAL); in pci_vtblk_proc()
391 if (discard->flags.unmap != 0 || discard->flags.reserved != 0) { in pci_vtblk_proc()
392 pci_vtblk_done_locked(io, ENOTSUP); in pci_vtblk_proc()
397 if (discard->num_sectors > VTBLK_MAX_DISCARD_SECT) { in pci_vtblk_proc()
398 pci_vtblk_done_locked(io, EINVAL); in pci_vtblk_proc()
402 io->io_req.br_offset = discard->sector * VTBLK_BSIZE; in pci_vtblk_proc()
403 io->io_req.br_resid = discard->num_sectors * VTBLK_BSIZE; in pci_vtblk_proc()
404 err = blockif_delete(sc->bc, &io->io_req); in pci_vtblk_proc()
408 err = blockif_flush(sc->bc, &io->io_req); in pci_vtblk_proc()
412 /* A serial number (S/n) that exactly fills the buffer is not zero-terminated. */ in pci_vtblk_proc()
414 strncpy(iov[1].iov_base, sc->vbsc_ident, in pci_vtblk_proc()
415 MIN(iov[1].iov_len, sizeof(sc->vbsc_ident))); in pci_vtblk_proc()
416 pci_vtblk_done_locked(io, 0); in pci_vtblk_proc()
419 pci_vtblk_done_locked(io, EOPNOTSUPP); in pci_vtblk_proc()
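The VBH_OP_DISCARD case above (lines 391-404) reads one discard segment out of the data descriptor and validates it before handing it to blockif_delete(). A sketch of the segment layout implied by those field accesses; the struct name is hypothetical, the fields follow the virtio discard/write-zeroes segment:

#include <stdint.h>

/* Hypothetical name; fields mirror the discard->sector / num_sectors /
 * flags accesses above.  flags.unmap is only meaningful for write-zeroes,
 * so it is rejected here (ENOTSUP), and num_sectors beyond
 * VTBLK_MAX_DISCARD_SECT is rejected with EINVAL. */
struct vtblk_discard_seg_sketch {
	uint64_t	sector;		/* first sector of the range, 512-byte units */
	uint32_t	num_sectors;	/* length of the range, in sectors */
	struct {
		uint32_t	unmap:1;
		uint32_t	reserved:31;	/* must be zero */
	} flags;
};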
442 sc->vbsc_cfg.vbc_capacity = new_size / VTBLK_BSIZE; /* 512-byte units */ in pci_vtblk_resized()
443 vi_interrupt(&sc->vbsc_vs, VIRTIO_PCI_ISR_CONFIG, in pci_vtblk_resized()
444 sc->vbsc_vs.vs_msix_cfg_idx); in pci_vtblk_resized()
462 snprintf(bident, sizeof(bident), "%u:%u", pi->pi_slot, pi->pi_func); in pci_vtblk_init()
479 sc->bc = bctxt; in pci_vtblk_init()
481 struct pci_vtblk_ioreq *io = &sc->vbsc_ios[i]; in pci_vtblk_init() local
482 io->io_req.br_callback = pci_vtblk_done; in pci_vtblk_init()
483 io->io_req.br_param = io; in pci_vtblk_init()
484 io->io_sc = sc; in pci_vtblk_init()
485 io->io_idx = i; in pci_vtblk_init()
488 bcopy(&vtblk_vi_consts, &sc->vbsc_consts, sizeof (vtblk_vi_consts)); in pci_vtblk_init()
489 if (blockif_candelete(sc->bc)) in pci_vtblk_init()
490 sc->vbsc_consts.vc_hv_caps |= VTBLK_F_DISCARD; in pci_vtblk_init()
492 pthread_mutex_init(&sc->vsc_mtx, NULL); in pci_vtblk_init()
495 vi_softc_linkup(&sc->vbsc_vs, &sc->vbsc_consts, sc, pi, &sc->vbsc_vq); in pci_vtblk_init()
496 sc->vbsc_vs.vs_mtx = &sc->vsc_mtx; in pci_vtblk_init()
498 sc->vbsc_vq.vq_qsize = VTBLK_RINGSZ; in pci_vtblk_init()
499 /* sc->vbsc_vq.vq_notify = we have no per-queue notify */ in pci_vtblk_init()
505 bzero(sc->vbsc_ident, VTBLK_BLK_ID_BYTES); in pci_vtblk_init()
508 strlcpy(sc->vbsc_ident, serial, VTBLK_BLK_ID_BYTES); in pci_vtblk_init()
514 snprintf(sc->vbsc_ident, VTBLK_BLK_ID_BYTES, in pci_vtblk_init()
515 "BHYVE-%02X%02X-%02X%02X-%02X%02X", in pci_vtblk_init()
521 sc->vbsc_cfg.vbc_capacity = size / VTBLK_BSIZE; /* 512-byte units */ in pci_vtblk_init()
522 sc->vbsc_cfg.vbc_size_max = 0; /* not negotiated */ in pci_vtblk_init()
531 sc->vbsc_cfg.vbc_seg_max = MIN(VTBLK_RINGSZ - 2, BLOCKIF_IOV_MAX); in pci_vtblk_init()
532 sc->vbsc_cfg.vbc_geometry.cylinders = 0; /* no geometry */ in pci_vtblk_init()
533 sc->vbsc_cfg.vbc_geometry.heads = 0; in pci_vtblk_init()
534 sc->vbsc_cfg.vbc_geometry.sectors = 0; in pci_vtblk_init()
535 sc->vbsc_cfg.vbc_blk_size = sectsz; in pci_vtblk_init()
536 sc->vbsc_cfg.vbc_topology.physical_block_exp = in pci_vtblk_init()
537 (sts > sectsz) ? (ffsll(sts / sectsz) - 1) : 0; in pci_vtblk_init()
538 sc->vbsc_cfg.vbc_topology.alignment_offset = in pci_vtblk_init()
539 (sto != 0) ? ((sts - sto) / sectsz) : 0; in pci_vtblk_init()
540 sc->vbsc_cfg.vbc_topology.min_io_size = 0; in pci_vtblk_init()
541 sc->vbsc_cfg.vbc_topology.opt_io_size = 0; in pci_vtblk_init()
542 sc->vbsc_cfg.vbc_writeback = 0; in pci_vtblk_init()
543 sc->vbsc_cfg.max_discard_sectors = VTBLK_MAX_DISCARD_SECT; in pci_vtblk_init()
544 sc->vbsc_cfg.max_discard_seg = VTBLK_MAX_DISCARD_SEG; in pci_vtblk_init()
545 sc->vbsc_cfg.discard_sector_alignment = MAX(sectsz, sts) / VTBLK_BSIZE; in pci_vtblk_init()
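A worked example of the topology and discard-alignment arithmetic above, assuming (illustratively) that sectsz is a 512-byte logical sector size, sts a 4 KiB physical/stripe sector size, and sto a 512-byte stripe offset reported by the backend:

#include <stdio.h>
#include <strings.h>	/* ffsll() */

#define BSIZE		512			/* stands in for VTBLK_BSIZE */
#define MAXV(a, b)	((a) > (b) ? (a) : (b))	/* stands in for MAX() */

int
main(void)
{
	long long sectsz = 512, sts = 4096, sto = 512;	/* assumed values */

	/* physical_block_exp: log2 of the physical/logical sector ratio.
	 * ffsll(4096 / 512) - 1 == ffsll(8) - 1 == 3, so the guest sees
	 * physical blocks of 512 << 3 == 4096 bytes. */
	int phys_exp = (sts > sectsz) ? (ffsll(sts / sectsz) - 1) : 0;

	/* alignment_offset: logical sectors from the start of the disk to
	 * the first physically aligned block: (4096 - 512) / 512 == 7. */
	int align_off = (sto != 0) ? ((sts - sto) / sectsz) : 0;

	/* discard_sector_alignment: MAXV(512, 4096) / 512 == 8, i.e. discard
	 * requests are expected on 4 KiB (8-sector) boundaries. */
	int disc_align = MAXV(sectsz, sts) / BSIZE;

	printf("phys_exp=%d align_off=%d disc_align=%d\n",
	    phys_exp, align_off, disc_align);
	return (0);
}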
558 if (vi_intr_init(&sc->vbsc_vs, 1, fbsdrun_virtio_msix())) { in pci_vtblk_init()
559 blockif_close(sc->bc); in pci_vtblk_init()
563 vi_set_io_bar(&sc->vbsc_vs, 0); in pci_vtblk_init()
564 blockif_register_resize_callback(sc->bc, pci_vtblk_resized, sc); in pci_vtblk_init()
584 ptr = (uint8_t *)&sc->vbsc_cfg + offset; in pci_vtblk_cfgread()
590 .pe_emu = "virtio-blk",