xref: /illumos-gate/usr/src/cmd/bhyve/common/pci_nvme.c (revision 5c4a5fe16715fb423db76577a6883b5bbecdbe45)
1*5c4a5fe1SAndy Fiddaman /*-
2*5c4a5fe1SAndy Fiddaman  * SPDX-License-Identifier: BSD-2-Clause
3*5c4a5fe1SAndy Fiddaman  *
4*5c4a5fe1SAndy Fiddaman  * Copyright (c) 2017 Shunsuke Mie
5*5c4a5fe1SAndy Fiddaman  * Copyright (c) 2018 Leon Dang
6*5c4a5fe1SAndy Fiddaman  * Copyright (c) 2020 Chuck Tuffli
7*5c4a5fe1SAndy Fiddaman  *
8*5c4a5fe1SAndy Fiddaman  * Redistribution and use in source and binary forms, with or without
9*5c4a5fe1SAndy Fiddaman  * modification, are permitted provided that the following conditions
10*5c4a5fe1SAndy Fiddaman  * are met:
11*5c4a5fe1SAndy Fiddaman  * 1. Redistributions of source code must retain the above copyright
12*5c4a5fe1SAndy Fiddaman  *    notice, this list of conditions and the following disclaimer.
13*5c4a5fe1SAndy Fiddaman  * 2. Redistributions in binary form must reproduce the above copyright
14*5c4a5fe1SAndy Fiddaman  *    notice, this list of conditions and the following disclaimer in the
15*5c4a5fe1SAndy Fiddaman  *    documentation and/or other materials provided with the distribution.
16*5c4a5fe1SAndy Fiddaman  *
17*5c4a5fe1SAndy Fiddaman  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18*5c4a5fe1SAndy Fiddaman  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19*5c4a5fe1SAndy Fiddaman  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20*5c4a5fe1SAndy Fiddaman  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21*5c4a5fe1SAndy Fiddaman  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22*5c4a5fe1SAndy Fiddaman  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23*5c4a5fe1SAndy Fiddaman  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24*5c4a5fe1SAndy Fiddaman  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25*5c4a5fe1SAndy Fiddaman  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26*5c4a5fe1SAndy Fiddaman  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27*5c4a5fe1SAndy Fiddaman  * SUCH DAMAGE.
28*5c4a5fe1SAndy Fiddaman  */
29*5c4a5fe1SAndy Fiddaman 
30*5c4a5fe1SAndy Fiddaman /*
31*5c4a5fe1SAndy Fiddaman  * bhyve PCIe-NVMe device emulation.
32*5c4a5fe1SAndy Fiddaman  *
33*5c4a5fe1SAndy Fiddaman  * options:
34*5c4a5fe1SAndy Fiddaman  *  -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z,eui64=#,dsm=<opt>
35*5c4a5fe1SAndy Fiddaman  *
36*5c4a5fe1SAndy Fiddaman  *  accepted devpath:
37*5c4a5fe1SAndy Fiddaman  *    /dev/blockdev
38*5c4a5fe1SAndy Fiddaman  *    /path/to/image
39*5c4a5fe1SAndy Fiddaman  *    ram=size_in_MiB
40*5c4a5fe1SAndy Fiddaman  *
41*5c4a5fe1SAndy Fiddaman  *  maxq    = max number of queues
42*5c4a5fe1SAndy Fiddaman  *  qsz     = max elements in each queue
43*5c4a5fe1SAndy Fiddaman  *  ioslots = max number of concurrent io requests
44*5c4a5fe1SAndy Fiddaman  *  sectsz  = sector size (defaults to blockif sector size)
45*5c4a5fe1SAndy Fiddaman  *  ser     = serial number (20-chars max)
46*5c4a5fe1SAndy Fiddaman  *  eui64   = IEEE Extended Unique Identifier (8 byte value)
47*5c4a5fe1SAndy Fiddaman  *  dsm     = DataSet Management support. Option is one of auto, enable, disable
48*5c4a5fe1SAndy Fiddaman  *
49*5c4a5fe1SAndy Fiddaman  */
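
/*
 * Illustrative example (not part of the original header): a guest device
 * using a RAM-backed namespace with explicit queue sizing might be
 * configured as
 *
 *   -s 4,nvme,ram=4096,maxq=4,qsz=1024,ioslots=16,ser=BHYVE0001
 *
 * The slot number, sizes and serial above are arbitrary; only the option
 * names come from the list above.
 */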
50*5c4a5fe1SAndy Fiddaman 
51*5c4a5fe1SAndy Fiddaman /* TODO:
52*5c4a5fe1SAndy Fiddaman     - create async event for smart and log
53*5c4a5fe1SAndy Fiddaman     - intr coalesce
54*5c4a5fe1SAndy Fiddaman  */
55*5c4a5fe1SAndy Fiddaman 
56*5c4a5fe1SAndy Fiddaman 
57*5c4a5fe1SAndy Fiddaman #include <sys/errno.h>
58*5c4a5fe1SAndy Fiddaman #include <sys/types.h>
59*5c4a5fe1SAndy Fiddaman #ifdef __FreeBSD__
60*5c4a5fe1SAndy Fiddaman #include <sys/crc16.h>
61*5c4a5fe1SAndy Fiddaman #else
62*5c4a5fe1SAndy Fiddaman #include "crc16.h"
63*5c4a5fe1SAndy Fiddaman #endif
64*5c4a5fe1SAndy Fiddaman #include <net/ieee_oui.h>
65*5c4a5fe1SAndy Fiddaman #ifndef __FreeBSD__
66*5c4a5fe1SAndy Fiddaman #include <endian.h>
67*5c4a5fe1SAndy Fiddaman #endif
68*5c4a5fe1SAndy Fiddaman 
69*5c4a5fe1SAndy Fiddaman #include <assert.h>
70*5c4a5fe1SAndy Fiddaman #include <pthread.h>
71*5c4a5fe1SAndy Fiddaman #include <pthread_np.h>
72*5c4a5fe1SAndy Fiddaman #include <semaphore.h>
73*5c4a5fe1SAndy Fiddaman #include <stdbool.h>
74*5c4a5fe1SAndy Fiddaman #include <stddef.h>
75*5c4a5fe1SAndy Fiddaman #include <stdint.h>
76*5c4a5fe1SAndy Fiddaman #include <stdio.h>
77*5c4a5fe1SAndy Fiddaman #include <stdlib.h>
78*5c4a5fe1SAndy Fiddaman #include <string.h>
79*5c4a5fe1SAndy Fiddaman 
80*5c4a5fe1SAndy Fiddaman #include <machine/atomic.h>
81*5c4a5fe1SAndy Fiddaman #include <machine/vmm.h>
82*5c4a5fe1SAndy Fiddaman #include <vmmapi.h>
83*5c4a5fe1SAndy Fiddaman 
84*5c4a5fe1SAndy Fiddaman #include <dev/nvme/nvme.h>
85*5c4a5fe1SAndy Fiddaman 
86*5c4a5fe1SAndy Fiddaman #include "bhyverun.h"
87*5c4a5fe1SAndy Fiddaman #include "block_if.h"
88*5c4a5fe1SAndy Fiddaman #include "config.h"
89*5c4a5fe1SAndy Fiddaman #include "debug.h"
90*5c4a5fe1SAndy Fiddaman #include "pci_emul.h"
91*5c4a5fe1SAndy Fiddaman 
92*5c4a5fe1SAndy Fiddaman 
93*5c4a5fe1SAndy Fiddaman static int nvme_debug = 0;
94*5c4a5fe1SAndy Fiddaman #define	DPRINTF(fmt, args...) if (nvme_debug) PRINTLN(fmt, ##args)
95*5c4a5fe1SAndy Fiddaman #define	WPRINTF(fmt, args...) PRINTLN(fmt, ##args)
96*5c4a5fe1SAndy Fiddaman 
97*5c4a5fe1SAndy Fiddaman /* defaults; can be overridden */
98*5c4a5fe1SAndy Fiddaman #define	NVME_MSIX_BAR		4
99*5c4a5fe1SAndy Fiddaman 
100*5c4a5fe1SAndy Fiddaman #define	NVME_IOSLOTS		8
101*5c4a5fe1SAndy Fiddaman 
102*5c4a5fe1SAndy Fiddaman /* The NVMe spec defines bits 13:4 in BAR0 as reserved */
103*5c4a5fe1SAndy Fiddaman #define NVME_MMIO_SPACE_MIN	(1 << 14)
104*5c4a5fe1SAndy Fiddaman 
105*5c4a5fe1SAndy Fiddaman #define	NVME_QUEUES		16
106*5c4a5fe1SAndy Fiddaman #define	NVME_MAX_QENTRIES	2048
107*5c4a5fe1SAndy Fiddaman /* Memory Page size Minimum reported in CAP register */
108*5c4a5fe1SAndy Fiddaman #define	NVME_MPSMIN		0
109*5c4a5fe1SAndy Fiddaman /* MPSMIN converted to bytes */
110*5c4a5fe1SAndy Fiddaman #define	NVME_MPSMIN_BYTES	(1 << (12 + NVME_MPSMIN))
111*5c4a5fe1SAndy Fiddaman 
112*5c4a5fe1SAndy Fiddaman #define	NVME_PRP2_ITEMS		(PAGE_SIZE/sizeof(uint64_t))
113*5c4a5fe1SAndy Fiddaman #define	NVME_MDTS		9
114*5c4a5fe1SAndy Fiddaman /* Note the + 1 allows for the initial descriptor to not be page aligned */
115*5c4a5fe1SAndy Fiddaman #define	NVME_MAX_IOVEC		((1 << NVME_MDTS) + 1)
116*5c4a5fe1SAndy Fiddaman #define	NVME_MAX_DATA_SIZE	((1 << NVME_MDTS) * NVME_MPSMIN_BYTES)
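
/*
 * Worked example: with NVME_MPSMIN = 0 the minimum memory page size is
 * 1 << (12 + 0) = 4096 bytes, so NVME_MDTS = 9 advertises a maximum data
 * transfer size of (1 << 9) * 4096 bytes = 2 MiB per command.
 */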
117*5c4a5fe1SAndy Fiddaman 
118*5c4a5fe1SAndy Fiddaman /* This is a synthetic status code to indicate there is no status */
119*5c4a5fe1SAndy Fiddaman #define NVME_NO_STATUS		0xffff
120*5c4a5fe1SAndy Fiddaman #define NVME_COMPLETION_VALID(c)	((c).status != NVME_NO_STATUS)
121*5c4a5fe1SAndy Fiddaman 
122*5c4a5fe1SAndy Fiddaman /* Reported temperature in Kelvin (i.e. room temperature) */
123*5c4a5fe1SAndy Fiddaman #define NVME_TEMPERATURE 296
124*5c4a5fe1SAndy Fiddaman 
125*5c4a5fe1SAndy Fiddaman /* helpers */
126*5c4a5fe1SAndy Fiddaman 
127*5c4a5fe1SAndy Fiddaman /* Convert a zero-based value into a one-based value */
128*5c4a5fe1SAndy Fiddaman #define ONE_BASED(zero)		((zero) + 1)
129*5c4a5fe1SAndy Fiddaman /* Convert a one-based value into a zero-based value */
130*5c4a5fe1SAndy Fiddaman #define ZERO_BASED(one)		((one)  - 1)
131*5c4a5fe1SAndy Fiddaman 
132*5c4a5fe1SAndy Fiddaman /* Encode number of SQ's and CQ's for Set/Get Features */
133*5c4a5fe1SAndy Fiddaman #define NVME_FEATURE_NUM_QUEUES(sc) \
134*5c4a5fe1SAndy Fiddaman 	(ZERO_BASED((sc)->num_squeues) & 0xffff) | \
135*5c4a5fe1SAndy Fiddaman 	(ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16
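
/*
 * Worked example (illustrative): with num_squeues = 8 and num_cqueues = 8,
 * NVME_FEATURE_NUM_QUEUES() packs the zero-based counts as
 * (7 & 0xffff) | ((7 & 0xffff) << 16) == 0x00070007, i.e. the SQ count in
 * the low word and the CQ count in the high word of the feature dword.
 */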
136*5c4a5fe1SAndy Fiddaman 
137*5c4a5fe1SAndy Fiddaman #define	NVME_DOORBELL_OFFSET	offsetof(struct nvme_registers, doorbell)
138*5c4a5fe1SAndy Fiddaman 
139*5c4a5fe1SAndy Fiddaman enum nvme_controller_register_offsets {
140*5c4a5fe1SAndy Fiddaman 	NVME_CR_CAP_LOW = 0x00,
141*5c4a5fe1SAndy Fiddaman 	NVME_CR_CAP_HI  = 0x04,
142*5c4a5fe1SAndy Fiddaman 	NVME_CR_VS      = 0x08,
143*5c4a5fe1SAndy Fiddaman 	NVME_CR_INTMS   = 0x0c,
144*5c4a5fe1SAndy Fiddaman 	NVME_CR_INTMC   = 0x10,
145*5c4a5fe1SAndy Fiddaman 	NVME_CR_CC      = 0x14,
146*5c4a5fe1SAndy Fiddaman 	NVME_CR_CSTS    = 0x1c,
147*5c4a5fe1SAndy Fiddaman 	NVME_CR_NSSR    = 0x20,
148*5c4a5fe1SAndy Fiddaman 	NVME_CR_AQA     = 0x24,
149*5c4a5fe1SAndy Fiddaman 	NVME_CR_ASQ_LOW = 0x28,
150*5c4a5fe1SAndy Fiddaman 	NVME_CR_ASQ_HI  = 0x2c,
151*5c4a5fe1SAndy Fiddaman 	NVME_CR_ACQ_LOW = 0x30,
152*5c4a5fe1SAndy Fiddaman 	NVME_CR_ACQ_HI  = 0x34,
153*5c4a5fe1SAndy Fiddaman };
154*5c4a5fe1SAndy Fiddaman 
155*5c4a5fe1SAndy Fiddaman enum nvme_cmd_cdw11 {
156*5c4a5fe1SAndy Fiddaman 	NVME_CMD_CDW11_PC  = 0x0001,
157*5c4a5fe1SAndy Fiddaman 	NVME_CMD_CDW11_IEN = 0x0002,
158*5c4a5fe1SAndy Fiddaman 	NVME_CMD_CDW11_IV  = 0xFFFF0000,
159*5c4a5fe1SAndy Fiddaman };
160*5c4a5fe1SAndy Fiddaman 
161*5c4a5fe1SAndy Fiddaman enum nvme_copy_dir {
162*5c4a5fe1SAndy Fiddaman 	NVME_COPY_TO_PRP,
163*5c4a5fe1SAndy Fiddaman 	NVME_COPY_FROM_PRP,
164*5c4a5fe1SAndy Fiddaman };
165*5c4a5fe1SAndy Fiddaman 
166*5c4a5fe1SAndy Fiddaman #define	NVME_CQ_INTEN	0x01
167*5c4a5fe1SAndy Fiddaman #define	NVME_CQ_INTCOAL	0x02
168*5c4a5fe1SAndy Fiddaman 
169*5c4a5fe1SAndy Fiddaman struct nvme_completion_queue {
170*5c4a5fe1SAndy Fiddaman 	struct nvme_completion *qbase;
171*5c4a5fe1SAndy Fiddaman 	pthread_mutex_t	mtx;
172*5c4a5fe1SAndy Fiddaman 	uint32_t	size;
173*5c4a5fe1SAndy Fiddaman 	uint16_t	tail; /* nvme progress */
174*5c4a5fe1SAndy Fiddaman 	uint16_t	head; /* guest progress */
175*5c4a5fe1SAndy Fiddaman 	uint16_t	intr_vec;
176*5c4a5fe1SAndy Fiddaman 	uint32_t	intr_en;
177*5c4a5fe1SAndy Fiddaman };
178*5c4a5fe1SAndy Fiddaman 
179*5c4a5fe1SAndy Fiddaman struct nvme_submission_queue {
180*5c4a5fe1SAndy Fiddaman 	struct nvme_command *qbase;
181*5c4a5fe1SAndy Fiddaman 	pthread_mutex_t	mtx;
182*5c4a5fe1SAndy Fiddaman 	uint32_t	size;
183*5c4a5fe1SAndy Fiddaman 	uint16_t	head; /* nvme progress */
184*5c4a5fe1SAndy Fiddaman 	uint16_t	tail; /* guest progress */
185*5c4a5fe1SAndy Fiddaman 	uint16_t	cqid; /* completion queue id */
186*5c4a5fe1SAndy Fiddaman 	int		qpriority;
187*5c4a5fe1SAndy Fiddaman };
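
/*
 * Note on head/tail ownership (summary, not from the original source): for a
 * submission queue the guest advances 'tail' via its doorbell and this
 * emulation advances 'head' as it consumes commands; for a completion queue
 * the emulation writes entries at 'tail' and the guest acknowledges them by
 * moving 'head' via its doorbell, matching the "nvme progress" and
 * "guest progress" annotations above.
 */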
188*5c4a5fe1SAndy Fiddaman 
189*5c4a5fe1SAndy Fiddaman enum nvme_storage_type {
190*5c4a5fe1SAndy Fiddaman 	NVME_STOR_BLOCKIF = 0,
191*5c4a5fe1SAndy Fiddaman 	NVME_STOR_RAM = 1,
192*5c4a5fe1SAndy Fiddaman };
193*5c4a5fe1SAndy Fiddaman 
194*5c4a5fe1SAndy Fiddaman struct pci_nvme_blockstore {
195*5c4a5fe1SAndy Fiddaman 	enum nvme_storage_type type;
196*5c4a5fe1SAndy Fiddaman 	void		*ctx;
197*5c4a5fe1SAndy Fiddaman 	uint64_t	size;
198*5c4a5fe1SAndy Fiddaman 	uint32_t	sectsz;
199*5c4a5fe1SAndy Fiddaman 	uint32_t	sectsz_bits;
200*5c4a5fe1SAndy Fiddaman 	uint64_t	eui64;
201*5c4a5fe1SAndy Fiddaman 	uint32_t	deallocate:1;
202*5c4a5fe1SAndy Fiddaman };
203*5c4a5fe1SAndy Fiddaman 
204*5c4a5fe1SAndy Fiddaman /*
205*5c4a5fe1SAndy Fiddaman  * Calculate the number of additional page descriptors for guest IO requests
206*5c4a5fe1SAndy Fiddaman  * based on the advertised Max Data Transfer Size (MDTS) and given the number of
207*5c4a5fe1SAndy Fiddaman  * default iovec's in a struct blockif_req.
208*5c4a5fe1SAndy Fiddaman  */
209*5c4a5fe1SAndy Fiddaman #define MDTS_PAD_SIZE \
210*5c4a5fe1SAndy Fiddaman 	( NVME_MAX_IOVEC > BLOCKIF_IOV_MAX ? \
211*5c4a5fe1SAndy Fiddaman 	  NVME_MAX_IOVEC - BLOCKIF_IOV_MAX : \
212*5c4a5fe1SAndy Fiddaman 	  0 )
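
/*
 * Worked example (illustrative): with NVME_MDTS = 9, NVME_MAX_IOVEC is
 * (1 << 9) + 1 = 513.  If BLOCKIF_IOV_MAX (from block_if.h) were, say, 128,
 * MDTS_PAD_SIZE would reserve 513 - 128 = 385 extra iovec entries in
 * iovpadding[] below; if BLOCKIF_IOV_MAX already covers NVME_MAX_IOVEC the
 * padding collapses to zero.
 */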
213*5c4a5fe1SAndy Fiddaman 
214*5c4a5fe1SAndy Fiddaman struct pci_nvme_ioreq {
215*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc *sc;
216*5c4a5fe1SAndy Fiddaman 	STAILQ_ENTRY(pci_nvme_ioreq) link;
217*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *nvme_sq;
218*5c4a5fe1SAndy Fiddaman 	uint16_t	sqid;
219*5c4a5fe1SAndy Fiddaman 
220*5c4a5fe1SAndy Fiddaman 	/* command information */
221*5c4a5fe1SAndy Fiddaman 	uint16_t	opc;
222*5c4a5fe1SAndy Fiddaman 	uint16_t	cid;
223*5c4a5fe1SAndy Fiddaman 	uint32_t	nsid;
224*5c4a5fe1SAndy Fiddaman 
225*5c4a5fe1SAndy Fiddaman 	uint64_t	prev_gpaddr;
226*5c4a5fe1SAndy Fiddaman 	size_t		prev_size;
227*5c4a5fe1SAndy Fiddaman 	size_t		bytes;
228*5c4a5fe1SAndy Fiddaman 
229*5c4a5fe1SAndy Fiddaman 	struct blockif_req io_req;
230*5c4a5fe1SAndy Fiddaman 
231*5c4a5fe1SAndy Fiddaman 	struct iovec	iovpadding[MDTS_PAD_SIZE];
232*5c4a5fe1SAndy Fiddaman };
233*5c4a5fe1SAndy Fiddaman 
234*5c4a5fe1SAndy Fiddaman enum nvme_dsm_type {
235*5c4a5fe1SAndy Fiddaman 	/* Dataset Management bit in ONCS reflects backing storage capability */
236*5c4a5fe1SAndy Fiddaman 	NVME_DATASET_MANAGEMENT_AUTO,
237*5c4a5fe1SAndy Fiddaman 	/* Unconditionally set Dataset Management bit in ONCS */
238*5c4a5fe1SAndy Fiddaman 	NVME_DATASET_MANAGEMENT_ENABLE,
239*5c4a5fe1SAndy Fiddaman 	/* Unconditionally clear Dataset Management bit in ONCS */
240*5c4a5fe1SAndy Fiddaman 	NVME_DATASET_MANAGEMENT_DISABLE,
241*5c4a5fe1SAndy Fiddaman };
242*5c4a5fe1SAndy Fiddaman 
243*5c4a5fe1SAndy Fiddaman struct pci_nvme_softc;
244*5c4a5fe1SAndy Fiddaman struct nvme_feature_obj;
245*5c4a5fe1SAndy Fiddaman 
246*5c4a5fe1SAndy Fiddaman typedef void (*nvme_feature_cb)(struct pci_nvme_softc *,
247*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
248*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
249*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
250*5c4a5fe1SAndy Fiddaman 
251*5c4a5fe1SAndy Fiddaman struct nvme_feature_obj {
252*5c4a5fe1SAndy Fiddaman 	uint32_t	cdw11;
253*5c4a5fe1SAndy Fiddaman 	nvme_feature_cb	set;
254*5c4a5fe1SAndy Fiddaman 	nvme_feature_cb	get;
255*5c4a5fe1SAndy Fiddaman 	bool namespace_specific;
256*5c4a5fe1SAndy Fiddaman };
257*5c4a5fe1SAndy Fiddaman 
258*5c4a5fe1SAndy Fiddaman #define NVME_FID_MAX		(NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION + 1)
259*5c4a5fe1SAndy Fiddaman 
260*5c4a5fe1SAndy Fiddaman typedef enum {
261*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_ERROR = 0,
262*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_SMART,
263*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_NOTICE,
264*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_IO_CMD = 6,
265*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_VENDOR = 7,
266*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AE_TYPE_MAX		/* Must be last */
267*5c4a5fe1SAndy Fiddaman } pci_nvme_async_type;
268*5c4a5fe1SAndy Fiddaman 
269*5c4a5fe1SAndy Fiddaman /* Asynchronous Event Requests */
270*5c4a5fe1SAndy Fiddaman struct pci_nvme_aer {
271*5c4a5fe1SAndy Fiddaman 	STAILQ_ENTRY(pci_nvme_aer) link;
272*5c4a5fe1SAndy Fiddaman 	uint16_t	cid;	/* Command ID of the submitted AER */
273*5c4a5fe1SAndy Fiddaman };
274*5c4a5fe1SAndy Fiddaman 
275*5c4a5fe1SAndy Fiddaman /** Asynchronous Event Information - Notice */
276*5c4a5fe1SAndy Fiddaman typedef enum {
277*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED = 0,
278*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_FW_ACTIVATION,
279*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE,
280*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_ANA_CHANGE,
281*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE,
282*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT,
283*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE,
284*5c4a5fe1SAndy Fiddaman 	PCI_NVME_AEI_NOTICE_MAX,
285*5c4a5fe1SAndy Fiddaman } pci_nvme_async_event_info_notice;
286*5c4a5fe1SAndy Fiddaman 
287*5c4a5fe1SAndy Fiddaman #define PCI_NVME_AEI_NOTICE_SHIFT		8
288*5c4a5fe1SAndy Fiddaman #define PCI_NVME_AEI_NOTICE_MASK(event)	(1 << (event + PCI_NVME_AEI_NOTICE_SHIFT))
289*5c4a5fe1SAndy Fiddaman 
290*5c4a5fe1SAndy Fiddaman /* Asynchronous Event Notifications */
291*5c4a5fe1SAndy Fiddaman struct pci_nvme_aen {
292*5c4a5fe1SAndy Fiddaman 	pci_nvme_async_type atype;
293*5c4a5fe1SAndy Fiddaman 	uint32_t	event_data;
294*5c4a5fe1SAndy Fiddaman 	bool		posted;
295*5c4a5fe1SAndy Fiddaman };
296*5c4a5fe1SAndy Fiddaman 
297*5c4a5fe1SAndy Fiddaman /*
298*5c4a5fe1SAndy Fiddaman  * By default, enable all Asynchronous Event Notifications:
299*5c4a5fe1SAndy Fiddaman  *     SMART / Health Critical Warnings
300*5c4a5fe1SAndy Fiddaman  *     Namespace Attribute Notices
301*5c4a5fe1SAndy Fiddaman  */
302*5c4a5fe1SAndy Fiddaman #define PCI_NVME_AEN_DEFAULT_MASK	0x11f
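
/*
 * Illustrative breakdown of the default mask: bits 4:0 (0x1f) enable the
 * five SMART / Health critical warning events, and bit 8
 * (PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED shifted by PCI_NVME_AEI_NOTICE_SHIFT)
 * enables Namespace Attribute Notices, giving 0x11f.
 */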
303*5c4a5fe1SAndy Fiddaman 
304*5c4a5fe1SAndy Fiddaman typedef enum {
305*5c4a5fe1SAndy Fiddaman 	NVME_CNTRLTYPE_IO = 1,
306*5c4a5fe1SAndy Fiddaman 	NVME_CNTRLTYPE_DISCOVERY = 2,
307*5c4a5fe1SAndy Fiddaman 	NVME_CNTRLTYPE_ADMIN = 3,
308*5c4a5fe1SAndy Fiddaman } pci_nvme_cntrl_type;
309*5c4a5fe1SAndy Fiddaman 
310*5c4a5fe1SAndy Fiddaman struct pci_nvme_softc {
311*5c4a5fe1SAndy Fiddaman 	struct pci_devinst *nsc_pi;
312*5c4a5fe1SAndy Fiddaman 
313*5c4a5fe1SAndy Fiddaman 	pthread_mutex_t	mtx;
314*5c4a5fe1SAndy Fiddaman 
315*5c4a5fe1SAndy Fiddaman 	struct nvme_registers regs;
316*5c4a5fe1SAndy Fiddaman 
317*5c4a5fe1SAndy Fiddaman 	struct nvme_namespace_data  nsdata;
318*5c4a5fe1SAndy Fiddaman 	struct nvme_controller_data ctrldata;
319*5c4a5fe1SAndy Fiddaman 	struct nvme_error_information_entry err_log;
320*5c4a5fe1SAndy Fiddaman 	struct nvme_health_information_page health_log;
321*5c4a5fe1SAndy Fiddaman 	struct nvme_firmware_page fw_log;
322*5c4a5fe1SAndy Fiddaman 	struct nvme_ns_list ns_log;
323*5c4a5fe1SAndy Fiddaman 
324*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_blockstore nvstore;
325*5c4a5fe1SAndy Fiddaman 
326*5c4a5fe1SAndy Fiddaman 	uint16_t	max_qentries;	/* max entries per queue */
327*5c4a5fe1SAndy Fiddaman 	uint32_t	max_queues;	/* max number of IO SQ's or CQ's */
328*5c4a5fe1SAndy Fiddaman 	uint32_t	num_cqueues;
329*5c4a5fe1SAndy Fiddaman 	uint32_t	num_squeues;
330*5c4a5fe1SAndy Fiddaman 	bool		num_q_is_set; /* Has host set Number of Queues */
331*5c4a5fe1SAndy Fiddaman 
332*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_ioreq *ioreqs;
333*5c4a5fe1SAndy Fiddaman 	STAILQ_HEAD(, pci_nvme_ioreq) ioreqs_free; /* free list of ioreqs */
334*5c4a5fe1SAndy Fiddaman 	uint32_t	pending_ios;
335*5c4a5fe1SAndy Fiddaman 	uint32_t	ioslots;
336*5c4a5fe1SAndy Fiddaman 	sem_t		iosemlock;
337*5c4a5fe1SAndy Fiddaman 
338*5c4a5fe1SAndy Fiddaman 	/*
339*5c4a5fe1SAndy Fiddaman 	 * Memory mapped Submission and Completion queues
340*5c4a5fe1SAndy Fiddaman 	 * Each array includes both Admin and IO queues
341*5c4a5fe1SAndy Fiddaman 	 */
342*5c4a5fe1SAndy Fiddaman 	struct nvme_completion_queue *compl_queues;
343*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *submit_queues;
344*5c4a5fe1SAndy Fiddaman 
345*5c4a5fe1SAndy Fiddaman 	struct nvme_feature_obj feat[NVME_FID_MAX];
346*5c4a5fe1SAndy Fiddaman 
347*5c4a5fe1SAndy Fiddaman 	enum nvme_dsm_type dataset_management;
348*5c4a5fe1SAndy Fiddaman 
349*5c4a5fe1SAndy Fiddaman 	/* Accounting for SMART data */
350*5c4a5fe1SAndy Fiddaman 	__uint128_t	read_data_units;
351*5c4a5fe1SAndy Fiddaman 	__uint128_t	write_data_units;
352*5c4a5fe1SAndy Fiddaman 	__uint128_t	read_commands;
353*5c4a5fe1SAndy Fiddaman 	__uint128_t	write_commands;
354*5c4a5fe1SAndy Fiddaman 	uint32_t	read_dunits_remainder;
355*5c4a5fe1SAndy Fiddaman 	uint32_t	write_dunits_remainder;
356*5c4a5fe1SAndy Fiddaman 
357*5c4a5fe1SAndy Fiddaman 	STAILQ_HEAD(, pci_nvme_aer) aer_list;
358*5c4a5fe1SAndy Fiddaman 	pthread_mutex_t	aer_mtx;
359*5c4a5fe1SAndy Fiddaman 	uint32_t	aer_count;
360*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aen aen[PCI_NVME_AE_TYPE_MAX];
361*5c4a5fe1SAndy Fiddaman 	pthread_t	aen_tid;
362*5c4a5fe1SAndy Fiddaman 	pthread_mutex_t	aen_mtx;
363*5c4a5fe1SAndy Fiddaman 	pthread_cond_t	aen_cond;
364*5c4a5fe1SAndy Fiddaman };
365*5c4a5fe1SAndy Fiddaman 
366*5c4a5fe1SAndy Fiddaman 
367*5c4a5fe1SAndy Fiddaman static void pci_nvme_cq_update(struct pci_nvme_softc *sc,
368*5c4a5fe1SAndy Fiddaman     struct nvme_completion_queue *cq,
369*5c4a5fe1SAndy Fiddaman     uint32_t cdw0,
370*5c4a5fe1SAndy Fiddaman     uint16_t cid,
371*5c4a5fe1SAndy Fiddaman     uint16_t sqid,
372*5c4a5fe1SAndy Fiddaman     uint16_t status);
373*5c4a5fe1SAndy Fiddaman static struct pci_nvme_ioreq *pci_nvme_get_ioreq(struct pci_nvme_softc *);
374*5c4a5fe1SAndy Fiddaman static void pci_nvme_release_ioreq(struct pci_nvme_softc *, struct pci_nvme_ioreq *);
375*5c4a5fe1SAndy Fiddaman static void pci_nvme_io_done(struct blockif_req *, int);
376*5c4a5fe1SAndy Fiddaman 
377*5c4a5fe1SAndy Fiddaman /* Controller Configuration utils */
378*5c4a5fe1SAndy Fiddaman #define	NVME_CC_GET_EN(cc) \
379*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CC_REG_EN, cc)
380*5c4a5fe1SAndy Fiddaman #define	NVME_CC_GET_CSS(cc) \
381*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CC_REG_CSS, cc)
382*5c4a5fe1SAndy Fiddaman #define	NVME_CC_GET_SHN(cc) \
383*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CC_REG_SHN, cc)
384*5c4a5fe1SAndy Fiddaman #define	NVME_CC_GET_IOSQES(cc) \
385*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CC_REG_IOSQES, cc)
386*5c4a5fe1SAndy Fiddaman #define	NVME_CC_GET_IOCQES(cc) \
387*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CC_REG_IOCQES, cc)
388*5c4a5fe1SAndy Fiddaman 
389*5c4a5fe1SAndy Fiddaman #define	NVME_CC_WRITE_MASK \
390*5c4a5fe1SAndy Fiddaman 	(NVMEM(NVME_CC_REG_EN) | \
391*5c4a5fe1SAndy Fiddaman 	 NVMEM(NVME_CC_REG_IOSQES) | \
392*5c4a5fe1SAndy Fiddaman 	 NVMEM(NVME_CC_REG_IOCQES))
393*5c4a5fe1SAndy Fiddaman 
394*5c4a5fe1SAndy Fiddaman #define	NVME_CC_NEN_WRITE_MASK \
395*5c4a5fe1SAndy Fiddaman 	(NVMEM(NVME_CC_REG_CSS) | \
396*5c4a5fe1SAndy Fiddaman 	 NVMEM(NVME_CC_REG_MPS) | \
397*5c4a5fe1SAndy Fiddaman 	 NVMEM(NVME_CC_REG_AMS))
398*5c4a5fe1SAndy Fiddaman 
399*5c4a5fe1SAndy Fiddaman /* Controller Status utils */
400*5c4a5fe1SAndy Fiddaman #define	NVME_CSTS_GET_RDY(sts) \
401*5c4a5fe1SAndy Fiddaman 	NVMEV(NVME_CSTS_REG_RDY, sts)
402*5c4a5fe1SAndy Fiddaman 
403*5c4a5fe1SAndy Fiddaman #define	NVME_CSTS_RDY	(NVMEF(NVME_CSTS_REG_RDY, 1))
404*5c4a5fe1SAndy Fiddaman #define	NVME_CSTS_CFS	(NVMEF(NVME_CSTS_REG_CFS, 1))
405*5c4a5fe1SAndy Fiddaman 
406*5c4a5fe1SAndy Fiddaman /* Completion Queue status word utils */
407*5c4a5fe1SAndy Fiddaman #define	NVME_STATUS_P	(NVMEF(NVME_STATUS_P, 1))
408*5c4a5fe1SAndy Fiddaman #define	NVME_STATUS_MASK \
409*5c4a5fe1SAndy Fiddaman 	(NVMEM(NVME_STATUS_SCT) | \
410*5c4a5fe1SAndy Fiddaman 	 NVMEM(NVME_STATUS_SC))
411*5c4a5fe1SAndy Fiddaman 
412*5c4a5fe1SAndy Fiddaman #define NVME_ONCS_DSM	NVMEM(NVME_CTRLR_DATA_ONCS_DSM)
413*5c4a5fe1SAndy Fiddaman 
414*5c4a5fe1SAndy Fiddaman static void nvme_feature_invalid_cb(struct pci_nvme_softc *,
415*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
416*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
417*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
418*5c4a5fe1SAndy Fiddaman static void nvme_feature_temperature(struct pci_nvme_softc *,
419*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
420*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
421*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
422*5c4a5fe1SAndy Fiddaman static void nvme_feature_num_queues(struct pci_nvme_softc *,
423*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
424*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
425*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
426*5c4a5fe1SAndy Fiddaman static void nvme_feature_iv_config(struct pci_nvme_softc *,
427*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
428*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
429*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
430*5c4a5fe1SAndy Fiddaman static void nvme_feature_async_event(struct pci_nvme_softc *,
431*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *,
432*5c4a5fe1SAndy Fiddaman     struct nvme_command *,
433*5c4a5fe1SAndy Fiddaman     struct nvme_completion *);
434*5c4a5fe1SAndy Fiddaman 
435*5c4a5fe1SAndy Fiddaman static void *aen_thr(void *arg);
436*5c4a5fe1SAndy Fiddaman 
437*5c4a5fe1SAndy Fiddaman static __inline void
438*5c4a5fe1SAndy Fiddaman cpywithpad(char *dst, size_t dst_size, const char *src, char pad)
439*5c4a5fe1SAndy Fiddaman {
440*5c4a5fe1SAndy Fiddaman 	size_t len;
441*5c4a5fe1SAndy Fiddaman 
442*5c4a5fe1SAndy Fiddaman 	len = strnlen(src, dst_size);
443*5c4a5fe1SAndy Fiddaman 	memset(dst, pad, dst_size);
444*5c4a5fe1SAndy Fiddaman 	memcpy(dst, src, len);
445*5c4a5fe1SAndy Fiddaman }
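
/*
 * Usage sketch (illustrative): Identify data string fields are space-padded
 * rather than NUL-terminated, so pci_nvme_init_ctrldata() below uses e.g.
 *
 *   cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
 *
 * to fill the remainder of the Model Number field with spaces.
 */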
446*5c4a5fe1SAndy Fiddaman 
447*5c4a5fe1SAndy Fiddaman static __inline void
448*5c4a5fe1SAndy Fiddaman pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code)
449*5c4a5fe1SAndy Fiddaman {
450*5c4a5fe1SAndy Fiddaman 
451*5c4a5fe1SAndy Fiddaman 	*status &= ~NVME_STATUS_MASK;
452*5c4a5fe1SAndy Fiddaman 	*status |= NVMEF(NVME_STATUS_SCT, type) | NVMEF(NVME_STATUS_SC, code);
453*5c4a5fe1SAndy Fiddaman }
454*5c4a5fe1SAndy Fiddaman 
455*5c4a5fe1SAndy Fiddaman static __inline void
456*5c4a5fe1SAndy Fiddaman pci_nvme_status_genc(uint16_t *status, uint16_t code)
457*5c4a5fe1SAndy Fiddaman {
458*5c4a5fe1SAndy Fiddaman 
459*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_tc(status, NVME_SCT_GENERIC, code);
460*5c4a5fe1SAndy Fiddaman }
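
/*
 * Usage sketch (illustrative): a command handler rejecting a request with a
 * generic "Invalid Field in Command" status would call
 *
 *   pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
 *
 * which clears the previous SCT/SC bits and sets SCT to Generic Command
 * Status with the given status code.  The 'compl' variable name is only an
 * example.
 */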
461*5c4a5fe1SAndy Fiddaman 
462*5c4a5fe1SAndy Fiddaman /*
463*5c4a5fe1SAndy Fiddaman  * Initialize the requested number of IO Submission and Completion Queues.
464*5c4a5fe1SAndy Fiddaman  * Admin queues are allocated implicitly.
465*5c4a5fe1SAndy Fiddaman  */
466*5c4a5fe1SAndy Fiddaman static void
467*5c4a5fe1SAndy Fiddaman pci_nvme_init_queues(struct pci_nvme_softc *sc, uint32_t nsq, uint32_t ncq)
468*5c4a5fe1SAndy Fiddaman {
469*5c4a5fe1SAndy Fiddaman 	uint32_t i;
470*5c4a5fe1SAndy Fiddaman 
471*5c4a5fe1SAndy Fiddaman 	/*
472*5c4a5fe1SAndy Fiddaman 	 * Allocate and initialize the Submission Queues
473*5c4a5fe1SAndy Fiddaman 	 */
474*5c4a5fe1SAndy Fiddaman 	if (nsq > NVME_QUEUES) {
475*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: clamping number of SQ from %u to %u",
476*5c4a5fe1SAndy Fiddaman 					__func__, nsq, NVME_QUEUES);
477*5c4a5fe1SAndy Fiddaman 		nsq = NVME_QUEUES;
478*5c4a5fe1SAndy Fiddaman 	}
479*5c4a5fe1SAndy Fiddaman 
480*5c4a5fe1SAndy Fiddaman 	sc->num_squeues = nsq;
481*5c4a5fe1SAndy Fiddaman 
482*5c4a5fe1SAndy Fiddaman 	sc->submit_queues = calloc(sc->num_squeues + 1,
483*5c4a5fe1SAndy Fiddaman 				sizeof(struct nvme_submission_queue));
484*5c4a5fe1SAndy Fiddaman 	if (sc->submit_queues == NULL) {
485*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: SQ allocation failed", __func__);
486*5c4a5fe1SAndy Fiddaman 		sc->num_squeues = 0;
487*5c4a5fe1SAndy Fiddaman 	} else {
488*5c4a5fe1SAndy Fiddaman 		struct nvme_submission_queue *sq = sc->submit_queues;
489*5c4a5fe1SAndy Fiddaman 
490*5c4a5fe1SAndy Fiddaman 		for (i = 0; i < sc->num_squeues + 1; i++)
491*5c4a5fe1SAndy Fiddaman 			pthread_mutex_init(&sq[i].mtx, NULL);
492*5c4a5fe1SAndy Fiddaman 	}
493*5c4a5fe1SAndy Fiddaman 
494*5c4a5fe1SAndy Fiddaman 	/*
495*5c4a5fe1SAndy Fiddaman 	 * Allocate and initialize the Completion Queues
496*5c4a5fe1SAndy Fiddaman 	 */
497*5c4a5fe1SAndy Fiddaman 	if (ncq > NVME_QUEUES) {
498*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: clamping number of CQ from %u to %u",
499*5c4a5fe1SAndy Fiddaman 					__func__, ncq, NVME_QUEUES);
500*5c4a5fe1SAndy Fiddaman 		ncq = NVME_QUEUES;
501*5c4a5fe1SAndy Fiddaman 	}
502*5c4a5fe1SAndy Fiddaman 
503*5c4a5fe1SAndy Fiddaman 	sc->num_cqueues = ncq;
504*5c4a5fe1SAndy Fiddaman 
505*5c4a5fe1SAndy Fiddaman 	sc->compl_queues = calloc(sc->num_cqueues + 1,
506*5c4a5fe1SAndy Fiddaman 				sizeof(struct nvme_completion_queue));
507*5c4a5fe1SAndy Fiddaman 	if (sc->compl_queues == NULL) {
508*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: CQ allocation failed", __func__);
509*5c4a5fe1SAndy Fiddaman 		sc->num_cqueues = 0;
510*5c4a5fe1SAndy Fiddaman 	} else {
511*5c4a5fe1SAndy Fiddaman 		struct nvme_completion_queue *cq = sc->compl_queues;
512*5c4a5fe1SAndy Fiddaman 
513*5c4a5fe1SAndy Fiddaman 		for (i = 0; i < sc->num_cqueues + 1; i++)
514*5c4a5fe1SAndy Fiddaman 			pthread_mutex_init(&cq[i].mtx, NULL);
515*5c4a5fe1SAndy Fiddaman 	}
516*5c4a5fe1SAndy Fiddaman }
517*5c4a5fe1SAndy Fiddaman 
518*5c4a5fe1SAndy Fiddaman static void
519*5c4a5fe1SAndy Fiddaman pci_nvme_init_ctrldata(struct pci_nvme_softc *sc)
520*5c4a5fe1SAndy Fiddaman {
521*5c4a5fe1SAndy Fiddaman 	struct nvme_controller_data *cd = &sc->ctrldata;
522*5c4a5fe1SAndy Fiddaman 	int ret;
523*5c4a5fe1SAndy Fiddaman 
524*5c4a5fe1SAndy Fiddaman 	cd->vid = 0xFB5D;
525*5c4a5fe1SAndy Fiddaman 	cd->ssvid = 0x0000;
526*5c4a5fe1SAndy Fiddaman 
527*5c4a5fe1SAndy Fiddaman 	cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
528*5c4a5fe1SAndy Fiddaman 	cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' ');
529*5c4a5fe1SAndy Fiddaman 
530*5c4a5fe1SAndy Fiddaman 	/* Recommended Arbitration Burst (2^rab commands fetched from a queue at a time) */
531*5c4a5fe1SAndy Fiddaman 	cd->rab   = 4;
532*5c4a5fe1SAndy Fiddaman 
533*5c4a5fe1SAndy Fiddaman 	/* FreeBSD OUI */
534*5c4a5fe1SAndy Fiddaman 	cd->ieee[0] = 0xfc;
535*5c4a5fe1SAndy Fiddaman 	cd->ieee[1] = 0x9c;
536*5c4a5fe1SAndy Fiddaman 	cd->ieee[2] = 0x58;
537*5c4a5fe1SAndy Fiddaman 
538*5c4a5fe1SAndy Fiddaman 	cd->mic = 0;
539*5c4a5fe1SAndy Fiddaman 
540*5c4a5fe1SAndy Fiddaman 	cd->mdts = NVME_MDTS;	/* max data transfer size (2^mdts * CAP.MPSMIN) */
541*5c4a5fe1SAndy Fiddaman 
542*5c4a5fe1SAndy Fiddaman 	cd->ver = NVME_REV(1,4);
543*5c4a5fe1SAndy Fiddaman 
544*5c4a5fe1SAndy Fiddaman 	cd->cntrltype = NVME_CNTRLTYPE_IO;
545*5c4a5fe1SAndy Fiddaman 	cd->oacs = NVMEF(NVME_CTRLR_DATA_OACS_FORMAT, 1);
546*5c4a5fe1SAndy Fiddaman 	cd->oaes = NVMEM(NVME_CTRLR_DATA_OAES_NS_ATTR);
547*5c4a5fe1SAndy Fiddaman 	cd->acl = 2;
548*5c4a5fe1SAndy Fiddaman 	cd->aerl = 4;
549*5c4a5fe1SAndy Fiddaman 
550*5c4a5fe1SAndy Fiddaman 	/* Advertise 1, Read-only firmware slot */
551*5c4a5fe1SAndy Fiddaman 	cd->frmw = NVMEM(NVME_CTRLR_DATA_FRMW_SLOT1_RO) |
552*5c4a5fe1SAndy Fiddaman 	    NVMEF(NVME_CTRLR_DATA_FRMW_NUM_SLOTS, 1);
553*5c4a5fe1SAndy Fiddaman 	cd->lpa = 0;	/* TODO: support some simple things like SMART */
554*5c4a5fe1SAndy Fiddaman 	cd->elpe = 0;	/* max error log page entries */
555*5c4a5fe1SAndy Fiddaman 	/*
556*5c4a5fe1SAndy Fiddaman 	 * Report a single power state (zero-based value)
557*5c4a5fe1SAndy Fiddaman 	 * power_state[] values are left as zero to indicate "Not reported"
558*5c4a5fe1SAndy Fiddaman 	 */
559*5c4a5fe1SAndy Fiddaman 	cd->npss = 0;
560*5c4a5fe1SAndy Fiddaman 
561*5c4a5fe1SAndy Fiddaman 	/* Warning Composite Temperature Threshold */
562*5c4a5fe1SAndy Fiddaman 	cd->wctemp = 0x0157;
563*5c4a5fe1SAndy Fiddaman 	cd->cctemp = 0x0157;
564*5c4a5fe1SAndy Fiddaman 
565*5c4a5fe1SAndy Fiddaman 	/* SANICAP must not be 0 for Revision 1.4 and later NVMe Controllers */
566*5c4a5fe1SAndy Fiddaman 	cd->sanicap = NVMEF(NVME_CTRLR_DATA_SANICAP_NODMMAS,
567*5c4a5fe1SAndy Fiddaman 	    NVME_CTRLR_DATA_SANICAP_NODMMAS_NO);
568*5c4a5fe1SAndy Fiddaman 
569*5c4a5fe1SAndy Fiddaman 	cd->sqes = NVMEF(NVME_CTRLR_DATA_SQES_MAX, 6) |
570*5c4a5fe1SAndy Fiddaman 	    NVMEF(NVME_CTRLR_DATA_SQES_MIN, 6);
571*5c4a5fe1SAndy Fiddaman 	cd->cqes = NVMEF(NVME_CTRLR_DATA_CQES_MAX, 4) |
572*5c4a5fe1SAndy Fiddaman 	    NVMEF(NVME_CTRLR_DATA_CQES_MIN, 4);
573*5c4a5fe1SAndy Fiddaman 	cd->nn = 1;	/* number of namespaces */
574*5c4a5fe1SAndy Fiddaman 
575*5c4a5fe1SAndy Fiddaman 	cd->oncs = 0;
576*5c4a5fe1SAndy Fiddaman 	switch (sc->dataset_management) {
577*5c4a5fe1SAndy Fiddaman 	case NVME_DATASET_MANAGEMENT_AUTO:
578*5c4a5fe1SAndy Fiddaman 		if (sc->nvstore.deallocate)
579*5c4a5fe1SAndy Fiddaman 			cd->oncs |= NVME_ONCS_DSM;
580*5c4a5fe1SAndy Fiddaman 		break;
581*5c4a5fe1SAndy Fiddaman 	case NVME_DATASET_MANAGEMENT_ENABLE:
582*5c4a5fe1SAndy Fiddaman 		cd->oncs |= NVME_ONCS_DSM;
583*5c4a5fe1SAndy Fiddaman 		break;
584*5c4a5fe1SAndy Fiddaman 	default:
585*5c4a5fe1SAndy Fiddaman 		break;
586*5c4a5fe1SAndy Fiddaman 	}
587*5c4a5fe1SAndy Fiddaman 
588*5c4a5fe1SAndy Fiddaman 	cd->fna = NVMEM(NVME_CTRLR_DATA_FNA_FORMAT_ALL);
589*5c4a5fe1SAndy Fiddaman 
590*5c4a5fe1SAndy Fiddaman 	cd->vwc = NVMEF(NVME_CTRLR_DATA_VWC_ALL, NVME_CTRLR_DATA_VWC_ALL_NO);
591*5c4a5fe1SAndy Fiddaman 
592*5c4a5fe1SAndy Fiddaman #ifdef	__FreeBSD__
593*5c4a5fe1SAndy Fiddaman 	ret = snprintf(cd->subnqn, sizeof(cd->subnqn),
594*5c4a5fe1SAndy Fiddaman 	    "nqn.2013-12.org.freebsd:bhyve-%s-%u-%u-%u",
595*5c4a5fe1SAndy Fiddaman 	    get_config_value("name"), sc->nsc_pi->pi_bus,
596*5c4a5fe1SAndy Fiddaman 	    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
597*5c4a5fe1SAndy Fiddaman #else
598*5c4a5fe1SAndy Fiddaman 	ret = snprintf((char *)cd->subnqn, sizeof (cd->subnqn),
599*5c4a5fe1SAndy Fiddaman 	    "nqn.2013-12.org.illumos:bhyve-%s-%u-%u-%u",
600*5c4a5fe1SAndy Fiddaman 	    get_config_value("name"), sc->nsc_pi->pi_bus,
601*5c4a5fe1SAndy Fiddaman 	    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
602*5c4a5fe1SAndy Fiddaman #endif
603*5c4a5fe1SAndy Fiddaman 	if ((ret < 0) || ((unsigned)ret > sizeof(cd->subnqn)))
604*5c4a5fe1SAndy Fiddaman 		EPRINTLN("%s: error setting subnqn (%d)", __func__, ret);
605*5c4a5fe1SAndy Fiddaman }
606*5c4a5fe1SAndy Fiddaman 
607*5c4a5fe1SAndy Fiddaman static void
608*5c4a5fe1SAndy Fiddaman pci_nvme_init_nsdata_size(struct pci_nvme_blockstore *nvstore,
609*5c4a5fe1SAndy Fiddaman     struct nvme_namespace_data *nd)
610*5c4a5fe1SAndy Fiddaman {
611*5c4a5fe1SAndy Fiddaman 
612*5c4a5fe1SAndy Fiddaman 	/* Get capacity and block size information from backing store */
613*5c4a5fe1SAndy Fiddaman 	nd->nsze = nvstore->size / nvstore->sectsz;
614*5c4a5fe1SAndy Fiddaman 	nd->ncap = nd->nsze;
615*5c4a5fe1SAndy Fiddaman 	nd->nuse = nd->nsze;
616*5c4a5fe1SAndy Fiddaman }
617*5c4a5fe1SAndy Fiddaman 
618*5c4a5fe1SAndy Fiddaman static void
619*5c4a5fe1SAndy Fiddaman pci_nvme_init_nsdata(struct pci_nvme_softc *sc,
620*5c4a5fe1SAndy Fiddaman     struct nvme_namespace_data *nd, uint32_t nsid,
621*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore)
622*5c4a5fe1SAndy Fiddaman {
623*5c4a5fe1SAndy Fiddaman 
624*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_nsdata_size(nvstore, nd);
625*5c4a5fe1SAndy Fiddaman 
626*5c4a5fe1SAndy Fiddaman 	if (nvstore->type == NVME_STOR_BLOCKIF)
627*5c4a5fe1SAndy Fiddaman 		nvstore->deallocate = blockif_candelete(nvstore->ctx);
628*5c4a5fe1SAndy Fiddaman 
629*5c4a5fe1SAndy Fiddaman 	nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */
630*5c4a5fe1SAndy Fiddaman 	nd->flbas = 0;
631*5c4a5fe1SAndy Fiddaman 
632*5c4a5fe1SAndy Fiddaman 	/* Create an EUI-64 if user did not provide one */
633*5c4a5fe1SAndy Fiddaman 	if (nvstore->eui64 == 0) {
634*5c4a5fe1SAndy Fiddaman 		char *data = NULL;
635*5c4a5fe1SAndy Fiddaman 		uint64_t eui64 = nvstore->eui64;
636*5c4a5fe1SAndy Fiddaman 
637*5c4a5fe1SAndy Fiddaman 		asprintf(&data, "%s%u%u%u", get_config_value("name"),
638*5c4a5fe1SAndy Fiddaman 		    sc->nsc_pi->pi_bus, sc->nsc_pi->pi_slot,
639*5c4a5fe1SAndy Fiddaman 		    sc->nsc_pi->pi_func);
640*5c4a5fe1SAndy Fiddaman 
641*5c4a5fe1SAndy Fiddaman 		if (data != NULL) {
642*5c4a5fe1SAndy Fiddaman 			eui64 = OUI_FREEBSD_NVME_LOW | crc16(0, data, strlen(data));
643*5c4a5fe1SAndy Fiddaman 			free(data);
644*5c4a5fe1SAndy Fiddaman 		}
645*5c4a5fe1SAndy Fiddaman 		nvstore->eui64 = (eui64 << 16) | (nsid & 0xffff);
646*5c4a5fe1SAndy Fiddaman 	}
647*5c4a5fe1SAndy Fiddaman 	be64enc(nd->eui64, nvstore->eui64);
648*5c4a5fe1SAndy Fiddaman 
649*5c4a5fe1SAndy Fiddaman 	/* LBA data-sz = 2^lbads */
650*5c4a5fe1SAndy Fiddaman 	nd->lbaf[0] = NVMEF(NVME_NS_DATA_LBAF_LBADS, nvstore->sectsz_bits);
651*5c4a5fe1SAndy Fiddaman }
652*5c4a5fe1SAndy Fiddaman 
653*5c4a5fe1SAndy Fiddaman static void
654*5c4a5fe1SAndy Fiddaman pci_nvme_init_logpages(struct pci_nvme_softc *sc)
655*5c4a5fe1SAndy Fiddaman {
656*5c4a5fe1SAndy Fiddaman 	__uint128_t power_cycles = 1;
657*5c4a5fe1SAndy Fiddaman 
658*5c4a5fe1SAndy Fiddaman 	memset(&sc->err_log, 0, sizeof(sc->err_log));
659*5c4a5fe1SAndy Fiddaman 	memset(&sc->health_log, 0, sizeof(sc->health_log));
660*5c4a5fe1SAndy Fiddaman 	memset(&sc->fw_log, 0, sizeof(sc->fw_log));
661*5c4a5fe1SAndy Fiddaman 	memset(&sc->ns_log, 0, sizeof(sc->ns_log));
662*5c4a5fe1SAndy Fiddaman 
663*5c4a5fe1SAndy Fiddaman 	/* Set read/write remainder to round up according to spec */
664*5c4a5fe1SAndy Fiddaman 	sc->read_dunits_remainder = 999;
665*5c4a5fe1SAndy Fiddaman 	sc->write_dunits_remainder = 999;
666*5c4a5fe1SAndy Fiddaman 
667*5c4a5fe1SAndy Fiddaman 	/* Set nominal Health values checked by implementations */
668*5c4a5fe1SAndy Fiddaman 	sc->health_log.temperature = NVME_TEMPERATURE;
669*5c4a5fe1SAndy Fiddaman 	sc->health_log.available_spare = 100;
670*5c4a5fe1SAndy Fiddaman 	sc->health_log.available_spare_threshold = 10;
671*5c4a5fe1SAndy Fiddaman 
672*5c4a5fe1SAndy Fiddaman 	/* Set Active Firmware Info to slot 1 */
673*5c4a5fe1SAndy Fiddaman 	sc->fw_log.afi = NVMEF(NVME_FIRMWARE_PAGE_AFI_SLOT, 1);
674*5c4a5fe1SAndy Fiddaman 	memcpy(&sc->fw_log.revision[0], sc->ctrldata.fr,
675*5c4a5fe1SAndy Fiddaman 	    sizeof(sc->fw_log.revision[0]));
676*5c4a5fe1SAndy Fiddaman 
677*5c4a5fe1SAndy Fiddaman 	memcpy(&sc->health_log.power_cycles, &power_cycles,
678*5c4a5fe1SAndy Fiddaman 	    sizeof(sc->health_log.power_cycles));
679*5c4a5fe1SAndy Fiddaman }
680*5c4a5fe1SAndy Fiddaman 
681*5c4a5fe1SAndy Fiddaman static void
682*5c4a5fe1SAndy Fiddaman pci_nvme_init_features(struct pci_nvme_softc *sc)
683*5c4a5fe1SAndy Fiddaman {
684*5c4a5fe1SAndy Fiddaman 	enum nvme_feature	fid;
685*5c4a5fe1SAndy Fiddaman 
686*5c4a5fe1SAndy Fiddaman 	for (fid = 0; fid < NVME_FID_MAX; fid++) {
687*5c4a5fe1SAndy Fiddaman 		switch (fid) {
688*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_ARBITRATION:
689*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_POWER_MANAGEMENT:
690*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_INTERRUPT_COALESCING: //XXX
691*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_WRITE_ATOMICITY:
692*5c4a5fe1SAndy Fiddaman 			/* Mandatory but no special handling required */
693*5c4a5fe1SAndy Fiddaman 		//XXX hang - case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
694*5c4a5fe1SAndy Fiddaman 		//XXX hang - case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
695*5c4a5fe1SAndy Fiddaman 		//		  this returns a data buffer
696*5c4a5fe1SAndy Fiddaman 			break;
697*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_TEMPERATURE_THRESHOLD:
698*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].set = nvme_feature_temperature;
699*5c4a5fe1SAndy Fiddaman 			break;
700*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_ERROR_RECOVERY:
701*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].namespace_specific = true;
702*5c4a5fe1SAndy Fiddaman 			break;
703*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_NUMBER_OF_QUEUES:
704*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].set = nvme_feature_num_queues;
705*5c4a5fe1SAndy Fiddaman 			break;
706*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
707*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].set = nvme_feature_iv_config;
708*5c4a5fe1SAndy Fiddaman 			break;
709*5c4a5fe1SAndy Fiddaman 		case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
710*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].set = nvme_feature_async_event;
711*5c4a5fe1SAndy Fiddaman 			/* Enable all AENs by default */
712*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].cdw11 = PCI_NVME_AEN_DEFAULT_MASK;
713*5c4a5fe1SAndy Fiddaman 			break;
714*5c4a5fe1SAndy Fiddaman 		default:
715*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].set = nvme_feature_invalid_cb;
716*5c4a5fe1SAndy Fiddaman 			sc->feat[fid].get = nvme_feature_invalid_cb;
717*5c4a5fe1SAndy Fiddaman 		}
718*5c4a5fe1SAndy Fiddaman 	}
719*5c4a5fe1SAndy Fiddaman }
720*5c4a5fe1SAndy Fiddaman 
721*5c4a5fe1SAndy Fiddaman static void
722*5c4a5fe1SAndy Fiddaman pci_nvme_aer_reset(struct pci_nvme_softc *sc)
723*5c4a5fe1SAndy Fiddaman {
724*5c4a5fe1SAndy Fiddaman 
725*5c4a5fe1SAndy Fiddaman 	STAILQ_INIT(&sc->aer_list);
726*5c4a5fe1SAndy Fiddaman 	sc->aer_count = 0;
727*5c4a5fe1SAndy Fiddaman }
728*5c4a5fe1SAndy Fiddaman 
729*5c4a5fe1SAndy Fiddaman static void
730*5c4a5fe1SAndy Fiddaman pci_nvme_aer_init(struct pci_nvme_softc *sc)
731*5c4a5fe1SAndy Fiddaman {
732*5c4a5fe1SAndy Fiddaman 
733*5c4a5fe1SAndy Fiddaman 	pthread_mutex_init(&sc->aer_mtx, NULL);
734*5c4a5fe1SAndy Fiddaman 	pci_nvme_aer_reset(sc);
735*5c4a5fe1SAndy Fiddaman }
736*5c4a5fe1SAndy Fiddaman 
737*5c4a5fe1SAndy Fiddaman static void
738*5c4a5fe1SAndy Fiddaman pci_nvme_aer_destroy(struct pci_nvme_softc *sc)
739*5c4a5fe1SAndy Fiddaman {
740*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aer *aer = NULL;
741*5c4a5fe1SAndy Fiddaman 
742*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->aer_mtx);
743*5c4a5fe1SAndy Fiddaman 	while (!STAILQ_EMPTY(&sc->aer_list)) {
744*5c4a5fe1SAndy Fiddaman 		aer = STAILQ_FIRST(&sc->aer_list);
745*5c4a5fe1SAndy Fiddaman 		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
746*5c4a5fe1SAndy Fiddaman 		free(aer);
747*5c4a5fe1SAndy Fiddaman 	}
748*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->aer_mtx);
749*5c4a5fe1SAndy Fiddaman 
750*5c4a5fe1SAndy Fiddaman 	pci_nvme_aer_reset(sc);
751*5c4a5fe1SAndy Fiddaman }
752*5c4a5fe1SAndy Fiddaman 
753*5c4a5fe1SAndy Fiddaman static bool
754*5c4a5fe1SAndy Fiddaman pci_nvme_aer_available(struct pci_nvme_softc *sc)
755*5c4a5fe1SAndy Fiddaman {
756*5c4a5fe1SAndy Fiddaman 
757*5c4a5fe1SAndy Fiddaman 	return (sc->aer_count != 0);
758*5c4a5fe1SAndy Fiddaman }
759*5c4a5fe1SAndy Fiddaman 
760*5c4a5fe1SAndy Fiddaman static bool
761*5c4a5fe1SAndy Fiddaman pci_nvme_aer_limit_reached(struct pci_nvme_softc *sc)
762*5c4a5fe1SAndy Fiddaman {
763*5c4a5fe1SAndy Fiddaman 	struct nvme_controller_data *cd = &sc->ctrldata;
764*5c4a5fe1SAndy Fiddaman 
765*5c4a5fe1SAndy Fiddaman 	/* AERL is a zero-based value while aer_count is one-based */
766*5c4a5fe1SAndy Fiddaman 	return (sc->aer_count == (cd->aerl + 1U));
767*5c4a5fe1SAndy Fiddaman }
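
/*
 * Worked example (illustrative): pci_nvme_init_ctrldata() sets cd->aerl = 4,
 * so the limit above is reached once 4 + 1 = 5 Asynchronous Event Requests
 * are outstanding.
 */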
768*5c4a5fe1SAndy Fiddaman 
769*5c4a5fe1SAndy Fiddaman /*
770*5c4a5fe1SAndy Fiddaman  * Add an Async Event Request
771*5c4a5fe1SAndy Fiddaman  *
772*5c4a5fe1SAndy Fiddaman  * Stores an AER to be returned later if the Controller needs to notify the
773*5c4a5fe1SAndy Fiddaman  * host of an event.
774*5c4a5fe1SAndy Fiddaman  * Note that while the NVMe spec doesn't require Controllers to return AER's
775*5c4a5fe1SAndy Fiddaman  * in order, this implementation does preserve the order.
776*5c4a5fe1SAndy Fiddaman  */
777*5c4a5fe1SAndy Fiddaman static int
778*5c4a5fe1SAndy Fiddaman pci_nvme_aer_add(struct pci_nvme_softc *sc, uint16_t cid)
779*5c4a5fe1SAndy Fiddaman {
780*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aer *aer = NULL;
781*5c4a5fe1SAndy Fiddaman 
782*5c4a5fe1SAndy Fiddaman 	aer = calloc(1, sizeof(struct pci_nvme_aer));
783*5c4a5fe1SAndy Fiddaman 	if (aer == NULL)
784*5c4a5fe1SAndy Fiddaman 		return (-1);
785*5c4a5fe1SAndy Fiddaman 
786*5c4a5fe1SAndy Fiddaman 	/* Save the Command ID for use in the completion message */
787*5c4a5fe1SAndy Fiddaman 	aer->cid = cid;
788*5c4a5fe1SAndy Fiddaman 
789*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->aer_mtx);
790*5c4a5fe1SAndy Fiddaman 	sc->aer_count++;
791*5c4a5fe1SAndy Fiddaman 	STAILQ_INSERT_TAIL(&sc->aer_list, aer, link);
792*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->aer_mtx);
793*5c4a5fe1SAndy Fiddaman 
794*5c4a5fe1SAndy Fiddaman 	return (0);
795*5c4a5fe1SAndy Fiddaman }
796*5c4a5fe1SAndy Fiddaman 
797*5c4a5fe1SAndy Fiddaman /*
798*5c4a5fe1SAndy Fiddaman  * Get an Async Event Request structure
799*5c4a5fe1SAndy Fiddaman  *
800*5c4a5fe1SAndy Fiddaman  * Returns a pointer to an AER previously submitted by the host or NULL if
801*5c4a5fe1SAndy Fiddaman  * no AER's exist. Caller is responsible for freeing the returned struct.
802*5c4a5fe1SAndy Fiddaman  */
803*5c4a5fe1SAndy Fiddaman static struct pci_nvme_aer *
804*5c4a5fe1SAndy Fiddaman pci_nvme_aer_get(struct pci_nvme_softc *sc)
805*5c4a5fe1SAndy Fiddaman {
806*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aer *aer = NULL;
807*5c4a5fe1SAndy Fiddaman 
808*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->aer_mtx);
809*5c4a5fe1SAndy Fiddaman 	aer = STAILQ_FIRST(&sc->aer_list);
810*5c4a5fe1SAndy Fiddaman 	if (aer != NULL) {
811*5c4a5fe1SAndy Fiddaman 		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
812*5c4a5fe1SAndy Fiddaman 		sc->aer_count--;
813*5c4a5fe1SAndy Fiddaman 	}
814*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->aer_mtx);
815*5c4a5fe1SAndy Fiddaman 
816*5c4a5fe1SAndy Fiddaman 	return (aer);
817*5c4a5fe1SAndy Fiddaman }
818*5c4a5fe1SAndy Fiddaman 
819*5c4a5fe1SAndy Fiddaman static void
820*5c4a5fe1SAndy Fiddaman pci_nvme_aen_reset(struct pci_nvme_softc *sc)
821*5c4a5fe1SAndy Fiddaman {
822*5c4a5fe1SAndy Fiddaman 	uint32_t	atype;
823*5c4a5fe1SAndy Fiddaman 
824*5c4a5fe1SAndy Fiddaman 	memset(sc->aen, 0, PCI_NVME_AE_TYPE_MAX * sizeof(struct pci_nvme_aen));
825*5c4a5fe1SAndy Fiddaman 
826*5c4a5fe1SAndy Fiddaman 	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
827*5c4a5fe1SAndy Fiddaman 		sc->aen[atype].atype = atype;
828*5c4a5fe1SAndy Fiddaman 	}
829*5c4a5fe1SAndy Fiddaman }
830*5c4a5fe1SAndy Fiddaman 
831*5c4a5fe1SAndy Fiddaman static void
832*5c4a5fe1SAndy Fiddaman pci_nvme_aen_init(struct pci_nvme_softc *sc)
833*5c4a5fe1SAndy Fiddaman {
834*5c4a5fe1SAndy Fiddaman 	char nstr[80];
835*5c4a5fe1SAndy Fiddaman 
836*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_reset(sc);
837*5c4a5fe1SAndy Fiddaman 
838*5c4a5fe1SAndy Fiddaman 	pthread_mutex_init(&sc->aen_mtx, NULL);
839*5c4a5fe1SAndy Fiddaman 	pthread_create(&sc->aen_tid, NULL, aen_thr, sc);
840*5c4a5fe1SAndy Fiddaman 	snprintf(nstr, sizeof(nstr), "nvme-aen-%d:%d", sc->nsc_pi->pi_slot,
841*5c4a5fe1SAndy Fiddaman 	    sc->nsc_pi->pi_func);
842*5c4a5fe1SAndy Fiddaman 	pthread_set_name_np(sc->aen_tid, nstr);
843*5c4a5fe1SAndy Fiddaman }
844*5c4a5fe1SAndy Fiddaman 
845*5c4a5fe1SAndy Fiddaman static void
846*5c4a5fe1SAndy Fiddaman pci_nvme_aen_destroy(struct pci_nvme_softc *sc)
847*5c4a5fe1SAndy Fiddaman {
848*5c4a5fe1SAndy Fiddaman 
849*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_reset(sc);
850*5c4a5fe1SAndy Fiddaman }
851*5c4a5fe1SAndy Fiddaman 
852*5c4a5fe1SAndy Fiddaman /* Notify the AEN thread of pending work */
853*5c4a5fe1SAndy Fiddaman static void
854*5c4a5fe1SAndy Fiddaman pci_nvme_aen_notify(struct pci_nvme_softc *sc)
855*5c4a5fe1SAndy Fiddaman {
856*5c4a5fe1SAndy Fiddaman 
857*5c4a5fe1SAndy Fiddaman 	pthread_cond_signal(&sc->aen_cond);
858*5c4a5fe1SAndy Fiddaman }
859*5c4a5fe1SAndy Fiddaman 
860*5c4a5fe1SAndy Fiddaman /*
861*5c4a5fe1SAndy Fiddaman  * Post an Asynchronous Event Notification
862*5c4a5fe1SAndy Fiddaman  */
863*5c4a5fe1SAndy Fiddaman static int32_t
864*5c4a5fe1SAndy Fiddaman pci_nvme_aen_post(struct pci_nvme_softc *sc, pci_nvme_async_type atype,
865*5c4a5fe1SAndy Fiddaman 		uint32_t event_data)
866*5c4a5fe1SAndy Fiddaman {
867*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aen *aen;
868*5c4a5fe1SAndy Fiddaman 
869*5c4a5fe1SAndy Fiddaman 	if (atype >= PCI_NVME_AE_TYPE_MAX) {
870*5c4a5fe1SAndy Fiddaman 		return(EINVAL);
871*5c4a5fe1SAndy Fiddaman 	}
872*5c4a5fe1SAndy Fiddaman 
873*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->aen_mtx);
874*5c4a5fe1SAndy Fiddaman 	aen = &sc->aen[atype];
875*5c4a5fe1SAndy Fiddaman 
876*5c4a5fe1SAndy Fiddaman 	/* Has the controller already posted an event of this type? */
877*5c4a5fe1SAndy Fiddaman 	if (aen->posted) {
878*5c4a5fe1SAndy Fiddaman 		pthread_mutex_unlock(&sc->aen_mtx);
879*5c4a5fe1SAndy Fiddaman 		return(EALREADY);
880*5c4a5fe1SAndy Fiddaman 	}
881*5c4a5fe1SAndy Fiddaman 
882*5c4a5fe1SAndy Fiddaman 	aen->event_data = event_data;
883*5c4a5fe1SAndy Fiddaman 	aen->posted = true;
884*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->aen_mtx);
885*5c4a5fe1SAndy Fiddaman 
886*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_notify(sc);
887*5c4a5fe1SAndy Fiddaman 
888*5c4a5fe1SAndy Fiddaman 	return(0);
889*5c4a5fe1SAndy Fiddaman }
890*5c4a5fe1SAndy Fiddaman 
891*5c4a5fe1SAndy Fiddaman static void
892*5c4a5fe1SAndy Fiddaman pci_nvme_aen_process(struct pci_nvme_softc *sc)
893*5c4a5fe1SAndy Fiddaman {
894*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aer *aer;
895*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_aen *aen;
896*5c4a5fe1SAndy Fiddaman 	pci_nvme_async_type atype;
897*5c4a5fe1SAndy Fiddaman 	uint32_t mask;
898*5c4a5fe1SAndy Fiddaman 	uint16_t status;
899*5c4a5fe1SAndy Fiddaman 	uint8_t lid;
900*5c4a5fe1SAndy Fiddaman 
901*5c4a5fe1SAndy Fiddaman #ifndef __FreeBSD__
902*5c4a5fe1SAndy Fiddaman 	lid = 0;
903*5c4a5fe1SAndy Fiddaman #endif
904*5c4a5fe1SAndy Fiddaman 
905*5c4a5fe1SAndy Fiddaman 	assert(pthread_mutex_isowned_np(&sc->aen_mtx));
906*5c4a5fe1SAndy Fiddaman 	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
907*5c4a5fe1SAndy Fiddaman 		aen = &sc->aen[atype];
908*5c4a5fe1SAndy Fiddaman 		/* Previous iterations may have depleted the available AER's */
909*5c4a5fe1SAndy Fiddaman 		if (!pci_nvme_aer_available(sc)) {
910*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s: no AER", __func__);
911*5c4a5fe1SAndy Fiddaman 			break;
912*5c4a5fe1SAndy Fiddaman 		}
913*5c4a5fe1SAndy Fiddaman 
914*5c4a5fe1SAndy Fiddaman 		if (!aen->posted) {
915*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s: no AEN posted for atype=%#x", __func__, atype);
916*5c4a5fe1SAndy Fiddaman 			continue;
917*5c4a5fe1SAndy Fiddaman 		}
918*5c4a5fe1SAndy Fiddaman 
919*5c4a5fe1SAndy Fiddaman 		status = NVME_SC_SUCCESS;
920*5c4a5fe1SAndy Fiddaman 
921*5c4a5fe1SAndy Fiddaman 		/* Is the event masked? */
922*5c4a5fe1SAndy Fiddaman 		mask =
923*5c4a5fe1SAndy Fiddaman 		    sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11;
924*5c4a5fe1SAndy Fiddaman 
925*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s: atype=%#x mask=%#x event_data=%#x", __func__, atype, mask, aen->event_data);
926*5c4a5fe1SAndy Fiddaman 		switch (atype) {
927*5c4a5fe1SAndy Fiddaman 		case PCI_NVME_AE_TYPE_ERROR:
928*5c4a5fe1SAndy Fiddaman 			lid = NVME_LOG_ERROR;
929*5c4a5fe1SAndy Fiddaman 			break;
930*5c4a5fe1SAndy Fiddaman 		case PCI_NVME_AE_TYPE_SMART:
931*5c4a5fe1SAndy Fiddaman 			mask &= 0xff;
932*5c4a5fe1SAndy Fiddaman 			if ((mask & aen->event_data) == 0)
933*5c4a5fe1SAndy Fiddaman 				continue;
934*5c4a5fe1SAndy Fiddaman 			lid = NVME_LOG_HEALTH_INFORMATION;
935*5c4a5fe1SAndy Fiddaman 			break;
936*5c4a5fe1SAndy Fiddaman 		case PCI_NVME_AE_TYPE_NOTICE:
937*5c4a5fe1SAndy Fiddaman 			if (aen->event_data >= PCI_NVME_AEI_NOTICE_MAX) {
938*5c4a5fe1SAndy Fiddaman 				EPRINTLN("%s unknown AEN notice type %u",
939*5c4a5fe1SAndy Fiddaman 				    __func__, aen->event_data);
940*5c4a5fe1SAndy Fiddaman 				status = NVME_SC_INTERNAL_DEVICE_ERROR;
941*5c4a5fe1SAndy Fiddaman 				lid = 0;
942*5c4a5fe1SAndy Fiddaman 				break;
943*5c4a5fe1SAndy Fiddaman 			}
944*5c4a5fe1SAndy Fiddaman 			if ((PCI_NVME_AEI_NOTICE_MASK(aen->event_data) & mask) == 0)
945*5c4a5fe1SAndy Fiddaman 				continue;
946*5c4a5fe1SAndy Fiddaman 			switch (aen->event_data) {
947*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED:
948*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_CHANGED_NAMESPACE;
949*5c4a5fe1SAndy Fiddaman 				break;
950*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_FW_ACTIVATION:
951*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_FIRMWARE_SLOT;
952*5c4a5fe1SAndy Fiddaman 				break;
953*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE:
954*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_TELEMETRY_CONTROLLER_INITIATED;
955*5c4a5fe1SAndy Fiddaman 				break;
956*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_ANA_CHANGE:
957*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS;
958*5c4a5fe1SAndy Fiddaman 				break;
959*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE:
960*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_PREDICTABLE_LATENCY_EVENT_AGGREGATE;
961*5c4a5fe1SAndy Fiddaman 				break;
962*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT:
963*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_LBA_STATUS_INFORMATION;
964*5c4a5fe1SAndy Fiddaman 				break;
965*5c4a5fe1SAndy Fiddaman 			case PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE:
966*5c4a5fe1SAndy Fiddaman 				lid = NVME_LOG_ENDURANCE_GROUP_EVENT_AGGREGATE;
967*5c4a5fe1SAndy Fiddaman 				break;
968*5c4a5fe1SAndy Fiddaman 			default:
969*5c4a5fe1SAndy Fiddaman 				lid = 0;
970*5c4a5fe1SAndy Fiddaman 			}
971*5c4a5fe1SAndy Fiddaman 			break;
972*5c4a5fe1SAndy Fiddaman 		default:
973*5c4a5fe1SAndy Fiddaman 			/* bad type?!? */
974*5c4a5fe1SAndy Fiddaman 			EPRINTLN("%s unknown AEN type %u", __func__, atype);
975*5c4a5fe1SAndy Fiddaman 			status = NVME_SC_INTERNAL_DEVICE_ERROR;
976*5c4a5fe1SAndy Fiddaman 			lid = 0;
977*5c4a5fe1SAndy Fiddaman 			break;
978*5c4a5fe1SAndy Fiddaman 		}
979*5c4a5fe1SAndy Fiddaman 
980*5c4a5fe1SAndy Fiddaman 		aer = pci_nvme_aer_get(sc);
981*5c4a5fe1SAndy Fiddaman 		assert(aer != NULL);
982*5c4a5fe1SAndy Fiddaman 
983*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s: CID=%#x CDW0=%#x", __func__, aer->cid, (lid << 16) | (aen->event_data << 8) | atype);
984*5c4a5fe1SAndy Fiddaman 		pci_nvme_cq_update(sc, &sc->compl_queues[0],
985*5c4a5fe1SAndy Fiddaman 		    (lid << 16) | (aen->event_data << 8) | atype, /* cdw0 */
986*5c4a5fe1SAndy Fiddaman 		    aer->cid,
987*5c4a5fe1SAndy Fiddaman 		    0,		/* SQID */
988*5c4a5fe1SAndy Fiddaman 		    status);
989*5c4a5fe1SAndy Fiddaman 
990*5c4a5fe1SAndy Fiddaman 		aen->event_data = 0;
991*5c4a5fe1SAndy Fiddaman 		aen->posted = false;
992*5c4a5fe1SAndy Fiddaman 
993*5c4a5fe1SAndy Fiddaman 		pci_generate_msix(sc->nsc_pi, 0);
994*5c4a5fe1SAndy Fiddaman 	}
995*5c4a5fe1SAndy Fiddaman }
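
/*
 * Note (summary, not from the original source): the completion Dword 0 built
 * above follows the Asynchronous Event completion layout, with the Log Page
 * Identifier in bits 23:16, the Asynchronous Event Information in bits 15:8
 * and the Asynchronous Event Type in bits 2:0.
 */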
996*5c4a5fe1SAndy Fiddaman 
997*5c4a5fe1SAndy Fiddaman static void *
998*5c4a5fe1SAndy Fiddaman aen_thr(void *arg)
999*5c4a5fe1SAndy Fiddaman {
1000*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc *sc;
1001*5c4a5fe1SAndy Fiddaman 
1002*5c4a5fe1SAndy Fiddaman 	sc = arg;
1003*5c4a5fe1SAndy Fiddaman 
1004*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->aen_mtx);
1005*5c4a5fe1SAndy Fiddaman 	for (;;) {
1006*5c4a5fe1SAndy Fiddaman 		pci_nvme_aen_process(sc);
1007*5c4a5fe1SAndy Fiddaman 		pthread_cond_wait(&sc->aen_cond, &sc->aen_mtx);
1008*5c4a5fe1SAndy Fiddaman 	}
1009*5c4a5fe1SAndy Fiddaman #ifdef __FreeBSD__	/* Smatch spots unreachable code */
1010*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->aen_mtx);
1011*5c4a5fe1SAndy Fiddaman 
1012*5c4a5fe1SAndy Fiddaman 	pthread_exit(NULL);
1013*5c4a5fe1SAndy Fiddaman #endif
1014*5c4a5fe1SAndy Fiddaman 	return (NULL);
1015*5c4a5fe1SAndy Fiddaman }
1016*5c4a5fe1SAndy Fiddaman 
1017*5c4a5fe1SAndy Fiddaman static void
1018*5c4a5fe1SAndy Fiddaman pci_nvme_reset_locked(struct pci_nvme_softc *sc)
1019*5c4a5fe1SAndy Fiddaman {
1020*5c4a5fe1SAndy Fiddaman 	uint32_t i;
1021*5c4a5fe1SAndy Fiddaman 
1022*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s", __func__);
1023*5c4a5fe1SAndy Fiddaman 
1024*5c4a5fe1SAndy Fiddaman 	sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) |
1025*5c4a5fe1SAndy Fiddaman 	    NVMEF(NVME_CAP_LO_REG_CQR, 1) |
1026*5c4a5fe1SAndy Fiddaman 	    NVMEF(NVME_CAP_LO_REG_TO, 60);
1027*5c4a5fe1SAndy Fiddaman 
1028*5c4a5fe1SAndy Fiddaman 	sc->regs.cap_hi = NVMEF(NVME_CAP_HI_REG_CSS_NVM, 1);
1029*5c4a5fe1SAndy Fiddaman 
1030*5c4a5fe1SAndy Fiddaman 	sc->regs.vs = NVME_REV(1,4);	/* NVMe v1.4 */
1031*5c4a5fe1SAndy Fiddaman 
1032*5c4a5fe1SAndy Fiddaman 	sc->regs.cc = 0;
1033*5c4a5fe1SAndy Fiddaman 
1034*5c4a5fe1SAndy Fiddaman 	assert(sc->submit_queues != NULL);
1035*5c4a5fe1SAndy Fiddaman 
1036*5c4a5fe1SAndy Fiddaman 	for (i = 0; i < sc->num_squeues + 1; i++) {
1037*5c4a5fe1SAndy Fiddaman 		sc->submit_queues[i].qbase = NULL;
1038*5c4a5fe1SAndy Fiddaman 		sc->submit_queues[i].size = 0;
1039*5c4a5fe1SAndy Fiddaman 		sc->submit_queues[i].cqid = 0;
1040*5c4a5fe1SAndy Fiddaman 		sc->submit_queues[i].tail = 0;
1041*5c4a5fe1SAndy Fiddaman 		sc->submit_queues[i].head = 0;
1042*5c4a5fe1SAndy Fiddaman 	}
1043*5c4a5fe1SAndy Fiddaman 
1044*5c4a5fe1SAndy Fiddaman 	assert(sc->compl_queues != NULL);
1045*5c4a5fe1SAndy Fiddaman 
1046*5c4a5fe1SAndy Fiddaman 	for (i = 0; i < sc->num_cqueues + 1; i++) {
1047*5c4a5fe1SAndy Fiddaman 		sc->compl_queues[i].qbase = NULL;
1048*5c4a5fe1SAndy Fiddaman 		sc->compl_queues[i].size = 0;
1049*5c4a5fe1SAndy Fiddaman 		sc->compl_queues[i].tail = 0;
1050*5c4a5fe1SAndy Fiddaman 		sc->compl_queues[i].head = 0;
1051*5c4a5fe1SAndy Fiddaman 	}
1052*5c4a5fe1SAndy Fiddaman 
1053*5c4a5fe1SAndy Fiddaman 	sc->num_q_is_set = false;
1054*5c4a5fe1SAndy Fiddaman 
1055*5c4a5fe1SAndy Fiddaman 	pci_nvme_aer_destroy(sc);
1056*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_destroy(sc);
1057*5c4a5fe1SAndy Fiddaman 
1058*5c4a5fe1SAndy Fiddaman 	/*
1059*5c4a5fe1SAndy Fiddaman 	 * Clear CSTS.RDY last to prevent the host from enabling Controller
1060*5c4a5fe1SAndy Fiddaman 	 * before cleanup completes
1061*5c4a5fe1SAndy Fiddaman 	 */
1062*5c4a5fe1SAndy Fiddaman 	sc->regs.csts = 0;
1063*5c4a5fe1SAndy Fiddaman }
1064*5c4a5fe1SAndy Fiddaman 
1065*5c4a5fe1SAndy Fiddaman static void
1066*5c4a5fe1SAndy Fiddaman pci_nvme_reset(struct pci_nvme_softc *sc)
1067*5c4a5fe1SAndy Fiddaman {
1068*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
1069*5c4a5fe1SAndy Fiddaman 	pci_nvme_reset_locked(sc);
1070*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
1071*5c4a5fe1SAndy Fiddaman }
1072*5c4a5fe1SAndy Fiddaman 
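/*
 * Called when the guest enables the controller (CC.EN). Maps the Admin
 * Submission and Completion Queues at the guest physical addresses given
 * by the ASQ/ACQ registers; AQA holds the zero-based queue sizes, hence
 * the ONE_BASED() conversions below.
 */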
1073*5c4a5fe1SAndy Fiddaman static int
1074*5c4a5fe1SAndy Fiddaman pci_nvme_init_controller(struct pci_nvme_softc *sc)
1075*5c4a5fe1SAndy Fiddaman {
1076*5c4a5fe1SAndy Fiddaman 	uint16_t acqs, asqs;
1077*5c4a5fe1SAndy Fiddaman 
1078*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s", __func__);
1079*5c4a5fe1SAndy Fiddaman 
1080*5c4a5fe1SAndy Fiddaman 	/*
1081*5c4a5fe1SAndy Fiddaman 	 * NVMe 2.0 states that "enabling a controller while this field is
1082*5c4a5fe1SAndy Fiddaman 	 * cleared to 0h produces undefined results" for both ACQS and
1083*5c4a5fe1SAndy Fiddaman 	 * ASQS. If zero, set CFS and do not become ready.
1084*5c4a5fe1SAndy Fiddaman 	 */
1085*5c4a5fe1SAndy Fiddaman 	asqs = ONE_BASED(NVMEV(NVME_AQA_REG_ASQS, sc->regs.aqa));
1086*5c4a5fe1SAndy Fiddaman 	if (asqs < 2) {
1087*5c4a5fe1SAndy Fiddaman 		EPRINTLN("%s: illegal ASQS value %#x (aqa=%#x)", __func__,
1088*5c4a5fe1SAndy Fiddaman 		    asqs - 1, sc->regs.aqa);
1089*5c4a5fe1SAndy Fiddaman 		sc->regs.csts |= NVME_CSTS_CFS;
1090*5c4a5fe1SAndy Fiddaman 		return (-1);
1091*5c4a5fe1SAndy Fiddaman 	}
1092*5c4a5fe1SAndy Fiddaman 	sc->submit_queues[0].size = asqs;
1093*5c4a5fe1SAndy Fiddaman 	sc->submit_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1094*5c4a5fe1SAndy Fiddaman 	    sc->regs.asq, sizeof(struct nvme_command) * asqs);
1095*5c4a5fe1SAndy Fiddaman 	if (sc->submit_queues[0].qbase == NULL) {
1096*5c4a5fe1SAndy Fiddaman 		EPRINTLN("%s: ASQ vm_map_gpa(%lx) failed", __func__,
1097*5c4a5fe1SAndy Fiddaman 		    sc->regs.asq);
1098*5c4a5fe1SAndy Fiddaman 		sc->regs.csts |= NVME_CSTS_CFS;
1099*5c4a5fe1SAndy Fiddaman 		return (-1);
1100*5c4a5fe1SAndy Fiddaman 	}
1101*5c4a5fe1SAndy Fiddaman 
1102*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p",
1103*5c4a5fe1SAndy Fiddaman 	        __func__, sc->regs.asq, sc->submit_queues[0].qbase);
1104*5c4a5fe1SAndy Fiddaman 
1105*5c4a5fe1SAndy Fiddaman 	acqs = ONE_BASED(NVMEV(NVME_AQA_REG_ACQS, sc->regs.aqa));
1106*5c4a5fe1SAndy Fiddaman 	if (acqs < 2) {
1107*5c4a5fe1SAndy Fiddaman 		EPRINTLN("%s: illegal ACQS value %#x (aqa=%#x)", __func__,
1108*5c4a5fe1SAndy Fiddaman 		    acqs - 1, sc->regs.aqa);
1109*5c4a5fe1SAndy Fiddaman 		sc->regs.csts |= NVME_CSTS_CFS;
1110*5c4a5fe1SAndy Fiddaman 		return (-1);
1111*5c4a5fe1SAndy Fiddaman 	}
1112*5c4a5fe1SAndy Fiddaman 	sc->compl_queues[0].size = acqs;
1113*5c4a5fe1SAndy Fiddaman 	sc->compl_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1114*5c4a5fe1SAndy Fiddaman 	    sc->regs.acq, sizeof(struct nvme_completion) * acqs);
1115*5c4a5fe1SAndy Fiddaman 	if (sc->compl_queues[0].qbase == NULL) {
1116*5c4a5fe1SAndy Fiddaman 		EPRINTLN("%s: ACQ vm_map_gpa(%lx) failed", __func__,
1117*5c4a5fe1SAndy Fiddaman 		    sc->regs.acq);
1118*5c4a5fe1SAndy Fiddaman 		sc->regs.csts |= NVME_CSTS_CFS;
1119*5c4a5fe1SAndy Fiddaman 		return (-1);
1120*5c4a5fe1SAndy Fiddaman 	}
1121*5c4a5fe1SAndy Fiddaman 	sc->compl_queues[0].intr_en = NVME_CQ_INTEN;
1122*5c4a5fe1SAndy Fiddaman 
1123*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p",
1124*5c4a5fe1SAndy Fiddaman 	        __func__, sc->regs.acq, sc->compl_queues[0].qbase);
1125*5c4a5fe1SAndy Fiddaman 
1126*5c4a5fe1SAndy Fiddaman 	return (0);
1127*5c4a5fe1SAndy Fiddaman }
1128*5c4a5fe1SAndy Fiddaman 
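/*
 * Copy at most 8KiB between guest memory and 'b' using a pair of PRP
 * entries: prp1 may point anywhere within a page and is used up to the
 * end of that page; any remainder is copied via prp2, which is treated
 * as a second page pointer (never as a PRP list) given the size limit.
 */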
1129*5c4a5fe1SAndy Fiddaman static int
1130*5c4a5fe1SAndy Fiddaman nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *b,
1131*5c4a5fe1SAndy Fiddaman 	size_t len, enum nvme_copy_dir dir)
1132*5c4a5fe1SAndy Fiddaman {
1133*5c4a5fe1SAndy Fiddaman 	uint8_t *p;
1134*5c4a5fe1SAndy Fiddaman 	size_t bytes;
1135*5c4a5fe1SAndy Fiddaman 
1136*5c4a5fe1SAndy Fiddaman 	if (len > (8 * 1024)) {
1137*5c4a5fe1SAndy Fiddaman 		return (-1);
1138*5c4a5fe1SAndy Fiddaman 	}
1139*5c4a5fe1SAndy Fiddaman 
1140*5c4a5fe1SAndy Fiddaman 	/* Copy from the start of prp1 to the end of the physical page */
1141*5c4a5fe1SAndy Fiddaman 	bytes = PAGE_SIZE - (prp1 & PAGE_MASK);
1142*5c4a5fe1SAndy Fiddaman 	bytes = MIN(bytes, len);
1143*5c4a5fe1SAndy Fiddaman 
1144*5c4a5fe1SAndy Fiddaman 	p = vm_map_gpa(ctx, prp1, bytes);
1145*5c4a5fe1SAndy Fiddaman 	if (p == NULL) {
1146*5c4a5fe1SAndy Fiddaman 		return (-1);
1147*5c4a5fe1SAndy Fiddaman 	}
1148*5c4a5fe1SAndy Fiddaman 
1149*5c4a5fe1SAndy Fiddaman 	if (dir == NVME_COPY_TO_PRP)
1150*5c4a5fe1SAndy Fiddaman 		memcpy(p, b, bytes);
1151*5c4a5fe1SAndy Fiddaman 	else
1152*5c4a5fe1SAndy Fiddaman 		memcpy(b, p, bytes);
1153*5c4a5fe1SAndy Fiddaman 
1154*5c4a5fe1SAndy Fiddaman 	b += bytes;
1155*5c4a5fe1SAndy Fiddaman 
1156*5c4a5fe1SAndy Fiddaman 	len -= bytes;
1157*5c4a5fe1SAndy Fiddaman 	if (len == 0) {
1158*5c4a5fe1SAndy Fiddaman 		return (0);
1159*5c4a5fe1SAndy Fiddaman 	}
1160*5c4a5fe1SAndy Fiddaman 
1161*5c4a5fe1SAndy Fiddaman 	len = MIN(len, PAGE_SIZE);
1162*5c4a5fe1SAndy Fiddaman 
1163*5c4a5fe1SAndy Fiddaman 	p = vm_map_gpa(ctx, prp2, len);
1164*5c4a5fe1SAndy Fiddaman 	if (p == NULL) {
1165*5c4a5fe1SAndy Fiddaman 		return (-1);
1166*5c4a5fe1SAndy Fiddaman 	}
1167*5c4a5fe1SAndy Fiddaman 
1168*5c4a5fe1SAndy Fiddaman 	if (dir == NVME_COPY_TO_PRP)
1169*5c4a5fe1SAndy Fiddaman 		memcpy(p, b, len);
1170*5c4a5fe1SAndy Fiddaman 	else
1171*5c4a5fe1SAndy Fiddaman 		memcpy(b, p, len);
1172*5c4a5fe1SAndy Fiddaman 
1173*5c4a5fe1SAndy Fiddaman 	return (0);
1174*5c4a5fe1SAndy Fiddaman }
1175*5c4a5fe1SAndy Fiddaman 
1176*5c4a5fe1SAndy Fiddaman /*
1177*5c4a5fe1SAndy Fiddaman  * Write a Completion Queue Entry update
1178*5c4a5fe1SAndy Fiddaman  *
1179*5c4a5fe1SAndy Fiddaman  * Write the completion and update the doorbell value
1180*5c4a5fe1SAndy Fiddaman  */
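/*
 * The new entry's Phase Tag is written as the inverse of the stale value
 * already in the slot, so it toggles on every pass through the queue and
 * the host can spot fresh completions without re-reading the doorbell.
 */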
1181*5c4a5fe1SAndy Fiddaman static void
1182*5c4a5fe1SAndy Fiddaman pci_nvme_cq_update(struct pci_nvme_softc *sc,
1183*5c4a5fe1SAndy Fiddaman 		struct nvme_completion_queue *cq,
1184*5c4a5fe1SAndy Fiddaman 		uint32_t cdw0,
1185*5c4a5fe1SAndy Fiddaman 		uint16_t cid,
1186*5c4a5fe1SAndy Fiddaman 		uint16_t sqid,
1187*5c4a5fe1SAndy Fiddaman 		uint16_t status)
1188*5c4a5fe1SAndy Fiddaman {
1189*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *sq = &sc->submit_queues[sqid];
1190*5c4a5fe1SAndy Fiddaman 	struct nvme_completion *cqe;
1191*5c4a5fe1SAndy Fiddaman 
1192*5c4a5fe1SAndy Fiddaman 	assert(cq->qbase != NULL);
1193*5c4a5fe1SAndy Fiddaman 
1194*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&cq->mtx);
1195*5c4a5fe1SAndy Fiddaman 
1196*5c4a5fe1SAndy Fiddaman 	cqe = &cq->qbase[cq->tail];
1197*5c4a5fe1SAndy Fiddaman 
1198*5c4a5fe1SAndy Fiddaman 	/* Flip the phase bit */
1199*5c4a5fe1SAndy Fiddaman 	status |= (cqe->status ^ NVME_STATUS_P) & NVME_STATUS_P_MASK;
1200*5c4a5fe1SAndy Fiddaman 
1201*5c4a5fe1SAndy Fiddaman 	cqe->cdw0 = cdw0;
1202*5c4a5fe1SAndy Fiddaman 	cqe->sqhd = sq->head;
1203*5c4a5fe1SAndy Fiddaman 	cqe->sqid = sqid;
1204*5c4a5fe1SAndy Fiddaman 	cqe->cid = cid;
1205*5c4a5fe1SAndy Fiddaman 	cqe->status = status;
1206*5c4a5fe1SAndy Fiddaman 
1207*5c4a5fe1SAndy Fiddaman 	cq->tail++;
1208*5c4a5fe1SAndy Fiddaman 	if (cq->tail >= cq->size) {
1209*5c4a5fe1SAndy Fiddaman 		cq->tail = 0;
1210*5c4a5fe1SAndy Fiddaman 	}
1211*5c4a5fe1SAndy Fiddaman 
1212*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&cq->mtx);
1213*5c4a5fe1SAndy Fiddaman }
1214*5c4a5fe1SAndy Fiddaman 
1215*5c4a5fe1SAndy Fiddaman static int
1216*5c4a5fe1SAndy Fiddaman nvme_opc_delete_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
1217*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1218*5c4a5fe1SAndy Fiddaman {
1219*5c4a5fe1SAndy Fiddaman 	uint16_t qid = command->cdw10 & 0xffff;
1220*5c4a5fe1SAndy Fiddaman 
1221*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s DELETE_IO_SQ %u", __func__, qid);
1222*5c4a5fe1SAndy Fiddaman 	if (qid == 0 || qid > sc->num_squeues ||
1223*5c4a5fe1SAndy Fiddaman 	    (sc->submit_queues[qid].qbase == NULL)) {
1224*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s NOT PERMITTED queue id %u / num_squeues %u",
1225*5c4a5fe1SAndy Fiddaman 		        __func__, qid, sc->num_squeues);
1226*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1227*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
1228*5c4a5fe1SAndy Fiddaman 		return (1);
1229*5c4a5fe1SAndy Fiddaman 	}
1230*5c4a5fe1SAndy Fiddaman 
1231*5c4a5fe1SAndy Fiddaman 	sc->submit_queues[qid].qbase = NULL;
1232*5c4a5fe1SAndy Fiddaman 	sc->submit_queues[qid].cqid = 0;
1233*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1234*5c4a5fe1SAndy Fiddaman 	return (1);
1235*5c4a5fe1SAndy Fiddaman }
1236*5c4a5fe1SAndy Fiddaman 
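/*
 * Create I/O Submission Queue. CDW10 carries the queue ID (bits 15:0) and
 * the zero-based queue size (bits 31:16); CDW11 carries PC (bit 0), the
 * queue priority (bits 2:1) and the target completion queue ID (bits 31:16).
 */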
1237*5c4a5fe1SAndy Fiddaman static int
1238*5c4a5fe1SAndy Fiddaman nvme_opc_create_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
1239*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1240*5c4a5fe1SAndy Fiddaman {
1241*5c4a5fe1SAndy Fiddaman 	if (command->cdw11 & NVME_CMD_CDW11_PC) {
1242*5c4a5fe1SAndy Fiddaman 		uint16_t qid = command->cdw10 & 0xffff;
1243*5c4a5fe1SAndy Fiddaman 		struct nvme_submission_queue *nsq;
1244*5c4a5fe1SAndy Fiddaman 
1245*5c4a5fe1SAndy Fiddaman 		if ((qid == 0) || (qid > sc->num_squeues) ||
1246*5c4a5fe1SAndy Fiddaman 		    (sc->submit_queues[qid].qbase != NULL)) {
1247*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s queue index %u > num_squeues %u",
1248*5c4a5fe1SAndy Fiddaman 			        __func__, qid, sc->num_squeues);
1249*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl->status,
1250*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
1251*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_QUEUE_IDENTIFIER);
1252*5c4a5fe1SAndy Fiddaman 			return (1);
1253*5c4a5fe1SAndy Fiddaman 		}
1254*5c4a5fe1SAndy Fiddaman 
1255*5c4a5fe1SAndy Fiddaman 		nsq = &sc->submit_queues[qid];
1256*5c4a5fe1SAndy Fiddaman 		nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
1257*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s size=%u (max=%u)", __func__, nsq->size, sc->max_qentries);
1258*5c4a5fe1SAndy Fiddaman 		if ((nsq->size < 2) || (nsq->size > sc->max_qentries)) {
1259*5c4a5fe1SAndy Fiddaman 			/*
1260*5c4a5fe1SAndy Fiddaman 			 * Queues must specify at least two entries
1261*5c4a5fe1SAndy Fiddaman 			 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
1262*5c4a5fe1SAndy Fiddaman 			 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
1263*5c4a5fe1SAndy Fiddaman 			 */
1264*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl->status,
1265*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
1266*5c4a5fe1SAndy Fiddaman 			    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
1267*5c4a5fe1SAndy Fiddaman 			return (1);
1268*5c4a5fe1SAndy Fiddaman 		}
1269*5c4a5fe1SAndy Fiddaman 		nsq->head = nsq->tail = 0;
1270*5c4a5fe1SAndy Fiddaman 
1271*5c4a5fe1SAndy Fiddaman 		nsq->cqid = (command->cdw11 >> 16) & 0xffff;
1272*5c4a5fe1SAndy Fiddaman 		if ((nsq->cqid == 0) || (nsq->cqid > sc->num_cqueues)) {
1273*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl->status,
1274*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
1275*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_QUEUE_IDENTIFIER);
1276*5c4a5fe1SAndy Fiddaman 			return (1);
1277*5c4a5fe1SAndy Fiddaman 		}
1278*5c4a5fe1SAndy Fiddaman 
1279*5c4a5fe1SAndy Fiddaman 		if (sc->compl_queues[nsq->cqid].qbase == NULL) {
1280*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl->status,
1281*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
1282*5c4a5fe1SAndy Fiddaman 			    NVME_SC_COMPLETION_QUEUE_INVALID);
1283*5c4a5fe1SAndy Fiddaman 			return (1);
1284*5c4a5fe1SAndy Fiddaman 		}
1285*5c4a5fe1SAndy Fiddaman 
1286*5c4a5fe1SAndy Fiddaman 		nsq->qpriority = (command->cdw11 >> 1) & 0x03;
1287*5c4a5fe1SAndy Fiddaman 
1288*5c4a5fe1SAndy Fiddaman 		nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1289*5c4a5fe1SAndy Fiddaman 		              sizeof(struct nvme_command) * (size_t)nsq->size);
1290*5c4a5fe1SAndy Fiddaman 
1291*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s sq %u size %u gaddr %p cqid %u", __func__,
1292*5c4a5fe1SAndy Fiddaman 		        qid, nsq->size, nsq->qbase, nsq->cqid);
1293*5c4a5fe1SAndy Fiddaman 
1294*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1295*5c4a5fe1SAndy Fiddaman 
1296*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s completed creating IOSQ qid %u",
1297*5c4a5fe1SAndy Fiddaman 		         __func__, qid);
1298*5c4a5fe1SAndy Fiddaman 	} else {
1299*5c4a5fe1SAndy Fiddaman 		/*
1300*5c4a5fe1SAndy Fiddaman 		 * The guest requested a non-contiguous (PRP list based)
1301*5c4a5fe1SAndy Fiddaman 		 * submission queue; this emulation does not support that.
1302*5c4a5fe1SAndy Fiddaman 		 */
1303*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s unsupported non-contig (list-based) "
1304*5c4a5fe1SAndy Fiddaman 		         "create i/o submission queue", __func__);
1305*5c4a5fe1SAndy Fiddaman 
1306*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1307*5c4a5fe1SAndy Fiddaman 	}
1308*5c4a5fe1SAndy Fiddaman 	return (1);
1309*5c4a5fe1SAndy Fiddaman }
1310*5c4a5fe1SAndy Fiddaman 
1311*5c4a5fe1SAndy Fiddaman static int
1312*5c4a5fe1SAndy Fiddaman nvme_opc_delete_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
1313*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1314*5c4a5fe1SAndy Fiddaman {
1315*5c4a5fe1SAndy Fiddaman 	uint16_t qid = command->cdw10 & 0xffff;
1316*5c4a5fe1SAndy Fiddaman 	uint16_t sqid;
1317*5c4a5fe1SAndy Fiddaman 
1318*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s DELETE_IO_CQ %u", __func__, qid);
1319*5c4a5fe1SAndy Fiddaman 	if (qid == 0 || qid > sc->num_cqueues ||
1320*5c4a5fe1SAndy Fiddaman 	    (sc->compl_queues[qid].qbase == NULL)) {
1321*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s queue index %u / num_cqueues %u",
1322*5c4a5fe1SAndy Fiddaman 		        __func__, qid, sc->num_cqueues);
1323*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1324*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
1325*5c4a5fe1SAndy Fiddaman 		return (1);
1326*5c4a5fe1SAndy Fiddaman 	}
1327*5c4a5fe1SAndy Fiddaman 
1328*5c4a5fe1SAndy Fiddaman 	/* Deleting an Active CQ is an error */
1329*5c4a5fe1SAndy Fiddaman 	for (sqid = 1; sqid < sc->num_squeues + 1; sqid++)
1330*5c4a5fe1SAndy Fiddaman 		if (sc->submit_queues[sqid].cqid == qid) {
1331*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl->status,
1332*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
1333*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_QUEUE_DELETION);
1334*5c4a5fe1SAndy Fiddaman 			return (1);
1335*5c4a5fe1SAndy Fiddaman 		}
1336*5c4a5fe1SAndy Fiddaman 
1337*5c4a5fe1SAndy Fiddaman 	sc->compl_queues[qid].qbase = NULL;
1338*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1339*5c4a5fe1SAndy Fiddaman 	return (1);
1340*5c4a5fe1SAndy Fiddaman }
1341*5c4a5fe1SAndy Fiddaman 
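/*
 * Create I/O Completion Queue. CDW10 carries the queue ID (bits 15:0) and
 * the zero-based queue size (bits 31:16); CDW11 carries PC (bit 0), the
 * interrupt-enable flag IEN (bit 1) and the MSI-X interrupt vector to use
 * (bits 31:16).
 */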
1342*5c4a5fe1SAndy Fiddaman static int
1343*5c4a5fe1SAndy Fiddaman nvme_opc_create_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
1344*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1345*5c4a5fe1SAndy Fiddaman {
1346*5c4a5fe1SAndy Fiddaman 	struct nvme_completion_queue *ncq;
1347*5c4a5fe1SAndy Fiddaman 	uint16_t qid = command->cdw10 & 0xffff;
1348*5c4a5fe1SAndy Fiddaman 
1349*5c4a5fe1SAndy Fiddaman 	/* Only support Physically Contiguous queues */
1350*5c4a5fe1SAndy Fiddaman 	if ((command->cdw11 & NVME_CMD_CDW11_PC) == 0) {
1351*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s unsupported non-contig (list-based) "
1352*5c4a5fe1SAndy Fiddaman 		         "create i/o completion queue",
1353*5c4a5fe1SAndy Fiddaman 		         __func__);
1354*5c4a5fe1SAndy Fiddaman 
1355*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1356*5c4a5fe1SAndy Fiddaman 		return (1);
1357*5c4a5fe1SAndy Fiddaman 	}
1358*5c4a5fe1SAndy Fiddaman 
1359*5c4a5fe1SAndy Fiddaman 	if ((qid == 0) || (qid > sc->num_cqueues) ||
1360*5c4a5fe1SAndy Fiddaman 	    (sc->compl_queues[qid].qbase != NULL)) {
1361*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s queue index %u > num_cqueues %u",
1362*5c4a5fe1SAndy Fiddaman 			__func__, qid, sc->num_cqueues);
1363*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status,
1364*5c4a5fe1SAndy Fiddaman 		    NVME_SCT_COMMAND_SPECIFIC,
1365*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
1366*5c4a5fe1SAndy Fiddaman 		return (1);
1367*5c4a5fe1SAndy Fiddaman 	}
1368*5c4a5fe1SAndy Fiddaman 
1369*5c4a5fe1SAndy Fiddaman 	ncq = &sc->compl_queues[qid];
1370*5c4a5fe1SAndy Fiddaman 	ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1;
1371*5c4a5fe1SAndy Fiddaman 	ncq->intr_vec = (command->cdw11 >> 16) & 0xffff;
1372*5c4a5fe1SAndy Fiddaman 	if (ncq->intr_vec > (sc->max_queues + 1)) {
1373*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status,
1374*5c4a5fe1SAndy Fiddaman 		    NVME_SCT_COMMAND_SPECIFIC,
1375*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_INTERRUPT_VECTOR);
1376*5c4a5fe1SAndy Fiddaman 		return (1);
1377*5c4a5fe1SAndy Fiddaman 	}
1378*5c4a5fe1SAndy Fiddaman 
1379*5c4a5fe1SAndy Fiddaman 	ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
1380*5c4a5fe1SAndy Fiddaman 	if ((ncq->size < 2) || (ncq->size > sc->max_qentries))  {
1381*5c4a5fe1SAndy Fiddaman 		/*
1382*5c4a5fe1SAndy Fiddaman 		 * Queues must specify at least two entries
1383*5c4a5fe1SAndy Fiddaman 		 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
1384*5c4a5fe1SAndy Fiddaman 		 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
1385*5c4a5fe1SAndy Fiddaman 		 */
1386*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status,
1387*5c4a5fe1SAndy Fiddaman 		    NVME_SCT_COMMAND_SPECIFIC,
1388*5c4a5fe1SAndy Fiddaman 		    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
1389*5c4a5fe1SAndy Fiddaman 		return (1);
1390*5c4a5fe1SAndy Fiddaman 	}
1391*5c4a5fe1SAndy Fiddaman 	ncq->head = ncq->tail = 0;
1392*5c4a5fe1SAndy Fiddaman 	ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1393*5c4a5fe1SAndy Fiddaman 		     command->prp1,
1394*5c4a5fe1SAndy Fiddaman 		     sizeof(struct nvme_command) * (size_t)ncq->size);
1395*5c4a5fe1SAndy Fiddaman 
1396*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1397*5c4a5fe1SAndy Fiddaman 
1398*5c4a5fe1SAndy Fiddaman 
1399*5c4a5fe1SAndy Fiddaman 	return (1);
1400*5c4a5fe1SAndy Fiddaman }
1401*5c4a5fe1SAndy Fiddaman 
1402*5c4a5fe1SAndy Fiddaman static int
1403*5c4a5fe1SAndy Fiddaman nvme_opc_get_log_page(struct pci_nvme_softc* sc, struct nvme_command* command,
1404*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1405*5c4a5fe1SAndy Fiddaman {
1406*5c4a5fe1SAndy Fiddaman 	uint64_t logoff;
1407*5c4a5fe1SAndy Fiddaman 	uint32_t logsize;
1408*5c4a5fe1SAndy Fiddaman 	uint8_t logpage;
1409*5c4a5fe1SAndy Fiddaman 
1410*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1411*5c4a5fe1SAndy Fiddaman 
1412*5c4a5fe1SAndy Fiddaman 	/*
1413*5c4a5fe1SAndy Fiddaman 	 * Command specifies the number of dwords to return in fields NUMDU
1414*5c4a5fe1SAndy Fiddaman 	 * and NUMDL. This is a zero-based value.
1415*5c4a5fe1SAndy Fiddaman 	 */
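	/*
	 * e.g. NUMDU=0/NUMDL=0x3ff requests (0x3ff + 1) = 1024 dwords, i.e.
	 * a 4096 byte transfer starting at the offset given in CDW12/CDW13.
	 */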
1416*5c4a5fe1SAndy Fiddaman 	logpage = command->cdw10 & 0xFF;
1417*5c4a5fe1SAndy Fiddaman 	logsize = ((command->cdw11 << 16) | (command->cdw10 >> 16)) + 1;
1418*5c4a5fe1SAndy Fiddaman 	logsize *= sizeof(uint32_t);
1419*5c4a5fe1SAndy Fiddaman 	logoff  = ((uint64_t)(command->cdw13) << 32) | command->cdw12;
1420*5c4a5fe1SAndy Fiddaman 
1421*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s log page %u len %u", __func__, logpage, logsize);
1422*5c4a5fe1SAndy Fiddaman 
1423*5c4a5fe1SAndy Fiddaman 	switch (logpage) {
1424*5c4a5fe1SAndy Fiddaman 	case NVME_LOG_ERROR:
1425*5c4a5fe1SAndy Fiddaman 		if (logoff >= sizeof(sc->err_log)) {
1426*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1427*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_FIELD);
1428*5c4a5fe1SAndy Fiddaman 			break;
1429*5c4a5fe1SAndy Fiddaman 		}
1430*5c4a5fe1SAndy Fiddaman 
1431*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1432*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->err_log + logoff,
1433*5c4a5fe1SAndy Fiddaman 		    MIN(logsize - logoff, sizeof(sc->err_log)),
1434*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1435*5c4a5fe1SAndy Fiddaman 		break;
1436*5c4a5fe1SAndy Fiddaman 	case NVME_LOG_HEALTH_INFORMATION:
1437*5c4a5fe1SAndy Fiddaman 		if (logoff >= sizeof(sc->health_log)) {
1438*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1439*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_FIELD);
1440*5c4a5fe1SAndy Fiddaman 			break;
1441*5c4a5fe1SAndy Fiddaman 		}
1442*5c4a5fe1SAndy Fiddaman 
1443*5c4a5fe1SAndy Fiddaman 		pthread_mutex_lock(&sc->mtx);
1444*5c4a5fe1SAndy Fiddaman 		memcpy(&sc->health_log.data_units_read, &sc->read_data_units,
1445*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->health_log.data_units_read));
1446*5c4a5fe1SAndy Fiddaman 		memcpy(&sc->health_log.data_units_written, &sc->write_data_units,
1447*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->health_log.data_units_written));
1448*5c4a5fe1SAndy Fiddaman 		memcpy(&sc->health_log.host_read_commands, &sc->read_commands,
1449*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->health_log.host_read_commands));
1450*5c4a5fe1SAndy Fiddaman 		memcpy(&sc->health_log.host_write_commands, &sc->write_commands,
1451*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->health_log.host_write_commands));
1452*5c4a5fe1SAndy Fiddaman 		pthread_mutex_unlock(&sc->mtx);
1453*5c4a5fe1SAndy Fiddaman 
1454*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1455*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->health_log + logoff,
1456*5c4a5fe1SAndy Fiddaman 		    MIN(logsize - logoff, sizeof(sc->health_log)),
1457*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1458*5c4a5fe1SAndy Fiddaman 		break;
1459*5c4a5fe1SAndy Fiddaman 	case NVME_LOG_FIRMWARE_SLOT:
1460*5c4a5fe1SAndy Fiddaman 		if (logoff >= sizeof(sc->fw_log)) {
1461*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1462*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_FIELD);
1463*5c4a5fe1SAndy Fiddaman 			break;
1464*5c4a5fe1SAndy Fiddaman 		}
1465*5c4a5fe1SAndy Fiddaman 
1466*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1467*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->fw_log + logoff,
1468*5c4a5fe1SAndy Fiddaman 		    MIN(logsize - logoff, sizeof(sc->fw_log)),
1469*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1470*5c4a5fe1SAndy Fiddaman 		break;
1471*5c4a5fe1SAndy Fiddaman 	case NVME_LOG_CHANGED_NAMESPACE:
1472*5c4a5fe1SAndy Fiddaman 		if (logoff >= sizeof(sc->ns_log)) {
1473*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1474*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_FIELD);
1475*5c4a5fe1SAndy Fiddaman 			break;
1476*5c4a5fe1SAndy Fiddaman 		}
1477*5c4a5fe1SAndy Fiddaman 
1478*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1479*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->ns_log + logoff,
1480*5c4a5fe1SAndy Fiddaman 		    MIN(logsize - logoff, sizeof(sc->ns_log)),
1481*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1482*5c4a5fe1SAndy Fiddaman 		memset(&sc->ns_log, 0, sizeof(sc->ns_log));
1483*5c4a5fe1SAndy Fiddaman 		break;
1484*5c4a5fe1SAndy Fiddaman 	default:
1485*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s get log page %x command not supported",
1486*5c4a5fe1SAndy Fiddaman 		        __func__, logpage);
1487*5c4a5fe1SAndy Fiddaman 
1488*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1489*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_LOG_PAGE);
1490*5c4a5fe1SAndy Fiddaman 	}
1491*5c4a5fe1SAndy Fiddaman 
1492*5c4a5fe1SAndy Fiddaman 	return (1);
1493*5c4a5fe1SAndy Fiddaman }
1494*5c4a5fe1SAndy Fiddaman 
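/*
 * Identify. The CNS value in CDW10[7:0] selects the data structure
 * returned: 00h Identify Namespace, 01h Identify Controller, 02h Active
 * Namespace ID list, 03h Namespace Identification Descriptor list and
 * 13h an (empty) Controller list; anything else is rejected with
 * Invalid Field.
 */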
1495*5c4a5fe1SAndy Fiddaman static int
1496*5c4a5fe1SAndy Fiddaman nvme_opc_identify(struct pci_nvme_softc* sc, struct nvme_command* command,
1497*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1498*5c4a5fe1SAndy Fiddaman {
1499*5c4a5fe1SAndy Fiddaman 	void *dest;
1500*5c4a5fe1SAndy Fiddaman 	uint16_t status;
1501*5c4a5fe1SAndy Fiddaman 
1502*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s identify 0x%x nsid 0x%x", __func__,
1503*5c4a5fe1SAndy Fiddaman 	        command->cdw10 & 0xFF, command->nsid);
1504*5c4a5fe1SAndy Fiddaman 
1505*5c4a5fe1SAndy Fiddaman 	status = 0;
1506*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
1507*5c4a5fe1SAndy Fiddaman 
1508*5c4a5fe1SAndy Fiddaman 	switch (command->cdw10 & 0xFF) {
1509*5c4a5fe1SAndy Fiddaman 	case 0x00: /* return Identify Namespace data structure */
1510*5c4a5fe1SAndy Fiddaman 		/* Global NS only valid with NS Management */
1511*5c4a5fe1SAndy Fiddaman 		if (command->nsid == NVME_GLOBAL_NAMESPACE_TAG) {
1512*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status,
1513*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1514*5c4a5fe1SAndy Fiddaman 			break;
1515*5c4a5fe1SAndy Fiddaman 		}
1516*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1517*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata),
1518*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1519*5c4a5fe1SAndy Fiddaman 		break;
1520*5c4a5fe1SAndy Fiddaman 	case 0x01: /* return Identify Controller data structure */
1521*5c4a5fe1SAndy Fiddaman 		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1522*5c4a5fe1SAndy Fiddaman 		    command->prp2, (uint8_t *)&sc->ctrldata,
1523*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->ctrldata),
1524*5c4a5fe1SAndy Fiddaman 		    NVME_COPY_TO_PRP);
1525*5c4a5fe1SAndy Fiddaman 		break;
1526*5c4a5fe1SAndy Fiddaman 	case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */
1527*5c4a5fe1SAndy Fiddaman 		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1528*5c4a5fe1SAndy Fiddaman 		                  sizeof(uint32_t) * 1024);
1529*5c4a5fe1SAndy Fiddaman 		/* All unused entries shall be zero */
1530*5c4a5fe1SAndy Fiddaman 		memset(dest, 0, sizeof(uint32_t) * 1024);
1531*5c4a5fe1SAndy Fiddaman 		((uint32_t *)dest)[0] = 1;
1532*5c4a5fe1SAndy Fiddaman 		break;
1533*5c4a5fe1SAndy Fiddaman 	case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */
1534*5c4a5fe1SAndy Fiddaman 		if (command->nsid != 1) {
1535*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status,
1536*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1537*5c4a5fe1SAndy Fiddaman 			break;
1538*5c4a5fe1SAndy Fiddaman 		}
1539*5c4a5fe1SAndy Fiddaman 		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1540*5c4a5fe1SAndy Fiddaman 		                  sizeof(uint32_t) * 1024);
1541*5c4a5fe1SAndy Fiddaman 		/* All bytes after the descriptor shall be zero */
1542*5c4a5fe1SAndy Fiddaman 		memset(dest, 0, sizeof(uint32_t) * 1024);
1543*5c4a5fe1SAndy Fiddaman 
1544*5c4a5fe1SAndy Fiddaman 		/* Return NIDT=1 (i.e. EUI64) descriptor */
1545*5c4a5fe1SAndy Fiddaman 		((uint8_t *)dest)[0] = 1;
1546*5c4a5fe1SAndy Fiddaman 		((uint8_t *)dest)[1] = sizeof(uint64_t);
1547*5c4a5fe1SAndy Fiddaman 		memcpy(((uint8_t *)dest) + 4, sc->nsdata.eui64, sizeof(uint64_t));
1548*5c4a5fe1SAndy Fiddaman 		break;
1549*5c4a5fe1SAndy Fiddaman 	case 0x13:
1550*5c4a5fe1SAndy Fiddaman 		/*
1551*5c4a5fe1SAndy Fiddaman 		 * Controller list is optional but used by UNH tests. Return
1552*5c4a5fe1SAndy Fiddaman 		 * a valid but empty list.
1553*5c4a5fe1SAndy Fiddaman 		 */
1554*5c4a5fe1SAndy Fiddaman 		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1555*5c4a5fe1SAndy Fiddaman 		                  sizeof(uint16_t) * 2048);
1556*5c4a5fe1SAndy Fiddaman 		memset(dest, 0, sizeof(uint16_t) * 2048);
1557*5c4a5fe1SAndy Fiddaman 		break;
1558*5c4a5fe1SAndy Fiddaman 	default:
1559*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s unsupported identify command requested 0x%x",
1560*5c4a5fe1SAndy Fiddaman 		         __func__, command->cdw10 & 0xFF);
1561*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status, NVME_SC_INVALID_FIELD);
1562*5c4a5fe1SAndy Fiddaman 		break;
1563*5c4a5fe1SAndy Fiddaman 	}
1564*5c4a5fe1SAndy Fiddaman 
1565*5c4a5fe1SAndy Fiddaman 	compl->status = status;
1566*5c4a5fe1SAndy Fiddaman 	return (1);
1567*5c4a5fe1SAndy Fiddaman }
1568*5c4a5fe1SAndy Fiddaman 
1569*5c4a5fe1SAndy Fiddaman static const char *
1570*5c4a5fe1SAndy Fiddaman nvme_fid_to_name(uint8_t fid)
1571*5c4a5fe1SAndy Fiddaman {
1572*5c4a5fe1SAndy Fiddaman 	const char *name;
1573*5c4a5fe1SAndy Fiddaman 
1574*5c4a5fe1SAndy Fiddaman 	switch (fid) {
1575*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_ARBITRATION:
1576*5c4a5fe1SAndy Fiddaman 		name = "Arbitration";
1577*5c4a5fe1SAndy Fiddaman 		break;
1578*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_POWER_MANAGEMENT:
1579*5c4a5fe1SAndy Fiddaman 		name = "Power Management";
1580*5c4a5fe1SAndy Fiddaman 		break;
1581*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_LBA_RANGE_TYPE:
1582*5c4a5fe1SAndy Fiddaman 		name = "LBA Range Type";
1583*5c4a5fe1SAndy Fiddaman 		break;
1584*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_TEMPERATURE_THRESHOLD:
1585*5c4a5fe1SAndy Fiddaman 		name = "Temperature Threshold";
1586*5c4a5fe1SAndy Fiddaman 		break;
1587*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_ERROR_RECOVERY:
1588*5c4a5fe1SAndy Fiddaman 		name = "Error Recovery";
1589*5c4a5fe1SAndy Fiddaman 		break;
1590*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_VOLATILE_WRITE_CACHE:
1591*5c4a5fe1SAndy Fiddaman 		name = "Volatile Write Cache";
1592*5c4a5fe1SAndy Fiddaman 		break;
1593*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_NUMBER_OF_QUEUES:
1594*5c4a5fe1SAndy Fiddaman 		name = "Number of Queues";
1595*5c4a5fe1SAndy Fiddaman 		break;
1596*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_INTERRUPT_COALESCING:
1597*5c4a5fe1SAndy Fiddaman 		name = "Interrupt Coalescing";
1598*5c4a5fe1SAndy Fiddaman 		break;
1599*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
1600*5c4a5fe1SAndy Fiddaman 		name = "Interrupt Vector Configuration";
1601*5c4a5fe1SAndy Fiddaman 		break;
1602*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_WRITE_ATOMICITY:
1603*5c4a5fe1SAndy Fiddaman 		name = "Write Atomicity Normal";
1604*5c4a5fe1SAndy Fiddaman 		break;
1605*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
1606*5c4a5fe1SAndy Fiddaman 		name = "Asynchronous Event Configuration";
1607*5c4a5fe1SAndy Fiddaman 		break;
1608*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
1609*5c4a5fe1SAndy Fiddaman 		name = "Autonomous Power State Transition";
1610*5c4a5fe1SAndy Fiddaman 		break;
1611*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_HOST_MEMORY_BUFFER:
1612*5c4a5fe1SAndy Fiddaman 		name = "Host Memory Buffer";
1613*5c4a5fe1SAndy Fiddaman 		break;
1614*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_TIMESTAMP:
1615*5c4a5fe1SAndy Fiddaman 		name = "Timestamp";
1616*5c4a5fe1SAndy Fiddaman 		break;
1617*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_KEEP_ALIVE_TIMER:
1618*5c4a5fe1SAndy Fiddaman 		name = "Keep Alive Timer";
1619*5c4a5fe1SAndy Fiddaman 		break;
1620*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT:
1621*5c4a5fe1SAndy Fiddaman 		name = "Host Controlled Thermal Management";
1622*5c4a5fe1SAndy Fiddaman 		break;
1623*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_NON_OP_POWER_STATE_CONFIG:
1624*5c4a5fe1SAndy Fiddaman 		name = "Non-Operational Power State Config";
1625*5c4a5fe1SAndy Fiddaman 		break;
1626*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG:
1627*5c4a5fe1SAndy Fiddaman 		name = "Read Recovery Level Config";
1628*5c4a5fe1SAndy Fiddaman 		break;
1629*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
1630*5c4a5fe1SAndy Fiddaman 		name = "Predictable Latency Mode Config";
1631*5c4a5fe1SAndy Fiddaman 		break;
1632*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW:
1633*5c4a5fe1SAndy Fiddaman 		name = "Predictable Latency Mode Window";
1634*5c4a5fe1SAndy Fiddaman 		break;
1635*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES:
1636*5c4a5fe1SAndy Fiddaman 		name = "LBA Status Information Report Interval";
1637*5c4a5fe1SAndy Fiddaman 		break;
1638*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
1639*5c4a5fe1SAndy Fiddaman 		name = "Host Behavior Support";
1640*5c4a5fe1SAndy Fiddaman 		break;
1641*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_SANITIZE_CONFIG:
1642*5c4a5fe1SAndy Fiddaman 		name = "Sanitize Config";
1643*5c4a5fe1SAndy Fiddaman 		break;
1644*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION:
1645*5c4a5fe1SAndy Fiddaman 		name = "Endurance Group Event Configuration";
1646*5c4a5fe1SAndy Fiddaman 		break;
1647*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
1648*5c4a5fe1SAndy Fiddaman 		name = "Software Progress Marker";
1649*5c4a5fe1SAndy Fiddaman 		break;
1650*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_HOST_IDENTIFIER:
1651*5c4a5fe1SAndy Fiddaman 		name = "Host Identifier";
1652*5c4a5fe1SAndy Fiddaman 		break;
1653*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_RESERVATION_NOTIFICATION_MASK:
1654*5c4a5fe1SAndy Fiddaman 		name = "Reservation Notification Mask";
1655*5c4a5fe1SAndy Fiddaman 		break;
1656*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_RESERVATION_PERSISTENCE:
1657*5c4a5fe1SAndy Fiddaman 		name = "Reservation Persistence";
1658*5c4a5fe1SAndy Fiddaman 		break;
1659*5c4a5fe1SAndy Fiddaman 	case NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG:
1660*5c4a5fe1SAndy Fiddaman 		name = "Namespace Write Protection Config";
1661*5c4a5fe1SAndy Fiddaman 		break;
1662*5c4a5fe1SAndy Fiddaman 	default:
1663*5c4a5fe1SAndy Fiddaman 		name = "Unknown";
1664*5c4a5fe1SAndy Fiddaman 		break;
1665*5c4a5fe1SAndy Fiddaman 	}
1666*5c4a5fe1SAndy Fiddaman 
1667*5c4a5fe1SAndy Fiddaman 	return (name);
1668*5c4a5fe1SAndy Fiddaman }
1669*5c4a5fe1SAndy Fiddaman 
1670*5c4a5fe1SAndy Fiddaman static void
1671*5c4a5fe1SAndy Fiddaman nvme_feature_invalid_cb(struct pci_nvme_softc *sc __unused,
1672*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *feat __unused,
1673*5c4a5fe1SAndy Fiddaman     struct nvme_command *command __unused,
1674*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1675*5c4a5fe1SAndy Fiddaman {
1676*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1677*5c4a5fe1SAndy Fiddaman }
1678*5c4a5fe1SAndy Fiddaman 
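/*
 * Interrupt Vector Configuration (Set Features). CDW11 carries the
 * interrupt vector IV (bits 15:0) and the Coalescing Disable bit CD
 * (bit 16). The request only succeeds when the vector is actually in
 * use by one of the completion queues, and vector 0 (the admin queue)
 * may only be modified to disable coalescing.
 */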
1679*5c4a5fe1SAndy Fiddaman static void
1680*5c4a5fe1SAndy Fiddaman nvme_feature_iv_config(struct pci_nvme_softc *sc,
1681*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *feat __unused,
1682*5c4a5fe1SAndy Fiddaman     struct nvme_command *command,
1683*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1684*5c4a5fe1SAndy Fiddaman {
1685*5c4a5fe1SAndy Fiddaman 	uint32_t i;
1686*5c4a5fe1SAndy Fiddaman 	uint32_t cdw11 = command->cdw11;
1687*5c4a5fe1SAndy Fiddaman 	uint16_t iv;
1688*5c4a5fe1SAndy Fiddaman 	bool cd;
1689*5c4a5fe1SAndy Fiddaman 
1690*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1691*5c4a5fe1SAndy Fiddaman 
1692*5c4a5fe1SAndy Fiddaman 	iv = cdw11 & 0xffff;
1693*5c4a5fe1SAndy Fiddaman 	cd = cdw11 & (1 << 16);
1694*5c4a5fe1SAndy Fiddaman 
1695*5c4a5fe1SAndy Fiddaman 	if (iv > (sc->max_queues + 1)) {
1696*5c4a5fe1SAndy Fiddaman 		return;
1697*5c4a5fe1SAndy Fiddaman 	}
1698*5c4a5fe1SAndy Fiddaman 
1699*5c4a5fe1SAndy Fiddaman 	/* No Interrupt Coalescing (i.e. not Coalescing Disable) for Admin Q */
1700*5c4a5fe1SAndy Fiddaman 	if ((iv == 0) && !cd)
1701*5c4a5fe1SAndy Fiddaman 		return;
1702*5c4a5fe1SAndy Fiddaman 
1703*5c4a5fe1SAndy Fiddaman 	/* Requested Interrupt Vector must be used by a CQ */
1704*5c4a5fe1SAndy Fiddaman 	for (i = 0; i < sc->num_cqueues + 1; i++) {
1705*5c4a5fe1SAndy Fiddaman 		if (sc->compl_queues[i].intr_vec == iv) {
1706*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1707*5c4a5fe1SAndy Fiddaman 		}
1708*5c4a5fe1SAndy Fiddaman 	}
1709*5c4a5fe1SAndy Fiddaman }
1710*5c4a5fe1SAndy Fiddaman 
1711*5c4a5fe1SAndy Fiddaman #define NVME_ASYNC_EVENT_ENDURANCE_GROUP		(0x4000)
1712*5c4a5fe1SAndy Fiddaman static void
1713*5c4a5fe1SAndy Fiddaman nvme_feature_async_event(struct pci_nvme_softc *sc __unused,
1714*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *feat __unused,
1715*5c4a5fe1SAndy Fiddaman     struct nvme_command *command,
1716*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1717*5c4a5fe1SAndy Fiddaman {
1718*5c4a5fe1SAndy Fiddaman 	if (command->cdw11 & NVME_ASYNC_EVENT_ENDURANCE_GROUP)
1719*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1720*5c4a5fe1SAndy Fiddaman }
1721*5c4a5fe1SAndy Fiddaman 
1722*5c4a5fe1SAndy Fiddaman #define NVME_TEMP_THRESH_OVER	0
1723*5c4a5fe1SAndy Fiddaman #define NVME_TEMP_THRESH_UNDER	1
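/*
 * Temperature Threshold (Set Features). CDW11 carries TMPTH (bits 15:0,
 * in Kelvin), TMPSEL (bits 19:16) and THSEL (bits 21:20). The emulated
 * sensor always reads NVME_TEMPERATURE, so a threshold that the fixed
 * reading already crosses immediately sets the SMART critical-warning
 * bit and, if enabled, posts a SMART asynchronous event.
 */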
1724*5c4a5fe1SAndy Fiddaman static void
1725*5c4a5fe1SAndy Fiddaman nvme_feature_temperature(struct pci_nvme_softc *sc,
1726*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *feat __unused,
1727*5c4a5fe1SAndy Fiddaman     struct nvme_command *command,
1728*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1729*5c4a5fe1SAndy Fiddaman {
1730*5c4a5fe1SAndy Fiddaman 	uint16_t	tmpth;	/* Temperature Threshold */
1731*5c4a5fe1SAndy Fiddaman 	uint8_t		tmpsel; /* Threshold Temperature Select */
1732*5c4a5fe1SAndy Fiddaman 	uint8_t		thsel;  /* Threshold Type Select */
1733*5c4a5fe1SAndy Fiddaman 	bool		set_crit = false;
1734*5c4a5fe1SAndy Fiddaman 	bool		report_crit;
1735*5c4a5fe1SAndy Fiddaman 
1736*5c4a5fe1SAndy Fiddaman 	tmpth  = command->cdw11 & 0xffff;
1737*5c4a5fe1SAndy Fiddaman 	tmpsel = (command->cdw11 >> 16) & 0xf;
1738*5c4a5fe1SAndy Fiddaman 	thsel  = (command->cdw11 >> 20) & 0x3;
1739*5c4a5fe1SAndy Fiddaman 
1740*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s: tmpth=%#x tmpsel=%#x thsel=%#x", __func__, tmpth, tmpsel, thsel);
1741*5c4a5fe1SAndy Fiddaman 
1742*5c4a5fe1SAndy Fiddaman 	/* Check for unsupported values */
1743*5c4a5fe1SAndy Fiddaman 	if (((tmpsel != 0) && (tmpsel != 0xf)) ||
1744*5c4a5fe1SAndy Fiddaman 	    (thsel > NVME_TEMP_THRESH_UNDER)) {
1745*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1746*5c4a5fe1SAndy Fiddaman 		return;
1747*5c4a5fe1SAndy Fiddaman 	}
1748*5c4a5fe1SAndy Fiddaman 
1749*5c4a5fe1SAndy Fiddaman 	if (((thsel == NVME_TEMP_THRESH_OVER)  && (NVME_TEMPERATURE >= tmpth)) ||
1750*5c4a5fe1SAndy Fiddaman 	    ((thsel == NVME_TEMP_THRESH_UNDER) && (NVME_TEMPERATURE <= tmpth)))
1751*5c4a5fe1SAndy Fiddaman 		set_crit = true;
1752*5c4a5fe1SAndy Fiddaman 
1753*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
1754*5c4a5fe1SAndy Fiddaman 	if (set_crit)
1755*5c4a5fe1SAndy Fiddaman 		sc->health_log.critical_warning |=
1756*5c4a5fe1SAndy Fiddaman 		    NVME_CRIT_WARN_ST_TEMPERATURE;
1757*5c4a5fe1SAndy Fiddaman 	else
1758*5c4a5fe1SAndy Fiddaman 		sc->health_log.critical_warning &=
1759*5c4a5fe1SAndy Fiddaman 		    ~NVME_CRIT_WARN_ST_TEMPERATURE;
1760*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
1761*5c4a5fe1SAndy Fiddaman 
1762*5c4a5fe1SAndy Fiddaman 	report_crit = sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11 &
1763*5c4a5fe1SAndy Fiddaman 	    NVME_CRIT_WARN_ST_TEMPERATURE;
1764*5c4a5fe1SAndy Fiddaman 
1765*5c4a5fe1SAndy Fiddaman 	if (set_crit && report_crit)
1766*5c4a5fe1SAndy Fiddaman 		pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_SMART,
1767*5c4a5fe1SAndy Fiddaman 		    sc->health_log.critical_warning);
1768*5c4a5fe1SAndy Fiddaman 
1769*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s: set_crit=%c critical_warning=%#x status=%#x", __func__, set_crit ? 'T':'F', sc->health_log.critical_warning, compl->status);
1770*5c4a5fe1SAndy Fiddaman }
1771*5c4a5fe1SAndy Fiddaman 
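/*
 * Number of Queues (Set Features). CDW11 carries the zero-based number of
 * submission queues requested (NSQR, bits 15:0) and completion queues
 * requested (NCQR, bits 31:16). The request is clamped to max_queues and
 * the granted counts are returned in CDW0 of the completion; the value
 * may only be set once per controller enable.
 */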
1772*5c4a5fe1SAndy Fiddaman static void
1773*5c4a5fe1SAndy Fiddaman nvme_feature_num_queues(struct pci_nvme_softc *sc,
1774*5c4a5fe1SAndy Fiddaman     struct nvme_feature_obj *feat __unused,
1775*5c4a5fe1SAndy Fiddaman     struct nvme_command *command,
1776*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1777*5c4a5fe1SAndy Fiddaman {
1778*5c4a5fe1SAndy Fiddaman 	uint16_t nqr;	/* Number of Queues Requested */
1779*5c4a5fe1SAndy Fiddaman 
1780*5c4a5fe1SAndy Fiddaman 	if (sc->num_q_is_set) {
1781*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: Number of Queues already set", __func__);
1782*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status,
1783*5c4a5fe1SAndy Fiddaman 		    NVME_SC_COMMAND_SEQUENCE_ERROR);
1784*5c4a5fe1SAndy Fiddaman 		return;
1785*5c4a5fe1SAndy Fiddaman 	}
1786*5c4a5fe1SAndy Fiddaman 
1787*5c4a5fe1SAndy Fiddaman 	nqr = command->cdw11 & 0xFFFF;
1788*5c4a5fe1SAndy Fiddaman 	if (nqr == 0xffff) {
1789*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: Illegal NSQR value %#x", __func__, nqr);
1790*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1791*5c4a5fe1SAndy Fiddaman 		return;
1792*5c4a5fe1SAndy Fiddaman 	}
1793*5c4a5fe1SAndy Fiddaman 
1794*5c4a5fe1SAndy Fiddaman 	sc->num_squeues = ONE_BASED(nqr);
1795*5c4a5fe1SAndy Fiddaman 	if (sc->num_squeues > sc->max_queues) {
1796*5c4a5fe1SAndy Fiddaman 		DPRINTF("NSQR=%u is greater than max %u", sc->num_squeues,
1797*5c4a5fe1SAndy Fiddaman 					sc->max_queues);
1798*5c4a5fe1SAndy Fiddaman 		sc->num_squeues = sc->max_queues;
1799*5c4a5fe1SAndy Fiddaman 	}
1800*5c4a5fe1SAndy Fiddaman 
1801*5c4a5fe1SAndy Fiddaman 	nqr = (command->cdw11 >> 16) & 0xFFFF;
1802*5c4a5fe1SAndy Fiddaman 	if (nqr == 0xffff) {
1803*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s: Illegal NCQR value %#x", __func__, nqr);
1804*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1805*5c4a5fe1SAndy Fiddaman 		return;
1806*5c4a5fe1SAndy Fiddaman 	}
1807*5c4a5fe1SAndy Fiddaman 
1808*5c4a5fe1SAndy Fiddaman 	sc->num_cqueues = ONE_BASED(nqr);
1809*5c4a5fe1SAndy Fiddaman 	if (sc->num_cqueues > sc->max_queues) {
1810*5c4a5fe1SAndy Fiddaman 		DPRINTF("NCQR=%u is greater than max %u", sc->num_cqueues,
1811*5c4a5fe1SAndy Fiddaman 					sc->max_queues);
1812*5c4a5fe1SAndy Fiddaman 		sc->num_cqueues = sc->max_queues;
1813*5c4a5fe1SAndy Fiddaman 	}
1814*5c4a5fe1SAndy Fiddaman 
1815*5c4a5fe1SAndy Fiddaman 	/* Patch the command value which will be saved on callback's return */
1816*5c4a5fe1SAndy Fiddaman 	command->cdw11 = NVME_FEATURE_NUM_QUEUES(sc);
1817*5c4a5fe1SAndy Fiddaman 	compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);
1818*5c4a5fe1SAndy Fiddaman 
1819*5c4a5fe1SAndy Fiddaman 	sc->num_q_is_set = true;
1820*5c4a5fe1SAndy Fiddaman }
1821*5c4a5fe1SAndy Fiddaman 
1822*5c4a5fe1SAndy Fiddaman static int
1823*5c4a5fe1SAndy Fiddaman nvme_opc_set_features(struct pci_nvme_softc *sc, struct nvme_command *command,
1824*5c4a5fe1SAndy Fiddaman 	struct nvme_completion *compl)
1825*5c4a5fe1SAndy Fiddaman {
1826*5c4a5fe1SAndy Fiddaman 	struct nvme_feature_obj *feat;
1827*5c4a5fe1SAndy Fiddaman 	uint32_t nsid = command->nsid;
1828*5c4a5fe1SAndy Fiddaman 	uint8_t fid = NVMEV(NVME_FEAT_SET_FID, command->cdw10);
1829*5c4a5fe1SAndy Fiddaman 	bool sv = NVMEV(NVME_FEAT_SET_SV, command->cdw10);
1830*5c4a5fe1SAndy Fiddaman 
1831*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));
1832*5c4a5fe1SAndy Fiddaman 
1833*5c4a5fe1SAndy Fiddaman 	if (fid >= NVME_FID_MAX) {
1834*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s invalid feature 0x%x", __func__, fid);
1835*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1836*5c4a5fe1SAndy Fiddaman 		return (1);
1837*5c4a5fe1SAndy Fiddaman 	}
1838*5c4a5fe1SAndy Fiddaman 
1839*5c4a5fe1SAndy Fiddaman 	if (sv) {
1840*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1841*5c4a5fe1SAndy Fiddaman 		    NVME_SC_FEATURE_NOT_SAVEABLE);
1842*5c4a5fe1SAndy Fiddaman 		return (1);
1843*5c4a5fe1SAndy Fiddaman 	}
1844*5c4a5fe1SAndy Fiddaman 
1845*5c4a5fe1SAndy Fiddaman 	feat = &sc->feat[fid];
1846*5c4a5fe1SAndy Fiddaman 
1847*5c4a5fe1SAndy Fiddaman 	if (feat->namespace_specific && (nsid == NVME_GLOBAL_NAMESPACE_TAG)) {
1848*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1849*5c4a5fe1SAndy Fiddaman 		return (1);
1850*5c4a5fe1SAndy Fiddaman 	}
1851*5c4a5fe1SAndy Fiddaman 
1852*5c4a5fe1SAndy Fiddaman 	if (!feat->namespace_specific &&
1853*5c4a5fe1SAndy Fiddaman 	    !((nsid == 0) || (nsid == NVME_GLOBAL_NAMESPACE_TAG))) {
1854*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1855*5c4a5fe1SAndy Fiddaman 		    NVME_SC_FEATURE_NOT_NS_SPECIFIC);
1856*5c4a5fe1SAndy Fiddaman 		return (1);
1857*5c4a5fe1SAndy Fiddaman 	}
1858*5c4a5fe1SAndy Fiddaman 
1859*5c4a5fe1SAndy Fiddaman 	compl->cdw0 = 0;
1860*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1861*5c4a5fe1SAndy Fiddaman 
1862*5c4a5fe1SAndy Fiddaman 	if (feat->set)
1863*5c4a5fe1SAndy Fiddaman 		feat->set(sc, feat, command, compl);
1864*5c4a5fe1SAndy Fiddaman 	else {
1865*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1866*5c4a5fe1SAndy Fiddaman 		    NVME_SC_FEATURE_NOT_CHANGEABLE);
1867*5c4a5fe1SAndy Fiddaman 		return (1);
1868*5c4a5fe1SAndy Fiddaman 	}
1869*5c4a5fe1SAndy Fiddaman 
1870*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s: status=%#x cdw11=%#x", __func__, compl->status, command->cdw11);
1871*5c4a5fe1SAndy Fiddaman 	if (compl->status == NVME_SC_SUCCESS) {
1872*5c4a5fe1SAndy Fiddaman 		feat->cdw11 = command->cdw11;
1873*5c4a5fe1SAndy Fiddaman 		if ((fid == NVME_FEAT_ASYNC_EVENT_CONFIGURATION) &&
1874*5c4a5fe1SAndy Fiddaman 		    (command->cdw11 != 0))
1875*5c4a5fe1SAndy Fiddaman 			pci_nvme_aen_notify(sc);
1876*5c4a5fe1SAndy Fiddaman 	}
1877*5c4a5fe1SAndy Fiddaman 
1878*5c4a5fe1SAndy Fiddaman 	return (0);
1879*5c4a5fe1SAndy Fiddaman }
1880*5c4a5fe1SAndy Fiddaman 
1881*5c4a5fe1SAndy Fiddaman #define NVME_FEATURES_SEL_SUPPORTED	0x3
1882*5c4a5fe1SAndy Fiddaman #define NVME_FEATURES_NS_SPECIFIC	(1 << 1)
1883*5c4a5fe1SAndy Fiddaman 
1884*5c4a5fe1SAndy Fiddaman static int
1885*5c4a5fe1SAndy Fiddaman nvme_opc_get_features(struct pci_nvme_softc* sc, struct nvme_command* command,
1886*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1887*5c4a5fe1SAndy Fiddaman {
1888*5c4a5fe1SAndy Fiddaman 	struct nvme_feature_obj *feat;
1889*5c4a5fe1SAndy Fiddaman 	uint8_t fid = command->cdw10 & 0xFF;
1890*5c4a5fe1SAndy Fiddaman 	uint8_t sel = (command->cdw10 >> 8) & 0x7;
1891*5c4a5fe1SAndy Fiddaman 
1892*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));
1893*5c4a5fe1SAndy Fiddaman 
1894*5c4a5fe1SAndy Fiddaman 	if (fid >= NVME_FID_MAX) {
1895*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s invalid feature 0x%x", __func__, fid);
1896*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1897*5c4a5fe1SAndy Fiddaman 		return (1);
1898*5c4a5fe1SAndy Fiddaman 	}
1899*5c4a5fe1SAndy Fiddaman 
1900*5c4a5fe1SAndy Fiddaman 	compl->cdw0 = 0;
1901*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1902*5c4a5fe1SAndy Fiddaman 
1903*5c4a5fe1SAndy Fiddaman 	feat = &sc->feat[fid];
1904*5c4a5fe1SAndy Fiddaman 	if (feat->get) {
1905*5c4a5fe1SAndy Fiddaman 		feat->get(sc, feat, command, compl);
1906*5c4a5fe1SAndy Fiddaman 	}
1907*5c4a5fe1SAndy Fiddaman 
1908*5c4a5fe1SAndy Fiddaman 	if (compl->status == NVME_SC_SUCCESS) {
1909*5c4a5fe1SAndy Fiddaman 		if ((sel == NVME_FEATURES_SEL_SUPPORTED) && feat->namespace_specific)
1910*5c4a5fe1SAndy Fiddaman 			compl->cdw0 = NVME_FEATURES_NS_SPECIFIC;
1911*5c4a5fe1SAndy Fiddaman 		else
1912*5c4a5fe1SAndy Fiddaman 			compl->cdw0 = feat->cdw11;
1913*5c4a5fe1SAndy Fiddaman 	}
1914*5c4a5fe1SAndy Fiddaman 
1915*5c4a5fe1SAndy Fiddaman 	return (0);
1916*5c4a5fe1SAndy Fiddaman }
1917*5c4a5fe1SAndy Fiddaman 
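/*
 * Format NVM. Only Secure Erase Settings 0 and 1, LBA Format 0 and no
 * Protection Information are accepted. RAM-backed namespaces are simply
 * reallocated (zero-filled); block-device backed namespaces issue a
 * whole-device blockif_delete() and complete asynchronously through
 * pci_nvme_io_done().
 */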
1918*5c4a5fe1SAndy Fiddaman static int
1919*5c4a5fe1SAndy Fiddaman nvme_opc_format_nvm(struct pci_nvme_softc* sc, struct nvme_command* command,
1920*5c4a5fe1SAndy Fiddaman 	struct nvme_completion* compl)
1921*5c4a5fe1SAndy Fiddaman {
1922*5c4a5fe1SAndy Fiddaman 	uint8_t	ses, lbaf, pi;
1923*5c4a5fe1SAndy Fiddaman 
1924*5c4a5fe1SAndy Fiddaman 	/* Only supports Secure Erase Setting - User Data Erase */
1925*5c4a5fe1SAndy Fiddaman 	ses = (command->cdw10 >> 9) & 0x7;
1926*5c4a5fe1SAndy Fiddaman 	if (ses > 0x1) {
1927*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1928*5c4a5fe1SAndy Fiddaman 		return (1);
1929*5c4a5fe1SAndy Fiddaman 	}
1930*5c4a5fe1SAndy Fiddaman 
1931*5c4a5fe1SAndy Fiddaman 	/* Only supports a single LBA Format */
1932*5c4a5fe1SAndy Fiddaman 	lbaf = command->cdw10 & 0xf;
1933*5c4a5fe1SAndy Fiddaman 	if (lbaf != 0) {
1934*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1935*5c4a5fe1SAndy Fiddaman 		    NVME_SC_INVALID_FORMAT);
1936*5c4a5fe1SAndy Fiddaman 		return (1);
1937*5c4a5fe1SAndy Fiddaman 	}
1938*5c4a5fe1SAndy Fiddaman 
1939*5c4a5fe1SAndy Fiddaman 	/* Doesn't support Protection Information */
1940*5c4a5fe1SAndy Fiddaman 	pi = (command->cdw10 >> 5) & 0x7;
1941*5c4a5fe1SAndy Fiddaman 	if (pi != 0) {
1942*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1943*5c4a5fe1SAndy Fiddaman 		return (1);
1944*5c4a5fe1SAndy Fiddaman 	}
1945*5c4a5fe1SAndy Fiddaman 
1946*5c4a5fe1SAndy Fiddaman 	if (sc->nvstore.type == NVME_STOR_RAM) {
1947*5c4a5fe1SAndy Fiddaman 		if (sc->nvstore.ctx)
1948*5c4a5fe1SAndy Fiddaman 			free(sc->nvstore.ctx);
1949*5c4a5fe1SAndy Fiddaman 		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
1950*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1951*5c4a5fe1SAndy Fiddaman 	} else {
1952*5c4a5fe1SAndy Fiddaman 		struct pci_nvme_ioreq *req;
1953*5c4a5fe1SAndy Fiddaman 		int err;
1954*5c4a5fe1SAndy Fiddaman 
1955*5c4a5fe1SAndy Fiddaman 		req = pci_nvme_get_ioreq(sc);
1956*5c4a5fe1SAndy Fiddaman 		if (req == NULL) {
1957*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1958*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INTERNAL_DEVICE_ERROR);
1959*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s: unable to allocate IO req", __func__);
1960*5c4a5fe1SAndy Fiddaman 			return (1);
1961*5c4a5fe1SAndy Fiddaman 		}
1962*5c4a5fe1SAndy Fiddaman 		req->nvme_sq = &sc->submit_queues[0];
1963*5c4a5fe1SAndy Fiddaman 		req->sqid = 0;
1964*5c4a5fe1SAndy Fiddaman 		req->opc = command->opc;
1965*5c4a5fe1SAndy Fiddaman 		req->cid = command->cid;
1966*5c4a5fe1SAndy Fiddaman 		req->nsid = command->nsid;
1967*5c4a5fe1SAndy Fiddaman 
1968*5c4a5fe1SAndy Fiddaman 		req->io_req.br_offset = 0;
1969*5c4a5fe1SAndy Fiddaman 		req->io_req.br_resid = sc->nvstore.size;
1970*5c4a5fe1SAndy Fiddaman 		req->io_req.br_callback = pci_nvme_io_done;
1971*5c4a5fe1SAndy Fiddaman 
1972*5c4a5fe1SAndy Fiddaman 		err = blockif_delete(sc->nvstore.ctx, &req->io_req);
1973*5c4a5fe1SAndy Fiddaman 		if (err) {
1974*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl->status,
1975*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INTERNAL_DEVICE_ERROR);
1976*5c4a5fe1SAndy Fiddaman 			pci_nvme_release_ioreq(sc, req);
1977*5c4a5fe1SAndy Fiddaman 		} else
1978*5c4a5fe1SAndy Fiddaman 			compl->status = NVME_NO_STATUS;
1979*5c4a5fe1SAndy Fiddaman 	}
1980*5c4a5fe1SAndy Fiddaman 
1981*5c4a5fe1SAndy Fiddaman 	return (1);
1982*5c4a5fe1SAndy Fiddaman }
1983*5c4a5fe1SAndy Fiddaman 
1984*5c4a5fe1SAndy Fiddaman static int
1985*5c4a5fe1SAndy Fiddaman nvme_opc_abort(struct pci_nvme_softc *sc __unused, struct nvme_command *command,
1986*5c4a5fe1SAndy Fiddaman     struct nvme_completion *compl)
1987*5c4a5fe1SAndy Fiddaman {
1988*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s submission queue %u, command ID 0x%x", __func__,
1989*5c4a5fe1SAndy Fiddaman 	        command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF);
1990*5c4a5fe1SAndy Fiddaman 
1991*5c4a5fe1SAndy Fiddaman 	/* TODO: search for the command ID and abort it */
1992*5c4a5fe1SAndy Fiddaman 
1993*5c4a5fe1SAndy Fiddaman 	compl->cdw0 = 1;
1994*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1995*5c4a5fe1SAndy Fiddaman 	return (1);
1996*5c4a5fe1SAndy Fiddaman }
1997*5c4a5fe1SAndy Fiddaman 
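/*
 * Asynchronous Event Request. The command is queued (up to the advertised
 * AERL limit) rather than completed immediately: NVME_NO_STATUS suppresses
 * the completion here, and the AEN thread later completes a queued AER
 * when a matching event is posted.
 */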
1998*5c4a5fe1SAndy Fiddaman static int
1999*5c4a5fe1SAndy Fiddaman nvme_opc_async_event_req(struct pci_nvme_softc* sc,
2000*5c4a5fe1SAndy Fiddaman 	struct nvme_command* command, struct nvme_completion* compl)
2001*5c4a5fe1SAndy Fiddaman {
2002*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s async event request count=%u aerl=%u cid=%#x", __func__,
2003*5c4a5fe1SAndy Fiddaman 	    sc->aer_count, sc->ctrldata.aerl, command->cid);
2004*5c4a5fe1SAndy Fiddaman 
2005*5c4a5fe1SAndy Fiddaman 	/* Don't exceed the Async Event Request Limit (AERL). */
2006*5c4a5fe1SAndy Fiddaman 	if (pci_nvme_aer_limit_reached(sc)) {
2007*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
2008*5c4a5fe1SAndy Fiddaman 				NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2009*5c4a5fe1SAndy Fiddaman 		return (1);
2010*5c4a5fe1SAndy Fiddaman 	}
2011*5c4a5fe1SAndy Fiddaman 
2012*5c4a5fe1SAndy Fiddaman 	if (pci_nvme_aer_add(sc, command->cid)) {
2013*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_tc(&compl->status, NVME_SCT_GENERIC,
2014*5c4a5fe1SAndy Fiddaman 				NVME_SC_INTERNAL_DEVICE_ERROR);
2015*5c4a5fe1SAndy Fiddaman 		return (1);
2016*5c4a5fe1SAndy Fiddaman 	}
2017*5c4a5fe1SAndy Fiddaman 
2018*5c4a5fe1SAndy Fiddaman 	/*
2019*5c4a5fe1SAndy Fiddaman 	 * Events are raised asynchronously, as enabled via the Asynchronous
2020*5c4a5fe1SAndy Fiddaman 	 * Event Configuration feature (Set Features). Defer completion of
2021*5c4a5fe1SAndy Fiddaman 	 * this command until an event matching that configuration occurs.
2022*5c4a5fe1SAndy Fiddaman 	 */
2023*5c4a5fe1SAndy Fiddaman 	compl->status = NVME_NO_STATUS;
2024*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_notify(sc);
2025*5c4a5fe1SAndy Fiddaman 
2026*5c4a5fe1SAndy Fiddaman 	return (0);
2027*5c4a5fe1SAndy Fiddaman }
2028*5c4a5fe1SAndy Fiddaman 
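/*
 * Admin Submission Queue doorbell handler: 'value' is the tail doorbell
 * written by the guest. Consume commands from the queue head up to that
 * tail, dispatch each by opcode and post completions for those that
 * finish synchronously (handlers defer with NVME_NO_STATUS otherwise).
 */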
2029*5c4a5fe1SAndy Fiddaman static void
2030*5c4a5fe1SAndy Fiddaman pci_nvme_handle_admin_cmd(struct pci_nvme_softc* sc, uint64_t value)
2031*5c4a5fe1SAndy Fiddaman {
2032*5c4a5fe1SAndy Fiddaman 	struct nvme_completion compl;
2033*5c4a5fe1SAndy Fiddaman 	struct nvme_command *cmd;
2034*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *sq;
2035*5c4a5fe1SAndy Fiddaman 	struct nvme_completion_queue *cq;
2036*5c4a5fe1SAndy Fiddaman 	uint16_t sqhead;
2037*5c4a5fe1SAndy Fiddaman 
2038*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s index %u", __func__, (uint32_t)value);
2039*5c4a5fe1SAndy Fiddaman 
2040*5c4a5fe1SAndy Fiddaman 	sq = &sc->submit_queues[0];
2041*5c4a5fe1SAndy Fiddaman 	cq = &sc->compl_queues[0];
2042*5c4a5fe1SAndy Fiddaman 
2043*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sq->mtx);
2044*5c4a5fe1SAndy Fiddaman 
2045*5c4a5fe1SAndy Fiddaman 	sqhead = sq->head;
2046*5c4a5fe1SAndy Fiddaman 	DPRINTF("sqhead %u, tail %u", sqhead, sq->tail);
2047*5c4a5fe1SAndy Fiddaman 
2048*5c4a5fe1SAndy Fiddaman 	while (sqhead != atomic_load_acq_short(&sq->tail)) {
2049*5c4a5fe1SAndy Fiddaman 		cmd = &(sq->qbase)[sqhead];
2050*5c4a5fe1SAndy Fiddaman 		compl.cdw0 = 0;
2051*5c4a5fe1SAndy Fiddaman 		compl.status = 0;
2052*5c4a5fe1SAndy Fiddaman 
2053*5c4a5fe1SAndy Fiddaman 		switch (cmd->opc) {
2054*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_DELETE_IO_SQ:
2055*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command DELETE_IO_SQ", __func__);
2056*5c4a5fe1SAndy Fiddaman 			nvme_opc_delete_io_sq(sc, cmd, &compl);
2057*5c4a5fe1SAndy Fiddaman 			break;
2058*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_CREATE_IO_SQ:
2059*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command CREATE_IO_SQ", __func__);
2060*5c4a5fe1SAndy Fiddaman 			nvme_opc_create_io_sq(sc, cmd, &compl);
2061*5c4a5fe1SAndy Fiddaman 			break;
2062*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_DELETE_IO_CQ:
2063*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command DELETE_IO_CQ", __func__);
2064*5c4a5fe1SAndy Fiddaman 			nvme_opc_delete_io_cq(sc, cmd, &compl);
2065*5c4a5fe1SAndy Fiddaman 			break;
2066*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_CREATE_IO_CQ:
2067*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command CREATE_IO_CQ", __func__);
2068*5c4a5fe1SAndy Fiddaman 			nvme_opc_create_io_cq(sc, cmd, &compl);
2069*5c4a5fe1SAndy Fiddaman 			break;
2070*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_GET_LOG_PAGE:
2071*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command GET_LOG_PAGE", __func__);
2072*5c4a5fe1SAndy Fiddaman 			nvme_opc_get_log_page(sc, cmd, &compl);
2073*5c4a5fe1SAndy Fiddaman 			break;
2074*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_IDENTIFY:
2075*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command IDENTIFY", __func__);
2076*5c4a5fe1SAndy Fiddaman 			nvme_opc_identify(sc, cmd, &compl);
2077*5c4a5fe1SAndy Fiddaman 			break;
2078*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_ABORT:
2079*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command ABORT", __func__);
2080*5c4a5fe1SAndy Fiddaman 			nvme_opc_abort(sc, cmd, &compl);
2081*5c4a5fe1SAndy Fiddaman 			break;
2082*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_SET_FEATURES:
2083*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command SET_FEATURES", __func__);
2084*5c4a5fe1SAndy Fiddaman 			nvme_opc_set_features(sc, cmd, &compl);
2085*5c4a5fe1SAndy Fiddaman 			break;
2086*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_GET_FEATURES:
2087*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command GET_FEATURES", __func__);
2088*5c4a5fe1SAndy Fiddaman 			nvme_opc_get_features(sc, cmd, &compl);
2089*5c4a5fe1SAndy Fiddaman 			break;
2090*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_FIRMWARE_ACTIVATE:
2091*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command FIRMWARE_ACTIVATE", __func__);
2092*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_tc(&compl.status,
2093*5c4a5fe1SAndy Fiddaman 			    NVME_SCT_COMMAND_SPECIFIC,
2094*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_FIRMWARE_SLOT);
2095*5c4a5fe1SAndy Fiddaman 			break;
2096*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_ASYNC_EVENT_REQUEST:
2097*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command ASYNC_EVENT_REQ", __func__);
2098*5c4a5fe1SAndy Fiddaman 			nvme_opc_async_event_req(sc, cmd, &compl);
2099*5c4a5fe1SAndy Fiddaman 			break;
2100*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_FORMAT_NVM:
2101*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command FORMAT_NVM", __func__);
2102*5c4a5fe1SAndy Fiddaman 			if (NVMEV(NVME_CTRLR_DATA_OACS_FORMAT,
2103*5c4a5fe1SAndy Fiddaman 			    sc->ctrldata.oacs) == 0) {
2104*5c4a5fe1SAndy Fiddaman 				pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
2105*5c4a5fe1SAndy Fiddaman 				break;
2106*5c4a5fe1SAndy Fiddaman 			}
2107*5c4a5fe1SAndy Fiddaman 			nvme_opc_format_nvm(sc, cmd, &compl);
2108*5c4a5fe1SAndy Fiddaman 			break;
2109*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_SECURITY_SEND:
2110*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_SECURITY_RECEIVE:
2111*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_SANITIZE:
2112*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_GET_LBA_STATUS:
2113*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command OPC=%#x (unsupported)", __func__,
2114*5c4a5fe1SAndy Fiddaman 			    cmd->opc);
2115*5c4a5fe1SAndy Fiddaman 			/* Valid but unsupported opcodes */
2116*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_FIELD);
2117*5c4a5fe1SAndy Fiddaman 			break;
2118*5c4a5fe1SAndy Fiddaman 		default:
2119*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s command OPC=%#X (not implemented)",
2120*5c4a5fe1SAndy Fiddaman 			    __func__,
2121*5c4a5fe1SAndy Fiddaman 			    cmd->opc);
2122*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
2123*5c4a5fe1SAndy Fiddaman 		}
2124*5c4a5fe1SAndy Fiddaman 		sqhead = (sqhead + 1) % sq->size;
2125*5c4a5fe1SAndy Fiddaman 
2126*5c4a5fe1SAndy Fiddaman 		if (NVME_COMPLETION_VALID(compl)) {
2127*5c4a5fe1SAndy Fiddaman 			pci_nvme_cq_update(sc, &sc->compl_queues[0],
2128*5c4a5fe1SAndy Fiddaman 			    compl.cdw0,
2129*5c4a5fe1SAndy Fiddaman 			    cmd->cid,
2130*5c4a5fe1SAndy Fiddaman 			    0,		/* SQID */
2131*5c4a5fe1SAndy Fiddaman 			    compl.status);
2132*5c4a5fe1SAndy Fiddaman 		}
2133*5c4a5fe1SAndy Fiddaman 	}
2134*5c4a5fe1SAndy Fiddaman 
2135*5c4a5fe1SAndy Fiddaman 	DPRINTF("setting sqhead %u", sqhead);
2136*5c4a5fe1SAndy Fiddaman 	sq->head = sqhead;
2137*5c4a5fe1SAndy Fiddaman 
2138*5c4a5fe1SAndy Fiddaman 	if (cq->head != cq->tail)
2139*5c4a5fe1SAndy Fiddaman 		pci_generate_msix(sc->nsc_pi, 0);
2140*5c4a5fe1SAndy Fiddaman 
2141*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sq->mtx);
2142*5c4a5fe1SAndy Fiddaman }
2143*5c4a5fe1SAndy Fiddaman 
2144*5c4a5fe1SAndy Fiddaman /*
2145*5c4a5fe1SAndy Fiddaman  * Update the Write and Read statistics reported in SMART data
2146*5c4a5fe1SAndy Fiddaman  *
2147*5c4a5fe1SAndy Fiddaman  * NVMe defines a "data unit" as thousands of 512 byte blocks, rounded up.
2148*5c4a5fe1SAndy Fiddaman  * E.g. 1 data unit covers 1 - 1,000 512 byte blocks; 3 data units cover
2149*5c4a5fe1SAndy Fiddaman  * 2,001 - 3,000 512 byte blocks. Rounding up is done by initializing the remainder to 999.
2150*5c4a5fe1SAndy Fiddaman  */
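/*
 * For illustration: a successful 8 KiB write adds 16 to
 * write_dunits_remainder; write_data_units is only incremented once the
 * remainder reaches 1000, i.e. roughly every 512,000 bytes written.
 */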
2151*5c4a5fe1SAndy Fiddaman static void
2152*5c4a5fe1SAndy Fiddaman pci_nvme_stats_write_read_update(struct pci_nvme_softc *sc, uint8_t opc,
2153*5c4a5fe1SAndy Fiddaman     size_t bytes, uint16_t status)
2154*5c4a5fe1SAndy Fiddaman {
2155*5c4a5fe1SAndy Fiddaman 
2156*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
2157*5c4a5fe1SAndy Fiddaman 	switch (opc) {
2158*5c4a5fe1SAndy Fiddaman 	case NVME_OPC_WRITE:
2159*5c4a5fe1SAndy Fiddaman 		sc->write_commands++;
2160*5c4a5fe1SAndy Fiddaman 		if (status != NVME_SC_SUCCESS)
2161*5c4a5fe1SAndy Fiddaman 			break;
2162*5c4a5fe1SAndy Fiddaman 		sc->write_dunits_remainder += (bytes / 512);
2163*5c4a5fe1SAndy Fiddaman 		while (sc->write_dunits_remainder >= 1000) {
2164*5c4a5fe1SAndy Fiddaman 			sc->write_data_units++;
2165*5c4a5fe1SAndy Fiddaman 			sc->write_dunits_remainder -= 1000;
2166*5c4a5fe1SAndy Fiddaman 		}
2167*5c4a5fe1SAndy Fiddaman 		break;
2168*5c4a5fe1SAndy Fiddaman 	case NVME_OPC_READ:
2169*5c4a5fe1SAndy Fiddaman 		sc->read_commands++;
2170*5c4a5fe1SAndy Fiddaman 		if (status != NVME_SC_SUCCESS)
2171*5c4a5fe1SAndy Fiddaman 			break;
2172*5c4a5fe1SAndy Fiddaman 		sc->read_dunits_remainder += (bytes / 512);
2173*5c4a5fe1SAndy Fiddaman 		while (sc->read_dunits_remainder >= 1000) {
2174*5c4a5fe1SAndy Fiddaman 			sc->read_data_units++;
2175*5c4a5fe1SAndy Fiddaman 			sc->read_dunits_remainder -= 1000;
2176*5c4a5fe1SAndy Fiddaman 		}
2177*5c4a5fe1SAndy Fiddaman 		break;
2178*5c4a5fe1SAndy Fiddaman 	default:
2179*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s: Invalid OPC 0x%02x for stats", __func__, opc);
2180*5c4a5fe1SAndy Fiddaman 		break;
2181*5c4a5fe1SAndy Fiddaman 	}
2182*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
2183*5c4a5fe1SAndy Fiddaman }
2184*5c4a5fe1SAndy Fiddaman 
2185*5c4a5fe1SAndy Fiddaman /*
2186*5c4a5fe1SAndy Fiddaman  * Check if the combination of Starting LBA (slba) and number of blocks
2187*5c4a5fe1SAndy Fiddaman  * exceeds the range of the underlying storage.
2188*5c4a5fe1SAndy Fiddaman  *
2189*5c4a5fe1SAndy Fiddaman  * Because NVMe specifies the SLBA in blocks as a uint64_t and blockif stores
2190*5c4a5fe1SAndy Fiddaman  * the capacity in bytes as a uint64_t, care must be taken to avoid integer
2191*5c4a5fe1SAndy Fiddaman  * overflow.
2192*5c4a5fe1SAndy Fiddaman  */
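/*
 * For example, with 512-byte sectors (sectsz_bits == 9), an slba with any of
 * its top 9 bits set would wrap when shifted left by 9, so it is rejected
 * before the byte offset is computed.
 */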
2193*5c4a5fe1SAndy Fiddaman static bool
2194*5c4a5fe1SAndy Fiddaman pci_nvme_out_of_range(struct pci_nvme_blockstore *nvstore, uint64_t slba,
2195*5c4a5fe1SAndy Fiddaman     uint32_t nblocks)
2196*5c4a5fe1SAndy Fiddaman {
2197*5c4a5fe1SAndy Fiddaman 	size_t	offset, bytes;
2198*5c4a5fe1SAndy Fiddaman 
2199*5c4a5fe1SAndy Fiddaman 	/* Overflow check of multiplying Starting LBA by the sector size */
2200*5c4a5fe1SAndy Fiddaman 	if (slba >> (64 - nvstore->sectsz_bits))
2201*5c4a5fe1SAndy Fiddaman 		return (true);
2202*5c4a5fe1SAndy Fiddaman 
2203*5c4a5fe1SAndy Fiddaman 	offset = slba << nvstore->sectsz_bits;
2204*5c4a5fe1SAndy Fiddaman 	bytes = nblocks << nvstore->sectsz_bits;
2205*5c4a5fe1SAndy Fiddaman 
2206*5c4a5fe1SAndy Fiddaman 	/* Overflow check of Number of Logical Blocks */
2207*5c4a5fe1SAndy Fiddaman 	if ((nvstore->size <= offset) || ((nvstore->size - offset) < bytes))
2208*5c4a5fe1SAndy Fiddaman 		return (true);
2209*5c4a5fe1SAndy Fiddaman 
2210*5c4a5fe1SAndy Fiddaman 	return (false);
2211*5c4a5fe1SAndy Fiddaman }
2212*5c4a5fe1SAndy Fiddaman 
2213*5c4a5fe1SAndy Fiddaman static int
2214*5c4a5fe1SAndy Fiddaman pci_nvme_append_iov_req(struct pci_nvme_softc *sc __unused,
2215*5c4a5fe1SAndy Fiddaman     struct pci_nvme_ioreq *req, uint64_t gpaddr, size_t size, uint64_t offset)
2216*5c4a5fe1SAndy Fiddaman {
2217*5c4a5fe1SAndy Fiddaman 	int iovidx;
2218*5c4a5fe1SAndy Fiddaman 	bool range_is_contiguous;
2219*5c4a5fe1SAndy Fiddaman 
2220*5c4a5fe1SAndy Fiddaman 	if (req == NULL)
2221*5c4a5fe1SAndy Fiddaman 		return (-1);
2222*5c4a5fe1SAndy Fiddaman 
2223*5c4a5fe1SAndy Fiddaman 	if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) {
2224*5c4a5fe1SAndy Fiddaman 		return (-1);
2225*5c4a5fe1SAndy Fiddaman 	}
2226*5c4a5fe1SAndy Fiddaman 
2227*5c4a5fe1SAndy Fiddaman 	/*
2228*5c4a5fe1SAndy Fiddaman 	 * Minimize the number of IOVs by concatenating contiguous address
2229*5c4a5fe1SAndy Fiddaman 	 * ranges. If the IOV count is zero, there is no previous range to
2230*5c4a5fe1SAndy Fiddaman 	 * concatenate.
2231*5c4a5fe1SAndy Fiddaman 	 */
2232*5c4a5fe1SAndy Fiddaman 	if (req->io_req.br_iovcnt == 0)
2233*5c4a5fe1SAndy Fiddaman 		range_is_contiguous = false;
2234*5c4a5fe1SAndy Fiddaman 	else
2235*5c4a5fe1SAndy Fiddaman 		range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr;
2236*5c4a5fe1SAndy Fiddaman 
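	/*
	 * For example, two 4 KiB PRP entries at guest-physical 0x1000 and
	 * 0x2000 collapse into a single 8 KiB iovec, while a gap between them
	 * starts a new iovec.
	 */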
2237*5c4a5fe1SAndy Fiddaman 	if (range_is_contiguous) {
2238*5c4a5fe1SAndy Fiddaman 		iovidx = req->io_req.br_iovcnt - 1;
2239*5c4a5fe1SAndy Fiddaman 
2240*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iov[iovidx].iov_base =
2241*5c4a5fe1SAndy Fiddaman 		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
2242*5c4a5fe1SAndy Fiddaman 				     req->prev_gpaddr, size);
2243*5c4a5fe1SAndy Fiddaman 		if (req->io_req.br_iov[iovidx].iov_base == NULL)
2244*5c4a5fe1SAndy Fiddaman 			return (-1);
2245*5c4a5fe1SAndy Fiddaman 
2246*5c4a5fe1SAndy Fiddaman 		req->prev_size += size;
2247*5c4a5fe1SAndy Fiddaman 		req->io_req.br_resid += size;
2248*5c4a5fe1SAndy Fiddaman 
2249*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iov[iovidx].iov_len = req->prev_size;
2250*5c4a5fe1SAndy Fiddaman 	} else {
2251*5c4a5fe1SAndy Fiddaman 		iovidx = req->io_req.br_iovcnt;
2252*5c4a5fe1SAndy Fiddaman 		if (iovidx == 0) {
2253*5c4a5fe1SAndy Fiddaman 			req->io_req.br_offset = offset;
2254*5c4a5fe1SAndy Fiddaman 			req->io_req.br_resid = 0;
2255*5c4a5fe1SAndy Fiddaman 			req->io_req.br_param = req;
2256*5c4a5fe1SAndy Fiddaman 		}
2257*5c4a5fe1SAndy Fiddaman 
2258*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iov[iovidx].iov_base =
2259*5c4a5fe1SAndy Fiddaman 		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
2260*5c4a5fe1SAndy Fiddaman 				     gpaddr, size);
2261*5c4a5fe1SAndy Fiddaman 		if (req->io_req.br_iov[iovidx].iov_base == NULL)
2262*5c4a5fe1SAndy Fiddaman 			return (-1);
2263*5c4a5fe1SAndy Fiddaman 
2264*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iov[iovidx].iov_len = size;
2265*5c4a5fe1SAndy Fiddaman 
2266*5c4a5fe1SAndy Fiddaman 		req->prev_gpaddr = gpaddr;
2267*5c4a5fe1SAndy Fiddaman 		req->prev_size = size;
2268*5c4a5fe1SAndy Fiddaman 		req->io_req.br_resid += size;
2269*5c4a5fe1SAndy Fiddaman 
2270*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iovcnt++;
2271*5c4a5fe1SAndy Fiddaman 	}
2272*5c4a5fe1SAndy Fiddaman 
2273*5c4a5fe1SAndy Fiddaman 	return (0);
2274*5c4a5fe1SAndy Fiddaman }
2275*5c4a5fe1SAndy Fiddaman 
2276*5c4a5fe1SAndy Fiddaman static void
2277*5c4a5fe1SAndy Fiddaman pci_nvme_set_completion(struct pci_nvme_softc *sc,
2278*5c4a5fe1SAndy Fiddaman     struct nvme_submission_queue *sq, int sqid, uint16_t cid, uint16_t status)
2279*5c4a5fe1SAndy Fiddaman {
2280*5c4a5fe1SAndy Fiddaman 	struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid];
2281*5c4a5fe1SAndy Fiddaman 
2282*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s sqid %d cqid %u cid %u status: 0x%x 0x%x",
2283*5c4a5fe1SAndy Fiddaman 		 __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status),
2284*5c4a5fe1SAndy Fiddaman 		 NVME_STATUS_GET_SC(status));
2285*5c4a5fe1SAndy Fiddaman 
2286*5c4a5fe1SAndy Fiddaman 	pci_nvme_cq_update(sc, cq, 0, cid, sqid, status);
2287*5c4a5fe1SAndy Fiddaman 
2288*5c4a5fe1SAndy Fiddaman 	if (cq->head != cq->tail) {
2289*5c4a5fe1SAndy Fiddaman 		if (cq->intr_en & NVME_CQ_INTEN) {
2290*5c4a5fe1SAndy Fiddaman 			pci_generate_msix(sc->nsc_pi, cq->intr_vec);
2291*5c4a5fe1SAndy Fiddaman 		} else {
2292*5c4a5fe1SAndy Fiddaman 			DPRINTF("%s: CQ%u interrupt disabled",
2293*5c4a5fe1SAndy Fiddaman 						__func__, sq->cqid);
2294*5c4a5fe1SAndy Fiddaman 		}
2295*5c4a5fe1SAndy Fiddaman 	}
2296*5c4a5fe1SAndy Fiddaman }
2297*5c4a5fe1SAndy Fiddaman 
2298*5c4a5fe1SAndy Fiddaman static void
2299*5c4a5fe1SAndy Fiddaman pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req)
2300*5c4a5fe1SAndy Fiddaman {
2301*5c4a5fe1SAndy Fiddaman 	req->sc = NULL;
2302*5c4a5fe1SAndy Fiddaman 	req->nvme_sq = NULL;
2303*5c4a5fe1SAndy Fiddaman 	req->sqid = 0;
2304*5c4a5fe1SAndy Fiddaman 
2305*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
2306*5c4a5fe1SAndy Fiddaman 
2307*5c4a5fe1SAndy Fiddaman 	STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link);
2308*5c4a5fe1SAndy Fiddaman 	sc->pending_ios--;
2309*5c4a5fe1SAndy Fiddaman 
2310*5c4a5fe1SAndy Fiddaman 	/* Once no I/O is pending, mark the device ready if it has been enabled */
2311*5c4a5fe1SAndy Fiddaman 	if (sc->pending_ios == 0 &&
2312*5c4a5fe1SAndy Fiddaman 	    NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts)))
2313*5c4a5fe1SAndy Fiddaman 		sc->regs.csts |= NVME_CSTS_RDY;
2314*5c4a5fe1SAndy Fiddaman 
2315*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
2316*5c4a5fe1SAndy Fiddaman 
2317*5c4a5fe1SAndy Fiddaman 	sem_post(&sc->iosemlock);
2318*5c4a5fe1SAndy Fiddaman }
2319*5c4a5fe1SAndy Fiddaman 
2320*5c4a5fe1SAndy Fiddaman static struct pci_nvme_ioreq *
2321*5c4a5fe1SAndy Fiddaman pci_nvme_get_ioreq(struct pci_nvme_softc *sc)
2322*5c4a5fe1SAndy Fiddaman {
2323*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_ioreq *req = NULL;
2324*5c4a5fe1SAndy Fiddaman 
2325*5c4a5fe1SAndy Fiddaman 	sem_wait(&sc->iosemlock);
2326*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
2327*5c4a5fe1SAndy Fiddaman 
2328*5c4a5fe1SAndy Fiddaman 	req = STAILQ_FIRST(&sc->ioreqs_free);
2329*5c4a5fe1SAndy Fiddaman 	assert(req != NULL);
2330*5c4a5fe1SAndy Fiddaman 	STAILQ_REMOVE_HEAD(&sc->ioreqs_free, link);
2331*5c4a5fe1SAndy Fiddaman 
2332*5c4a5fe1SAndy Fiddaman 	req->sc = sc;
2333*5c4a5fe1SAndy Fiddaman 
2334*5c4a5fe1SAndy Fiddaman 	sc->pending_ios++;
2335*5c4a5fe1SAndy Fiddaman 
2336*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
2337*5c4a5fe1SAndy Fiddaman 
2338*5c4a5fe1SAndy Fiddaman 	req->io_req.br_iovcnt = 0;
2339*5c4a5fe1SAndy Fiddaman 	req->io_req.br_offset = 0;
2340*5c4a5fe1SAndy Fiddaman 	req->io_req.br_resid = 0;
2341*5c4a5fe1SAndy Fiddaman 	req->io_req.br_param = req;
2342*5c4a5fe1SAndy Fiddaman 	req->prev_gpaddr = 0;
2343*5c4a5fe1SAndy Fiddaman 	req->prev_size = 0;
2344*5c4a5fe1SAndy Fiddaman 
2345*5c4a5fe1SAndy Fiddaman 	return req;
2346*5c4a5fe1SAndy Fiddaman }
2347*5c4a5fe1SAndy Fiddaman 
2348*5c4a5fe1SAndy Fiddaman static void
2349*5c4a5fe1SAndy Fiddaman pci_nvme_io_done(struct blockif_req *br, int err)
2350*5c4a5fe1SAndy Fiddaman {
2351*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_ioreq *req = br->br_param;
2352*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *sq = req->nvme_sq;
2353*5c4a5fe1SAndy Fiddaman 	uint16_t code, status;
2354*5c4a5fe1SAndy Fiddaman 
2355*5c4a5fe1SAndy Fiddaman 	DPRINTF("%s error %d %s", __func__, err, strerror(err));
2356*5c4a5fe1SAndy Fiddaman 
2357*5c4a5fe1SAndy Fiddaman 	/* TODO return correct error */
2358*5c4a5fe1SAndy Fiddaman 	code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS;
2359*5c4a5fe1SAndy Fiddaman 	status = 0;
2360*5c4a5fe1SAndy Fiddaman 	pci_nvme_status_genc(&status, code);
2361*5c4a5fe1SAndy Fiddaman 
2362*5c4a5fe1SAndy Fiddaman 	pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status);
2363*5c4a5fe1SAndy Fiddaman 	pci_nvme_stats_write_read_update(req->sc, req->opc,
2364*5c4a5fe1SAndy Fiddaman 	    req->bytes, status);
2365*5c4a5fe1SAndy Fiddaman 	pci_nvme_release_ioreq(req->sc, req);
2366*5c4a5fe1SAndy Fiddaman }
2367*5c4a5fe1SAndy Fiddaman 
2368*5c4a5fe1SAndy Fiddaman /*
2369*5c4a5fe1SAndy Fiddaman  * Implements the Flush command. The specification states:
2370*5c4a5fe1SAndy Fiddaman  *    If a volatile write cache is not present, Flush commands complete
2371*5c4a5fe1SAndy Fiddaman  *    successfully and have no effect
2372*5c4a5fe1SAndy Fiddaman  * in the description of the Volatile Write Cache (VWC) field of the Identify
2373*5c4a5fe1SAndy Fiddaman  * Controller data. Therefore, set status to Success if the command is
2374*5c4a5fe1SAndy Fiddaman  * not supported (i.e. RAM or as indicated by the blockif).
2375*5c4a5fe1SAndy Fiddaman  */
2376*5c4a5fe1SAndy Fiddaman static bool
2377*5c4a5fe1SAndy Fiddaman nvme_opc_flush(struct pci_nvme_softc *sc __unused,
2378*5c4a5fe1SAndy Fiddaman     struct nvme_command *cmd __unused,
2379*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore,
2380*5c4a5fe1SAndy Fiddaman     struct pci_nvme_ioreq *req,
2381*5c4a5fe1SAndy Fiddaman     uint16_t *status)
2382*5c4a5fe1SAndy Fiddaman {
2383*5c4a5fe1SAndy Fiddaman 	bool pending = false;
2384*5c4a5fe1SAndy Fiddaman 
2385*5c4a5fe1SAndy Fiddaman 	if (nvstore->type == NVME_STOR_RAM) {
2386*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2387*5c4a5fe1SAndy Fiddaman 	} else {
2388*5c4a5fe1SAndy Fiddaman 		int err;
2389*5c4a5fe1SAndy Fiddaman 
2390*5c4a5fe1SAndy Fiddaman 		req->io_req.br_callback = pci_nvme_io_done;
2391*5c4a5fe1SAndy Fiddaman 
2392*5c4a5fe1SAndy Fiddaman 		err = blockif_flush(nvstore->ctx, &req->io_req);
2393*5c4a5fe1SAndy Fiddaman 		switch (err) {
2394*5c4a5fe1SAndy Fiddaman 		case 0:
2395*5c4a5fe1SAndy Fiddaman 			pending = true;
2396*5c4a5fe1SAndy Fiddaman 			break;
2397*5c4a5fe1SAndy Fiddaman 		case EOPNOTSUPP:
2398*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2399*5c4a5fe1SAndy Fiddaman 			break;
2400*5c4a5fe1SAndy Fiddaman 		default:
2401*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2402*5c4a5fe1SAndy Fiddaman 		}
2403*5c4a5fe1SAndy Fiddaman 	}
2404*5c4a5fe1SAndy Fiddaman 
2405*5c4a5fe1SAndy Fiddaman 	return (pending);
2406*5c4a5fe1SAndy Fiddaman }
2407*5c4a5fe1SAndy Fiddaman 
2408*5c4a5fe1SAndy Fiddaman static uint16_t
2409*5c4a5fe1SAndy Fiddaman nvme_write_read_ram(struct pci_nvme_softc *sc,
2410*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore,
2411*5c4a5fe1SAndy Fiddaman     uint64_t prp1, uint64_t prp2,
2412*5c4a5fe1SAndy Fiddaman     size_t offset, uint64_t bytes,
2413*5c4a5fe1SAndy Fiddaman     bool is_write)
2414*5c4a5fe1SAndy Fiddaman {
2415*5c4a5fe1SAndy Fiddaman 	uint8_t *buf = nvstore->ctx;
2416*5c4a5fe1SAndy Fiddaman 	enum nvme_copy_dir dir;
2417*5c4a5fe1SAndy Fiddaman 	uint16_t status;
2418*5c4a5fe1SAndy Fiddaman 
2419*5c4a5fe1SAndy Fiddaman 	if (is_write)
2420*5c4a5fe1SAndy Fiddaman 		dir = NVME_COPY_TO_PRP;
2421*5c4a5fe1SAndy Fiddaman 	else
2422*5c4a5fe1SAndy Fiddaman 		dir = NVME_COPY_FROM_PRP;
2423*5c4a5fe1SAndy Fiddaman 
2424*5c4a5fe1SAndy Fiddaman 	status = 0;
2425*5c4a5fe1SAndy Fiddaman 	if (nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, prp1, prp2,
2426*5c4a5fe1SAndy Fiddaman 	    buf + offset, bytes, dir))
2427*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status,
2428*5c4a5fe1SAndy Fiddaman 		    NVME_SC_DATA_TRANSFER_ERROR);
2429*5c4a5fe1SAndy Fiddaman 	else
2430*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2431*5c4a5fe1SAndy Fiddaman 
2432*5c4a5fe1SAndy Fiddaman 	return (status);
2433*5c4a5fe1SAndy Fiddaman }
2434*5c4a5fe1SAndy Fiddaman 
2435*5c4a5fe1SAndy Fiddaman static uint16_t
2436*5c4a5fe1SAndy Fiddaman nvme_write_read_blockif(struct pci_nvme_softc *sc,
2437*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore,
2438*5c4a5fe1SAndy Fiddaman     struct pci_nvme_ioreq *req,
2439*5c4a5fe1SAndy Fiddaman     uint64_t prp1, uint64_t prp2,
2440*5c4a5fe1SAndy Fiddaman     size_t offset, uint64_t bytes,
2441*5c4a5fe1SAndy Fiddaman     bool is_write)
2442*5c4a5fe1SAndy Fiddaman {
2443*5c4a5fe1SAndy Fiddaman 	uint64_t size;
2444*5c4a5fe1SAndy Fiddaman 	int err;
2445*5c4a5fe1SAndy Fiddaman 	uint16_t status = NVME_NO_STATUS;
2446*5c4a5fe1SAndy Fiddaman 
2447*5c4a5fe1SAndy Fiddaman 	size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
2448*5c4a5fe1SAndy Fiddaman 	if (pci_nvme_append_iov_req(sc, req, prp1, size, offset)) {
2449*5c4a5fe1SAndy Fiddaman 		err = -1;
2450*5c4a5fe1SAndy Fiddaman 		goto out;
2451*5c4a5fe1SAndy Fiddaman 	}
2452*5c4a5fe1SAndy Fiddaman 
2453*5c4a5fe1SAndy Fiddaman 	offset += size;
2454*5c4a5fe1SAndy Fiddaman 	bytes  -= size;
2455*5c4a5fe1SAndy Fiddaman 
2456*5c4a5fe1SAndy Fiddaman 	if (bytes == 0) {
2457*5c4a5fe1SAndy Fiddaman 		;
2458*5c4a5fe1SAndy Fiddaman 	} else if (bytes <= PAGE_SIZE) {
2459*5c4a5fe1SAndy Fiddaman 		size = bytes;
2460*5c4a5fe1SAndy Fiddaman 		if (pci_nvme_append_iov_req(sc, req, prp2, size, offset)) {
2461*5c4a5fe1SAndy Fiddaman 			err = -1;
2462*5c4a5fe1SAndy Fiddaman 			goto out;
2463*5c4a5fe1SAndy Fiddaman 		}
2464*5c4a5fe1SAndy Fiddaman 	} else {
2465*5c4a5fe1SAndy Fiddaman 		void *vmctx = sc->nsc_pi->pi_vmctx;
2466*5c4a5fe1SAndy Fiddaman 		uint64_t *prp_list = &prp2;
2467*5c4a5fe1SAndy Fiddaman 		uint64_t *last = prp_list;
2468*5c4a5fe1SAndy Fiddaman 
2469*5c4a5fe1SAndy Fiddaman 		/* PRP2 is pointer to a physical region page list */
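		/*
		 * For example, an aligned 12 KiB transfer uses PRP1 for the
		 * first page and PRP2 as a list holding the remaining two
		 * page pointers; a two-page transfer uses PRP2 directly
		 * (handled above).
		 */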
2470*5c4a5fe1SAndy Fiddaman 		while (bytes) {
2471*5c4a5fe1SAndy Fiddaman 			/* Last entry in list points to the next list */
2472*5c4a5fe1SAndy Fiddaman 			if ((prp_list == last) && (bytes > PAGE_SIZE)) {
2473*5c4a5fe1SAndy Fiddaman 				uint64_t prp = *prp_list;
2474*5c4a5fe1SAndy Fiddaman 
2475*5c4a5fe1SAndy Fiddaman 				prp_list = paddr_guest2host(vmctx, prp,
2476*5c4a5fe1SAndy Fiddaman 				    PAGE_SIZE - (prp % PAGE_SIZE));
2477*5c4a5fe1SAndy Fiddaman 				if (prp_list == NULL) {
2478*5c4a5fe1SAndy Fiddaman 					err = -1;
2479*5c4a5fe1SAndy Fiddaman 					goto out;
2480*5c4a5fe1SAndy Fiddaman 				}
2481*5c4a5fe1SAndy Fiddaman 				last = prp_list + (NVME_PRP2_ITEMS - 1);
2482*5c4a5fe1SAndy Fiddaman 			}
2483*5c4a5fe1SAndy Fiddaman 
2484*5c4a5fe1SAndy Fiddaman 			size = MIN(bytes, PAGE_SIZE);
2485*5c4a5fe1SAndy Fiddaman 
2486*5c4a5fe1SAndy Fiddaman 			if (pci_nvme_append_iov_req(sc, req, *prp_list, size,
2487*5c4a5fe1SAndy Fiddaman 			    offset)) {
2488*5c4a5fe1SAndy Fiddaman 				err = -1;
2489*5c4a5fe1SAndy Fiddaman 				goto out;
2490*5c4a5fe1SAndy Fiddaman 			}
2491*5c4a5fe1SAndy Fiddaman 
2492*5c4a5fe1SAndy Fiddaman 			offset += size;
2493*5c4a5fe1SAndy Fiddaman 			bytes  -= size;
2494*5c4a5fe1SAndy Fiddaman 
2495*5c4a5fe1SAndy Fiddaman 			prp_list++;
2496*5c4a5fe1SAndy Fiddaman 		}
2497*5c4a5fe1SAndy Fiddaman 	}
2498*5c4a5fe1SAndy Fiddaman 	req->io_req.br_callback = pci_nvme_io_done;
2499*5c4a5fe1SAndy Fiddaman 	if (is_write)
2500*5c4a5fe1SAndy Fiddaman 		err = blockif_write(nvstore->ctx, &req->io_req);
2501*5c4a5fe1SAndy Fiddaman 	else
2502*5c4a5fe1SAndy Fiddaman 		err = blockif_read(nvstore->ctx, &req->io_req);
2503*5c4a5fe1SAndy Fiddaman out:
2504*5c4a5fe1SAndy Fiddaman 	if (err)
2505*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);
2506*5c4a5fe1SAndy Fiddaman 
2507*5c4a5fe1SAndy Fiddaman 	return (status);
2508*5c4a5fe1SAndy Fiddaman }
2509*5c4a5fe1SAndy Fiddaman 
2510*5c4a5fe1SAndy Fiddaman static bool
2511*5c4a5fe1SAndy Fiddaman nvme_opc_write_read(struct pci_nvme_softc *sc,
2512*5c4a5fe1SAndy Fiddaman     struct nvme_command *cmd,
2513*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore,
2514*5c4a5fe1SAndy Fiddaman     struct pci_nvme_ioreq *req,
2515*5c4a5fe1SAndy Fiddaman     uint16_t *status)
2516*5c4a5fe1SAndy Fiddaman {
2517*5c4a5fe1SAndy Fiddaman 	uint64_t lba, nblocks, bytes;
2518*5c4a5fe1SAndy Fiddaman 	size_t offset;
2519*5c4a5fe1SAndy Fiddaman 	bool is_write = cmd->opc == NVME_OPC_WRITE;
2520*5c4a5fe1SAndy Fiddaman 	bool pending = false;
2521*5c4a5fe1SAndy Fiddaman 
2522*5c4a5fe1SAndy Fiddaman 	lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10;
2523*5c4a5fe1SAndy Fiddaman 	nblocks = (cmd->cdw12 & 0xFFFF) + 1;
2524*5c4a5fe1SAndy Fiddaman 	bytes = nblocks << nvstore->sectsz_bits;
2525*5c4a5fe1SAndy Fiddaman 	if (bytes > NVME_MAX_DATA_SIZE) {
2526*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s command would exceed MDTS", __func__);
2527*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(status, NVME_SC_INVALID_FIELD);
2528*5c4a5fe1SAndy Fiddaman 		goto out;
2529*5c4a5fe1SAndy Fiddaman 	}
2530*5c4a5fe1SAndy Fiddaman 
2531*5c4a5fe1SAndy Fiddaman 	if (pci_nvme_out_of_range(nvstore, lba, nblocks)) {
2532*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s command would exceed LBA range(slba=%#lx nblocks=%#lx)",
2533*5c4a5fe1SAndy Fiddaman 		    __func__, lba, nblocks);
2534*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
2535*5c4a5fe1SAndy Fiddaman 		goto out;
2536*5c4a5fe1SAndy Fiddaman 	}
2537*5c4a5fe1SAndy Fiddaman 
2538*5c4a5fe1SAndy Fiddaman 	offset = lba << nvstore->sectsz_bits;
2539*5c4a5fe1SAndy Fiddaman 
2540*5c4a5fe1SAndy Fiddaman 	req->bytes = bytes;
2541*5c4a5fe1SAndy Fiddaman 	req->io_req.br_offset = lba;
2542*5c4a5fe1SAndy Fiddaman 
2543*5c4a5fe1SAndy Fiddaman 	/* PRP bits 1:0 must be zero */
2544*5c4a5fe1SAndy Fiddaman 	cmd->prp1 &= ~0x3UL;
2545*5c4a5fe1SAndy Fiddaman 	cmd->prp2 &= ~0x3UL;
2546*5c4a5fe1SAndy Fiddaman 
2547*5c4a5fe1SAndy Fiddaman 	if (nvstore->type == NVME_STOR_RAM) {
2548*5c4a5fe1SAndy Fiddaman 		*status = nvme_write_read_ram(sc, nvstore, cmd->prp1,
2549*5c4a5fe1SAndy Fiddaman 		    cmd->prp2, offset, bytes, is_write);
2550*5c4a5fe1SAndy Fiddaman 	} else {
2551*5c4a5fe1SAndy Fiddaman 		*status = nvme_write_read_blockif(sc, nvstore, req,
2552*5c4a5fe1SAndy Fiddaman 		    cmd->prp1, cmd->prp2, offset, bytes, is_write);
2553*5c4a5fe1SAndy Fiddaman 
2554*5c4a5fe1SAndy Fiddaman 		if (*status == NVME_NO_STATUS)
2555*5c4a5fe1SAndy Fiddaman 			pending = true;
2556*5c4a5fe1SAndy Fiddaman 	}
2557*5c4a5fe1SAndy Fiddaman out:
2558*5c4a5fe1SAndy Fiddaman 	if (!pending)
2559*5c4a5fe1SAndy Fiddaman 		pci_nvme_stats_write_read_update(sc, cmd->opc, bytes, *status);
2560*5c4a5fe1SAndy Fiddaman 
2561*5c4a5fe1SAndy Fiddaman 	return (pending);
2562*5c4a5fe1SAndy Fiddaman }
2563*5c4a5fe1SAndy Fiddaman 
2564*5c4a5fe1SAndy Fiddaman static void
2565*5c4a5fe1SAndy Fiddaman pci_nvme_dealloc_sm(struct blockif_req *br, int err)
2566*5c4a5fe1SAndy Fiddaman {
2567*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_ioreq *req = br->br_param;
2568*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc *sc = req->sc;
2569*5c4a5fe1SAndy Fiddaman 	bool done = true;
2570*5c4a5fe1SAndy Fiddaman 	uint16_t status;
2571*5c4a5fe1SAndy Fiddaman 
2572*5c4a5fe1SAndy Fiddaman 	status = 0;
2573*5c4a5fe1SAndy Fiddaman 	if (err) {
2574*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status, NVME_SC_INTERNAL_DEVICE_ERROR);
2575*5c4a5fe1SAndy Fiddaman 	} else if ((req->prev_gpaddr + 1) == (req->prev_size)) {
2576*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2577*5c4a5fe1SAndy Fiddaman 	} else {
2578*5c4a5fe1SAndy Fiddaman 		struct iovec *iov = req->io_req.br_iov;
2579*5c4a5fe1SAndy Fiddaman 
2580*5c4a5fe1SAndy Fiddaman 		req->prev_gpaddr++;
2581*5c4a5fe1SAndy Fiddaman 		iov += req->prev_gpaddr;
2582*5c4a5fe1SAndy Fiddaman 
2583*5c4a5fe1SAndy Fiddaman 		/* The iov_* values already include the sector size */
2584*5c4a5fe1SAndy Fiddaman 		req->io_req.br_offset = (off_t)iov->iov_base;
2585*5c4a5fe1SAndy Fiddaman 		req->io_req.br_resid = iov->iov_len;
2586*5c4a5fe1SAndy Fiddaman 		if (blockif_delete(sc->nvstore.ctx, &req->io_req)) {
2587*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status,
2588*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INTERNAL_DEVICE_ERROR);
2589*5c4a5fe1SAndy Fiddaman 		} else
2590*5c4a5fe1SAndy Fiddaman 			done = false;
2591*5c4a5fe1SAndy Fiddaman 	}
2592*5c4a5fe1SAndy Fiddaman 
2593*5c4a5fe1SAndy Fiddaman 	if (done) {
2594*5c4a5fe1SAndy Fiddaman 		pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid,
2595*5c4a5fe1SAndy Fiddaman 		    status);
2596*5c4a5fe1SAndy Fiddaman 		pci_nvme_release_ioreq(sc, req);
2597*5c4a5fe1SAndy Fiddaman 	}
2598*5c4a5fe1SAndy Fiddaman }
2599*5c4a5fe1SAndy Fiddaman 
2600*5c4a5fe1SAndy Fiddaman static bool
2601*5c4a5fe1SAndy Fiddaman nvme_opc_dataset_mgmt(struct pci_nvme_softc *sc,
2602*5c4a5fe1SAndy Fiddaman     struct nvme_command *cmd,
2603*5c4a5fe1SAndy Fiddaman     struct pci_nvme_blockstore *nvstore,
2604*5c4a5fe1SAndy Fiddaman     struct pci_nvme_ioreq *req,
2605*5c4a5fe1SAndy Fiddaman     uint16_t *status)
2606*5c4a5fe1SAndy Fiddaman {
2607*5c4a5fe1SAndy Fiddaman 	struct nvme_dsm_range *range = NULL;
2608*5c4a5fe1SAndy Fiddaman 	uint32_t nr, r, non_zero, dr;
2609*5c4a5fe1SAndy Fiddaman 	int err;
2610*5c4a5fe1SAndy Fiddaman 	bool pending = false;
2611*5c4a5fe1SAndy Fiddaman 
2612*5c4a5fe1SAndy Fiddaman 	if ((sc->ctrldata.oncs & NVME_ONCS_DSM) == 0) {
2613*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(status, NVME_SC_INVALID_OPCODE);
2614*5c4a5fe1SAndy Fiddaman 		goto out;
2615*5c4a5fe1SAndy Fiddaman 	}
2616*5c4a5fe1SAndy Fiddaman 
2617*5c4a5fe1SAndy Fiddaman 	nr = cmd->cdw10 & 0xff;
2618*5c4a5fe1SAndy Fiddaman 
2619*5c4a5fe1SAndy Fiddaman 	/* copy locally because a range entry could straddle PRPs */
2620*5c4a5fe1SAndy Fiddaman #ifdef	__FreeBSD__
2621*5c4a5fe1SAndy Fiddaman 	range = calloc(1, NVME_MAX_DSM_TRIM);
2622*5c4a5fe1SAndy Fiddaman #else
2623*5c4a5fe1SAndy Fiddaman 	_Static_assert(NVME_MAX_DSM_TRIM % sizeof(struct nvme_dsm_range) == 0,
2624*5c4a5fe1SAndy Fiddaman 	    "NVME_MAX_DSM_TRIM is not a multiple of struct size");
2625*5c4a5fe1SAndy Fiddaman 	range = calloc(NVME_MAX_DSM_TRIM / sizeof (*range), sizeof (*range));
2626*5c4a5fe1SAndy Fiddaman #endif
2627*5c4a5fe1SAndy Fiddaman 	if (range == NULL) {
2628*5c4a5fe1SAndy Fiddaman 		pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2629*5c4a5fe1SAndy Fiddaman 		goto out;
2630*5c4a5fe1SAndy Fiddaman 	}
2631*5c4a5fe1SAndy Fiddaman 	nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, cmd->prp1, cmd->prp2,
2632*5c4a5fe1SAndy Fiddaman 	    (uint8_t *)range, NVME_MAX_DSM_TRIM, NVME_COPY_FROM_PRP);
2633*5c4a5fe1SAndy Fiddaman 
2634*5c4a5fe1SAndy Fiddaman 	/* Check for invalid ranges and the number of non-zero lengths */
2635*5c4a5fe1SAndy Fiddaman 	non_zero = 0;
2636*5c4a5fe1SAndy Fiddaman 	for (r = 0; r <= nr; r++) {
2637*5c4a5fe1SAndy Fiddaman 		if (pci_nvme_out_of_range(nvstore,
2638*5c4a5fe1SAndy Fiddaman 		    range[r].starting_lba, range[r].length)) {
2639*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
2640*5c4a5fe1SAndy Fiddaman 			goto out;
2641*5c4a5fe1SAndy Fiddaman 		}
2642*5c4a5fe1SAndy Fiddaman 		if (range[r].length != 0)
2643*5c4a5fe1SAndy Fiddaman 			non_zero++;
2644*5c4a5fe1SAndy Fiddaman 	}
2645*5c4a5fe1SAndy Fiddaman 
2646*5c4a5fe1SAndy Fiddaman 	if (cmd->cdw11 & NVME_DSM_ATTR_DEALLOCATE) {
2647*5c4a5fe1SAndy Fiddaman 		size_t offset, bytes;
2648*5c4a5fe1SAndy Fiddaman 		int sectsz_bits = sc->nvstore.sectsz_bits;
2649*5c4a5fe1SAndy Fiddaman 
2650*5c4a5fe1SAndy Fiddaman 		/*
2651*5c4a5fe1SAndy Fiddaman 		 * DSM calls are advisory only, and compliant controllers
2652*5c4a5fe1SAndy Fiddaman 		 * may choose to take no action (i.e. return Success).
2653*5c4a5fe1SAndy Fiddaman 		 */
2654*5c4a5fe1SAndy Fiddaman 		if (!nvstore->deallocate) {
2655*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2656*5c4a5fe1SAndy Fiddaman 			goto out;
2657*5c4a5fe1SAndy Fiddaman 		}
2658*5c4a5fe1SAndy Fiddaman 
2659*5c4a5fe1SAndy Fiddaman 		/* If all ranges have a zero length, return Success */
2660*5c4a5fe1SAndy Fiddaman 		if (non_zero == 0) {
2661*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2662*5c4a5fe1SAndy Fiddaman 			goto out;
2663*5c4a5fe1SAndy Fiddaman 		}
2664*5c4a5fe1SAndy Fiddaman 
2665*5c4a5fe1SAndy Fiddaman 		if (req == NULL) {
2666*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2667*5c4a5fe1SAndy Fiddaman 			goto out;
2668*5c4a5fe1SAndy Fiddaman 		}
2669*5c4a5fe1SAndy Fiddaman 
2670*5c4a5fe1SAndy Fiddaman 		offset = range[0].starting_lba << sectsz_bits;
2671*5c4a5fe1SAndy Fiddaman 		bytes = range[0].length << sectsz_bits;
2672*5c4a5fe1SAndy Fiddaman 
2673*5c4a5fe1SAndy Fiddaman 		/*
2674*5c4a5fe1SAndy Fiddaman 		 * If the request is for more than a single range, store
2675*5c4a5fe1SAndy Fiddaman 		 * the ranges in the br_iov. Optimize for the common case
2676*5c4a5fe1SAndy Fiddaman 		 * of a single range.
2677*5c4a5fe1SAndy Fiddaman 		 *
2678*5c4a5fe1SAndy Fiddaman 		 * Note that the NVMe Number of Ranges is a zero-based value.
2679*5c4a5fe1SAndy Fiddaman 		 */
2680*5c4a5fe1SAndy Fiddaman 		req->io_req.br_iovcnt = 0;
2681*5c4a5fe1SAndy Fiddaman 		req->io_req.br_offset = offset;
2682*5c4a5fe1SAndy Fiddaman 		req->io_req.br_resid = bytes;
2683*5c4a5fe1SAndy Fiddaman 
2684*5c4a5fe1SAndy Fiddaman 		if (nr == 0) {
2685*5c4a5fe1SAndy Fiddaman 			req->io_req.br_callback = pci_nvme_io_done;
2686*5c4a5fe1SAndy Fiddaman 		} else {
2687*5c4a5fe1SAndy Fiddaman 			struct iovec *iov = req->io_req.br_iov;
2688*5c4a5fe1SAndy Fiddaman 
2689*5c4a5fe1SAndy Fiddaman 			for (r = 0, dr = 0; r <= nr; r++) {
2690*5c4a5fe1SAndy Fiddaman 				offset = range[r].starting_lba << sectsz_bits;
2691*5c4a5fe1SAndy Fiddaman 				bytes = range[r].length << sectsz_bits;
2692*5c4a5fe1SAndy Fiddaman 				if (bytes == 0)
2693*5c4a5fe1SAndy Fiddaman 					continue;
2694*5c4a5fe1SAndy Fiddaman 
2695*5c4a5fe1SAndy Fiddaman 				if ((nvstore->size - offset) < bytes) {
2696*5c4a5fe1SAndy Fiddaman 					pci_nvme_status_genc(status,
2697*5c4a5fe1SAndy Fiddaman 					    NVME_SC_LBA_OUT_OF_RANGE);
2698*5c4a5fe1SAndy Fiddaman 					goto out;
2699*5c4a5fe1SAndy Fiddaman 				}
2700*5c4a5fe1SAndy Fiddaman 				iov[dr].iov_base = (void *)offset;
2701*5c4a5fe1SAndy Fiddaman 				iov[dr].iov_len = bytes;
2702*5c4a5fe1SAndy Fiddaman 				dr++;
2703*5c4a5fe1SAndy Fiddaman 			}
2704*5c4a5fe1SAndy Fiddaman 			req->io_req.br_callback = pci_nvme_dealloc_sm;
2705*5c4a5fe1SAndy Fiddaman 
2706*5c4a5fe1SAndy Fiddaman 			/*
2707*5c4a5fe1SAndy Fiddaman 			 * Use prev_gpaddr to track the current entry and
2708*5c4a5fe1SAndy Fiddaman 			 * prev_size to track the number of entries
2709*5c4a5fe1SAndy Fiddaman 			 */
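			/*
			 * pci_nvme_dealloc_sm() consumes this state: it issues
			 * one blockif_delete() per iovec entry, advancing
			 * prev_gpaddr until prev_gpaddr + 1 reaches prev_size.
			 */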
2710*5c4a5fe1SAndy Fiddaman 			req->prev_gpaddr = 0;
2711*5c4a5fe1SAndy Fiddaman 			req->prev_size = dr;
2712*5c4a5fe1SAndy Fiddaman 		}
2713*5c4a5fe1SAndy Fiddaman 
2714*5c4a5fe1SAndy Fiddaman 		err = blockif_delete(nvstore->ctx, &req->io_req);
2715*5c4a5fe1SAndy Fiddaman 		if (err)
2716*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2717*5c4a5fe1SAndy Fiddaman 		else
2718*5c4a5fe1SAndy Fiddaman 			pending = true;
2719*5c4a5fe1SAndy Fiddaman 	}
2720*5c4a5fe1SAndy Fiddaman out:
2721*5c4a5fe1SAndy Fiddaman 	free(range);
2722*5c4a5fe1SAndy Fiddaman 	return (pending);
2723*5c4a5fe1SAndy Fiddaman }
2724*5c4a5fe1SAndy Fiddaman 
2725*5c4a5fe1SAndy Fiddaman static void
2726*5c4a5fe1SAndy Fiddaman pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx)
2727*5c4a5fe1SAndy Fiddaman {
2728*5c4a5fe1SAndy Fiddaman 	struct nvme_submission_queue *sq;
2729*5c4a5fe1SAndy Fiddaman 	uint16_t status;
2730*5c4a5fe1SAndy Fiddaman 	uint16_t sqhead;
2731*5c4a5fe1SAndy Fiddaman 
2732*5c4a5fe1SAndy Fiddaman 	/* handle all submissions up to sq->tail index */
2733*5c4a5fe1SAndy Fiddaman 	sq = &sc->submit_queues[idx];
2734*5c4a5fe1SAndy Fiddaman 
2735*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sq->mtx);
2736*5c4a5fe1SAndy Fiddaman 
2737*5c4a5fe1SAndy Fiddaman 	sqhead = sq->head;
2738*5c4a5fe1SAndy Fiddaman 	DPRINTF("nvme_handle_io qid %u head %u tail %u cmdlist %p",
2739*5c4a5fe1SAndy Fiddaman 	         idx, sqhead, sq->tail, sq->qbase);
2740*5c4a5fe1SAndy Fiddaman 
2741*5c4a5fe1SAndy Fiddaman 	while (sqhead != atomic_load_acq_short(&sq->tail)) {
2742*5c4a5fe1SAndy Fiddaman 		struct nvme_command *cmd;
2743*5c4a5fe1SAndy Fiddaman 		struct pci_nvme_ioreq *req;
2744*5c4a5fe1SAndy Fiddaman 		uint32_t nsid;
2745*5c4a5fe1SAndy Fiddaman 		bool pending;
2746*5c4a5fe1SAndy Fiddaman 
2747*5c4a5fe1SAndy Fiddaman 		pending = false;
2748*5c4a5fe1SAndy Fiddaman 		req = NULL;
2749*5c4a5fe1SAndy Fiddaman 		status = 0;
2750*5c4a5fe1SAndy Fiddaman 
2751*5c4a5fe1SAndy Fiddaman 		cmd = &sq->qbase[sqhead];
2752*5c4a5fe1SAndy Fiddaman 		sqhead = (sqhead + 1) % sq->size;
2753*5c4a5fe1SAndy Fiddaman 
2754*5c4a5fe1SAndy Fiddaman 		nsid = le32toh(cmd->nsid);
2755*5c4a5fe1SAndy Fiddaman 		if ((nsid == 0) || (nsid > sc->ctrldata.nn)) {
2756*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status,
2757*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2758*5c4a5fe1SAndy Fiddaman 			status |= NVMEM(NVME_STATUS_DNR);
2759*5c4a5fe1SAndy Fiddaman 			goto complete;
2760*5c4a5fe1SAndy Fiddaman 		}
2761*5c4a5fe1SAndy Fiddaman 
2762*5c4a5fe1SAndy Fiddaman 		req = pci_nvme_get_ioreq(sc);
2763*5c4a5fe1SAndy Fiddaman 		if (req == NULL) {
2764*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status,
2765*5c4a5fe1SAndy Fiddaman 			    NVME_SC_INTERNAL_DEVICE_ERROR);
2766*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s: unable to allocate IO req", __func__);
2767*5c4a5fe1SAndy Fiddaman 			goto complete;
2768*5c4a5fe1SAndy Fiddaman 		}
2769*5c4a5fe1SAndy Fiddaman 		req->nvme_sq = sq;
2770*5c4a5fe1SAndy Fiddaman 		req->sqid = idx;
2771*5c4a5fe1SAndy Fiddaman 		req->opc = cmd->opc;
2772*5c4a5fe1SAndy Fiddaman 		req->cid = cmd->cid;
2773*5c4a5fe1SAndy Fiddaman 		req->nsid = cmd->nsid;
2774*5c4a5fe1SAndy Fiddaman 
2775*5c4a5fe1SAndy Fiddaman 		switch (cmd->opc) {
2776*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_FLUSH:
2777*5c4a5fe1SAndy Fiddaman 			pending = nvme_opc_flush(sc, cmd, &sc->nvstore,
2778*5c4a5fe1SAndy Fiddaman 			    req, &status);
2779*5c4a5fe1SAndy Fiddaman 			break;
2780*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_WRITE:
2781*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_READ:
2782*5c4a5fe1SAndy Fiddaman 			pending = nvme_opc_write_read(sc, cmd, &sc->nvstore,
2783*5c4a5fe1SAndy Fiddaman 			    req, &status);
2784*5c4a5fe1SAndy Fiddaman 			break;
2785*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_WRITE_ZEROES:
2786*5c4a5fe1SAndy Fiddaman 			/* TODO: write zeroes
2787*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s write zeroes lba 0x%lx blocks %u",
2788*5c4a5fe1SAndy Fiddaman 			        __func__, lba, cmd->cdw12 & 0xFFFF); */
2789*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2790*5c4a5fe1SAndy Fiddaman 			break;
2791*5c4a5fe1SAndy Fiddaman 		case NVME_OPC_DATASET_MANAGEMENT:
2792*5c4a5fe1SAndy Fiddaman 			pending = nvme_opc_dataset_mgmt(sc, cmd, &sc->nvstore,
2793*5c4a5fe1SAndy Fiddaman 			    req, &status);
2794*5c4a5fe1SAndy Fiddaman 			break;
2795*5c4a5fe1SAndy Fiddaman 		default:
2796*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s unhandled io command 0x%x",
2797*5c4a5fe1SAndy Fiddaman 			    __func__, cmd->opc);
2798*5c4a5fe1SAndy Fiddaman 			pci_nvme_status_genc(&status, NVME_SC_INVALID_OPCODE);
2799*5c4a5fe1SAndy Fiddaman 		}
2800*5c4a5fe1SAndy Fiddaman complete:
2801*5c4a5fe1SAndy Fiddaman 		if (!pending) {
2802*5c4a5fe1SAndy Fiddaman 			pci_nvme_set_completion(sc, sq, idx, cmd->cid, status);
2803*5c4a5fe1SAndy Fiddaman 			if (req != NULL)
2804*5c4a5fe1SAndy Fiddaman 				pci_nvme_release_ioreq(sc, req);
2805*5c4a5fe1SAndy Fiddaman 		}
2806*5c4a5fe1SAndy Fiddaman 	}
2807*5c4a5fe1SAndy Fiddaman 
2808*5c4a5fe1SAndy Fiddaman 	sq->head = sqhead;
2809*5c4a5fe1SAndy Fiddaman 
2810*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sq->mtx);
2811*5c4a5fe1SAndy Fiddaman }
2812*5c4a5fe1SAndy Fiddaman 
2813*5c4a5fe1SAndy Fiddaman static void
2814*5c4a5fe1SAndy Fiddaman pci_nvme_handle_doorbell(struct pci_nvme_softc* sc,
2815*5c4a5fe1SAndy Fiddaman 	uint64_t idx, int is_sq, uint64_t value)
2816*5c4a5fe1SAndy Fiddaman {
2817*5c4a5fe1SAndy Fiddaman 	DPRINTF("nvme doorbell %lu, %s, val 0x%lx",
2818*5c4a5fe1SAndy Fiddaman 	        idx, is_sq ? "SQ" : "CQ", value & 0xFFFF);
2819*5c4a5fe1SAndy Fiddaman 
2820*5c4a5fe1SAndy Fiddaman 	if (is_sq) {
2821*5c4a5fe1SAndy Fiddaman 		if (idx > sc->num_squeues) {
2822*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s queue index %lu overflow from "
2823*5c4a5fe1SAndy Fiddaman 			         "guest (max %u)",
2824*5c4a5fe1SAndy Fiddaman 			         __func__, idx, sc->num_squeues);
2825*5c4a5fe1SAndy Fiddaman 			return;
2826*5c4a5fe1SAndy Fiddaman 		}
2827*5c4a5fe1SAndy Fiddaman 
2828*5c4a5fe1SAndy Fiddaman 		atomic_store_short(&sc->submit_queues[idx].tail,
2829*5c4a5fe1SAndy Fiddaman 		                   (uint16_t)value);
2830*5c4a5fe1SAndy Fiddaman 
2831*5c4a5fe1SAndy Fiddaman 		if (idx == 0) {
2832*5c4a5fe1SAndy Fiddaman 			pci_nvme_handle_admin_cmd(sc, value);
2833*5c4a5fe1SAndy Fiddaman 		} else {
2834*5c4a5fe1SAndy Fiddaman 			/* submission queue; handle new entries in SQ */
2835*5c4a5fe1SAndy Fiddaman 			if (idx > sc->num_squeues) {
2836*5c4a5fe1SAndy Fiddaman 				WPRINTF("%s SQ index %lu overflow from "
2837*5c4a5fe1SAndy Fiddaman 				         "guest (max %u)",
2838*5c4a5fe1SAndy Fiddaman 				         __func__, idx, sc->num_squeues);
2839*5c4a5fe1SAndy Fiddaman 				return;
2840*5c4a5fe1SAndy Fiddaman 			}
2841*5c4a5fe1SAndy Fiddaman 			pci_nvme_handle_io_cmd(sc, (uint16_t)idx);
2842*5c4a5fe1SAndy Fiddaman 		}
2843*5c4a5fe1SAndy Fiddaman 	} else {
2844*5c4a5fe1SAndy Fiddaman 		if (idx > sc->num_cqueues) {
2845*5c4a5fe1SAndy Fiddaman 			WPRINTF("%s queue index %lu overflow from "
2846*5c4a5fe1SAndy Fiddaman 			         "guest (max %u)",
2847*5c4a5fe1SAndy Fiddaman 			         __func__, idx, sc->num_cqueues);
2848*5c4a5fe1SAndy Fiddaman 			return;
2849*5c4a5fe1SAndy Fiddaman 		}
2850*5c4a5fe1SAndy Fiddaman 
2851*5c4a5fe1SAndy Fiddaman 		atomic_store_short(&sc->compl_queues[idx].head,
2852*5c4a5fe1SAndy Fiddaman 				(uint16_t)value);
2853*5c4a5fe1SAndy Fiddaman 	}
2854*5c4a5fe1SAndy Fiddaman }
2855*5c4a5fe1SAndy Fiddaman 
2856*5c4a5fe1SAndy Fiddaman static void
2857*5c4a5fe1SAndy Fiddaman pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite)
2858*5c4a5fe1SAndy Fiddaman {
2859*5c4a5fe1SAndy Fiddaman 	const char *s = iswrite ? "WRITE" : "READ";
2860*5c4a5fe1SAndy Fiddaman 
2861*5c4a5fe1SAndy Fiddaman 	switch (offset) {
2862*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CAP_LOW:
2863*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_CAP_LOW", func, s);
2864*5c4a5fe1SAndy Fiddaman 		break;
2865*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CAP_HI:
2866*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_CAP_HI", func, s);
2867*5c4a5fe1SAndy Fiddaman 		break;
2868*5c4a5fe1SAndy Fiddaman 	case NVME_CR_VS:
2869*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_VS", func, s);
2870*5c4a5fe1SAndy Fiddaman 		break;
2871*5c4a5fe1SAndy Fiddaman 	case NVME_CR_INTMS:
2872*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_INTMS", func, s);
2873*5c4a5fe1SAndy Fiddaman 		break;
2874*5c4a5fe1SAndy Fiddaman 	case NVME_CR_INTMC:
2875*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_INTMC", func, s);
2876*5c4a5fe1SAndy Fiddaman 		break;
2877*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CC:
2878*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_CC", func, s);
2879*5c4a5fe1SAndy Fiddaman 		break;
2880*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CSTS:
2881*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_CSTS", func, s);
2882*5c4a5fe1SAndy Fiddaman 		break;
2883*5c4a5fe1SAndy Fiddaman 	case NVME_CR_NSSR:
2884*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_NSSR", func, s);
2885*5c4a5fe1SAndy Fiddaman 		break;
2886*5c4a5fe1SAndy Fiddaman 	case NVME_CR_AQA:
2887*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_AQA", func, s);
2888*5c4a5fe1SAndy Fiddaman 		break;
2889*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ASQ_LOW:
2890*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_ASQ_LOW", func, s);
2891*5c4a5fe1SAndy Fiddaman 		break;
2892*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ASQ_HI:
2893*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_ASQ_HI", func, s);
2894*5c4a5fe1SAndy Fiddaman 		break;
2895*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ACQ_LOW:
2896*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_ACQ_LOW", func, s);
2897*5c4a5fe1SAndy Fiddaman 		break;
2898*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ACQ_HI:
2899*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s %s NVME_CR_ACQ_HI", func, s);
2900*5c4a5fe1SAndy Fiddaman 		break;
2901*5c4a5fe1SAndy Fiddaman 	default:
2902*5c4a5fe1SAndy Fiddaman 		DPRINTF("unknown nvme bar-0 offset 0x%lx", offset);
2903*5c4a5fe1SAndy Fiddaman 	}
2904*5c4a5fe1SAndy Fiddaman 
2905*5c4a5fe1SAndy Fiddaman }
2906*5c4a5fe1SAndy Fiddaman 
2907*5c4a5fe1SAndy Fiddaman static void
2908*5c4a5fe1SAndy Fiddaman pci_nvme_write_bar_0(struct pci_nvme_softc *sc, uint64_t offset, int size,
2909*5c4a5fe1SAndy Fiddaman     uint64_t value)
2910*5c4a5fe1SAndy Fiddaman {
2911*5c4a5fe1SAndy Fiddaman 	uint32_t ccreg;
2912*5c4a5fe1SAndy Fiddaman 
2913*5c4a5fe1SAndy Fiddaman 	if (offset >= NVME_DOORBELL_OFFSET) {
2914*5c4a5fe1SAndy Fiddaman 		uint64_t belloffset = offset - NVME_DOORBELL_OFFSET;
2915*5c4a5fe1SAndy Fiddaman 		uint64_t idx = belloffset / 8; /* door bell size = 2*int */
2916*5c4a5fe1SAndy Fiddaman 		int is_sq = (belloffset % 8) < 4;
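		/*
		 * For example, belloffset 0 is the SQ0 tail doorbell, 4 the
		 * CQ0 head doorbell, 8 the SQ1 tail doorbell, and so on in
		 * 8-byte pairs (doorbell stride of zero).
		 */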
2917*5c4a5fe1SAndy Fiddaman 
2918*5c4a5fe1SAndy Fiddaman 		if ((sc->regs.csts & NVME_CSTS_RDY) == 0) {
2919*5c4a5fe1SAndy Fiddaman 			WPRINTF("doorbell write prior to RDY (offset=%#lx)\n",
2920*5c4a5fe1SAndy Fiddaman 			    offset);
2921*5c4a5fe1SAndy Fiddaman 			return;
2922*5c4a5fe1SAndy Fiddaman 		}
2923*5c4a5fe1SAndy Fiddaman 
2924*5c4a5fe1SAndy Fiddaman 		if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
2925*5c4a5fe1SAndy Fiddaman 			WPRINTF("guest attempted an overflow write offset "
2926*5c4a5fe1SAndy Fiddaman 			         "0x%lx, val 0x%lx in %s",
2927*5c4a5fe1SAndy Fiddaman 			         offset, value, __func__);
2928*5c4a5fe1SAndy Fiddaman 			return;
2929*5c4a5fe1SAndy Fiddaman 		}
2930*5c4a5fe1SAndy Fiddaman 
2931*5c4a5fe1SAndy Fiddaman 		if (is_sq) {
2932*5c4a5fe1SAndy Fiddaman 			if (sc->submit_queues[idx].qbase == NULL)
2933*5c4a5fe1SAndy Fiddaman 				return;
2934*5c4a5fe1SAndy Fiddaman 		} else if (sc->compl_queues[idx].qbase == NULL)
2935*5c4a5fe1SAndy Fiddaman 			return;
2936*5c4a5fe1SAndy Fiddaman 
2937*5c4a5fe1SAndy Fiddaman 		pci_nvme_handle_doorbell(sc, idx, is_sq, value);
2938*5c4a5fe1SAndy Fiddaman 		return;
2939*5c4a5fe1SAndy Fiddaman 	}
2940*5c4a5fe1SAndy Fiddaman 
2941*5c4a5fe1SAndy Fiddaman 	DPRINTF("nvme-write offset 0x%lx, size %d, value 0x%lx",
2942*5c4a5fe1SAndy Fiddaman 	        offset, size, value);
2943*5c4a5fe1SAndy Fiddaman 
2944*5c4a5fe1SAndy Fiddaman 	if (size != 4) {
2945*5c4a5fe1SAndy Fiddaman 		WPRINTF("guest wrote invalid size %d (offset 0x%lx, "
2946*5c4a5fe1SAndy Fiddaman 		         "val 0x%lx) to bar0 in %s",
2947*5c4a5fe1SAndy Fiddaman 		         size, offset, value, __func__);
2948*5c4a5fe1SAndy Fiddaman 		/* TODO: shutdown device */
2949*5c4a5fe1SAndy Fiddaman 		return;
2950*5c4a5fe1SAndy Fiddaman 	}
2951*5c4a5fe1SAndy Fiddaman 
2952*5c4a5fe1SAndy Fiddaman 	pci_nvme_bar0_reg_dumps(__func__, offset, 1);
2953*5c4a5fe1SAndy Fiddaman 
2954*5c4a5fe1SAndy Fiddaman 	pthread_mutex_lock(&sc->mtx);
2955*5c4a5fe1SAndy Fiddaman 
2956*5c4a5fe1SAndy Fiddaman 	switch (offset) {
2957*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CAP_LOW:
2958*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CAP_HI:
2959*5c4a5fe1SAndy Fiddaman 		/* readonly */
2960*5c4a5fe1SAndy Fiddaman 		break;
2961*5c4a5fe1SAndy Fiddaman 	case NVME_CR_VS:
2962*5c4a5fe1SAndy Fiddaman 		/* readonly */
2963*5c4a5fe1SAndy Fiddaman 		break;
2964*5c4a5fe1SAndy Fiddaman 	case NVME_CR_INTMS:
2965*5c4a5fe1SAndy Fiddaman 		/* MSI-X, so ignore */
2966*5c4a5fe1SAndy Fiddaman 		break;
2967*5c4a5fe1SAndy Fiddaman 	case NVME_CR_INTMC:
2968*5c4a5fe1SAndy Fiddaman 		/* MSI-X, so ignore */
2969*5c4a5fe1SAndy Fiddaman 		break;
2970*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CC:
2971*5c4a5fe1SAndy Fiddaman 		ccreg = (uint32_t)value;
2972*5c4a5fe1SAndy Fiddaman 
2973*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s NVME_CR_CC en %x css %x shn %x iosqes %u "
2974*5c4a5fe1SAndy Fiddaman 		         "iocqes %u",
2975*5c4a5fe1SAndy Fiddaman 		        __func__,
2976*5c4a5fe1SAndy Fiddaman 			 NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg),
2977*5c4a5fe1SAndy Fiddaman 			 NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg),
2978*5c4a5fe1SAndy Fiddaman 			 NVME_CC_GET_IOCQES(ccreg));
2979*5c4a5fe1SAndy Fiddaman 
2980*5c4a5fe1SAndy Fiddaman 		if (NVME_CC_GET_SHN(ccreg)) {
2981*5c4a5fe1SAndy Fiddaman 			/* perform shutdown - flush out data to backend */
2982*5c4a5fe1SAndy Fiddaman 			sc->regs.csts &= ~NVMEM(NVME_CSTS_REG_SHST);
2983*5c4a5fe1SAndy Fiddaman 			sc->regs.csts |= NVMEF(NVME_CSTS_REG_SHST,
2984*5c4a5fe1SAndy Fiddaman 			    NVME_SHST_COMPLETE);
2985*5c4a5fe1SAndy Fiddaman 		}
2986*5c4a5fe1SAndy Fiddaman 		if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) {
2987*5c4a5fe1SAndy Fiddaman 			if (NVME_CC_GET_EN(ccreg) == 0)
2988*5c4a5fe1SAndy Fiddaman 				/* transition 1->0 causes controller reset */
2989*5c4a5fe1SAndy Fiddaman 				pci_nvme_reset_locked(sc);
2990*5c4a5fe1SAndy Fiddaman 			else
2991*5c4a5fe1SAndy Fiddaman 				pci_nvme_init_controller(sc);
2992*5c4a5fe1SAndy Fiddaman 		}
2993*5c4a5fe1SAndy Fiddaman 
2994*5c4a5fe1SAndy Fiddaman 		/* Insert the iocqes, iosqes and en bits from the write */
2995*5c4a5fe1SAndy Fiddaman 		sc->regs.cc &= ~NVME_CC_WRITE_MASK;
2996*5c4a5fe1SAndy Fiddaman 		sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK;
2997*5c4a5fe1SAndy Fiddaman 		if (NVME_CC_GET_EN(ccreg) == 0) {
2998*5c4a5fe1SAndy Fiddaman 			/* Insert the ams, mps and css bit fields */
2999*5c4a5fe1SAndy Fiddaman 			sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
3000*5c4a5fe1SAndy Fiddaman 			sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
3001*5c4a5fe1SAndy Fiddaman 			sc->regs.csts &= ~NVME_CSTS_RDY;
3002*5c4a5fe1SAndy Fiddaman 		} else if ((sc->pending_ios == 0) &&
3003*5c4a5fe1SAndy Fiddaman 		    !(sc->regs.csts & NVME_CSTS_CFS)) {
3004*5c4a5fe1SAndy Fiddaman 			sc->regs.csts |= NVME_CSTS_RDY;
3005*5c4a5fe1SAndy Fiddaman 		}
3006*5c4a5fe1SAndy Fiddaman 		break;
3007*5c4a5fe1SAndy Fiddaman 	case NVME_CR_CSTS:
3008*5c4a5fe1SAndy Fiddaman 		break;
3009*5c4a5fe1SAndy Fiddaman 	case NVME_CR_NSSR:
3010*5c4a5fe1SAndy Fiddaman 		/* ignore writes; don't support subsystem reset */
3011*5c4a5fe1SAndy Fiddaman 		break;
3012*5c4a5fe1SAndy Fiddaman 	case NVME_CR_AQA:
3013*5c4a5fe1SAndy Fiddaman 		sc->regs.aqa = (uint32_t)value;
3014*5c4a5fe1SAndy Fiddaman 		break;
3015*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ASQ_LOW:
3016*5c4a5fe1SAndy Fiddaman 		sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) |
3017*5c4a5fe1SAndy Fiddaman 		               (0xFFFFF000 & value);
3018*5c4a5fe1SAndy Fiddaman 		break;
3019*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ASQ_HI:
3020*5c4a5fe1SAndy Fiddaman 		sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) |
3021*5c4a5fe1SAndy Fiddaman 		               (value << 32);
3022*5c4a5fe1SAndy Fiddaman 		break;
3023*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ACQ_LOW:
3024*5c4a5fe1SAndy Fiddaman 		sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) |
3025*5c4a5fe1SAndy Fiddaman 		               (0xFFFFF000 & value);
3026*5c4a5fe1SAndy Fiddaman 		break;
3027*5c4a5fe1SAndy Fiddaman 	case NVME_CR_ACQ_HI:
3028*5c4a5fe1SAndy Fiddaman 		sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) |
3029*5c4a5fe1SAndy Fiddaman 		               (value << 32);
3030*5c4a5fe1SAndy Fiddaman 		break;
3031*5c4a5fe1SAndy Fiddaman 	default:
3032*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s unknown offset 0x%lx, value 0x%lx size %d",
3033*5c4a5fe1SAndy Fiddaman 		         __func__, offset, value, size);
3034*5c4a5fe1SAndy Fiddaman 	}
3035*5c4a5fe1SAndy Fiddaman 	pthread_mutex_unlock(&sc->mtx);
3036*5c4a5fe1SAndy Fiddaman }
3037*5c4a5fe1SAndy Fiddaman 
3038*5c4a5fe1SAndy Fiddaman static void
3039*5c4a5fe1SAndy Fiddaman pci_nvme_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
3040*5c4a5fe1SAndy Fiddaman     uint64_t value)
3041*5c4a5fe1SAndy Fiddaman {
3042*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc* sc = pi->pi_arg;
3043*5c4a5fe1SAndy Fiddaman 
3044*5c4a5fe1SAndy Fiddaman 	if (baridx == pci_msix_table_bar(pi) ||
3045*5c4a5fe1SAndy Fiddaman 	    baridx == pci_msix_pba_bar(pi)) {
3046*5c4a5fe1SAndy Fiddaman 		DPRINTF("nvme-write baridx %d, msix: off 0x%lx, size %d, "
3047*5c4a5fe1SAndy Fiddaman 		         " value 0x%lx", baridx, offset, size, value);
3048*5c4a5fe1SAndy Fiddaman 
3049*5c4a5fe1SAndy Fiddaman 		pci_emul_msix_twrite(pi, offset, size, value);
3050*5c4a5fe1SAndy Fiddaman 		return;
3051*5c4a5fe1SAndy Fiddaman 	}
3052*5c4a5fe1SAndy Fiddaman 
3053*5c4a5fe1SAndy Fiddaman 	switch (baridx) {
3054*5c4a5fe1SAndy Fiddaman 	case 0:
3055*5c4a5fe1SAndy Fiddaman 		pci_nvme_write_bar_0(sc, offset, size, value);
3056*5c4a5fe1SAndy Fiddaman 		break;
3057*5c4a5fe1SAndy Fiddaman 
3058*5c4a5fe1SAndy Fiddaman 	default:
3059*5c4a5fe1SAndy Fiddaman 		DPRINTF("%s unknown baridx %d, val 0x%lx",
3060*5c4a5fe1SAndy Fiddaman 		         __func__, baridx, value);
3061*5c4a5fe1SAndy Fiddaman 	}
3062*5c4a5fe1SAndy Fiddaman }
3063*5c4a5fe1SAndy Fiddaman 
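/*
 * Read of a BAR 0 controller register. Offsets below the doorbell area
 * return a copy of the shadow register file, masked to the access size;
 * reads of the doorbell area are invalid and return zero.
 */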
3064*5c4a5fe1SAndy Fiddaman static uint64_t pci_nvme_read_bar_0(struct pci_nvme_softc* sc,
3065*5c4a5fe1SAndy Fiddaman 	uint64_t offset, int size)
3066*5c4a5fe1SAndy Fiddaman {
3067*5c4a5fe1SAndy Fiddaman 	uint64_t value;
3068*5c4a5fe1SAndy Fiddaman 
3069*5c4a5fe1SAndy Fiddaman 	pci_nvme_bar0_reg_dumps(__func__, offset, 0);
3070*5c4a5fe1SAndy Fiddaman 
3071*5c4a5fe1SAndy Fiddaman 	if (offset < NVME_DOORBELL_OFFSET) {
3072*5c4a5fe1SAndy Fiddaman 		void *p = &(sc->regs);
3073*5c4a5fe1SAndy Fiddaman 		pthread_mutex_lock(&sc->mtx);
3074*5c4a5fe1SAndy Fiddaman 		memcpy(&value, (void *)((uintptr_t)p + offset), size);
3075*5c4a5fe1SAndy Fiddaman 		pthread_mutex_unlock(&sc->mtx);
3076*5c4a5fe1SAndy Fiddaman 	} else {
3077*5c4a5fe1SAndy Fiddaman 		value = 0;
3078*5c4a5fe1SAndy Fiddaman 		WPRINTF("pci_nvme: read invalid offset %ld", offset);
3079*5c4a5fe1SAndy Fiddaman 	}
3080*5c4a5fe1SAndy Fiddaman 
3081*5c4a5fe1SAndy Fiddaman 	switch (size) {
3082*5c4a5fe1SAndy Fiddaman 	case 1:
3083*5c4a5fe1SAndy Fiddaman 		value &= 0xFF;
3084*5c4a5fe1SAndy Fiddaman 		break;
3085*5c4a5fe1SAndy Fiddaman 	case 2:
3086*5c4a5fe1SAndy Fiddaman 		value &= 0xFFFF;
3087*5c4a5fe1SAndy Fiddaman 		break;
3088*5c4a5fe1SAndy Fiddaman 	case 4:
3089*5c4a5fe1SAndy Fiddaman 		value &= 0xFFFFFFFF;
3090*5c4a5fe1SAndy Fiddaman 		break;
3091*5c4a5fe1SAndy Fiddaman 	}
3092*5c4a5fe1SAndy Fiddaman 
3093*5c4a5fe1SAndy Fiddaman 	DPRINTF("   nvme-read offset 0x%lx, size %d -> value 0x%x",
3094*5c4a5fe1SAndy Fiddaman 	         offset, size, (uint32_t)value);
3095*5c4a5fe1SAndy Fiddaman 
3096*5c4a5fe1SAndy Fiddaman 	return (value);
3097*5c4a5fe1SAndy Fiddaman }
3098*5c4a5fe1SAndy Fiddaman 
3099*5c4a5fe1SAndy Fiddaman 
3100*5c4a5fe1SAndy Fiddaman 
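/*
 * BAR read handler (pe_barread). Reads of the MSI-X table/PBA BAR go to the
 * generic MSI-X emulation; BAR 0 reads go to pci_nvme_read_bar_0() above.
 */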
3101*5c4a5fe1SAndy Fiddaman static uint64_t
3102*5c4a5fe1SAndy Fiddaman pci_nvme_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
3103*5c4a5fe1SAndy Fiddaman {
3104*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc* sc = pi->pi_arg;
3105*5c4a5fe1SAndy Fiddaman 
3106*5c4a5fe1SAndy Fiddaman 	if (baridx == pci_msix_table_bar(pi) ||
3107*5c4a5fe1SAndy Fiddaman 	    baridx == pci_msix_pba_bar(pi)) {
3108*5c4a5fe1SAndy Fiddaman 		DPRINTF("nvme-read bar: %d, msix: regoff 0x%lx, size %d",
3109*5c4a5fe1SAndy Fiddaman 		        baridx, offset, size);
3110*5c4a5fe1SAndy Fiddaman 
3111*5c4a5fe1SAndy Fiddaman 		return pci_emul_msix_tread(pi, offset, size);
3112*5c4a5fe1SAndy Fiddaman 	}
3113*5c4a5fe1SAndy Fiddaman 
3114*5c4a5fe1SAndy Fiddaman 	switch (baridx) {
3115*5c4a5fe1SAndy Fiddaman 	case 0:
3116*5c4a5fe1SAndy Fiddaman 		return pci_nvme_read_bar_0(sc, offset, size);
3117*5c4a5fe1SAndy Fiddaman 
3118*5c4a5fe1SAndy Fiddaman 	default:
3119*5c4a5fe1SAndy Fiddaman 		DPRINTF("unknown bar %d, 0x%lx", baridx, offset);
3120*5c4a5fe1SAndy Fiddaman 	}
3121*5c4a5fe1SAndy Fiddaman 
3122*5c4a5fe1SAndy Fiddaman 	return (0);
3123*5c4a5fe1SAndy Fiddaman }
3124*5c4a5fe1SAndy Fiddaman 
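/*
 * Parse the device configuration nvlist: establish defaults, apply any
 * overrides, and open the backing store (RAM disk or blockif image).
 * Returns -1 on an invalid option so that device initialization fails.
 */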
3125*5c4a5fe1SAndy Fiddaman static int
3126*5c4a5fe1SAndy Fiddaman pci_nvme_parse_config(struct pci_nvme_softc *sc, nvlist_t *nvl)
3127*5c4a5fe1SAndy Fiddaman {
3128*5c4a5fe1SAndy Fiddaman 	char bident[sizeof("XXX:XXX")];
3129*5c4a5fe1SAndy Fiddaman 	const char *value;
3130*5c4a5fe1SAndy Fiddaman 	uint32_t sectsz;
3131*5c4a5fe1SAndy Fiddaman 
3132*5c4a5fe1SAndy Fiddaman 	sc->max_queues = NVME_QUEUES;
3133*5c4a5fe1SAndy Fiddaman 	sc->max_qentries = NVME_MAX_QENTRIES;
3134*5c4a5fe1SAndy Fiddaman 	sc->ioslots = NVME_IOSLOTS;
3135*5c4a5fe1SAndy Fiddaman 	sc->num_squeues = sc->max_queues;
3136*5c4a5fe1SAndy Fiddaman 	sc->num_cqueues = sc->max_queues;
3137*5c4a5fe1SAndy Fiddaman 	sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
3138*5c4a5fe1SAndy Fiddaman 	sectsz = 0;
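	/*
	 * The default serial number is derived from the PCI slot and
	 * function; the "ser" option below overrides it.
	 */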
3139*5c4a5fe1SAndy Fiddaman #ifdef	__FreeBSD__
3140*5c4a5fe1SAndy Fiddaman 	snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
3141*5c4a5fe1SAndy Fiddaman 	         "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
3142*5c4a5fe1SAndy Fiddaman #else
3143*5c4a5fe1SAndy Fiddaman 	snprintf((char *)sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
3144*5c4a5fe1SAndy Fiddaman 	         "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
3145*5c4a5fe1SAndy Fiddaman #endif
3146*5c4a5fe1SAndy Fiddaman 
3147*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "maxq");
3148*5c4a5fe1SAndy Fiddaman 	if (value != NULL)
3149*5c4a5fe1SAndy Fiddaman 		sc->max_queues = atoi(value);
3150*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "qsz");
3151*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3152*5c4a5fe1SAndy Fiddaman 		sc->max_qentries = atoi(value);
3153*5c4a5fe1SAndy Fiddaman 		if (sc->max_qentries <= 0) {
3154*5c4a5fe1SAndy Fiddaman 			EPRINTLN("nvme: Invalid qsz option %d",
3155*5c4a5fe1SAndy Fiddaman 			    sc->max_qentries);
3156*5c4a5fe1SAndy Fiddaman 			return (-1);
3157*5c4a5fe1SAndy Fiddaman 		}
3158*5c4a5fe1SAndy Fiddaman 	}
3159*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "ioslots");
3160*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3161*5c4a5fe1SAndy Fiddaman 		sc->ioslots = atoi(value);
3162*5c4a5fe1SAndy Fiddaman 		if (sc->ioslots <= 0) {
3163*5c4a5fe1SAndy Fiddaman 			EPRINTLN("Invalid ioslots option %d", sc->ioslots);
3164*5c4a5fe1SAndy Fiddaman 			return (-1);
3165*5c4a5fe1SAndy Fiddaman 		}
3166*5c4a5fe1SAndy Fiddaman 	}
3167*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "sectsz");
3168*5c4a5fe1SAndy Fiddaman 	if (value != NULL)
3169*5c4a5fe1SAndy Fiddaman 		sectsz = atoi(value);
3170*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "ser");
3171*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3172*5c4a5fe1SAndy Fiddaman 		/*
3173*5c4a5fe1SAndy Fiddaman 		 * This field indicates the Product Serial Number in
3174*5c4a5fe1SAndy Fiddaman 		 * 7-bit ASCII; unused bytes should be space characters.
3175*5c4a5fe1SAndy Fiddaman 		 * Ref: NVMe v1.3c.
3176*5c4a5fe1SAndy Fiddaman 		 */
3177*5c4a5fe1SAndy Fiddaman 		cpywithpad((char *)sc->ctrldata.sn,
3178*5c4a5fe1SAndy Fiddaman 		    sizeof(sc->ctrldata.sn), value, ' ');
3179*5c4a5fe1SAndy Fiddaman 	}
3180*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "eui64");
3181*5c4a5fe1SAndy Fiddaman 	if (value != NULL)
3182*5c4a5fe1SAndy Fiddaman 		sc->nvstore.eui64 = htobe64(strtoull(value, NULL, 0));
3183*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "dsm");
3184*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3185*5c4a5fe1SAndy Fiddaman 		if (strcmp(value, "auto") == 0)
3186*5c4a5fe1SAndy Fiddaman 			sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
3187*5c4a5fe1SAndy Fiddaman 		else if (strcmp(value, "enable") == 0)
3188*5c4a5fe1SAndy Fiddaman 			sc->dataset_management = NVME_DATASET_MANAGEMENT_ENABLE;
3189*5c4a5fe1SAndy Fiddaman 		else if (strcmp(value, "disable") == 0)
3190*5c4a5fe1SAndy Fiddaman 			sc->dataset_management = NVME_DATASET_MANAGEMENT_DISABLE;
3191*5c4a5fe1SAndy Fiddaman 	}
3192*5c4a5fe1SAndy Fiddaman 
3193*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "bootindex");
3194*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3195*5c4a5fe1SAndy Fiddaman 		if (pci_emul_add_boot_device(sc->nsc_pi, atoi(value))) {
3196*5c4a5fe1SAndy Fiddaman 			EPRINTLN("Invalid bootindex %d", atoi(value));
3197*5c4a5fe1SAndy Fiddaman 			return (-1);
3198*5c4a5fe1SAndy Fiddaman 		}
3199*5c4a5fe1SAndy Fiddaman 	}
3200*5c4a5fe1SAndy Fiddaman 
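	/*
	 * A "ram" backing store allocates zeroed anonymous memory with a
	 * fixed 4096-byte sector size; otherwise open a blockif backing
	 * store identified by the PCI slot:function.
	 */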
3201*5c4a5fe1SAndy Fiddaman 	value = get_config_value_node(nvl, "ram");
3202*5c4a5fe1SAndy Fiddaman 	if (value != NULL) {
3203*5c4a5fe1SAndy Fiddaman 		uint64_t sz = strtoull(value, NULL, 10);
3204*5c4a5fe1SAndy Fiddaman 
3205*5c4a5fe1SAndy Fiddaman 		sc->nvstore.type = NVME_STOR_RAM;
3206*5c4a5fe1SAndy Fiddaman 		sc->nvstore.size = sz * 1024 * 1024;
3207*5c4a5fe1SAndy Fiddaman 		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
3208*5c4a5fe1SAndy Fiddaman 		sc->nvstore.sectsz = 4096;
3209*5c4a5fe1SAndy Fiddaman 		sc->nvstore.sectsz_bits = 12;
3210*5c4a5fe1SAndy Fiddaman 		if (sc->nvstore.ctx == NULL) {
3211*5c4a5fe1SAndy Fiddaman 			EPRINTLN("nvme: Unable to allocate RAM");
3212*5c4a5fe1SAndy Fiddaman 			return (-1);
3213*5c4a5fe1SAndy Fiddaman 		}
3214*5c4a5fe1SAndy Fiddaman 	} else {
3215*5c4a5fe1SAndy Fiddaman 		snprintf(bident, sizeof(bident), "%u:%u",
3216*5c4a5fe1SAndy Fiddaman 		    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
3217*5c4a5fe1SAndy Fiddaman 		sc->nvstore.ctx = blockif_open(nvl, bident);
3218*5c4a5fe1SAndy Fiddaman 		if (sc->nvstore.ctx == NULL) {
3219*5c4a5fe1SAndy Fiddaman 			EPRINTLN("nvme: Could not open backing file: %s",
3220*5c4a5fe1SAndy Fiddaman 			    strerror(errno));
3221*5c4a5fe1SAndy Fiddaman 			return (-1);
3222*5c4a5fe1SAndy Fiddaman 		}
3223*5c4a5fe1SAndy Fiddaman 		sc->nvstore.type = NVME_STOR_BLOCKIF;
3224*5c4a5fe1SAndy Fiddaman 		sc->nvstore.size = blockif_size(sc->nvstore.ctx);
3225*5c4a5fe1SAndy Fiddaman 	}
3226*5c4a5fe1SAndy Fiddaman 
3227*5c4a5fe1SAndy Fiddaman 	if (sectsz == 512 || sectsz == 4096 || sectsz == 8192)
3228*5c4a5fe1SAndy Fiddaman 		sc->nvstore.sectsz = sectsz;
3229*5c4a5fe1SAndy Fiddaman 	else if (sc->nvstore.type != NVME_STOR_RAM)
3230*5c4a5fe1SAndy Fiddaman 		sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx);
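	/* Derive sectsz_bits = log2(sectsz), e.g. 512 -> 9, 4096 -> 12. */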
3231*5c4a5fe1SAndy Fiddaman 	for (sc->nvstore.sectsz_bits = 9;
3232*5c4a5fe1SAndy Fiddaman 	     (1U << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz;
3233*5c4a5fe1SAndy Fiddaman 	     sc->nvstore.sectsz_bits++);
3234*5c4a5fe1SAndy Fiddaman 
3235*5c4a5fe1SAndy Fiddaman 	if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES)
3236*5c4a5fe1SAndy Fiddaman 		sc->max_queues = NVME_QUEUES;
3237*5c4a5fe1SAndy Fiddaman 
3238*5c4a5fe1SAndy Fiddaman 	return (0);
3239*5c4a5fe1SAndy Fiddaman }
3240*5c4a5fe1SAndy Fiddaman 
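/*
 * blockif resize callback: pick up the new backing store size, refresh the
 * namespace size fields, and post a Namespace Attribute Changed notice so
 * the guest can re-read the namespace data.
 */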
3241*5c4a5fe1SAndy Fiddaman static void
3242*5c4a5fe1SAndy Fiddaman pci_nvme_resized(struct blockif_ctxt *bctxt __unused, void *arg,
3243*5c4a5fe1SAndy Fiddaman     size_t new_size)
3244*5c4a5fe1SAndy Fiddaman {
3245*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc *sc;
3246*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_blockstore *nvstore;
3247*5c4a5fe1SAndy Fiddaman 	struct nvme_namespace_data *nd;
3248*5c4a5fe1SAndy Fiddaman 
3249*5c4a5fe1SAndy Fiddaman 	sc = arg;
3250*5c4a5fe1SAndy Fiddaman 	nvstore = &sc->nvstore;
3251*5c4a5fe1SAndy Fiddaman 	nd = &sc->nsdata;
3252*5c4a5fe1SAndy Fiddaman 
3253*5c4a5fe1SAndy Fiddaman 	nvstore->size = new_size;
3254*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_nsdata_size(nvstore, nd);
3255*5c4a5fe1SAndy Fiddaman 
3256*5c4a5fe1SAndy Fiddaman 	/* Add changed NSID to list */
3257*5c4a5fe1SAndy Fiddaman 	sc->ns_log.ns[0] = 1;
3258*5c4a5fe1SAndy Fiddaman 	sc->ns_log.ns[1] = 0;
3259*5c4a5fe1SAndy Fiddaman 
3260*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_NOTICE,
3261*5c4a5fe1SAndy Fiddaman 	    PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED);
3262*5c4a5fe1SAndy Fiddaman }
3263*5c4a5fe1SAndy Fiddaman 
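/*
 * Device attach entry point: parse the configuration, pre-allocate the I/O
 * request slots, set up PCI config space, the register BAR and MSI-X, then
 * initialize namespace data, controller data, log pages and features before
 * performing the initial controller reset.
 */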
3264*5c4a5fe1SAndy Fiddaman static int
3265*5c4a5fe1SAndy Fiddaman pci_nvme_init(struct pci_devinst *pi, nvlist_t *nvl)
3266*5c4a5fe1SAndy Fiddaman {
3267*5c4a5fe1SAndy Fiddaman 	struct pci_nvme_softc *sc;
3268*5c4a5fe1SAndy Fiddaman 	uint32_t pci_membar_sz;
3269*5c4a5fe1SAndy Fiddaman 	int	error;
3270*5c4a5fe1SAndy Fiddaman 
3271*5c4a5fe1SAndy Fiddaman 	error = 0;
3272*5c4a5fe1SAndy Fiddaman 
3273*5c4a5fe1SAndy Fiddaman 	sc = calloc(1, sizeof(struct pci_nvme_softc));
3274*5c4a5fe1SAndy Fiddaman 	pi->pi_arg = sc;
3275*5c4a5fe1SAndy Fiddaman 	sc->nsc_pi = pi;
3276*5c4a5fe1SAndy Fiddaman 
3277*5c4a5fe1SAndy Fiddaman 	error = pci_nvme_parse_config(sc, nvl);
3278*5c4a5fe1SAndy Fiddaman 	if (error < 0)
3279*5c4a5fe1SAndy Fiddaman 		goto done;
3280*5c4a5fe1SAndy Fiddaman 	else
3281*5c4a5fe1SAndy Fiddaman 		error = 0;
3282*5c4a5fe1SAndy Fiddaman 
3283*5c4a5fe1SAndy Fiddaman 	STAILQ_INIT(&sc->ioreqs_free);
3284*5c4a5fe1SAndy Fiddaman 	sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq));
3285*5c4a5fe1SAndy Fiddaman 	for (uint32_t i = 0; i < sc->ioslots; i++) {
3286*5c4a5fe1SAndy Fiddaman 		STAILQ_INSERT_TAIL(&sc->ioreqs_free, &sc->ioreqs[i], link);
3287*5c4a5fe1SAndy Fiddaman 	}
3288*5c4a5fe1SAndy Fiddaman 
3289*5c4a5fe1SAndy Fiddaman 	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A);
3290*5c4a5fe1SAndy Fiddaman 	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
3291*5c4a5fe1SAndy Fiddaman 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
3292*5c4a5fe1SAndy Fiddaman 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM);
3293*5c4a5fe1SAndy Fiddaman 	pci_set_cfgdata8(pi, PCIR_PROGIF,
3294*5c4a5fe1SAndy Fiddaman 	                 PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0);
3295*5c4a5fe1SAndy Fiddaman 
3296*5c4a5fe1SAndy Fiddaman 	/*
3297*5c4a5fe1SAndy Fiddaman 	 * Allocate size of NVMe registers + doorbell space for all queues.
3298*5c4a5fe1SAndy Fiddaman 	 *
3299*5c4a5fe1SAndy Fiddaman 	 * The specification requires a minimum memory I/O window size of 16K.
3300*5c4a5fe1SAndy Fiddaman 	 * The Windows driver will refuse to start a device with a smaller
3301*5c4a5fe1SAndy Fiddaman 	 * window.
3302*5c4a5fe1SAndy Fiddaman 	 */
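	/*
	 * Each queue pair, including the admin pair, gets a 32-bit submission
	 * queue tail doorbell and a 32-bit completion queue head doorbell.
	 */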
3303*5c4a5fe1SAndy Fiddaman 	pci_membar_sz = sizeof(struct nvme_registers) +
3304*5c4a5fe1SAndy Fiddaman 	    2 * sizeof(uint32_t) * (sc->max_queues + 1);
3305*5c4a5fe1SAndy Fiddaman 	pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN);
3306*5c4a5fe1SAndy Fiddaman 
3307*5c4a5fe1SAndy Fiddaman 	DPRINTF("nvme membar size: %u", pci_membar_sz);
3308*5c4a5fe1SAndy Fiddaman 
3309*5c4a5fe1SAndy Fiddaman 	error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz);
3310*5c4a5fe1SAndy Fiddaman 	if (error) {
3311*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s pci alloc mem bar failed", __func__);
3312*5c4a5fe1SAndy Fiddaman 		goto done;
3313*5c4a5fe1SAndy Fiddaman 	}
3314*5c4a5fe1SAndy Fiddaman 
3315*5c4a5fe1SAndy Fiddaman 	error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR);
3316*5c4a5fe1SAndy Fiddaman 	if (error) {
3317*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s pci add msixcap failed", __func__);
3318*5c4a5fe1SAndy Fiddaman 		goto done;
3319*5c4a5fe1SAndy Fiddaman 	}
3320*5c4a5fe1SAndy Fiddaman 
3321*5c4a5fe1SAndy Fiddaman 	error = pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_INT_EP);
3322*5c4a5fe1SAndy Fiddaman 	if (error) {
3323*5c4a5fe1SAndy Fiddaman 		WPRINTF("%s pci add Express capability failed", __func__);
3324*5c4a5fe1SAndy Fiddaman 		goto done;
3325*5c4a5fe1SAndy Fiddaman 	}
3326*5c4a5fe1SAndy Fiddaman 
3327*5c4a5fe1SAndy Fiddaman 	pthread_mutex_init(&sc->mtx, NULL);
3328*5c4a5fe1SAndy Fiddaman 	sem_init(&sc->iosemlock, 0, sc->ioslots);
3329*5c4a5fe1SAndy Fiddaman 	blockif_register_resize_callback(sc->nvstore.ctx, pci_nvme_resized, sc);
3330*5c4a5fe1SAndy Fiddaman 
3331*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_queues(sc, sc->max_queues, sc->max_queues);
3332*5c4a5fe1SAndy Fiddaman 	/*
3333*5c4a5fe1SAndy Fiddaman 	 * Controller data depends on Namespace data so initialize Namespace
3334*5c4a5fe1SAndy Fiddaman 	 * data first.
3335*5c4a5fe1SAndy Fiddaman 	 */
3336*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_nsdata(sc, &sc->nsdata, 1, &sc->nvstore);
3337*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_ctrldata(sc);
3338*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_logpages(sc);
3339*5c4a5fe1SAndy Fiddaman 	pci_nvme_init_features(sc);
3340*5c4a5fe1SAndy Fiddaman 
3341*5c4a5fe1SAndy Fiddaman 	pci_nvme_aer_init(sc);
3342*5c4a5fe1SAndy Fiddaman 	pci_nvme_aen_init(sc);
3343*5c4a5fe1SAndy Fiddaman 
3344*5c4a5fe1SAndy Fiddaman 	pci_nvme_reset(sc);
3345*5c4a5fe1SAndy Fiddaman done:
3346*5c4a5fe1SAndy Fiddaman 	return (error);
3347*5c4a5fe1SAndy Fiddaman }
3348*5c4a5fe1SAndy Fiddaman 
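/*
 * Translate a legacy comma-separated option string into config nodes: a
 * leading "ram=" value is split off and stored here, with any remaining
 * options handled by pci_parse_legacy_config(); other option strings are
 * passed to blockif_legacy_config().
 */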
3349*5c4a5fe1SAndy Fiddaman static int
3350*5c4a5fe1SAndy Fiddaman pci_nvme_legacy_config(nvlist_t *nvl, const char *opts)
3351*5c4a5fe1SAndy Fiddaman {
3352*5c4a5fe1SAndy Fiddaman 	char *cp, *ram;
3353*5c4a5fe1SAndy Fiddaman 
3354*5c4a5fe1SAndy Fiddaman 	if (opts == NULL)
3355*5c4a5fe1SAndy Fiddaman 		return (0);
3356*5c4a5fe1SAndy Fiddaman 
3357*5c4a5fe1SAndy Fiddaman 	if (strncmp(opts, "ram=", 4) == 0) {
3358*5c4a5fe1SAndy Fiddaman 		cp = strchr(opts, ',');
3359*5c4a5fe1SAndy Fiddaman 		if (cp == NULL) {
3360*5c4a5fe1SAndy Fiddaman 			set_config_value_node(nvl, "ram", opts + 4);
3361*5c4a5fe1SAndy Fiddaman 			return (0);
3362*5c4a5fe1SAndy Fiddaman 		}
3363*5c4a5fe1SAndy Fiddaman 		ram = strndup(opts + 4, cp - opts - 4);
3364*5c4a5fe1SAndy Fiddaman 		set_config_value_node(nvl, "ram", ram);
3365*5c4a5fe1SAndy Fiddaman 		free(ram);
3366*5c4a5fe1SAndy Fiddaman 		return (pci_parse_legacy_config(nvl, cp + 1));
3367*5c4a5fe1SAndy Fiddaman 	} else
3368*5c4a5fe1SAndy Fiddaman 		return (blockif_legacy_config(nvl, opts));
3369*5c4a5fe1SAndy Fiddaman }
3370*5c4a5fe1SAndy Fiddaman 
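/* Register this emulation with the PCI device emulation framework as "nvme". */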
3371*5c4a5fe1SAndy Fiddaman static const struct pci_devemu pci_de_nvme = {
3372*5c4a5fe1SAndy Fiddaman 	.pe_emu =	"nvme",
3373*5c4a5fe1SAndy Fiddaman 	.pe_init =	pci_nvme_init,
3374*5c4a5fe1SAndy Fiddaman 	.pe_legacy_config = pci_nvme_legacy_config,
3375*5c4a5fe1SAndy Fiddaman 	.pe_barwrite =	pci_nvme_write,
3376*5c4a5fe1SAndy Fiddaman 	.pe_barread =	pci_nvme_read
3377*5c4a5fe1SAndy Fiddaman };
3378*5c4a5fe1SAndy Fiddaman PCI_EMUL_SET(pci_de_nvme);
3379