/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/nv.h>
#include <sys/sysctl.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_transport.h>
#include <dev/nvmf/host/nvmf_var.h>

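/*
 * A host command slot.  Each slot owns a fixed command ID (cid) and
 * points at the request currently using that cid, if any.  Idle slots
 * sit on the queue pair's free_commands list; busy slots are tracked
 * in active_commands[], indexed by cid.
 */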
struct nvmf_host_command {
	struct nvmf_request *req;
	TAILQ_ENTRY(nvmf_host_command) link;
	uint16_t cid;
};

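/*
 * Host-side state for one queue pair (admin or I/O): the underlying
 * transport qpair, submission queue head/tail tracking for flow
 * control, the pool of command slots, requests waiting for a free
 * slot, and per-queue sysctl state.
 */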
struct nvmf_host_qpair {
	struct nvmf_softc *sc;
	struct nvmf_qpair *qp;

	bool	sq_flow_control;
	bool	shutting_down;
	u_int	allocating;
	u_int	num_commands;
	uint16_t sqhd;
	uint16_t sqtail;
	uint64_t submitted;

	struct mtx lock;

	TAILQ_HEAD(, nvmf_host_command) free_commands;
	STAILQ_HEAD(, nvmf_request) pending_requests;

	/* Indexed by cid. */
	struct nvmf_host_command **active_commands;

	char	name[16];
	struct sysctl_ctx_list sysctl_ctx;
};

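/*
 * Allocate a request wrapping a command capsule for the given SQE.
 * Returns NULL if memory allocation fails or if the queue pair has
 * already been shut down.  The 'allocating' counter lets
 * nvmf_shutdown_qp() wait out allocations that race with shutdown.
 */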
struct nvmf_request *
nvmf_allocate_request(struct nvmf_host_qpair *qp, void *sqe,
    nvmf_request_complete_t *cb, void *cb_arg, int how)
{
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("%s: invalid how", __func__));

	req = malloc(sizeof(*req), M_NVMF, how | M_ZERO);
	if (req == NULL)
		return (NULL);

	mtx_lock(&qp->lock);
	nq = qp->qp;
	if (nq == NULL) {
		mtx_unlock(&qp->lock);
		free(req, M_NVMF);
		return (NULL);
	}
	qp->allocating++;
	MPASS(qp->allocating != 0);
	mtx_unlock(&qp->lock);

	req->qp = qp;
	req->cb = cb;
	req->cb_arg = cb_arg;
	req->nc = nvmf_allocate_command(nq, sqe, how);
	if (req->nc == NULL) {
		free(req, M_NVMF);
		req = NULL;
	}

	mtx_lock(&qp->lock);
	qp->allocating--;
	if (qp->allocating == 0 && qp->shutting_down)
		wakeup(qp);
	mtx_unlock(&qp->lock);

	return (req);
}

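/*
 * Complete a request locally with a synthetic "Command Aborted By
 * Host" status.  Used when the queue pair is torn down before a
 * response arrives.
 */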
static void
nvmf_abort_request(struct nvmf_request *req, uint16_t cid)
{
	struct nvme_completion cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.cid = cid;
	cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) |
	    NVMEF(NVME_STATUS_SC, NVME_SC_COMMAND_ABORTED_BY_HOST));
	req->cb(req->cb_arg, &cqe);
}

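/* Release a request and its capsule, if one was allocated. */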
void
nvmf_free_request(struct nvmf_request *req)
{
	if (req->nc != NULL)
		nvmf_free_capsule(req->nc);
	free(req, M_NVMF);
}

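/*
 * Send a command on the wire using the given command slot.  Called
 * with the qpair lock held; the lock is dropped before the capsule is
 * transmitted.  A transmit failure triggers a disconnect.
 */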
static void
nvmf_dispatch_command(struct nvmf_host_qpair *qp, struct nvmf_host_command *cmd)
{
	struct nvmf_softc *sc = qp->sc;
	struct nvme_command *sqe;
	struct nvmf_capsule *nc;
	uint16_t new_sqtail;
	int error;

	mtx_assert(&qp->lock, MA_OWNED);

	qp->submitted++;

	/*
	 * Update flow control tracking.  This is just a sanity check.
	 * Since num_commands == qsize - 1, there can never be too
	 * many commands in flight.
	 */
	new_sqtail = (qp->sqtail + 1) % (qp->num_commands + 1);
	KASSERT(new_sqtail != qp->sqhd, ("%s: qp %p is full", __func__, qp));
	qp->sqtail = new_sqtail;
	mtx_unlock(&qp->lock);

	nc = cmd->req->nc;
	sqe = nvmf_capsule_sqe(nc);

	/*
	 * NB: Don't bother byte-swapping the cid so that receive
	 * doesn't have to swap.
	 */
	sqe->cid = cmd->cid;

	error = nvmf_transmit_capsule(nc);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to transmit capsule: %d, disconnecting\n", error);
		nvmf_disconnect(sc);
		return;
	}

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_tx_traffic, 1);
}

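/*
 * Transport error callback.  Log the error (unless it is a clean
 * close while the host is detaching) and start a disconnect.
 */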
static void
nvmf_qp_error(void *arg, int error)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;

	/* Ignore simple close of queue pairs during shutdown. */
	if (!(sc->detaching && error == 0))
		device_printf(sc->dev, "error %d on %s, disconnecting\n", error,
		    qp->name);
	nvmf_disconnect(sc);
}

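/*
 * Transport receive callback for completion capsules.  Validates the
 * CID, updates submission queue head tracking, hands the completion
 * to the request's callback, and either returns the command slot to
 * the free list or immediately reuses it for a pending request.
 */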
static void
nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	const struct nvme_completion *cqe;
	uint16_t cid;

	cqe = nvmf_capsule_cqe(nc);

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_rx_traffic, 1);

	/*
	 * NB: Don't bother byte-swapping the cid as transmit doesn't
	 * swap either.
	 */
	cid = cqe->cid;

	if (cid > qp->num_commands) {
		device_printf(sc->dev,
		    "received invalid CID %u, disconnecting\n", cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	/* Update flow control tracking. */
	mtx_lock(&qp->lock);
	if (qp->sq_flow_control) {
		if (nvmf_sqhd_valid(nc))
			qp->sqhd = le16toh(cqe->sqhd);
	} else {
		/*
		 * If SQ FC is disabled, just advance the head for
		 * each response capsule received.
		 */
		qp->sqhd = (qp->sqhd + 1) % (qp->num_commands + 1);
	}

	/*
	 * If the queue has been shutdown due to an error, silently
	 * drop the response.
	 */
	if (qp->qp == NULL) {
		device_printf(sc->dev,
		    "received completion for CID %u on shutdown %s\n", cid,
		    qp->name);
		mtx_unlock(&qp->lock);
		nvmf_free_capsule(nc);
		return;
	}

	cmd = qp->active_commands[cid];
	if (cmd == NULL) {
		mtx_unlock(&qp->lock);
		device_printf(sc->dev,
		    "received completion for inactive CID %u, disconnecting\n",
		    cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	KASSERT(cmd->cid == cid, ("%s: CID mismatch", __func__));
	req = cmd->req;
	cmd->req = NULL;
	if (STAILQ_EMPTY(&qp->pending_requests)) {
		qp->active_commands[cid] = NULL;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
		mtx_unlock(&qp->lock);
	} else {
		cmd->req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		nvmf_dispatch_command(qp, cmd);
	}

	req->cb(req->cb_arg, cqe);
	nvmf_free_capsule(nc);
	nvmf_free_request(req);
}

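/*
 * Export per-queue statistics via sysctl: an "adminq" node for the
 * admin queue or a node named after the queue ID for an I/O queue,
 * containing the queue size, submission queue head/tail, and the
 * number of commands submitted.
 */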
static void
nvmf_sysctls_qp(struct nvmf_softc *sc, struct nvmf_host_qpair *qp,
    bool admin, u_int qid)
{
	struct sysctl_ctx_list *ctx = &qp->sysctl_ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *list;
	char name[8];

	if (admin) {
		oid = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
		    "adminq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");
	} else {
		snprintf(name, sizeof(name), "%u", qid);
		oid = SYSCTL_ADD_NODE(ctx, sc->ioq_oid_list, OID_AUTO, name,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "I/O Queue");
	}
	list = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "num_entries", CTLFLAG_RD,
	    NULL, qp->num_commands + 1, "Number of entries in queue");
	SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_head", CTLFLAG_RD, &qp->sqhd,
	    0, "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_tail", CTLFLAG_RD, &qp->sqtail,
	    0, "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_U64(ctx, list, OID_AUTO, "num_cmds", CTLFLAG_RD,
	    &qp->submitted, 0, "Number of commands submitted");
}

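/*
 * Create a host queue pair from the connection parameters in 'nvl'
 * (admin flag, SQ flow control, initial sqhd/sqtail, and qsize),
 * pre-allocate its command slots, and establish the transport qpair.
 * Returns NULL if the transport qpair cannot be created.
 */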
struct nvmf_host_qpair *
nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
    const nvlist_t *nvl, const char *name, u_int qid)
{
	struct nvmf_host_command *cmd, *ncmd;
	struct nvmf_host_qpair *qp;
	u_int i;
	bool admin;

	admin = nvlist_get_bool(nvl, "admin");
	qp = malloc(sizeof(*qp), M_NVMF, M_WAITOK | M_ZERO);
	qp->sc = sc;
	qp->sq_flow_control = nvlist_get_bool(nvl, "sq_flow_control");
	qp->sqhd = nvlist_get_number(nvl, "sqhd");
	qp->sqtail = nvlist_get_number(nvl, "sqtail");
	strlcpy(qp->name, name, sizeof(qp->name));
	mtx_init(&qp->lock, "nvmf qp", NULL, MTX_DEF);
	(void)sysctl_ctx_init(&qp->sysctl_ctx);

	/*
	 * Allocate a spare command slot for each pending AER command
	 * on the admin queue.
	 */
	qp->num_commands = nvlist_get_number(nvl, "qsize") - 1;
	if (admin)
		qp->num_commands += sc->num_aer;

	qp->active_commands = malloc(sizeof(*qp->active_commands) *
	    qp->num_commands, M_NVMF, M_WAITOK | M_ZERO);
	TAILQ_INIT(&qp->free_commands);
	for (i = 0; i < qp->num_commands; i++) {
		cmd = malloc(sizeof(*cmd), M_NVMF, M_WAITOK | M_ZERO);
		cmd->cid = i;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
	}
	STAILQ_INIT(&qp->pending_requests);

	qp->qp = nvmf_allocate_qpair(trtype, false, nvl, nvmf_qp_error, qp,
	    nvmf_receive_capsule, qp);
	if (qp->qp == NULL) {
		(void)sysctl_ctx_free(&qp->sysctl_ctx);
		TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
			TAILQ_REMOVE(&qp->free_commands, cmd, link);
			free(cmd, M_NVMF);
		}
		free(qp->active_commands, M_NVMF);
		mtx_destroy(&qp->lock);
		free(qp, M_NVMF);
		return (NULL);
	}

	nvmf_sysctls_qp(sc, qp, admin, qid);

	return (qp);
}

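/*
 * Detach the transport qpair and abort all outstanding requests.
 * Active requests have their capsules freed by nvmf_free_qpair();
 * both active and pending requests are completed with a synthetic
 * aborted status.  Handles being called again after a previous
 * shutdown.
 */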
void
nvmf_shutdown_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	mtx_lock(&qp->lock);
	nq = qp->qp;
	qp->qp = NULL;

	if (nq == NULL) {
		while (qp->shutting_down)
			mtx_sleep(qp, &qp->lock, 0, "nvmfqpsh", 0);
		mtx_unlock(&qp->lock);
		return;
	}
	qp->shutting_down = true;
	while (qp->allocating != 0)
		mtx_sleep(qp, &qp->lock, 0, "nvmfqpqu", 0);
	mtx_unlock(&qp->lock);

	nvmf_free_qpair(nq);

	/*
	 * Abort outstanding requests.  Active requests will have
	 * their I/O completions invoked and associated capsules freed
	 * by the transport layer via nvmf_free_qpair.  Pending
	 * requests must have their I/O completion invoked via
	 * nvmf_abort_capsule_data.
	 */
	for (u_int i = 0; i < qp->num_commands; i++) {
		cmd = qp->active_commands[i];
		if (cmd != NULL) {
			if (!cmd->req->aer)
				printf("%s: aborted active command %p (CID %u)\n",
				    __func__, cmd->req, cmd->cid);

			/* This was freed by nvmf_free_qpair. */
			cmd->req->nc = NULL;
			nvmf_abort_request(cmd->req, cmd->cid);
			nvmf_free_request(cmd->req);
			free(cmd, M_NVMF);
		}
	}
	while (!STAILQ_EMPTY(&qp->pending_requests)) {
		req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		if (!req->aer)
			printf("%s: aborted pending command %p\n", __func__,
			    req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
	}

	mtx_lock(&qp->lock);
	qp->shutting_down = false;
	mtx_unlock(&qp->lock);
	wakeup(qp);
}

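/*
 * Shut down the queue pair and free all of its resources, including
 * the free command slots, the active command table, and the qpair
 * itself.
 */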
void
nvmf_destroy_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd, *ncmd;

	nvmf_shutdown_qp(qp);
	(void)sysctl_ctx_free(&qp->sysctl_ctx);

	TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
		TAILQ_REMOVE(&qp->free_commands, cmd, link);
		free(cmd, M_NVMF);
	}
	free(qp->active_commands, M_NVMF);
	mtx_destroy(&qp->lock);
	free(qp, M_NVMF);
}

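/*
 * Submit a request on its queue pair.  If the queue pair has been
 * shut down, the request is aborted immediately.  If no command slot
 * is free, the request is queued and dispatched later by
 * nvmf_receive_capsule() when a slot completes.
 */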
void
nvmf_submit_request(struct nvmf_request *req)
{
	struct nvmf_host_qpair *qp;
	struct nvmf_host_command *cmd;

	qp = req->qp;
	mtx_lock(&qp->lock);
	if (qp->qp == NULL) {
		mtx_unlock(&qp->lock);
		printf("%s: aborted pending command %p\n", __func__, req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
		return;
	}
	cmd = TAILQ_FIRST(&qp->free_commands);
	if (cmd == NULL) {
		/*
		 * Queue this request.  Will be sent after enough
		 * in-flight requests have completed.
		 */
		STAILQ_INSERT_TAIL(&qp->pending_requests, req, link);
		mtx_unlock(&qp->lock);
		return;
	}

	TAILQ_REMOVE(&qp->free_commands, cmd, link);
	KASSERT(qp->active_commands[cmd->cid] == NULL,
	    ("%s: CID already busy", __func__));
	qp->active_commands[cmd->cid] = cmd;
	cmd->req = req;
	nvmf_dispatch_command(qp, cmd);
}