/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_transport.h>
#include <dev/nvmf/host/nvmf_var.h>

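/*
 * A host command tracks a single in-flight request and owns the
 * command identifier (CID) used to match its eventual completion.
 */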
struct nvmf_host_command {
	struct nvmf_request *req;
	TAILQ_ENTRY(nvmf_host_command) link;
	uint16_t cid;
};

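/*
 * Per-queue-pair state: the pool of command slots, requests queued
 * while all slots are busy, and submission queue flow-control state.
 */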
struct nvmf_host_qpair {
	struct nvmf_softc *sc;
	struct nvmf_qpair *qp;

	bool	sq_flow_control;
	bool	shutting_down;
	u_int	allocating;
	u_int	num_commands;
	uint16_t sqhd;
	uint16_t sqtail;
	uint64_t submitted;

	struct mtx lock;

	TAILQ_HEAD(, nvmf_host_command) free_commands;
	STAILQ_HEAD(, nvmf_request) pending_requests;

	/* Indexed by cid. */
	struct nvmf_host_command **active_commands;

	char	name[16];
	struct sysctl_ctx_list sysctl_ctx;
};

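/*
 * Allocate a request wrapping the given submission queue entry.  The
 * allocating counter keeps the underlying transport queue pair alive
 * until the capsule allocation has either completed or failed.
 */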
struct nvmf_request *
nvmf_allocate_request(struct nvmf_host_qpair *qp, void *sqe,
    nvmf_request_complete_t *cb, void *cb_arg, int how)
{
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("%s: invalid how", __func__));

	req = malloc(sizeof(*req), M_NVMF, how | M_ZERO);
	if (req == NULL)
		return (NULL);

	mtx_lock(&qp->lock);
	nq = qp->qp;
	if (nq == NULL) {
		mtx_unlock(&qp->lock);
		free(req, M_NVMF);
		return (NULL);
	}
	qp->allocating++;
	MPASS(qp->allocating != 0);
	mtx_unlock(&qp->lock);

	req->qp = qp;
	req->cb = cb;
	req->cb_arg = cb_arg;
	req->nc = nvmf_allocate_command(nq, sqe, how);
	if (req->nc == NULL) {
		free(req, M_NVMF);
		req = NULL;
	}

	mtx_lock(&qp->lock);
	qp->allocating--;
	if (qp->allocating == 0 && qp->shutting_down)
		wakeup(qp);
	mtx_unlock(&qp->lock);

	return (req);
}

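/*
 * Complete a request locally with a "Command Aborted By Host" status
 * without involving the remote controller.
 */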
static void
nvmf_abort_request(struct nvmf_request *req, uint16_t cid)
{
	struct nvme_completion cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.cid = cid;
	cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) |
	    NVMEF(NVME_STATUS_SC, NVME_SC_COMMAND_ABORTED_BY_HOST));
	req->cb(req->cb_arg, &cqe);
}

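/* Release a request and any capsule still attached to it. */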
void
nvmf_free_request(struct nvmf_request *req)
{
	if (req->nc != NULL)
		nvmf_free_capsule(req->nc);
	free(req, M_NVMF);
}

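/*
 * Transmit the capsule for a request that has been bound to a command
 * slot.  Called with the queue pair lock held; the lock is dropped
 * before the capsule is handed to the transport.
 */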
static void
nvmf_dispatch_command(struct nvmf_host_qpair *qp, struct nvmf_host_command *cmd)
{
	struct nvmf_softc *sc = qp->sc;
	struct nvme_command *sqe;
	struct nvmf_capsule *nc;
	uint16_t new_sqtail;
	int error;

	mtx_assert(&qp->lock, MA_OWNED);

	qp->submitted++;

	/*
	 * Update flow control tracking.  This is just a sanity check.
	 * Since num_commands == qsize - 1, there can never be too
	 * many commands in flight.
	 */
	new_sqtail = (qp->sqtail + 1) % (qp->num_commands + 1);
	KASSERT(new_sqtail != qp->sqhd, ("%s: qp %p is full", __func__, qp));
	qp->sqtail = new_sqtail;
	mtx_unlock(&qp->lock);

	nc = cmd->req->nc;
	sqe = nvmf_capsule_sqe(nc);

	/*
	 * NB: Don't bother byte-swapping the cid so that receive
	 * doesn't have to swap.
	 */
	sqe->cid = cmd->cid;

	error = nvmf_transmit_capsule(nc);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to transmit capsule: %d, disconnecting\n", error);
		nvmf_disconnect(sc);
		return;
	}

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_tx_traffic, 1);
}

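/*
 * Transport error callback: report the error (unless it is a clean
 * close during detach) and request a disconnect.
 */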
static void
nvmf_qp_error(void *arg, int error)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;

	/* Ignore simple close of queue pairs during shutdown. */
	if (!(sc->detaching && error == 0))
		device_printf(sc->dev, "error %d on %s, disconnecting\n", error,
		    qp->name);
	nvmf_disconnect(sc);
}

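/*
 * Transport receive callback: match a response capsule to its command
 * slot by CID, recycle the slot (dispatching a pending request if one
 * is queued), and invoke the request's completion callback.
 */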
static void
nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	const struct nvme_completion *cqe;
	uint16_t cid;

	cqe = nvmf_capsule_cqe(nc);

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_rx_traffic, 1);

	/*
	 * NB: Don't bother byte-swapping the cid as transmit doesn't
	 * swap either.
	 */
	cid = cqe->cid;

	if (cid >= qp->num_commands) {
		device_printf(sc->dev,
		    "received invalid CID %u, disconnecting\n", cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	/* Update flow control tracking. */
	mtx_lock(&qp->lock);
	if (qp->sq_flow_control) {
		if (nvmf_sqhd_valid(nc))
			qp->sqhd = le16toh(cqe->sqhd);
	} else {
		/*
		 * If SQ FC is disabled, just advance the head for
		 * each response capsule received.
		 */
		qp->sqhd = (qp->sqhd + 1) % (qp->num_commands + 1);
	}

	/*
	 * If the queue has been shut down due to an error, silently
	 * drop the response.
	 */
	if (qp->qp == NULL) {
		device_printf(sc->dev,
		    "received completion for CID %u on shutdown %s\n", cid,
		    qp->name);
		mtx_unlock(&qp->lock);
		nvmf_free_capsule(nc);
		return;
	}

	cmd = qp->active_commands[cid];
	if (cmd == NULL) {
		mtx_unlock(&qp->lock);
		device_printf(sc->dev,
		    "received completion for inactive CID %u, disconnecting\n",
		    cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	KASSERT(cmd->cid == cid, ("%s: CID mismatch", __func__));
	req = cmd->req;
	cmd->req = NULL;
	if (STAILQ_EMPTY(&qp->pending_requests)) {
		qp->active_commands[cid] = NULL;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
		mtx_unlock(&qp->lock);
	} else {
		cmd->req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		nvmf_dispatch_command(qp, cmd);
	}

	req->cb(req->cb_arg, cqe);
	nvmf_free_capsule(nc);
	nvmf_free_request(req);
}

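/*
 * Attach per-queue sysctl nodes under the device tree: the admin
 * queue as "adminq" and I/O queues by queue ID.
 */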
static void
nvmf_sysctls_qp(struct nvmf_softc *sc, struct nvmf_host_qpair *qp,
    bool admin, u_int qid)
{
	struct sysctl_ctx_list *ctx = &qp->sysctl_ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *list;
	char name[8];

	if (admin) {
		oid = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
		    "adminq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");
	} else {
		snprintf(name, sizeof(name), "%u", qid);
		oid = SYSCTL_ADD_NODE(ctx, sc->ioq_oid_list, OID_AUTO, name,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "I/O Queue");
	}
	list = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "num_entries", CTLFLAG_RD,
	    NULL, qp->num_commands + 1, "Number of entries in queue");
	SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_head", CTLFLAG_RD, &qp->sqhd,
	    0, "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_tail", CTLFLAG_RD, &qp->sqtail,
	    0, "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_U64(ctx, list, OID_AUTO, "num_cmds", CTLFLAG_RD,
	    &qp->submitted, 0, "Number of commands submitted");
}

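/*
 * Create the host-side state for a queue pair described by the
 * handoff parameters: allocate the command slot pool, attach the
 * transport queue pair, and register the sysctl nodes.
 */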
struct nvmf_host_qpair *
nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
    struct nvmf_handoff_qpair_params *handoff, const char *name, u_int qid)
{
	struct nvmf_host_command *cmd, *ncmd;
	struct nvmf_host_qpair *qp;
	u_int i;

	qp = malloc(sizeof(*qp), M_NVMF, M_WAITOK | M_ZERO);
	qp->sc = sc;
	qp->sq_flow_control = handoff->sq_flow_control;
	qp->sqhd = handoff->sqhd;
	qp->sqtail = handoff->sqtail;
	strlcpy(qp->name, name, sizeof(qp->name));
	mtx_init(&qp->lock, "nvmf qp", NULL, MTX_DEF);
	(void)sysctl_ctx_init(&qp->sysctl_ctx);

	/*
	 * Allocate a spare command slot for each pending AER command
	 * on the admin queue.
	 */
	qp->num_commands = handoff->qsize - 1;
	if (handoff->admin)
		qp->num_commands += sc->num_aer;

	qp->active_commands = malloc(sizeof(*qp->active_commands) *
	    qp->num_commands, M_NVMF, M_WAITOK | M_ZERO);
	TAILQ_INIT(&qp->free_commands);
	for (i = 0; i < qp->num_commands; i++) {
		cmd = malloc(sizeof(*cmd), M_NVMF, M_WAITOK | M_ZERO);
		cmd->cid = i;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
	}
	STAILQ_INIT(&qp->pending_requests);

	qp->qp = nvmf_allocate_qpair(trtype, false, handoff, nvmf_qp_error,
	    qp, nvmf_receive_capsule, qp);
	if (qp->qp == NULL) {
		(void)sysctl_ctx_free(&qp->sysctl_ctx);
		TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
			TAILQ_REMOVE(&qp->free_commands, cmd, link);
			free(cmd, M_NVMF);
		}
		free(qp->active_commands, M_NVMF);
		mtx_destroy(&qp->lock);
		free(qp, M_NVMF);
		return (NULL);
	}

	nvmf_sysctls_qp(sc, qp, handoff->admin, qid);

	return (qp);
}

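/*
 * Detach the transport queue pair and abort every active and pending
 * request.  Safe to call more than once; later callers wait for an
 * in-progress shutdown to finish.
 */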
void
nvmf_shutdown_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	mtx_lock(&qp->lock);
	nq = qp->qp;
	qp->qp = NULL;

	if (nq == NULL) {
		while (qp->shutting_down)
			mtx_sleep(qp, &qp->lock, 0, "nvmfqpsh", 0);
		mtx_unlock(&qp->lock);
		return;
	}
	qp->shutting_down = true;
	while (qp->allocating != 0)
		mtx_sleep(qp, &qp->lock, 0, "nvmfqpqu", 0);
	mtx_unlock(&qp->lock);

	nvmf_free_qpair(nq);

	/*
	 * Abort outstanding requests.  Active requests will have
	 * their I/O completions invoked and associated capsules freed
	 * by the transport layer via nvmf_free_qpair.  Pending
	 * requests must have their I/O completion invoked via
	 * nvmf_abort_capsule_data.
	 */
	for (u_int i = 0; i < qp->num_commands; i++) {
		cmd = qp->active_commands[i];
		if (cmd != NULL) {
			if (!cmd->req->aer)
				printf("%s: aborted active command %p (CID %u)\n",
				    __func__, cmd->req, cmd->cid);

			/* This was freed by nvmf_free_qpair. */
			cmd->req->nc = NULL;
			nvmf_abort_request(cmd->req, cmd->cid);
			nvmf_free_request(cmd->req);
			free(cmd, M_NVMF);
		}
	}
	while (!STAILQ_EMPTY(&qp->pending_requests)) {
		req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		if (!req->aer)
			printf("%s: aborted pending command %p\n", __func__,
			    req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
	}

	mtx_lock(&qp->lock);
	qp->shutting_down = false;
	mtx_unlock(&qp->lock);
	wakeup(qp);
}

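/* Shut the queue pair down and release all of its resources. */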
void
nvmf_destroy_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd, *ncmd;

	nvmf_shutdown_qp(qp);
	(void)sysctl_ctx_free(&qp->sysctl_ctx);

	TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
		TAILQ_REMOVE(&qp->free_commands, cmd, link);
		free(cmd, M_NVMF);
	}
	free(qp->active_commands, M_NVMF);
	mtx_destroy(&qp->lock);
	free(qp, M_NVMF);
}

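/*
 * Submit a request on its queue pair.  If a command slot is free the
 * request is dispatched immediately; otherwise it is queued and sent
 * when an in-flight command completes.  If the queue pair has been
 * shut down, the request is aborted locally instead.
 */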
void
nvmf_submit_request(struct nvmf_request *req)
{
	struct nvmf_host_qpair *qp;
	struct nvmf_host_command *cmd;

	qp = req->qp;
	mtx_lock(&qp->lock);
	if (qp->qp == NULL) {
		mtx_unlock(&qp->lock);
		printf("%s: aborted pending command %p\n", __func__, req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
		return;
	}
	cmd = TAILQ_FIRST(&qp->free_commands);
	if (cmd == NULL) {
		/*
		 * Queue this request.  Will be sent after enough
		 * in-flight requests have completed.
		 */
		STAILQ_INSERT_TAIL(&qp->pending_requests, req, link);
		mtx_unlock(&qp->lock);
		return;
	}

	TAILQ_REMOVE(&qp->free_commands, cmd, link);
	KASSERT(qp->active_commands[cmd->cid] == NULL,
	    ("%s: CID already busy", __func__));
	qp->active_commands[cmd->cid] = cmd;
	cmd->req = req;
	nvmf_dispatch_command(qp, cmd);
}