xref: /freebsd/sys/dev/nvmf/host/nvmf_qpair.c (revision a1eda74167b5edb99fd31d507d8a3f7d7e14ae2b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_transport.h>
#include <dev/nvmf/host/nvmf_var.h>

struct nvmf_host_command {
	struct nvmf_request *req;
	TAILQ_ENTRY(nvmf_host_command) link;
	uint16_t cid;
};

struct nvmf_host_qpair {
	struct nvmf_softc *sc;
	struct nvmf_qpair *qp;

	bool	sq_flow_control;
	bool	shutting_down;
	u_int	allocating;
	u_int	num_commands;
	uint16_t sqhd;
	uint16_t sqtail;

	struct mtx lock;

	TAILQ_HEAD(, nvmf_host_command) free_commands;
	STAILQ_HEAD(, nvmf_request) pending_requests;

	/* Indexed by cid. */
	struct nvmf_host_command **active_commands;

	char	name[16];
};

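/*
 * Allocate a request bound to 'qp' that wraps a transport capsule built
 * from the submission queue entry 'sqe'.  The 'allocating' counter keeps
 * the queue pair from being shut down while the (possibly sleeping)
 * capsule allocation is in progress.  Returns NULL if the queue pair is
 * already shut down or if allocation fails.
 */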
struct nvmf_request *
nvmf_allocate_request(struct nvmf_host_qpair *qp, void *sqe,
    nvmf_request_complete_t *cb, void *cb_arg, int how)
{
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("%s: invalid how", __func__));

	req = malloc(sizeof(*req), M_NVMF, how | M_ZERO);
	if (req == NULL)
		return (NULL);

	mtx_lock(&qp->lock);
	nq = qp->qp;
	if (nq == NULL) {
		mtx_unlock(&qp->lock);
		free(req, M_NVMF);
		return (NULL);
	}
	qp->allocating++;
	MPASS(qp->allocating != 0);
	mtx_unlock(&qp->lock);

	req->qp = qp;
	req->cb = cb;
	req->cb_arg = cb_arg;
	req->nc = nvmf_allocate_command(nq, sqe, how);
	if (req->nc == NULL) {
		free(req, M_NVMF);
		req = NULL;
	}

	mtx_lock(&qp->lock);
	qp->allocating--;
	if (qp->allocating == 0 && qp->shutting_down)
		wakeup(qp);
	mtx_unlock(&qp->lock);

	return (req);
}

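/*
 * Complete a request with a synthesized path-related "Command Aborted By
 * Host" status carrying the given CID.
 */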
static void
nvmf_abort_request(struct nvmf_request *req, uint16_t cid)
{
	struct nvme_completion cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.cid = cid;
	cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) |
	    NVMEF(NVME_STATUS_SC, NVME_SC_COMMAND_ABORTED_BY_HOST));
	req->cb(req->cb_arg, &cqe);
}

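/* Release a request and its transport capsule, if one was allocated. */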
void
nvmf_free_request(struct nvmf_request *req)
{
	if (req->nc != NULL)
		nvmf_free_capsule(req->nc);
	free(req, M_NVMF);
}

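/*
 * Transmit a command on the fabric: stamp the slot's CID into the SQE
 * and hand the capsule to the transport.  A transmit failure forces a
 * disconnect.
 */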
static void
nvmf_dispatch_command(struct nvmf_host_qpair *qp, struct nvmf_host_command *cmd)
{
	struct nvmf_softc *sc = qp->sc;
	struct nvme_command *sqe;
	struct nvmf_capsule *nc;
	int error;

	nc = cmd->req->nc;
	sqe = nvmf_capsule_sqe(nc);

	/*
	 * NB: Don't bother byte-swapping the cid so that receive
	 * doesn't have to swap.
	 */
	sqe->cid = cmd->cid;

	error = nvmf_transmit_capsule(nc);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to transmit capsule: %d, disconnecting\n", error);
		nvmf_disconnect(sc);
		return;
	}

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_tx_traffic, 1);
}

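/*
 * Transport error callback: log the error (unless it is a clean close
 * while detaching) and disconnect the association.
 */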
static void
nvmf_qp_error(void *arg, int error)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;

	/* Ignore simple close of queue pairs during shutdown. */
	if (!(sc->detaching && error == 0))
		device_printf(sc->dev, "error %d on %s, disconnecting\n", error,
		    qp->name);
	nvmf_disconnect(sc);
}

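/*
 * Transport receive callback: match a response capsule to the active
 * command identified by its CID, recycle the command slot (or reuse it
 * immediately for a pending request), and invoke the original request's
 * completion callback.
 */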
static void
nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
{
	struct nvmf_host_qpair *qp = arg;
	struct nvmf_softc *sc = qp->sc;
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	const struct nvme_completion *cqe;
	uint16_t cid;

	cqe = nvmf_capsule_cqe(nc);

	if (sc->ka_traffic)
		atomic_store_int(&sc->ka_active_rx_traffic, 1);

	/*
	 * NB: Don't bother byte-swapping the cid as transmit doesn't
	 * swap either.
	 */
	cid = cqe->cid;

	if (cid >= qp->num_commands) {
		device_printf(sc->dev,
		    "received invalid CID %u, disconnecting\n", cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	/*
	 * If the queue has been shutdown due to an error, silently
	 * drop the response.
	 */
	mtx_lock(&qp->lock);
	if (qp->qp == NULL) {
		device_printf(sc->dev,
		    "received completion for CID %u on shutdown %s\n", cid,
		    qp->name);
		mtx_unlock(&qp->lock);
		nvmf_free_capsule(nc);
		return;
	}

	cmd = qp->active_commands[cid];
	if (cmd == NULL) {
		mtx_unlock(&qp->lock);
		device_printf(sc->dev,
		    "received completion for inactive CID %u, disconnecting\n",
		    cid);
		nvmf_disconnect(sc);
		nvmf_free_capsule(nc);
		return;
	}

	KASSERT(cmd->cid == cid, ("%s: CID mismatch", __func__));
	req = cmd->req;
	cmd->req = NULL;
	if (STAILQ_EMPTY(&qp->pending_requests)) {
		qp->active_commands[cid] = NULL;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
		mtx_unlock(&qp->lock);
	} else {
		cmd->req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		mtx_unlock(&qp->lock);
		nvmf_dispatch_command(qp, cmd);
	}

	req->cb(req->cb_arg, cqe);
	nvmf_free_capsule(nc);
	nvmf_free_request(req);
}

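/*
 * Construct a host queue pair from connection handoff parameters.  The
 * queue supports qsize - 1 outstanding commands, plus a spare command
 * slot per AER on the admin queue, each tracked by a CID-indexed table.
 */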
struct nvmf_host_qpair *
nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
    struct nvmf_handoff_qpair_params *handoff, const char *name)
{
	struct nvmf_host_command *cmd, *ncmd;
	struct nvmf_host_qpair *qp;
	u_int i;

	qp = malloc(sizeof(*qp), M_NVMF, M_WAITOK | M_ZERO);
	qp->sc = sc;
	qp->sq_flow_control = handoff->sq_flow_control;
	qp->sqhd = handoff->sqhd;
	qp->sqtail = handoff->sqtail;
	strlcpy(qp->name, name, sizeof(qp->name));
	mtx_init(&qp->lock, "nvmf qp", NULL, MTX_DEF);

	/*
	 * Allocate a spare command slot for each pending AER command
	 * on the admin queue.
	 */
	qp->num_commands = handoff->qsize - 1;
	if (handoff->admin)
		qp->num_commands += sc->num_aer;

	qp->active_commands = malloc(sizeof(*qp->active_commands) *
	    qp->num_commands, M_NVMF, M_WAITOK | M_ZERO);
	TAILQ_INIT(&qp->free_commands);
	for (i = 0; i < qp->num_commands; i++) {
		cmd = malloc(sizeof(*cmd), M_NVMF, M_WAITOK | M_ZERO);
		cmd->cid = i;
		TAILQ_INSERT_TAIL(&qp->free_commands, cmd, link);
	}
	STAILQ_INIT(&qp->pending_requests);

	qp->qp = nvmf_allocate_qpair(trtype, false, handoff, nvmf_qp_error,
	    qp, nvmf_receive_capsule, qp);
	if (qp->qp == NULL) {
		TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
			TAILQ_REMOVE(&qp->free_commands, cmd, link);
			free(cmd, M_NVMF);
		}
		free(qp->active_commands, M_NVMF);
		mtx_destroy(&qp->lock);
		free(qp, M_NVMF);
		return (NULL);
	}

	return (qp);
}

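/*
 * Shut down a queue pair: detach it from the transport, wait for any
 * in-progress request allocations to drain, and abort both active and
 * pending requests with a host-aborted status.
 */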
void
nvmf_shutdown_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd;
	struct nvmf_request *req;
	struct nvmf_qpair *nq;

	mtx_lock(&qp->lock);
	nq = qp->qp;
	qp->qp = NULL;

	if (nq == NULL) {
		while (qp->shutting_down)
			mtx_sleep(qp, &qp->lock, 0, "nvmfqpsh", 0);
		mtx_unlock(&qp->lock);
		return;
	}
	qp->shutting_down = true;
	while (qp->allocating != 0)
		mtx_sleep(qp, &qp->lock, 0, "nvmfqpqu", 0);
	mtx_unlock(&qp->lock);

	nvmf_free_qpair(nq);

	/*
	 * Abort outstanding requests.  Active requests will have
	 * their I/O completions invoked and associated capsules freed
	 * by the transport layer via nvmf_free_qpair.  Pending
	 * requests must have their I/O completion invoked via
	 * nvmf_abort_capsule_data.
	 */
	for (u_int i = 0; i < qp->num_commands; i++) {
		cmd = qp->active_commands[i];
		if (cmd != NULL) {
			if (!cmd->req->aer)
				printf("%s: aborted active command %p (CID %u)\n",
				    __func__, cmd->req, cmd->cid);

			/* This was freed by nvmf_free_qpair. */
			cmd->req->nc = NULL;
			nvmf_abort_request(cmd->req, cmd->cid);
			nvmf_free_request(cmd->req);
			free(cmd, M_NVMF);
		}
	}
	while (!STAILQ_EMPTY(&qp->pending_requests)) {
		req = STAILQ_FIRST(&qp->pending_requests);
		STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
		if (!req->aer)
			printf("%s: aborted pending command %p\n", __func__,
			    req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
	}

	mtx_lock(&qp->lock);
	qp->shutting_down = false;
	mtx_unlock(&qp->lock);
	wakeup(qp);
}

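/*
 * Tear down a queue pair and free its command slots, active command
 * table, and lock.
 */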
void
nvmf_destroy_qp(struct nvmf_host_qpair *qp)
{
	struct nvmf_host_command *cmd, *ncmd;

	nvmf_shutdown_qp(qp);

	TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
		TAILQ_REMOVE(&qp->free_commands, cmd, link);
		free(cmd, M_NVMF);
	}
	free(qp->active_commands, M_NVMF);
	mtx_destroy(&qp->lock);
	free(qp, M_NVMF);
}

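/*
 * Submit a request on its queue pair.  If a free command slot is
 * available the request is dispatched immediately; otherwise it is
 * queued on pending_requests and sent once an in-flight command
 * completes.  Requests on a queue pair that has already been shut down
 * are aborted.
 */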
void
nvmf_submit_request(struct nvmf_request *req)
{
	struct nvmf_host_qpair *qp;
	struct nvmf_host_command *cmd;

	qp = req->qp;
	mtx_lock(&qp->lock);
	if (qp->qp == NULL) {
		mtx_unlock(&qp->lock);
		printf("%s: aborted pending command %p\n", __func__, req);
		nvmf_abort_capsule_data(req->nc, ECONNABORTED);
		nvmf_abort_request(req, 0);
		nvmf_free_request(req);
		return;
	}
	cmd = TAILQ_FIRST(&qp->free_commands);
	if (cmd == NULL) {
		/*
		 * Queue this request.  Will be sent after enough
		 * in-flight requests have completed.
		 */
		STAILQ_INSERT_TAIL(&qp->pending_requests, req, link);
		mtx_unlock(&qp->lock);
		return;
	}

	TAILQ_REMOVE(&qp->free_commands, cmd, link);
	KASSERT(qp->active_commands[cmd->cid] == NULL,
	    ("%s: CID already busy", __func__));
	qp->active_commands[cmd->cid] = cmd;
	cmd->req = req;
	mtx_unlock(&qp->lock);
	nvmf_dispatch_command(qp, cmd);
}
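
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to issue a command on a queue pair using the functions above.
 * 'qp', 'ctx', and 'example_done' are hypothetical names; the callback
 * signature is inferred from how req->cb is invoked in this file.  The
 * callback receives either the real completion or the synthetic
 * host-aborted completion built by nvmf_abort_request().
 *
 *	static void
 *	example_done(void *arg, const struct nvme_completion *cqe)
 *	{
 *		... caller-specific completion handling ...
 *	}
 *
 *	struct nvme_command sqe;
 *	struct nvmf_request *req;
 *
 *	memset(&sqe, 0, sizeof(sqe));
 *	sqe.opc = NVME_OPC_IDENTIFY;
 *	req = nvmf_allocate_request(qp, &sqe, example_done, ctx, M_WAITOK);
 *	if (req != NULL)
 *		nvmf_submit_request(req);
 */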