xref: /freebsd/sys/dev/nvmf/host/nvmf_var.h (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2023-2024 Chelsio Communications, Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  */
7 
8 #ifndef __NVMF_VAR_H__
9 #define	__NVMF_VAR_H__
10 
11 #include <sys/_callout.h>
12 #include <sys/_eventhandler.h>
13 #include <sys/_lock.h>
14 #include <sys/_mutex.h>
15 //#include <sys/_nv.h>
16 #include <sys/_sx.h>
17 #include <sys/_task.h>
18 #include <sys/smp.h>
19 #include <sys/queue.h>
20 #include <dev/nvme/nvme.h>
21 #include <dev/nvmf/nvmf_transport.h>
22 
23 struct nvmf_aer;
24 struct nvmf_capsule;
25 struct nvmf_host_qpair;
26 struct nvmf_namespace;
27 struct sysctl_oid_list;
28 
29 typedef void nvmf_request_complete_t(void *, const struct nvme_completion *);
30 
/*
 * Per-controller host softc: one instance per attached NVMe-over-Fabrics
 * controller, holding the admin/I/O queue pairs, CAM SIM state, Keep
 * Alive timers, and reconnect/teardown bookkeeping.
 */
struct nvmf_softc {
	device_t dev;

	/* Admin queue pair plus an array of num_io_queues I/O queue pairs. */
	struct nvmf_host_qpair *admin;
	struct nvmf_host_qpair **io;
	u_int	num_io_queues;
	enum nvmf_trtype trtype;	/* Fabrics transport type in use. */

	/*
	 * CAM SIM state.  NOTE(review): sim_mtx presumably guards the
	 * sim_disconnected/sim_shutdown flags — confirm against nvmf_sim.c.
	 */
	struct cam_sim *sim;
	struct cam_path *path;
	struct mtx sim_mtx;
	bool sim_disconnected;
	bool sim_shutdown;

	/* Active namespaces, indexed by namespace ID - 1 (TODO: confirm indexing). */
	struct nvmf_namespace **ns;

	/* Cached controller properties fetched at connect time. */
	struct nvme_controller_data *cdata;
	uint64_t cap;			/* Controller Capabilities (CAP) property. */
	uint32_t vs;			/* Version (VS) property. */
	u_int max_pending_io;
	u_long max_xfer_size;

	struct cdev *cdev;		/* /dev node for passthrough ioctls. */

	/*
	 * Keep Alive support depends on two timers.  The 'tx' timer
	 * is responsible for sending KeepAlive commands and runs at
	 * half the timeout interval.  The 'rx' timer is responsible
	 * for detecting an actual timeout.
	 *
	 * For efficient support of TKAS, the host does not reschedule
	 * these timers every time new commands are scheduled.
	 * Instead, the host sets the *_traffic flags when commands
	 * are sent and received.  The timeout handlers check and
	 * clear these flags.  This does mean it can take up to twice
	 * the timeout time to detect an AWOL controller.
	 */
	bool	ka_traffic;			/* Using TKAS? */

	volatile int ka_active_tx_traffic;	/* Commands sent since last tx tick. */
	struct callout ka_tx_timer;
	sbintime_t ka_tx_sbt;

	volatile int ka_active_rx_traffic;	/* Completions seen since last rx tick. */
	struct callout ka_rx_timer;
	sbintime_t ka_rx_sbt;

	/* Serializes connect/disconnect/reconnect against detach. */
	struct sx connection_lock;
	struct task disconnect_task;
	bool detaching;

	/* Outstanding Asynchronous Event Requests. */
	u_int num_aer;
	struct nvmf_aer *aer;

	struct sysctl_oid_list *ioq_oid_list;

	/* Hooks to quiesce the association during system shutdown. */
	eventhandler_tag shutdown_pre_sync_eh;
	eventhandler_tag shutdown_post_sync_eh;
};
90 
/*
 * A single in-flight host command: the capsule to transmit plus the
 * completion callback invoked with the resulting CQE.
 */
struct nvmf_request {
	struct nvmf_host_qpair *qp;	/* Queue pair this request is queued on. */
	struct nvmf_capsule *nc;	/* Transport capsule holding the SQE. */
	nvmf_request_complete_t *cb;	/* Invoked with cb_arg and the CQE. */
	void	*cb_arg;
	bool	aer;			/* Asynchronous Event Request? */

	STAILQ_ENTRY(nvmf_request) link;
};
100 
/*
 * Tracks completion of a synchronous request that may involve both a
 * command completion (cqe/done) and a separate data transfer
 * (io_done/io_error); see nvmf_status_init()/nvmf_status_wait_io().
 */
struct nvmf_completion_status {
	struct nvme_completion cqe;	/* Copy of the received completion. */
	bool	done;			/* CQE received? */
	bool	io_done;		/* Data transfer finished (or none expected)? */
	int	io_error;		/* Errno from the data transfer, 0 on success. */
};
107 
108 static __inline struct nvmf_host_qpair *
109 nvmf_select_io_queue(struct nvmf_softc *sc)
110 {
111 	u_int idx = curcpu * sc->num_io_queues / (mp_maxid + 1);
112 	return (sc->io[idx]);
113 }
114 
115 static __inline bool
116 nvmf_cqe_aborted(const struct nvme_completion *cqe)
117 {
118 	uint16_t status;
119 
120 	status = le16toh(cqe->status);
121 	return (NVME_STATUS_GET_SCT(status) == NVME_SCT_PATH_RELATED &&
122 	    NVME_STATUS_GET_SC(status) == NVME_SC_COMMAND_ABORTED_BY_HOST);
123 }
124 
125 static __inline void
126 nvmf_status_init(struct nvmf_completion_status *status)
127 {
128 	status->done = false;
129 	status->io_done = true;
130 	status->io_error = 0;
131 }
132 
/*
 * Mark that this request also has a pending data transfer, so the
 * waiter must additionally wait for the I/O completion callback to
 * set io_done (see nvmf_io_complete()).
 */
static __inline void
nvmf_status_wait_io(struct nvmf_completion_status *status)
{
	status->io_done = false;
}
138 
139 #ifdef DRIVER_MODULE
140 extern driver_t nvme_nvmf_driver;
141 #endif
142 
143 #ifdef MALLOC_DECLARE
144 MALLOC_DECLARE(M_NVMF);
145 #endif
146 
147 /* If true, I/O requests will fail while the host is disconnected. */
148 extern bool nvmf_fail_disconnect;
149 
150 /* nvmf.c */
151 void	nvmf_complete(void *arg, const struct nvme_completion *cqe);
152 void	nvmf_io_complete(void *arg, size_t xfered, int error);
153 void	nvmf_wait_for_reply(struct nvmf_completion_status *status);
154 int	nvmf_copyin_handoff(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp);
155 void	nvmf_disconnect(struct nvmf_softc *sc);
156 void	nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid);
157 void	nvmf_rescan_all_ns(struct nvmf_softc *sc);
158 int	nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
159     bool admin);
160 
161 /* nvmf_aer.c */
162 void	nvmf_init_aer(struct nvmf_softc *sc);
163 int	nvmf_start_aer(struct nvmf_softc *sc);
164 void	nvmf_destroy_aer(struct nvmf_softc *sc);
165 
166 /* nvmf_cmd.c */
167 bool	nvmf_cmd_get_property(struct nvmf_softc *sc, uint32_t offset,
168     uint8_t size, nvmf_request_complete_t *cb, void *cb_arg, int how);
169 bool	nvmf_cmd_set_property(struct nvmf_softc *sc, uint32_t offset,
170     uint8_t size, uint64_t value, nvmf_request_complete_t *cb, void *cb_arg,
171     int how);
172 bool	nvmf_cmd_keep_alive(struct nvmf_softc *sc, nvmf_request_complete_t *cb,
173     void *cb_arg, int how);
174 bool	nvmf_cmd_identify_active_namespaces(struct nvmf_softc *sc, uint32_t id,
175     struct nvme_ns_list *nslist, nvmf_request_complete_t *req_cb,
176     void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
177 bool	nvmf_cmd_identify_namespace(struct nvmf_softc *sc, uint32_t id,
178     struct nvme_namespace_data *nsdata, nvmf_request_complete_t *req_cb,
179     void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
180 bool	nvmf_cmd_get_log_page(struct nvmf_softc *sc, uint32_t nsid, uint8_t lid,
181     uint64_t offset, void *buf, size_t len, nvmf_request_complete_t *req_cb,
182     void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
183 
184 /* nvmf_ctldev.c */
185 int	nvmf_ctl_load(void);
186 void	nvmf_ctl_unload(void);
187 
188 /* nvmf_ns.c */
189 struct nvmf_namespace *nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
190     const struct nvme_namespace_data *data);
191 void	nvmf_disconnect_ns(struct nvmf_namespace *ns);
192 void	nvmf_reconnect_ns(struct nvmf_namespace *ns);
193 void	nvmf_shutdown_ns(struct nvmf_namespace *ns);
194 void	nvmf_destroy_ns(struct nvmf_namespace *ns);
195 bool	nvmf_update_ns(struct nvmf_namespace *ns,
196     const struct nvme_namespace_data *data);
197 
198 /* nvmf_qpair.c */
199 struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
200     enum nvmf_trtype trtype, const nvlist_t *nvl, const char *name, u_int qid);
201 void	nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
202 void	nvmf_destroy_qp(struct nvmf_host_qpair *qp);
203 struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
204     void *sqe, nvmf_request_complete_t *cb, void *cb_arg, int how);
205 void	nvmf_submit_request(struct nvmf_request *req);
206 void	nvmf_free_request(struct nvmf_request *req);
207 
208 /* nvmf_sim.c */
209 int	nvmf_init_sim(struct nvmf_softc *sc);
210 void	nvmf_disconnect_sim(struct nvmf_softc *sc);
211 void	nvmf_reconnect_sim(struct nvmf_softc *sc);
212 void	nvmf_shutdown_sim(struct nvmf_softc *sc);
213 void	nvmf_destroy_sim(struct nvmf_softc *sc);
214 void	nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id);
215 
216 #endif /* !__NVMF_VAR_H__ */
217