1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
5 * Written by: John Baldwin <jhb@FreeBSD.org>
6 */
7
8 #ifndef __NVMF_VAR_H__
9 #define __NVMF_VAR_H__
10
#include <sys/_callout.h>
#include <sys/_eventhandler.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_nv.h>
#include <sys/_sx.h>
#include <sys/_task.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf_transport.h>
22
23 struct nvmf_aer;
24 struct nvmf_capsule;
25 struct nvmf_host_qpair;
26 struct nvmf_namespace;
27 struct sysctl_oid_list;
28
29 typedef void nvmf_request_complete_t(void *, const struct nvme_completion *);
30
31 struct nvmf_softc {
32 device_t dev;
33
34 struct nvmf_host_qpair *admin;
35 struct nvmf_host_qpair **io;
36 u_int num_io_queues;
37 enum nvmf_trtype trtype;
38
39 struct cam_sim *sim;
40 struct cam_path *path;
41 struct mtx sim_mtx;
42 bool sim_disconnected;
43 bool sim_shutdown;
44
45 struct nvmf_namespace **ns;
46
47 struct nvme_controller_data *cdata;
48 uint64_t cap;
49 uint32_t vs;
50 u_int max_pending_io;
51 u_long max_xfer_size;
52
53 struct cdev *cdev;
54
55 /*
56 * Keep Alive support depends on two timers. The 'tx' timer
57 * is responsible for sending KeepAlive commands and runs at
58 * half the timeout interval. The 'rx' timer is responsible
59 * for detecting an actual timeout.
60 *
61 * For efficient support of TKAS, the host does not reschedule
62 * these timers every time new commands are scheduled.
63 * Instead, the host sets the *_traffic flags when commands
64 * are sent and received. The timeout handlers check and
65 * clear these flags. This does mean it can take up to twice
66 * the timeout time to detect an AWOL controller.
67 */
68 bool ka_traffic; /* Using TKAS? */
69
70 volatile int ka_active_tx_traffic;
71 struct callout ka_tx_timer;
72 sbintime_t ka_tx_sbt;
73
74 volatile int ka_active_rx_traffic;
75 struct callout ka_rx_timer;
76 sbintime_t ka_rx_sbt;
77
78 struct timeout_task request_reconnect_task;
79 struct timeout_task controller_loss_task;
80 uint32_t reconnect_delay;
81 uint32_t controller_loss_timeout;
82
83 struct sx connection_lock;
84 struct task disconnect_task;
85 bool detaching;
86 bool controller_timedout;
87
88 u_int num_aer;
89 struct nvmf_aer *aer;
90
91 struct sysctl_oid_list *ioq_oid_list;
92
93 nvlist_t *rparams;
94
95 struct timespec last_disconnect;
96
97 eventhandler_tag shutdown_pre_sync_eh;
98 eventhandler_tag shutdown_post_sync_eh;
99 };
100
101 struct nvmf_request {
102 struct nvmf_host_qpair *qp;
103 struct nvmf_capsule *nc;
104 nvmf_request_complete_t *cb;
105 void *cb_arg;
106 bool aer;
107
108 STAILQ_ENTRY(nvmf_request) link;
109 };
110
111 struct nvmf_completion_status {
112 struct nvme_completion cqe;
113 bool done;
114 bool io_done;
115 int io_error;
116 };
117
118 static __inline struct nvmf_host_qpair *
nvmf_select_io_queue(struct nvmf_softc * sc)119 nvmf_select_io_queue(struct nvmf_softc *sc)
120 {
121 u_int idx = curcpu * sc->num_io_queues / (mp_maxid + 1);
122 return (sc->io[idx]);
123 }
124
125 static __inline bool
nvmf_cqe_aborted(const struct nvme_completion * cqe)126 nvmf_cqe_aborted(const struct nvme_completion *cqe)
127 {
128 uint16_t status;
129
130 status = le16toh(cqe->status);
131 return (NVME_STATUS_GET_SCT(status) == NVME_SCT_PATH_RELATED &&
132 NVME_STATUS_GET_SC(status) == NVME_SC_COMMAND_ABORTED_BY_HOST);
133 }
134
135 static __inline void
nvmf_status_init(struct nvmf_completion_status * status)136 nvmf_status_init(struct nvmf_completion_status *status)
137 {
138 status->done = false;
139 status->io_done = true;
140 status->io_error = 0;
141 }
142
143 static __inline void
nvmf_status_wait_io(struct nvmf_completion_status * status)144 nvmf_status_wait_io(struct nvmf_completion_status *status)
145 {
146 status->io_done = false;
147 }
148
149 #ifdef DRIVER_MODULE
150 extern driver_t nvme_nvmf_driver;
151 #endif
152
153 #ifdef MALLOC_DECLARE
154 MALLOC_DECLARE(M_NVMF);
155 #endif
156
157 /* If true, I/O requests will fail while the host is disconnected. */
158 extern bool nvmf_fail_disconnect;
159
160 /* nvmf.c */
161 void nvmf_complete(void *arg, const struct nvme_completion *cqe);
162 void nvmf_io_complete(void *arg, size_t xfered, int error);
163 void nvmf_wait_for_reply(struct nvmf_completion_status *status);
164 int nvmf_copyin_handoff(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp);
165 void nvmf_disconnect(struct nvmf_softc *sc);
166 void nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid);
167 void nvmf_rescan_all_ns(struct nvmf_softc *sc);
168 int nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
169 bool admin);
170
171 /* nvmf_aer.c */
172 void nvmf_init_aer(struct nvmf_softc *sc);
173 int nvmf_start_aer(struct nvmf_softc *sc);
174 void nvmf_destroy_aer(struct nvmf_softc *sc);
175
176 /* nvmf_cmd.c */
177 bool nvmf_cmd_get_property(struct nvmf_softc *sc, uint32_t offset,
178 uint8_t size, nvmf_request_complete_t *cb, void *cb_arg, int how);
179 bool nvmf_cmd_set_property(struct nvmf_softc *sc, uint32_t offset,
180 uint8_t size, uint64_t value, nvmf_request_complete_t *cb, void *cb_arg,
181 int how);
182 bool nvmf_cmd_keep_alive(struct nvmf_softc *sc, nvmf_request_complete_t *cb,
183 void *cb_arg, int how);
184 bool nvmf_cmd_identify_active_namespaces(struct nvmf_softc *sc, uint32_t id,
185 struct nvme_ns_list *nslist, nvmf_request_complete_t *req_cb,
186 void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
187 bool nvmf_cmd_identify_namespace(struct nvmf_softc *sc, uint32_t id,
188 struct nvme_namespace_data *nsdata, nvmf_request_complete_t *req_cb,
189 void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
190 bool nvmf_cmd_get_log_page(struct nvmf_softc *sc, uint32_t nsid, uint8_t lid,
191 uint64_t offset, void *buf, size_t len, nvmf_request_complete_t *req_cb,
192 void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
193
194 /* nvmf_ctldev.c */
195 int nvmf_ctl_load(void);
196 void nvmf_ctl_unload(void);
197
198 /* nvmf_ns.c */
199 struct nvmf_namespace *nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
200 const struct nvme_namespace_data *data);
201 void nvmf_disconnect_ns(struct nvmf_namespace *ns);
202 void nvmf_reconnect_ns(struct nvmf_namespace *ns);
203 void nvmf_shutdown_ns(struct nvmf_namespace *ns);
204 void nvmf_destroy_ns(struct nvmf_namespace *ns);
205 bool nvmf_update_ns(struct nvmf_namespace *ns,
206 const struct nvme_namespace_data *data);
207
208 /* nvmf_qpair.c */
209 struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
210 enum nvmf_trtype trtype, const nvlist_t *nvl, const char *name, u_int qid);
211 void nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
212 void nvmf_destroy_qp(struct nvmf_host_qpair *qp);
213 struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
214 void *sqe, nvmf_request_complete_t *cb, void *cb_arg, int how);
215 void nvmf_submit_request(struct nvmf_request *req);
216 void nvmf_free_request(struct nvmf_request *req);
217
218 /* nvmf_sim.c */
219 int nvmf_init_sim(struct nvmf_softc *sc);
220 void nvmf_disconnect_sim(struct nvmf_softc *sc);
221 void nvmf_reconnect_sim(struct nvmf_softc *sc);
222 void nvmf_shutdown_sim(struct nvmf_softc *sc);
223 void nvmf_destroy_sim(struct nvmf_softc *sc);
224 void nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id);
225
226 #endif /* !__NVMF_VAR_H__ */
227