/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/refcount.h>
#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libnvmf.h"
#include "internal.h"

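/*
 * Transport-independent wrappers for the libnvmf association, queue pair,
 * and capsule APIs.  Each routine handles the common bookkeeping and then
 * dispatches through the association's method table (struct
 * nvmf_transport_ops); currently only the TCP transport (tcp_ops) is
 * wired up.
 *
 * Illustrative host-side sketch (not part of the library; error handling
 * omitted, and "aparams", "qparams", and "cmd" are assumed to have been
 * filled in by the caller):
 *
 *	struct nvmf_association *na;
 *	struct nvmf_qpair *qp;
 *	struct nvmf_capsule *cc, *rc;
 *
 *	na = nvmf_allocate_association(NVMF_TRTYPE_TCP, false, &aparams);
 *	qp = nvmf_allocate_qpair(na, &qparams);
 *	cc = nvmf_allocate_command(qp, &cmd);
 *	nvmf_transmit_capsule(cc);
 *	nvmf_receive_capsule(qp, &rc);
 *	nvmf_free_capsule(rc);
 *	nvmf_free_capsule(cc);
 *	nvmf_free_qpair(qp);
 *	nvmf_free_association(na);
 */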
struct nvmf_association *
nvmf_allocate_association(enum nvmf_trtype trtype, bool controller,
    const struct nvmf_association_params *params)
{
	struct nvmf_transport_ops *ops;
	struct nvmf_association *na;

	switch (trtype) {
	case NVMF_TRTYPE_TCP:
		ops = &tcp_ops;
		break;
	default:
		errno = EINVAL;
		return (NULL);
	}

	na = ops->allocate_association(controller, params);
	if (na == NULL)
		return (NULL);

	na->na_ops = ops;
	na->na_trtype = trtype;
	na->na_controller = controller;
	na->na_params = *params;
	na->na_last_error = NULL;
	refcount_init(&na->na_refs, 1);
	return (na);
}

void
nvmf_update_assocation(struct nvmf_association *na,
    const struct nvme_controller_data *cdata)
{
	na->na_ops->update_association(na, cdata);
}

void
nvmf_free_association(struct nvmf_association *na)
{
	if (refcount_release(&na->na_refs)) {
		free(na->na_last_error);
		na->na_ops->free_association(na);
	}
}

const char *
nvmf_association_error(const struct nvmf_association *na)
{
	return (na->na_last_error);
}

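/*
 * Error capture helpers used by transport code: na_error() records only
 * the first error reported against an association (later calls are
 * ignored until na_clear_error() discards the saved string), and the
 * saved message is what nvmf_association_error() returns.
 */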
void
na_clear_error(struct nvmf_association *na)
{
	free(na->na_last_error);
	na->na_last_error = NULL;
}

void
na_error(struct nvmf_association *na, const char *fmt, ...)
{
	va_list ap;
	char *str;

	if (na->na_last_error != NULL)
		return;
	va_start(ap, fmt);
	vasprintf(&str, fmt, ap);
	va_end(ap);
	na->na_last_error = str;
}

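/*
 * Each queue pair takes a reference on its association, dropped again in
 * nvmf_free_qpair(), so the association is not destroyed until its last
 * queue pair has been freed.
 */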
struct nvmf_qpair *
nvmf_allocate_qpair(struct nvmf_association *na,
    const struct nvmf_qpair_params *params)
{
	struct nvmf_qpair *qp;

	na_clear_error(na);
	qp = na->na_ops->allocate_qpair(na, params);
	if (qp == NULL)
		return (NULL);

	refcount_acquire(&na->na_refs);
	qp->nq_association = na;
	qp->nq_admin = params->admin;
	TAILQ_INIT(&qp->nq_rx_capsules);
	return (qp);
}

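/*
 * Freeing a queue pair discards any received capsules still queued on it
 * and releases the queue pair's reference on the association.
 */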
void
nvmf_free_qpair(struct nvmf_qpair *qp)
{
	struct nvmf_association *na;
	struct nvmf_capsule *nc, *tc;

	TAILQ_FOREACH_SAFE(nc, &qp->nq_rx_capsules, nc_link, tc) {
		TAILQ_REMOVE(&qp->nq_rx_capsules, nc, nc_link);
		nvmf_free_capsule(nc);
	}
	na = qp->nq_association;
	na->na_ops->free_qpair(qp);
	nvmf_free_association(na);
}

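/*
 * Command capsules copy the caller's submission queue entry and then
 * force the PSDT field to select SGLs, since NVMe over Fabrics does not
 * use PRPs (NVMe base spec, section 4.2).
 */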
struct nvmf_capsule *
nvmf_allocate_command(struct nvmf_qpair *qp, const void *sqe)
{
	struct nvmf_capsule *nc;

	nc = qp->nq_association->na_ops->allocate_capsule(qp);
	if (nc == NULL)
		return (NULL);

	nc->nc_qpair = qp;
	nc->nc_qe_len = sizeof(struct nvme_command);
	memcpy(&nc->nc_sqe, sqe, nc->nc_qe_len);

	/* 4.2 of NVMe base spec: Fabrics always uses SGL. */
	nc->nc_sqe.fuse &= ~NVMEM(NVME_CMD_PSDT);
	nc->nc_sqe.fuse |= NVMEF(NVME_CMD_PSDT, NVME_PSDT_SGL);
	return (nc);
}

struct nvmf_capsule *
nvmf_allocate_response(struct nvmf_qpair *qp, const void *cqe)
{
	struct nvmf_capsule *nc;

	nc = qp->nq_association->na_ops->allocate_capsule(qp);
	if (nc == NULL)
		return (NULL);

	nc->nc_qpair = qp;
	nc->nc_qe_len = sizeof(struct nvme_completion);
	memcpy(&nc->nc_cqe, cqe, nc->nc_qe_len);
	return (nc);
}

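/*
 * Attach a single data buffer to a command capsule; "send" selects the
 * transfer direction.  Only the pointer is stored, not a copy of the
 * data, so the caller is expected to keep the buffer valid until the
 * capsule is freed.  Response capsules cannot carry a data buffer
 * (EINVAL), and at most one buffer may be attached (EBUSY).
 */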
int
nvmf_capsule_append_data(struct nvmf_capsule *nc, void *buf, size_t len,
    bool send)
{
	if (nc->nc_qe_len == sizeof(struct nvme_completion))
		return (EINVAL);
	if (nc->nc_data_len != 0)
		return (EBUSY);

	nc->nc_data = buf;
	nc->nc_data_len = len;
	nc->nc_send_data = send;
	return (0);
}

void
nvmf_free_capsule(struct nvmf_capsule *nc)
{
	nc->nc_qpair->nq_association->na_ops->free_capsule(nc);
}

int
nvmf_transmit_capsule(struct nvmf_capsule *nc)
{
	return (nc->nc_qpair->nq_association->na_ops->transmit_capsule(nc));
}

int
nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp)
{
	return (qp->nq_association->na_ops->receive_capsule(qp, ncp));
}

const void *
nvmf_capsule_sqe(const struct nvmf_capsule *nc)
{
	assert(nc->nc_qe_len == sizeof(struct nvme_command));
	return (&nc->nc_sqe);
}

const void *
nvmf_capsule_cqe(const struct nvmf_capsule *nc)
{
	assert(nc->nc_qe_len == sizeof(struct nvme_completion));
	return (&nc->nc_cqe);
}

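/*
 * Validate a received command capsule (typically on the controller
 * side): the PSDT field must select SGLs as required for Fabrics, after
 * which the transport checks its own SGL layout.  Returns an NVMe status
 * code such as NVME_SC_INVALID_FIELD, or zero on success.
 */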
uint8_t
nvmf_validate_command_capsule(const struct nvmf_capsule *nc)
{
	assert(nc->nc_qe_len == sizeof(struct nvme_command));

	if (NVMEV(NVME_CMD_PSDT, nc->nc_sqe.fuse) != NVME_PSDT_SGL)
		return (NVME_SC_INVALID_FIELD);

	return (nc->nc_qpair->nq_association->na_ops->validate_command_capsule(nc));
}

size_t
nvmf_capsule_data_len(const struct nvmf_capsule *nc)
{
	return (nc->nc_qpair->nq_association->na_ops->capsule_data_len(nc));
}

int
nvmf_receive_controller_data(const struct nvmf_capsule *nc,
    uint32_t data_offset, void *buf, size_t len)
{
	return (nc->nc_qpair->nq_association->na_ops->receive_controller_data(nc,
	    data_offset, buf, len));
}

int
nvmf_send_controller_data(const struct nvmf_capsule *nc, const void *buf,
    size_t len)
{
	return (nc->nc_qpair->nq_association->na_ops->send_controller_data(nc,
	    buf, len));
}

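/*
 * Build an nvlist describing the state the in-kernel nvmf stack needs to
 * take over this queue pair: queue properties (admin, SQ flow control,
 * queue size, SQ head and, on the host side, SQ tail) plus whatever
 * transport-specific parameters the kernel_handoff_params method adds.
 * On success the caller owns the nvlist and should eventually release it
 * with nvlist_destroy().
 */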
int
nvmf_kernel_handoff_params(struct nvmf_qpair *qp, nvlist_t **nvlp)
{
	nvlist_t *nvl;
	int error;

	nvl = nvlist_create(0);
	nvlist_add_bool(nvl, "admin", qp->nq_admin);
	nvlist_add_bool(nvl, "sq_flow_control", qp->nq_flow_control);
	nvlist_add_number(nvl, "qsize", qp->nq_qsize);
	nvlist_add_number(nvl, "sqhd", qp->nq_sqhd);
	if (!qp->nq_association->na_controller)
		nvlist_add_number(nvl, "sqtail", qp->nq_sqtail);
	qp->nq_association->na_ops->kernel_handoff_params(qp, nvl);
	error = nvlist_error(nvl);
	if (error != 0) {
		nvlist_destroy(nvl);
		return (error);
	}

	*nvlp = nvl;
	return (0);
}

int
nvmf_populate_dle(struct nvmf_qpair *qp, struct nvme_discovery_log_entry *dle)
{
	struct nvmf_association *na = qp->nq_association;

	dle->trtype = na->na_trtype;
	return (na->na_ops->populate_dle(qp, dle));
}

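/*
 * Map a transport type to a human-readable name.  Unknown values are
 * formatted as hex into a thread-local buffer that is only valid until
 * the next call from the same thread.
 */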
const char *
nvmf_transport_type(uint8_t trtype)
{
	static _Thread_local char buf[8];

	switch (trtype) {
	case NVMF_TRTYPE_RDMA:
		return ("RDMA");
	case NVMF_TRTYPE_FC:
		return ("Fibre Channel");
	case NVMF_TRTYPE_TCP:
		return ("TCP");
	case NVMF_TRTYPE_INTRA_HOST:
		return ("Intra-host");
	default:
		snprintf(buf, sizeof(buf), "0x%02x", trtype);
		return (buf);
	}
}

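/*
 * Pack an nvlist into the data/size pair of a struct nvmf_ioc_nv for use
 * with an nvmf ioctl.  The packed buffer is allocated by nvlist_pack(),
 * so the caller is responsible for freeing nv->data (and for destroying
 * nvl) once it is no longer needed.
 */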
int
nvmf_pack_ioc_nvlist(struct nvmf_ioc_nv *nv, nvlist_t *nvl)
{
	int error;

	memset(nv, 0, sizeof(*nv));

	error = nvlist_error(nvl);
	if (error)
		return (error);

	nv->data = nvlist_pack(nvl, &nv->size);
	if (nv->data == NULL)
		return (ENOMEM);

	return (0);
}