1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2015 Netflix, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
28 */
29
30 #include <sys/param.h>
31 #include <sys/bus.h>
32 #include <sys/endian.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/time.h>
38 #include <sys/conf.h>
39 #include <sys/fcntl.h>
40 #include <sys/sbuf.h>
41 #include <sys/stdarg.h>
42
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/sysctl.h>
46
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include <cam/cam_queue.h>
50 #include <cam/cam_periph.h>
51 #include <cam/cam_sim.h>
52 #include <cam/cam_xpt.h>
53 #include <cam/cam_xpt_sim.h>
54 #include <cam/cam_xpt_periph.h>
55 #include <cam/cam_xpt_internal.h>
56 #include <cam/cam_debug.h>
57
58 #include <cam/scsi/scsi_all.h>
59 #include <cam/scsi/scsi_message.h>
60 #include <cam/nvme/nvme_all.h>
61
/*
 * Per-device quirk record.  Currently only carries a tag-depth override;
 * matched entries are attached to the device in nvme_alloc_device().
 */
struct nvme_quirk_entry {
	u_int quirks;		/* Bitmask of CAM_QUIRK_* flags below */
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;		/* Min tag depth, used when CAM_QUIRK_MAXTAGS set */
	u_int maxtags;		/* Max tag depth, used when CAM_QUIRK_MAXTAGS set */
};
68
/*
 * Periph driver for the transient "nvme_probe" periph that interrogates a
 * newly discovered namespace.  Registered early so probes can run before
 * the full set of periph drivers has attached.
 */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);
80
/*
 * Probe state machine: IDENTIFY the controller first, then the namespace.
 * The order of these enumerators must match nvme_probe_action_text[] below.
 */
typedef enum {
	NVME_PROBE_IDENTIFY_CD,		/* Fetch controller data (IDENTIFY, CNS=1) */
	NVME_PROBE_IDENTIFY_NS,		/* Fetch namespace data (IDENTIFY, CNS=0) */
	NVME_PROBE_DONE,		/* Probe finished successfully */
	NVME_PROBE_INVALID		/* No probe in progress / probe failed */
} nvme_probe_action;

/* Human-readable names, indexed by nvme_probe_action — keep in sync. */
static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY_CD",
	"NVME_PROBE_IDENTIFY_NS",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID"
};
94
/*
 * Advance the probe state machine, logging the old -> new transition on the
 * CAM_DEBUG_PROBE channel before updating softc->action.
 */
#define NVME_PROBE_SET_ACTION(softc, newaction)	\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)
104
typedef enum {
	NVME_PROBE_NO_ANNOUNCE = 0x04	/* Suppress announce when re-probing */
} nvme_probe_flags;

/*
 * Soft state for an in-flight probe.  IDENTIFY responses land in the
 * anonymous union: first the controller data, then (reusing the same
 * storage) the namespace data, matching the two probe phases.
 */
typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;	/* Queued XPT_SCAN_* requests */
	union {
		struct nvme_controller_data	cd;	/* NVME_PROBE_IDENTIFY_CD result */
		struct nvme_namespace_data	ns;	/* NVME_PROBE_IDENTIFY_NS result */
	};
	nvme_probe_action	action;		/* Current state machine state */
	nvme_probe_flags	flags;
	int		restart;	/* Set when a new scan arrives mid-probe */
	struct cam_periph *periph;
} nvme_probe_softc;
120
/*
 * Quirk table.  Only the catch-all (no-quirk) entry exists today; the last
 * entry is used as the default in nvme_alloc_device().  The commented-out
 * fields sketch the intended SCSI-style match criteria for future entries.
 */
static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

/* Number of entries in nvme_quirk_table. */
static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);
134
135 static cam_status nvme_probe_register(struct cam_periph *periph,
136 void *arg);
137 static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
138 static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
139 static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
140 static void nvme_probe_cleanup(struct cam_periph *periph);
141 //static void nvme_find_quirk(struct cam_ed *device);
142 static void nvme_scan_lun(struct cam_periph *periph,
143 struct cam_path *path, cam_flags flags,
144 union ccb *ccb);
145 static struct cam_ed *
146 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
147 lun_id_t lun_id);
148 static void nvme_device_transport(struct cam_path *path);
149 static void nvme_dev_async(uint32_t async_code,
150 struct cam_eb *bus,
151 struct cam_et *target,
152 struct cam_ed *device,
153 void *async_arg);
154 static void nvme_action(union ccb *start_ccb);
155 static void nvme_announce_periph_sbuf(struct cam_periph *periph,
156 struct sbuf *sb);
157 static void nvme_proto_announce_sbuf(struct cam_ed *device,
158 struct sbuf *sb);
159 static void nvme_proto_denounce_sbuf(struct cam_ed *device,
160 struct sbuf *sb);
161 static void nvme_proto_debug_out(union ccb *ccb);
162
/* Transport-level hooks shared by all NVMe-style transports. */
static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce_sbuf = nvme_announce_periph_sbuf,
};
/*
 * Instantiate and register one xpt_xport per transport that speaks the NVMe
 * protocol (local PCIe NVMe and NVMe-over-Fabrics), all sharing
 * nvme_xport_ops above.
 */
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);
NVME_XPT_XPORT(nvmf, NVMF);

#undef NVME_XPT_XPORT

/* Protocol-level hooks: device identification strings and CDB debugging. */
static struct xpt_proto_ops nvme_proto_ops = {
	.announce_sbuf = nvme_proto_announce_sbuf,
	.denounce_sbuf = nvme_proto_denounce_sbuf,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);
193
/*
 * Periph driver init hook.  The periph_driver interface requires an init
 * function, but the nvme_probe periph has no global state to set up.
 */
static void
nvme_probe_periph_init(void)
{
}
198
/*
 * Periph constructor for the nvme_probe periph.  Allocates the probe softc,
 * queues the originating scan CCB on it, takes a periph reference for the
 * duration of the probe, and schedules the first probe step.
 *
 * Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on any failure.
 */
static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf(
		    "nvme_probe_register: no probe CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf(
		    "nvme_probe_register: Unable to probe new device. Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	/* The scan CCB is completed later, from nvme_probe_done(). */
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
	    periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	/*
	 * Hold a reference across the probe; released in nvme_probe_done().
	 * NOTE(review): on failure here the softc is left in periph->softc —
	 * presumably freed by nvme_probe_cleanup() when cam_periph_alloc()
	 * tears the periph down; confirm against cam_periph.c.
	 */
	if (cam_periph_acquire(periph) != 0)
		return (CAM_REQ_CMP_ERR);

	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}
236
237 static void
nvme_probe_schedule(struct cam_periph * periph)238 nvme_probe_schedule(struct cam_periph *periph)
239 {
240 union ccb *ccb;
241 nvme_probe_softc *softc;
242
243 softc = (nvme_probe_softc *)periph->softc;
244 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
245
246 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
247
248 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
249 softc->flags |= NVME_PROBE_NO_ANNOUNCE;
250 else
251 softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;
252
253 xpt_schedule(periph, CAM_PRIORITY_XPT);
254 }
255
/*
 * Issue the admin command for the current probe step.  Both steps send an
 * NVMe IDENTIFY; they differ in the CNS value and target NSID:
 *   IDENTIFY_CD: CNS=1, NSID=0   -> controller data structure
 *   IDENTIFY_NS: CNS=0, NSID=lun -> namespace data structure
 * The device queue is frozen (CAM_DEV_QFREEZE) for each step; the freeze is
 * dropped in nvme_probe_done().
 */
static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	nvme_probe_softc *softc;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	nvmeio = &start_ccb->nvmeio;
	lun = xpt_path_lun_id(periph->path);

	/* A rescan request arrived mid-probe: start over from step one. */
	if (softc->restart) {
		softc->restart = 0;
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
	}

	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->cd,	/* data_ptr */
		    sizeof(softc->cd),		/* dxfer_len */
		    30 * 1000); /* timeout 30s */
		/* cdw10 = 1: CNS=1 selects the controller data structure. */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
		    1, 0, 0, 0, 0, 0);
		break;
	case NVME_PROBE_IDENTIFY_NS:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->ns,	/* data_ptr */
		    sizeof(softc->ns),		/* dxfer_len */
		    30 * 1000); /* timeout 30s */
		/* cdw10 = 0: CNS=0 selects the namespace data for NSID=lun. */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
		    0, 0, 0, 0, 0, 0);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	/* Freeze the devq for this step; released in nvme_probe_done(). */
	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
	xpt_action(start_ccb);
}
303
/*
 * Completion handler for both probe steps.  On success it stashes the
 * IDENTIFY results on the cam_ed (controller data, serial number, namespace
 * data, SCSI-style device ID built from NGUID/EUI64), advances the state
 * machine, and finally completes all queued scan CCBs and destroys the
 * one-shot probe periph.  Error paths mark the device lost/unconfigured.
 * Every exit must balance the CAM_DEV_QFREEZE taken in nvme_probe_start().
 */
static void
nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct nvme_namespace_data *nvme_data;
	struct nvme_controller_data *nvme_cdata;
	nvme_probe_softc *softc;
	struct cam_path *path;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;
	uint32_t  priority;
	int found = 1, e, g, len;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* On restart, skip recovery/retries so we fail fast. */
		if (cam_periph_error(done_ccb,
		    0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
		    ) == ERESTART) {
			/* Command was requeued; just drop our freeze. */
out:
			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
			cam_release_devq(path, 0, 0, 0, FALSE);
			return;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
		}

		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
device_fail:	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			xpt_async(AC_LOST_DEVICE, path, NULL);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
		found = 0;
		goto done;
	}
	/* A rescan arrived while this step was in flight; discard it. */
	if (softc->restart)
		goto done;
	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		/* IDENTIFY data is little-endian on the wire. */
		nvme_controller_data_swapbytes(&softc->cd);

		nvme_cdata = path->device->nvme_cdata;
		if (nvme_cdata == NULL) {
			nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_cdata == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
		path->device->nvme_cdata = nvme_cdata;

		/* Save/update serial number. */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}
		path->device->serial_num = (uint8_t *)
		    malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
		/* Serial number is best-effort; a failed alloc is tolerated. */
		if (path->device->serial_num != NULL) {
			cam_strvis_flag(path->device->serial_num,
			    nvme_cdata->sn, sizeof(nvme_cdata->sn),
			    NVME_SERIAL_NUMBER_LENGTH + 1,
			    CAM_STRVIS_FLAG_NONASCII_SPC);

			path->device->serial_num_len =
			    strlen(path->device->serial_num);
		}

//		nvme_find_quirk(path->device);
		nvme_device_transport(path);
		/* Controller done; move on to the namespace IDENTIFY. */
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		goto out;
	case NVME_PROBE_IDENTIFY_NS:
		nvme_namespace_data_swapbytes(&softc->ns);

		/* Check that the namespace exists. */
		if (softc->ns.nsze == 0)
			goto device_fail;

		nvme_data = path->device->nvme_data;
		if (nvme_data == NULL) {
			nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_data == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
		path->device->nvme_data = nvme_data;

		/* Save/update device_id based on NGUID and/or EUI64. */
		if (path->device->device_id != NULL) {
			free(path->device->device_id, M_CAMXPT);
			path->device->device_id = NULL;
			path->device->device_id_len = 0;
		}
		/* An all-zero NGUID/EUI64 means "not implemented"; skip it. */
		len = 0;
		for (g = 0; g < sizeof(nvme_data->nguid); g++) {
			if (nvme_data->nguid[g] != 0)
				break;
		}
		if (g < sizeof(nvme_data->nguid))
			len += sizeof(struct scsi_vpd_id_descriptor) + 16;
		for (e = 0; e < sizeof(nvme_data->eui64); e++) {
			if (nvme_data->eui64[e] != 0)
				break;
		}
		if (e < sizeof(nvme_data->eui64))
			len += sizeof(struct scsi_vpd_id_descriptor) + 8;
		if (len > 0) {
			path->device->device_id = (uint8_t *)
			    malloc(SVPD_DEVICE_ID_HDR_LEN + len,
			    M_CAMXPT, M_NOWAIT);
		}
		/*
		 * Build a SCSI Device Identification VPD page so NVMe
		 * namespaces expose IDs through the same CDAI interface
		 * as SCSI disks (EUI-64 designators: 16B NGUID, 8B EUI64).
		 */
		if (path->device->device_id != NULL) {
			did = (struct scsi_vpd_device_id *)path->device->device_id;
			did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
			did->page_code = SVPD_DEVICE_ID;
			scsi_ulto2b(len, did->length);
			idd = (struct scsi_vpd_id_descriptor *)(did + 1);
			if (g < sizeof(nvme_data->nguid)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 16;
				bcopy(nvme_data->nguid, idd->identifier, 16);
				idd = (struct scsi_vpd_id_descriptor *)
				    &idd->identifier[16];
			}
			if (e < sizeof(nvme_data->eui64)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 8;
				bcopy(nvme_data->eui64, idd->identifier, 8);
			}
			path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
		}

		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			/* New device: take a reference and announce it. */
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
		} else {
			/* Known device reprobed: just signal a change. */
			xpt_async(AC_GETDEV_CHANGED, path, NULL);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
	}
done:
	/* Restart requested: re-run the whole probe for the queued CCBs. */
	if (softc->restart) {
		softc->restart = 0;
		xpt_release_ccb(done_ccb);
		nvme_probe_schedule(periph);
		goto out;
	}
	xpt_release_ccb(done_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	/* Complete every scan CCB that was waiting on this probe. */
	while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &done_ccb->ccb_h, periph_links.tqe);
		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
		xpt_done(done_ccb);
	}
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(path, 0, 0, 0, FALSE);
	/* The probe periph is one-shot: tear it down now. */
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}
494
495 static void
nvme_probe_cleanup(struct cam_periph * periph)496 nvme_probe_cleanup(struct cam_periph *periph)
497 {
498
499 free(periph->softc, M_CAMXPT);
500 }
501
#if 0
/* XXX should be used, don't delete */
/*
 * Look up the quirk entry for a device and apply any tag-depth override.
 * Disabled: depends on an nvme_identify_match() comparator that is not
 * defined anywhere in this file — presumably still to be written.
 */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif
526
/*
 * Start (or restart) a probe of a single LUN/namespace.  If an nvme_probe
 * periph already exists for the path, queue the request on it and flag a
 * restart; otherwise allocate a fresh probe periph.  The request_ccb is
 * completed asynchronously from nvme_probe_done() (or synchronously here on
 * early failure).
 */
static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
    cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	/*
	 * Wildcard LUNs cannot be probed individually; complete the request
	 * as a no-op.  NOTE(review): this branch dereferences request_ccb
	 * without the NULL check the earlier branch performs — presumably
	 * scan requests always carry a CCB here; confirm against callers.
	 */
	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	/* Take the path lock unless the caller already holds it. */
	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			/* Probe in flight: queue our CCB and ask it to restart. */
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
			    &request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			/* Existing probe periph is being destroyed; give up. */
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		/* nvme_probe_register() consumes request_ccb on success. */
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
		    nvme_probe_start, "nvme_probe",
		    CAM_PERIPH_BIO,
		    request_ccb->ccb_h.path, NULL, 0,
		    request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path,
	"xpt_scan_lun: cam_alloc_periph returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}
593
594 static struct cam_ed *
nvme_alloc_device(struct cam_eb * bus,struct cam_et * target,lun_id_t lun_id)595 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
596 {
597 struct nvme_quirk_entry *quirk;
598 struct cam_ed *device;
599
600 device = xpt_alloc_device(bus, target, lun_id);
601 if (device == NULL)
602 return (NULL);
603
604 /*
605 * Take the default quirk entry until we have inquiry
606 * data from nvme and can determine a better quirk to use.
607 */
608 quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
609 device->quirk = (void *)quirk;
610 device->mintags = 0;
611 device->maxtags = 0;
612 device->inq_flags = 0;
613 device->queue_flags = 0;
614 device->device_id = NULL;
615 device->device_id_len = 0;
616 device->serial_num = NULL;
617 device->serial_num_len = 0;
618 return (device);
619 }
620
621 static void
nvme_device_transport(struct cam_path * path)622 nvme_device_transport(struct cam_path *path)
623 {
624 struct ccb_pathinq cpi;
625 struct ccb_trans_settings cts;
626 /* XXX get data from nvme namespace and other info ??? */
627
628 /* Get transport information from the SIM */
629 xpt_path_inq(&cpi, path);
630
631 path->device->transport = cpi.transport;
632 path->device->transport_version = cpi.transport_version;
633
634 path->device->protocol = cpi.protocol;
635 path->device->protocol_version = cpi.protocol_version;
636
637 /* Tell the controller what we think */
638 memset(&cts, 0, sizeof(cts));
639 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
640 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
641 cts.type = CTS_TYPE_CURRENT_SETTINGS;
642 cts.transport = path->device->transport;
643 cts.transport_version = path->device->transport_version;
644 cts.protocol = path->device->protocol;
645 cts.protocol_version = path->device->protocol_version;
646 cts.proto_specific.valid = 0;
647 cts.xport_specific.valid = 0;
648 xpt_action((union ccb *)&cts);
649 }
650
/*
 * Handle XPT_DEV_ADVINFO: fetch (and for PHYS_PATH, store) advanced device
 * information.  Unsupported buftypes and unsupported stores leave the
 * status at CAM_REQ_INVALID set below; supported operations copy at most
 * bufsiz bytes and report the full size in cdai->provsiz so callers can
 * detect truncation.
 */
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		/* Read-only: stores are rejected with CAM_REQ_INVALID. */
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		/* The only buftype that supports stores. */
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL) {
				free(device->physpath, M_CAMXPT);
				device->physpath = NULL;
				device->physpath_len = 0;
			}
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			device->physpath_len = cdai->bufsiz;
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		/*
		 * NOTE(review): copies from device->nvme_cdata without a
		 * NULL check — presumably only reachable after a successful
		 * probe has populated it; confirm against callers.
		 */
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		/* NOTE(review): same unchecked-NULL caveat for nvme_data. */
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	/* Let interested parties know an attribute was updated. */
	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}
740
741 static void
nvme_action(union ccb * start_ccb)742 nvme_action(union ccb *start_ccb)
743 {
744 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
745 ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));
746
747 switch (start_ccb->ccb_h.func_code) {
748 case XPT_SCAN_BUS:
749 case XPT_SCAN_TGT:
750 case XPT_SCAN_LUN:
751 nvme_scan_lun(start_ccb->ccb_h.path->periph,
752 start_ccb->ccb_h.path, start_ccb->crcn.flags,
753 start_ccb);
754 break;
755 case XPT_DEV_ADVINFO:
756 nvme_dev_advinfo(start_ccb);
757 break;
758
759 default:
760 xpt_action_default(start_ccb);
761 break;
762 }
763 }
764
765 /*
766 * Handle any per-device event notifications that require action by the XPT.
767 */
768 static void
nvme_dev_async(uint32_t async_code,struct cam_eb * bus,struct cam_et * target,struct cam_ed * device,void * async_arg)769 nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
770 struct cam_ed *device, void *async_arg)
771 {
772
773 /*
774 * We only need to handle events for real devices.
775 */
776 if (target->target_id == CAM_TARGET_WILDCARD
777 || device->lun_id == CAM_LUN_WILDCARD)
778 return;
779
780 if (async_code == AC_LOST_DEVICE &&
781 (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
782 device->flags |= CAM_DEV_UNCONFIGURED;
783 xpt_release_device(device);
784 }
785 }
786
787 static void
nvme_announce_periph_sbuf(struct cam_periph * periph,struct sbuf * sb)788 nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
789 {
790 struct ccb_pathinq cpi;
791 struct ccb_trans_settings cts;
792 struct cam_path *path = periph->path;
793 struct ccb_trans_settings_nvme *nvmex;
794
795 cam_periph_assert(periph, MA_OWNED);
796
797 /* Ask the SIM for connection details */
798 memset(&cts, 0, sizeof(cts));
799 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
800 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
801 cts.type = CTS_TYPE_CURRENT_SETTINGS;
802 xpt_action((union ccb*)&cts);
803 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
804 return;
805
806 /* Ask the SIM for its base transfer speed */
807 xpt_path_inq(&cpi, periph->path);
808 sbuf_printf(sb, "%s%d: nvme version %d.%d",
809 periph->periph_name, periph->unit_number,
810 NVME_MAJOR(cts.protocol_version),
811 NVME_MINOR(cts.protocol_version));
812 if (cts.transport == XPORT_NVME) {
813 nvmex = &cts.proto_specific.nvme;
814 if (nvmex->valid & CTS_NVME_VALID_LINK)
815 sbuf_printf(sb,
816 " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
817 nvmex->lanes, nvmex->max_lanes,
818 nvmex->speed, nvmex->max_speed);
819 }
820 sbuf_putc(sb, '\n');
821 }
822
/*
 * Protocol hook: append the full identification line (built from the saved
 * controller and namespace IDENTIFY data) for a newly attached device.
 */
static void
nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
}
828
/*
 * Protocol hook: append the short identification line when a device is
 * detached/lost.
 */
static void
nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
}
834
835 static void
nvme_proto_debug_out(union ccb * ccb)836 nvme_proto_debug_out(union ccb *ccb)
837 {
838 char command_str[128];
839
840 if (ccb->ccb_h.func_code != XPT_NVME_IO &&
841 ccb->ccb_h.func_code != XPT_NVME_ADMIN)
842 return;
843
844 CAM_DEBUG(ccb->ccb_h.path,
845 CAM_DEBUG_CDB,("%s\n", nvme_command_string(&ccb->nvmeio,
846 command_str, sizeof(command_str))));
847 }
848