/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2013 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/vmem.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <powerpc/pseries/phyp-hvcall.h>

struct vscsi_softc;

/* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
struct vscsi_crq {
        uint8_t valid;
        uint8_t format;
        uint8_t reserved;
        uint8_t status;
        uint16_t timeout;
        uint16_t iu_length;
        uint64_t iu_data;
};
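
/*
 * CRQ "valid" byte values as used in this driver: 0xc0 marks an
 * initialization message (format 0x01 = init request, 0x02 = init
 * complete) and 0x80 a valid command/response entry.  For 0x80 entries,
 * "format" 0x01 carries an SRP IU and 0x02 a management datagram (MAD).
 * See vscsi_setup_bus() and vscsi_check_response_queue().
 */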

struct vscsi_xfer {
        TAILQ_ENTRY(vscsi_xfer) queue;
        struct vscsi_softc *sc;
        union ccb *ccb;
        bus_dmamap_t dmamap;
        uint64_t tag;

        vmem_addr_t srp_iu_offset;
        vmem_size_t srp_iu_size;
};
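
/*
 * An xfer moves from free_xferq to active_xferq when a request is issued
 * and back once its response has been processed.  The tag placed in each
 * outgoing SRP IU is the xfer's pointer value, so responses can be
 * matched back to their xfer without a lookup table.
 */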

TAILQ_HEAD(vscsi_xferq, vscsi_xfer);

struct vscsi_softc {
        device_t dev;
        struct cam_devq *devq;
        struct cam_sim *sim;
        struct cam_path *path;
        struct mtx io_lock;

        cell_t unit;
        int bus_initialized;
        int bus_logged_in;
        int max_transactions;

        int irqid;
        struct resource *irq;
        void *irq_cookie;

        bus_dma_tag_t crq_tag;
        struct vscsi_crq *crq_queue;
        int n_crqs, cur_crq;
        bus_dmamap_t crq_map;
        bus_addr_t crq_phys;

        vmem_t *srp_iu_arena;
        void *srp_iu_queue;
        bus_addr_t srp_iu_phys;

        bus_dma_tag_t data_tag;

        struct vscsi_xfer loginxp;
        struct vscsi_xfer *xfer;
        struct vscsi_xferq active_xferq;
        struct vscsi_xferq free_xferq;
};
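
/*
 * io_lock protects the xfer queues, the CRQ ring, and the SRP IU arena.
 * It also serves as the CAM SIM lock and as the busdma lock for the data
 * tag (see vscsi_attach()), so CAM callbacks and the interrupt handler
 * serialize against each other.
 */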

struct srp_login {
        uint8_t type;
        uint8_t reserved[7];
        uint64_t tag;
        uint64_t max_cmd_length;
        uint32_t reserved2;
        uint16_t buffer_formats;
        uint8_t flags;
        uint8_t reserved3[5];
        uint8_t initiator_port_id[16];
        uint8_t target_port_id[16];
} __packed;

struct srp_login_rsp {
        uint8_t type;
        uint8_t reserved[3];
        uint32_t request_limit_delta;
        uint64_t tag;
        uint32_t max_i_to_t_len;
        uint32_t max_t_to_i_len;
        uint16_t buffer_formats;
        uint8_t flags;
        /* Some reserved bits follow */
} __packed;

struct srp_cmd {
        uint8_t type;
        uint8_t flags1;
        uint8_t reserved[3];
        uint8_t formats;
        uint8_t out_buffer_count;
        uint8_t in_buffer_count;
        uint64_t tag;
        uint32_t reserved2;
        uint64_t lun;
        uint8_t reserved3[3];
        uint8_t additional_cdb;
        uint8_t cdb[16];
        uint8_t data_payload[0];
} __packed;
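
/*
 * data_payload holds any CDB bytes beyond the 16 in "cdb", followed by
 * the data buffer descriptors: either a single 16-byte direct descriptor
 * or a 20-byte indirect descriptor header plus a 16-byte-per-segment
 * descriptor table (see vscsi_scsi_command()).
 */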

struct srp_rsp {
        uint8_t type;
        uint8_t reserved[3];
        uint32_t request_limit_delta;
        uint64_t tag;
        uint16_t reserved2;
        uint8_t flags;
        uint8_t status;
        uint32_t data_out_resid;
        uint32_t data_in_resid;
        uint32_t sense_data_len;
        uint32_t response_data_len;
        uint8_t data_payload[0];
} __packed;

struct srp_tsk_mgmt {
        uint8_t type;
        uint8_t reserved[7];
        uint64_t tag;
        uint32_t reserved2;
        uint64_t lun;
        uint8_t reserved3[2];
        uint8_t function;
        uint8_t reserved4;
        uint64_t manage_tag;
        uint64_t reserved5;
} __packed;

/* Message code type */
#define SRP_LOGIN_REQ 0x00
#define SRP_TSK_MGMT 0x01
#define SRP_CMD 0x02
#define SRP_I_LOGOUT 0x03

#define SRP_LOGIN_RSP 0xC0
#define SRP_RSP 0xC1
#define SRP_LOGIN_REJ 0xC2

#define SRP_T_LOGOUT 0x80
#define SRP_CRED_REQ 0x81
#define SRP_AER_REQ 0x82

#define SRP_CRED_RSP 0x41
#define SRP_AER_RSP 0x42

/* Flags for srp_rsp flags field */
#define SRP_RSPVALID 0x01
#define SRP_SNSVALID 0x02
#define SRP_DOOVER 0x04
#define SRP_DOUNDER 0x08
#define SRP_DIOVER 0x10
#define SRP_DIUNDER 0x20

#define MAD_SUCCESS 0x00
#define MAD_NOT_SUPPORTED 0xf1
#define MAD_FAILED 0xf7

#define MAD_EMPTY_IU 0x01
#define MAD_ERROR_LOGGING_REQUEST 0x02
#define MAD_ADAPTER_INFO_REQUEST 0x03
#define MAD_CAPABILITIES_EXCHANGE 0x05
#define MAD_PHYS_ADAP_INFO_REQUEST 0x06
#define MAD_TAPE_PASSTHROUGH_REQUEST 0x07
#define MAD_ENABLE_FAST_FAIL 0x08

static int vscsi_probe(device_t);
static int vscsi_attach(device_t);
static int vscsi_detach(device_t);
static void vscsi_cam_action(struct cam_sim *, union ccb *);
static void vscsi_cam_poll(struct cam_sim *);
static void vscsi_intr(void *arg);
static void vscsi_check_response_queue(struct vscsi_softc *sc);
static void vscsi_setup_bus(struct vscsi_softc *sc);

static void vscsi_srp_login(struct vscsi_softc *sc);
static void vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
static void vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
    int nsegs, int err);
static void vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
static void vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);

static device_method_t vscsi_methods[] = {
        DEVMETHOD(device_probe, vscsi_probe),
        DEVMETHOD(device_attach, vscsi_attach),
        DEVMETHOD(device_detach, vscsi_detach),

        DEVMETHOD_END
};

static driver_t vscsi_driver = {
        "vscsi",
        vscsi_methods,
        sizeof(struct vscsi_softc)
};

DRIVER_MODULE(vscsi, vdevice, vscsi_driver, 0, 0);
MALLOC_DEFINE(M_VSCSI, "vscsi", "CAM device queue for VSCSI");

static int
vscsi_probe(device_t dev)
{

        if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
                return (ENXIO);

        device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
        return (0);
}

static int
vscsi_attach(device_t dev)
{
        struct vscsi_softc *sc;
        struct vscsi_xfer *xp;
        int error, i;

        sc = device_get_softc(dev);
        if (sc == NULL)
                return (EINVAL);

        sc->dev = dev;
        mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);

        /* Get properties */
        OF_getencprop(ofw_bus_get_node(dev), "reg", &sc->unit,
            sizeof(sc->unit));

        /* Setup interrupt */
        sc->irqid = 0;
        sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
            RF_ACTIVE);

        if (!sc->irq) {
                device_printf(dev, "Could not allocate IRQ\n");
                mtx_destroy(&sc->io_lock);
                return (ENXIO);
        }

        bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
            INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);

        /* Data DMA */
        error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
            256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
            &sc->data_tag);

        TAILQ_INIT(&sc->active_xferq);
        TAILQ_INIT(&sc->free_xferq);

        /* First XFER for login data */
        sc->loginxp.sc = sc;
        bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
        TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);

        /* CRQ area */
        error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
            1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
        error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
            BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
        sc->crq_phys = 0;
        sc->n_crqs = 0;
        error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
            8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);

        mtx_lock(&sc->io_lock);
        vscsi_setup_bus(sc);
        sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
            M_NOWAIT);
        for (i = 0; i < sc->max_transactions; i++) {
                xp = &sc->xfer[i];
                xp->sc = sc;

                error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
                if (error) {
                        device_printf(dev, "Could not create DMA map (%d)\n",
                            error);
                        break;
                }

                TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
        }
        mtx_unlock(&sc->io_lock);

        /* Allocate CAM bits */
        if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
                return (ENOMEM);

        sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
            device_get_unit(dev), &sc->io_lock,
            sc->max_transactions, sc->max_transactions,
            sc->devq);
        if (sc->sim == NULL) {
                cam_simq_free(sc->devq);
                sc->devq = NULL;
                device_printf(dev, "CAM SIM attach failed\n");
                return (EINVAL);
        }

        mtx_lock(&sc->io_lock);
        if (xpt_bus_register(sc->sim, dev, 0) != 0) {
                device_printf(dev, "XPT bus registration failed\n");
                cam_sim_free(sc->sim, FALSE);
                sc->sim = NULL;
                cam_simq_free(sc->devq);
                sc->devq = NULL;
                mtx_unlock(&sc->io_lock);
                return (EINVAL);
        }
        mtx_unlock(&sc->io_lock);

        return (0);
}

static int
vscsi_detach(device_t dev)
{
        struct vscsi_softc *sc;

        sc = device_get_softc(dev);
        if (sc == NULL)
                return (EINVAL);

        if (sc->sim != NULL) {
                mtx_lock(&sc->io_lock);
                xpt_bus_deregister(cam_sim_path(sc->sim));
                cam_sim_free(sc->sim, FALSE);
                sc->sim = NULL;
                mtx_unlock(&sc->io_lock);
        }

        if (sc->devq != NULL) {
                cam_simq_free(sc->devq);
                sc->devq = NULL;
        }

        mtx_destroy(&sc->io_lock);

        return (0);
}

static void
vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
        struct vscsi_softc *sc = cam_sim_softc(sim);

        mtx_assert(&sc->io_lock, MA_OWNED);

        switch (ccb->ccb_h.func_code) {
        case XPT_PATH_INQ:
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = PI_TAG_ABLE;
                cpi->hba_misc = PIM_EXTLUNS;
                cpi->target_sprt = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 0;
                cpi->max_lun = 0;
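                /*
                 * This virtual bus exposes a single target; LUNs are
                 * addressed as extended LUNs (PIM_EXTLUNS above and
                 * CAM_EXTLUN_BYTE_SWIZZLE() in the command paths).
                 */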
                cpi->initiator_id = ~0;
                strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strlcpy(cpi->hba_vid, "IBM", HBA_IDLEN);
                strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->bus_id = cam_sim_bus(sim);
                cpi->base_transfer_speed = 150000;
                cpi->transport = XPORT_SRP;
                cpi->transport_version = 0;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_SPC4;
                cpi->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_RESET_BUS:
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_RESET_DEV:
                ccb->ccb_h.status = CAM_REQ_INPROG;
                vscsi_task_management(sc, ccb);
                return;
        case XPT_GET_TRAN_SETTINGS:
                ccb->cts.protocol = PROTO_SCSI;
                ccb->cts.protocol_version = SCSI_REV_SPC4;
                ccb->cts.transport = XPORT_SRP;
                ccb->cts.transport_version = 0;
                ccb->cts.proto_specific.valid = 0;
                ccb->cts.xport_specific.valid = 0;
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_SET_TRAN_SETTINGS:
                ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
                break;
        case XPT_SCSI_IO:
        {
                struct vscsi_xfer *xp;

                ccb->ccb_h.status = CAM_REQ_INPROG;

                xp = TAILQ_FIRST(&sc->free_xferq);
                if (xp == NULL)
                        panic("SCSI queue flooded");
                xp->ccb = ccb;
                TAILQ_REMOVE(&sc->free_xferq, xp, queue);
                TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
                bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
                    ccb, vscsi_scsi_command, xp, 0);

                return;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        }

        xpt_done(ccb);
        return;
}

static void
vscsi_srp_login(struct vscsi_softc *sc)
{
        struct vscsi_xfer *xp;
        struct srp_login *login;
        struct vscsi_crq crq;
        int err;

        mtx_assert(&sc->io_lock, MA_OWNED);

        xp = TAILQ_FIRST(&sc->free_xferq);
        if (xp == NULL)
                panic("SCSI queue flooded");
        xp->ccb = NULL;
        TAILQ_REMOVE(&sc->free_xferq, xp, queue);
        TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

        /* Set up command */
        xp->srp_iu_size = 64;
        crq.iu_length = htobe16(xp->srp_iu_size);
        err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
            M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
        if (err)
                panic("Error during VMEM allocation (%d)", err);

        login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
            (uintptr_t)xp->srp_iu_offset);
        bzero(login, xp->srp_iu_size);
        login->type = SRP_LOGIN_REQ;
        login->tag = (uint64_t)(xp);
        login->max_cmd_length = htobe64(256);
        login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
        login->flags = 0;

        /* Create CRQ entry */
        crq.valid = 0x80;
        crq.format = 0x01;
        crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
        bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);

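        /*
         * H_SEND_CRQ takes the 16-byte CRQ as two doubleword register
         * arguments whose byte order must match the in-memory (big-endian)
         * layout built above; be64toh() undoes the swap that a
         * little-endian load of the raw struct words would introduce.
         */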
        err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
            be64toh(((uint64_t *)(&crq))[0]),
            be64toh(((uint64_t *)(&crq))[1]));
        if (err != 0)
                panic("CRQ send failure (%d)", err);
}

static void
vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
{
        struct srp_tsk_mgmt *cmd;
        struct vscsi_xfer *xp;
        struct vscsi_crq crq;
        int err;

        mtx_assert(&sc->io_lock, MA_OWNED);

        xp = TAILQ_FIRST(&sc->free_xferq);
        if (xp == NULL)
                panic("SCSI queue flooded");
        xp->ccb = ccb;
        TAILQ_REMOVE(&sc->free_xferq, xp, queue);
        TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

        xp->srp_iu_size = sizeof(*cmd);
        crq.iu_length = htobe16(xp->srp_iu_size);
        err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
            M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
        if (err)
                panic("Error during VMEM allocation (%d)", err);

        cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
            (uintptr_t)xp->srp_iu_offset);
        bzero(cmd, xp->srp_iu_size);
        cmd->type = SRP_TSK_MGMT;
        cmd->tag = (uint64_t)xp;
        cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

        switch (ccb->ccb_h.func_code) {
        case XPT_RESET_DEV:
                cmd->function = 0x08;
                break;
        default:
                panic("Unimplemented code %d", ccb->ccb_h.func_code);
                break;
        }

        bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

        /* Create CRQ entry */
        crq.valid = 0x80;
        crq.format = 0x01;
        crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

        err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
            be64toh(((uint64_t *)(&crq))[0]),
            be64toh(((uint64_t *)(&crq))[1]));
        if (err != 0)
                panic("CRQ send failure (%d)", err);
}

static void
vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
{
        struct vscsi_xfer *xp = xxp;
        uint8_t *cdb;
        union ccb *ccb = xp->ccb;
        struct srp_cmd *cmd;
        uint64_t chunk_addr;
        uint32_t chunk_size;
        int desc_start, i;
        struct vscsi_crq crq;

        KASSERT(err == 0, ("DMA error %d\n", err));

        mtx_assert(&xp->sc->io_lock, MA_OWNED);

        cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
            ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;

        /* Command format from Table 20, page 37 of SRP spec */
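        /*
         * IU sizing: sizeof(struct srp_cmd) is 48 bytes; a direct data
         * descriptor adds 16 bytes, while an indirect descriptor header
         * adds 20 bytes plus 16 bytes per segment for the descriptor
         * table.  CDBs longer than 16 bytes spill their tail into
         * data_payload ahead of the descriptors.
         */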
        xp->srp_iu_size = 48 + ((nsegs > 1) ? 20 : 16) +
            ((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
        crq.iu_length = htobe16(xp->srp_iu_size);
        if (nsegs > 1)
                xp->srp_iu_size += nsegs*16;
        xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
        err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
            M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
        if (err)
                panic("Error during VMEM allocation (%d)", err);

        cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
            (uintptr_t)xp->srp_iu_offset);
        bzero(cmd, xp->srp_iu_size);
        cmd->type = SRP_CMD;
        if (ccb->csio.cdb_len > 16)
                cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
        memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);

        cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
        cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

        if (nsegs > 1) {
                /* Use indirect descriptors */
                switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                case CAM_DIR_OUT:
                        cmd->formats = (2 << 4);
                        break;
                case CAM_DIR_IN:
                        cmd->formats = 2;
                        break;
                default:
                        panic("Does not support bidirectional commands (%d)",
                            ccb->ccb_h.flags & CAM_DIR_MASK);
                        break;
                }

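                /*
                 * Indirect layout inside data_payload, after any extra
                 * CDB bytes (desc_start): a 16-byte table descriptor
                 * (address, handle, table length), a 4-byte total
                 * transfer length, then the table itself with one
                 * 16-byte descriptor per segment.  The table descriptor
                 * points back into this same IU.
                 */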
                desc_start = ((ccb->csio.cdb_len > 16) ?
                    ccb->csio.cdb_len - 16 : 0);
                chunk_addr = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset +
                    20 + desc_start + sizeof(*cmd));
                chunk_size = htobe32(16*nsegs);
                memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
                memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
                chunk_size = 0;
                for (i = 0; i < nsegs; i++)
                        chunk_size += segs[i].ds_len;
                chunk_size = htobe32(chunk_size);
                memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
                desc_start += 20;
                for (i = 0; i < nsegs; i++) {
                        chunk_addr = htobe64(segs[i].ds_addr);
                        chunk_size = htobe32(segs[i].ds_len);

                        memcpy(&cmd->data_payload[desc_start + 16*i],
                            &chunk_addr, 8);
                        /* The 4-byte handle at offset 8 stays 0 (bzero) */
                        memcpy(&cmd->data_payload[desc_start + 16*i + 12],
                            &chunk_size, 4);
                }
        } else if (nsegs == 1) {
                switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                case CAM_DIR_OUT:
                        cmd->formats = (1 << 4);
                        break;
                case CAM_DIR_IN:
                        cmd->formats = 1;
                        break;
                default:
                        panic("Does not support bidirectional commands (%d)",
                            ccb->ccb_h.flags & CAM_DIR_MASK);
                        break;
                }

                /*
                 * Memory descriptor:
                 * 8 byte address
                 * 4 byte handle
                 * 4 byte length
                 */

                chunk_addr = htobe64(segs[0].ds_addr);
                chunk_size = htobe32(segs[0].ds_len);
                desc_start = ((ccb->csio.cdb_len > 16) ?
                    ccb->csio.cdb_len - 16 : 0);

                memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
                /* The 4-byte handle at offset 8 stays 0 (bzero) */
                memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
                KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
                    ccb->csio.cdb_len : 16), ("SRP IU command length"));
        } else {
                cmd->formats = 0;
        }
        bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

        /* Create CRQ entry */
        crq.valid = 0x80;
        crq.format = 0x01;
        crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

        err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
            be64toh(((uint64_t *)(&crq))[0]),
            be64toh(((uint64_t *)(&crq))[1]));
        if (err != 0)
                panic("CRQ send failure (%d)", err);
}

static void
vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
        struct vscsi_softc *sc = xsc;

        sc->crq_phys = segs[0].ds_addr;
        sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);
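        /*
         * Only the first page of the 8*PAGE_SIZE allocation holds the CRQ
         * ring; the remaining pages back the vmem arena below, from which
         * SRP IUs are carved out at 16-byte granularity.
         */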

        sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
        sc->srp_iu_phys = segs[0].ds_addr;
        sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
            segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
}

static void
vscsi_setup_bus(struct vscsi_softc *sc)
{
        struct vscsi_crq crq;
        struct vscsi_xfer *xp;
        int error;

        struct {
                uint32_t type;
                uint16_t status;
                uint16_t length;
                uint64_t tag;
                uint64_t buffer;
                struct {
                        char srp_version[8];
                        char partition_name[96];
                        uint32_t partition_number;
                        uint32_t mad_version;
                        uint32_t os_type;
                        uint32_t port_max_txu[8];
                } payload;
        } mad_adapter_info;

        bzero(&crq, sizeof(crq));

        /* Init message */
        crq.valid = 0xc0;
        crq.format = 0x01;

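        /*
         * Tear down any CRQ left from a previous registration, retrying
         * while the hypervisor reports H_BUSY.
         */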
        do {
                error = phyp_hcall(H_FREE_CRQ, sc->unit);
        } while (error == H_BUSY);

        /* See initialization sequence page 757 */
        bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
        sc->cur_crq = 0;
        sc->bus_initialized = 0;
        sc->bus_logged_in = 0;
        bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
        error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
            sc->n_crqs*sizeof(sc->crq_queue[0]));
        KASSERT(error == 0, ("CRQ registration failed (%d)", error));

        error = phyp_hcall(H_SEND_CRQ, sc->unit,
            be64toh(((uint64_t *)(&crq))[0]),
            be64toh(((uint64_t *)(&crq))[1]));
        if (error != 0)
                panic("CRQ setup failure (%d)", error);

        while (sc->bus_initialized == 0)
                vscsi_check_response_queue(sc);

        /* Send MAD adapter info */
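        /*
         * The request header (type, status, length, tag, buffer) is 24
         * bytes; "buffer" holds the guest physical address of the payload,
         * which the server fetches on its own, hence the +24 offset below.
         * os_type 2 identifies the OS as Linux, presumably so the server
         * treats us the same as a Linux client.
         */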
        mad_adapter_info.type = htobe32(MAD_ADAPTER_INFO_REQUEST);
        mad_adapter_info.status = 0;
        mad_adapter_info.length = htobe16(sizeof(mad_adapter_info.payload));

        strcpy(mad_adapter_info.payload.srp_version, "16.a");
        strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
        mad_adapter_info.payload.partition_number = -1;
        mad_adapter_info.payload.mad_version = htobe32(1);
        mad_adapter_info.payload.os_type = htobe32(2); /* Claim we are Linux */
        mad_adapter_info.payload.port_max_txu[0] = 0;
        /* If this fails, we get the defaults above */
        OF_getprop(OF_finddevice("/"), "ibm,partition-name",
            mad_adapter_info.payload.partition_name,
            sizeof(mad_adapter_info.payload.partition_name));
        OF_getprop(OF_finddevice("/"), "ibm,partition-no",
            &mad_adapter_info.payload.partition_number,
            sizeof(mad_adapter_info.payload.partition_number));

        xp = TAILQ_FIRST(&sc->free_xferq);
        xp->ccb = NULL;
        TAILQ_REMOVE(&sc->free_xferq, xp, queue);
        TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
        xp->srp_iu_size = sizeof(mad_adapter_info);
        crq.iu_length = htobe16(xp->srp_iu_size);
        vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
            M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
        mad_adapter_info.buffer = htobe64(xp->sc->srp_iu_phys +
            xp->srp_iu_offset + 24);
        mad_adapter_info.tag = (uint64_t)xp;
        memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
            &mad_adapter_info, sizeof(mad_adapter_info));
        crq.valid = 0x80;
        crq.format = 0x02;
        crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
        bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
        phyp_hcall(H_SEND_CRQ, xp->sc->unit,
            be64toh(((uint64_t *)(&crq))[0]),
            be64toh(((uint64_t *)(&crq))[1]));

        while (TAILQ_EMPTY(&sc->free_xferq))
                vscsi_check_response_queue(sc);

        /* Send SRP login */
        vscsi_srp_login(sc);
        while (sc->bus_logged_in == 0)
                vscsi_check_response_queue(sc);

        error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
}

static void
vscsi_intr(void *xsc)
{
        struct vscsi_softc *sc = xsc;

        mtx_lock(&sc->io_lock);
        vscsi_check_response_queue(sc);
        mtx_unlock(&sc->io_lock);
}

static void
vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
        union ccb *ccb = xp->ccb;
        struct vscsi_softc *sc = xp->sc;
        struct srp_rsp *rsp;
        uint32_t sense_len;

        /* SRP response packet in original request */
        rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
            (uintptr_t)xp->srp_iu_offset);
        ccb->csio.scsi_status = rsp->status;
        if (ccb->csio.scsi_status == SCSI_STATUS_OK)
                ccb->ccb_h.status = CAM_REQ_CMP;
        else
                ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
#ifdef NOTYET
        /* Collect fast fail codes */
        if (crq->status != 0)
                ccb->ccb_h.status = CAM_REQ_CMP_ERR;
#endif

        if (ccb->ccb_h.status != CAM_REQ_CMP) {
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
        }

        if (!(rsp->flags & SRP_RSPVALID))
                rsp->response_data_len = 0;
        if (!(rsp->flags & SRP_SNSVALID))
                rsp->sense_data_len = 0;
        if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
                rsp->data_out_resid = 0;
        if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
                rsp->data_in_resid = 0;

        if (rsp->flags & SRP_SNSVALID) {
                bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
                ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                sense_len = min(be32toh(rsp->sense_data_len),
                    ccb->csio.sense_len);
                memcpy(&ccb->csio.sense_data,
                    &rsp->data_payload[be32toh(rsp->response_data_len)],
                    sense_len);
                ccb->csio.sense_resid = ccb->csio.sense_len -
                    be32toh(rsp->sense_data_len);
        }

        /* Residual counts are big-endian on the wire, like the fields above */
        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
        case CAM_DIR_OUT:
                ccb->csio.resid = be32toh(rsp->data_out_resid);
                break;
        case CAM_DIR_IN:
                ccb->csio.resid = be32toh(rsp->data_in_resid);
                break;
        }

        bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->data_tag, xp->dmamap);
        xpt_done(ccb);
        xp->ccb = NULL;
}

static void
vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
        struct vscsi_softc *sc = xp->sc;
        struct srp_login_rsp *rsp;

        /* SRP response packet in original request */
        rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
            (uintptr_t)xp->srp_iu_offset);
        KASSERT(be16toh(rsp->buffer_formats) & 0x3, ("Neither direct nor "
            "indirect buffers supported"));

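        /*
         * The target's request-limit delta bounds how many IUs may be
         * outstanding at once; it is reused below as the CAM queue depth
         * and sizes the preallocated xfer array (see vscsi_attach()).
         */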
        sc->max_transactions = be32toh(rsp->request_limit_delta);
        device_printf(sc->dev, "Queue depth %d commands\n",
            sc->max_transactions);
        sc->bus_logged_in = 1;
}

static void
vscsi_cam_poll(struct cam_sim *sim)
{
        struct vscsi_softc *sc = cam_sim_softc(sim);

        vscsi_check_response_queue(sc);
}

static void
vscsi_check_response_queue(struct vscsi_softc *sc)
{
        struct vscsi_crq *crq;
        struct vscsi_xfer *xp;
        int code;

        mtx_assert(&sc->io_lock, MA_OWNED);

        while (sc->crq_queue[sc->cur_crq].valid != 0) {
                /* The hypercalls at both ends of this are not optimal */
                phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);
                bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);

                crq = &sc->crq_queue[sc->cur_crq];

                switch (crq->valid) {
                case 0xc0:
                        if (crq->format == 0x02)
                                sc->bus_initialized = 1;
                        break;
                case 0x80:
                        /* IU data is set to tag pointer (the XP) */
                        xp = (struct vscsi_xfer *)crq->iu_data;

                        switch (crq->format) {
                        case 0x01:
                                code = *((uint8_t *)sc->srp_iu_queue +
                                    (uintptr_t)xp->srp_iu_offset);
                                switch (code) {
                                case SRP_RSP:
                                        vscsi_srp_response(xp, crq);
                                        break;
                                case SRP_LOGIN_RSP:
                                        vscsi_login_response(xp, crq);
                                        break;
                                default:
                                        device_printf(sc->dev, "Unknown SRP "
                                            "response code %d\n", code);
                                        break;
                                }
                                break;
                        case 0x02:
                                /* Ignore management datagrams */
                                break;
                        default:
                                panic("Unknown CRQ format %d\n", crq->format);
                                break;
                        }
                        vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
                            xp->srp_iu_size);
                        TAILQ_REMOVE(&sc->active_xferq, xp, queue);
                        TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
                        break;
                default:
                        device_printf(sc->dev,
                            "Unknown CRQ message type %d\n", crq->valid);
                        break;
                }

                crq->valid = 0;
                sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;

                bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
                phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
        }
}