1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2003 Sam Leffler, Errno Consulting
5 * Copyright (c) 2003 Global Technology Associates, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 /*
32 * SafeNet SafeXcel-1141 hardware crypto accelerator
33 */
34 #include "opt_safe.h"
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/mbuf.h>
43 #include <sys/module.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48 #include <sys/uio.h>
49
50 #include <vm/vm.h>
51 #include <vm/pmap.h>
52
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <sys/bus.h>
56 #include <sys/rman.h>
57
58 #include <opencrypto/cryptodev.h>
59 #include <opencrypto/xform_auth.h>
60 #include <sys/random.h>
61 #include <sys/kobj.h>
62
63 #include "cryptodev_if.h"
64
65 #include <dev/pci/pcivar.h>
66 #include <dev/pci/pcireg.h>
67
68 #ifdef SAFE_RNDTEST
69 #include <dev/rndtest/rndtest.h>
70 #endif
71 #include <dev/safe/safereg.h>
72 #include <dev/safe/safevar.h>
73
74 #ifndef bswap32
75 #define bswap32 NTOHL
76 #endif
77
78 /*
79 * Prototypes and count for the pci_device structure
80 */
81 static int safe_probe(device_t);
82 static int safe_attach(device_t);
83 static int safe_detach(device_t);
84 static int safe_suspend(device_t);
85 static int safe_resume(device_t);
86 static int safe_shutdown(device_t);
87
88 static int safe_probesession(device_t, const struct crypto_session_params *);
89 static int safe_newsession(device_t, crypto_session_t,
90 const struct crypto_session_params *);
91 static int safe_process(device_t, struct cryptop *, int);
92
93 static device_method_t safe_methods[] = {
94 /* Device interface */
95 DEVMETHOD(device_probe, safe_probe),
96 DEVMETHOD(device_attach, safe_attach),
97 DEVMETHOD(device_detach, safe_detach),
98 DEVMETHOD(device_suspend, safe_suspend),
99 DEVMETHOD(device_resume, safe_resume),
100 DEVMETHOD(device_shutdown, safe_shutdown),
101
102 /* crypto device methods */
103 DEVMETHOD(cryptodev_probesession, safe_probesession),
104 DEVMETHOD(cryptodev_newsession, safe_newsession),
105 DEVMETHOD(cryptodev_process, safe_process),
106
107 DEVMETHOD_END
108 };
109
110 static driver_t safe_driver = {
111 "safe",
112 safe_methods,
113 sizeof (struct safe_softc)
114 };
115
116 DRIVER_MODULE(safe, pci, safe_driver, 0, 0);
117 MODULE_DEPEND(safe, crypto, 1, 1, 1);
118 #ifdef SAFE_RNDTEST
119 MODULE_DEPEND(safe, rndtest, 1, 1, 1);
120 #endif
121
122 static void safe_intr(void *);
123 static void safe_callback(struct safe_softc *, struct safe_ringentry *);
124 static void safe_feed(struct safe_softc *, struct safe_ringentry *);
125 static void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
126 #ifndef SAFE_NO_RNG
127 static void safe_rng_init(struct safe_softc *);
128 static void safe_rng(void *);
129 #endif /* SAFE_NO_RNG */
130 static int safe_dma_malloc(struct safe_softc *, bus_size_t,
131 struct safe_dma_alloc *, int);
132 #define safe_dma_sync(_dma, _flags) \
133 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
134 static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
135 static int safe_dmamap_aligned(const struct safe_operand *);
136 static int safe_dmamap_uniform(const struct safe_operand *);
137
138 static void safe_reset_board(struct safe_softc *);
139 static void safe_init_board(struct safe_softc *);
140 static void safe_init_pciregs(device_t dev);
141 static void safe_cleanchip(struct safe_softc *);
142 static void safe_totalreset(struct safe_softc *);
143
144 static int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
145
146 static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
147 "SafeNet driver parameters");
148
149 #ifdef SAFE_DEBUG
150 static void safe_dump_dmastatus(struct safe_softc *, const char *);
151 static void safe_dump_ringstate(struct safe_softc *, const char *);
152 static void safe_dump_intrstate(struct safe_softc *, const char *);
153 static void safe_dump_request(struct safe_softc *, const char *,
154 struct safe_ringentry *);
155
156 static struct safe_softc *safec; /* for use by hw.safe.dump */
157
158 static int safe_debug = 0;
159 SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
160 0, "control debugging msgs");
161 #define DPRINTF(_x) if (safe_debug) printf _x
162 #else
163 #define DPRINTF(_x)
164 #endif
165
166 #define READ_REG(sc,r) \
167 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
168
169 #define WRITE_REG(sc,reg,val) \
170 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
171
172 struct safe_stats safestats;
173 SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
174 safe_stats, "driver statistics");
175 #ifndef SAFE_NO_RNG
176 static int safe_rnginterval = 1; /* poll once a second */
177 SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
178 0, "RNG polling interval (secs)");
179 static int safe_rngbufsize = 16; /* 64 bytes each poll */
180 SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
181 0, "RNG polling buffer size (32-bit words)");
182 static int safe_rngmaxalarm = 8; /* max alarms before reset */
183 SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
184 0, "RNG max alarms before reset");
185 #endif /* SAFE_NO_RNG */
186
187 static int
safe_probe(device_t dev)
189 {
190 if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
191 pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
192 return (BUS_PROBE_DEFAULT);
193 return (ENXIO);
194 }
195
196 static const char*
safe_partname(struct safe_softc *sc)
198 {
199 /* XXX sprintf numbers when not decoded */
200 switch (pci_get_vendor(sc->sc_dev)) {
201 case PCI_VENDOR_SAFENET:
202 switch (pci_get_device(sc->sc_dev)) {
203 case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
204 }
205 return "SafeNet unknown-part";
206 }
207 return "Unknown-vendor unknown-part";
208 }
209
210 #ifndef SAFE_NO_RNG
211 static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
213 {
214 /* MarkM: FIX!! Check that this does not swamp the harvester! */
215 random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
216 }
217 #endif /* SAFE_NO_RNG */
218
219 static int
safe_attach(device_t dev)
221 {
222 struct safe_softc *sc = device_get_softc(dev);
223 u_int32_t raddr;
224 u_int32_t i;
225 int rid;
226
227 bzero(sc, sizeof (*sc));
228 sc->sc_dev = dev;
229
230 /* XXX handle power management */
231
232 pci_enable_busmaster(dev);
233
234 /*
235 * Setup memory-mapping of PCI registers.
236 */
237 rid = BS_BAR;
238 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
239 RF_ACTIVE);
240 if (sc->sc_sr == NULL) {
241 device_printf(dev, "cannot map register space\n");
242 goto bad;
243 }
244 sc->sc_st = rman_get_bustag(sc->sc_sr);
245 sc->sc_sh = rman_get_bushandle(sc->sc_sr);
246
247 /*
248 * Arrange interrupt line.
249 */
250 rid = 0;
251 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
252 RF_SHAREABLE|RF_ACTIVE);
253 if (sc->sc_irq == NULL) {
254 device_printf(dev, "could not map interrupt\n");
255 goto bad1;
256 }
257 /*
258 * NB: Network code assumes we are blocked with splimp()
259 * so make sure the IRQ is mapped appropriately.
260 */
261 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
262 NULL, safe_intr, sc, &sc->sc_ih)) {
263 device_printf(dev, "could not establish interrupt\n");
264 goto bad2;
265 }
266
267 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
268 CRYPTOCAP_F_HARDWARE);
269 if (sc->sc_cid < 0) {
270 device_printf(dev, "could not get crypto driver id\n");
271 goto bad3;
272 }
273
274 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
275 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
276
277 /*
278 * Setup DMA descriptor area.
279 */
280 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
281 1, /* alignment */
282 SAFE_DMA_BOUNDARY, /* boundary */
283 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
284 BUS_SPACE_MAXADDR, /* highaddr */
285 NULL, NULL, /* filter, filterarg */
286 SAFE_MAX_DMA, /* maxsize */
287 SAFE_MAX_PART, /* nsegments */
288 SAFE_MAX_SSIZE, /* maxsegsize */
289 BUS_DMA_ALLOCNOW, /* flags */
290 NULL, NULL, /* locking */
291 &sc->sc_srcdmat)) {
292 device_printf(dev, "cannot allocate DMA tag\n");
293 goto bad4;
294 }
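	/*
	 * The destination tag additionally bounds each segment at
	 * SAFE_MAX_DSIZE bytes; destination particle descriptors are
	 * fixed size (see the SAFE_PE_PARTCFG setup in safe_init_board()).
	 */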
295 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
296 1, /* alignment */
297 SAFE_MAX_DSIZE, /* boundary */
298 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
299 BUS_SPACE_MAXADDR, /* highaddr */
300 NULL, NULL, /* filter, filterarg */
301 SAFE_MAX_DMA, /* maxsize */
302 SAFE_MAX_PART, /* nsegments */
303 SAFE_MAX_DSIZE, /* maxsegsize */
304 BUS_DMA_ALLOCNOW, /* flags */
305 NULL, NULL, /* locking */
306 &sc->sc_dstdmat)) {
307 device_printf(dev, "cannot allocate DMA tag\n");
308 goto bad4;
309 }
310
311 /*
312 * Allocate packet engine descriptors.
313 */
314 if (safe_dma_malloc(sc,
315 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
316 &sc->sc_ringalloc, 0)) {
317 device_printf(dev, "cannot allocate PE descriptor ring\n");
318 bus_dma_tag_destroy(sc->sc_srcdmat);
319 goto bad4;
320 }
321 /*
322 * Hookup the static portion of all our data structures.
323 */
324 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
325 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
326 sc->sc_front = sc->sc_ring;
327 sc->sc_back = sc->sc_ring;
328 raddr = sc->sc_ringalloc.dma_paddr;
329 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
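	/*
	 * Point each PE descriptor at its SA and each SA at its state
	 * record, using bus addresses computed within the ring allocation.
	 */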
330 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
331 struct safe_ringentry *re = &sc->sc_ring[i];
332
333 re->re_desc.d_sa = raddr +
334 offsetof(struct safe_ringentry, re_sa);
335 re->re_sa.sa_staterec = raddr +
336 offsetof(struct safe_ringentry, re_sastate);
337
338 raddr += sizeof (struct safe_ringentry);
339 }
340 mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
341 "packet engine ring", MTX_DEF);
342
343 /*
344 * Allocate scatter and gather particle descriptors.
345 */
346 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
347 &sc->sc_spalloc, 0)) {
348 device_printf(dev, "cannot allocate source particle "
349 "descriptor ring\n");
350 mtx_destroy(&sc->sc_ringmtx);
351 safe_dma_free(sc, &sc->sc_ringalloc);
352 bus_dma_tag_destroy(sc->sc_srcdmat);
353 goto bad4;
354 }
355 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
356 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
357 sc->sc_spfree = sc->sc_spring;
358 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
359
360 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
361 &sc->sc_dpalloc, 0)) {
362 device_printf(dev, "cannot allocate destination particle "
363 "descriptor ring\n");
364 mtx_destroy(&sc->sc_ringmtx);
365 safe_dma_free(sc, &sc->sc_spalloc);
366 safe_dma_free(sc, &sc->sc_ringalloc);
367 bus_dma_tag_destroy(sc->sc_dstdmat);
368 goto bad4;
369 }
370 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
371 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
372 sc->sc_dpfree = sc->sc_dpring;
373 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
374
375 device_printf(sc->sc_dev, "%s", safe_partname(sc));
376
377 sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
378 if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
379 sc->sc_flags |= SAFE_FLAGS_RNG;
380 printf(" rng");
381 }
382 if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
383 #if 0
384 printf(" key");
385 sc->sc_flags |= SAFE_FLAGS_KEY;
386 #endif
387 }
388 if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
389 printf(" des/3des");
390 }
391 if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
392 printf(" aes");
393 }
394 if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
395 printf(" md5");
396 }
397 if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
398 printf(" sha1");
399 }
400 /* XXX other supported algorithms */
401 printf("\n");
402
403 safe_reset_board(sc); /* reset h/w */
404 safe_init_pciregs(dev); /* init pci settings */
405 safe_init_board(sc); /* init h/w */
406
407 #ifndef SAFE_NO_RNG
408 if (sc->sc_flags & SAFE_FLAGS_RNG) {
409 #ifdef SAFE_RNDTEST
410 sc->sc_rndtest = rndtest_attach(dev);
411 if (sc->sc_rndtest)
412 sc->sc_harvest = rndtest_harvest;
413 else
414 sc->sc_harvest = default_harvest;
415 #else
416 sc->sc_harvest = default_harvest;
417 #endif
418 safe_rng_init(sc);
419
420 callout_init(&sc->sc_rngto, 1);
421 callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
422 }
423 #endif /* SAFE_NO_RNG */
424 #ifdef SAFE_DEBUG
425 safec = sc; /* for use by hw.safe.dump */
426 #endif
427 return (0);
428 bad4:
429 crypto_unregister_all(sc->sc_cid);
430 bad3:
431 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
432 bad2:
433 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
434 bad1:
435 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
436 bad:
437 return (ENXIO);
438 }
439
440 /*
441 * Detach a device that successfully probed.
442 */
443 static int
safe_detach(device_t dev)
445 {
446 struct safe_softc *sc = device_get_softc(dev);
447
448 /* XXX wait/abort active ops */
449
450 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
451
452 callout_stop(&sc->sc_rngto);
453
454 crypto_unregister_all(sc->sc_cid);
455
456 #ifdef SAFE_RNDTEST
457 if (sc->sc_rndtest)
458 rndtest_detach(sc->sc_rndtest);
459 #endif
460
461 safe_cleanchip(sc);
462 safe_dma_free(sc, &sc->sc_dpalloc);
463 safe_dma_free(sc, &sc->sc_spalloc);
464 mtx_destroy(&sc->sc_ringmtx);
465 safe_dma_free(sc, &sc->sc_ringalloc);
466
467 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
468 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
469
470 bus_dma_tag_destroy(sc->sc_srcdmat);
471 bus_dma_tag_destroy(sc->sc_dstdmat);
472 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
473
474 return (0);
475 }
476
477 /*
478 * Stop all chip i/o so that the kernel's probe routines don't
479 * get confused by errant DMAs when rebooting.
480 */
481 static int
safe_shutdown(device_t dev)
483 {
484 #ifdef notyet
485 safe_stop(device_get_softc(dev));
486 #endif
487 return (0);
488 }
489
490 /*
491 * Device suspend routine.
492 */
493 static int
safe_suspend(device_t dev)
495 {
496 struct safe_softc *sc = device_get_softc(dev);
497
498 #ifdef notyet
499 /* XXX stop the device and save PCI settings */
500 #endif
501 sc->sc_suspended = 1;
502
503 return (0);
504 }
505
506 static int
safe_resume(device_t dev)
508 {
509 struct safe_softc *sc = device_get_softc(dev);
510
511 #ifdef notyet
	/* XXX restore PCI settings and start the device */
513 #endif
514 sc->sc_suspended = 0;
515 return (0);
516 }
517
518 /*
519 * SafeXcel Interrupt routine
520 */
521 static void
safe_intr(void *arg)
523 {
524 struct safe_softc *sc = arg;
525 volatile u_int32_t stat;
526
527 stat = READ_REG(sc, SAFE_HM_STAT);
528 if (stat == 0) /* shared irq, not for us */
529 return;
530
531 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
532
533 if ((stat & SAFE_INT_PE_DDONE)) {
534 /*
535 * Descriptor(s) done; scan the ring and
536 * process completed operations.
537 */
538 mtx_lock(&sc->sc_ringmtx);
539 while (sc->sc_back != sc->sc_front) {
540 struct safe_ringentry *re = sc->sc_back;
541 #ifdef SAFE_DEBUG
542 if (safe_debug) {
543 safe_dump_ringstate(sc, __func__);
544 safe_dump_request(sc, __func__, re);
545 }
546 #endif
547 /*
548 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero. This ensures the
550 * ring front pointer never needs to be set backwards
551 * in the event that an entry is allocated but not used
552 * because of a setup error.
553 */
554 if (re->re_desc.d_csr != 0) {
555 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
556 break;
557 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
558 break;
559 sc->sc_nqchip--;
560 safe_callback(sc, re);
561 }
562 if (++(sc->sc_back) == sc->sc_ringtop)
563 sc->sc_back = sc->sc_ring;
564 }
565 mtx_unlock(&sc->sc_ringmtx);
566 }
567
568 /*
569 * Check to see if we got any DMA Error
570 */
571 if (stat & SAFE_INT_PE_ERROR) {
572 DPRINTF(("dmaerr dmastat %08x\n",
573 READ_REG(sc, SAFE_PE_DMASTAT)));
574 safestats.st_dmaerr++;
575 safe_totalreset(sc);
576 #if 0
577 safe_feed(sc);
578 #endif
579 }
580
581 if (sc->sc_needwakeup) { /* XXX check high watermark */
582 int wakeup = sc->sc_needwakeup & CRYPTO_SYMQ;
583 DPRINTF(("%s: wakeup crypto %x\n", __func__,
584 sc->sc_needwakeup));
585 sc->sc_needwakeup &= ~wakeup;
586 crypto_unblock(sc->sc_cid, wakeup);
587 }
588 }
589
590 /*
591 * safe_feed() - post a request to chip
592 */
593 static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
595 {
596 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
597 if (re->re_dst_map != NULL)
598 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
599 BUS_DMASYNC_PREREAD);
600 /* XXX have no smaller granularity */
601 safe_dma_sync(&sc->sc_ringalloc,
602 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
603 safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
604 safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);
605
606 #ifdef SAFE_DEBUG
607 if (safe_debug) {
608 safe_dump_ringstate(sc, __func__);
609 safe_dump_request(sc, __func__, re);
610 }
611 #endif
612 sc->sc_nqchip++;
613 if (sc->sc_nqchip > safestats.st_maxqchip)
614 safestats.st_maxqchip = sc->sc_nqchip;
615 /* poke h/w to check descriptor ring, any value can be written */
616 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
617 }
618
619 #define N(a) (sizeof(a) / sizeof (a[0]))
620 static void
safe_setup_enckey(struct safe_session *ses, const void *key)
622 {
623 int i;
624
625 bcopy(key, ses->ses_key, ses->ses_klen);
626
	/* PE is little-endian, ensure proper byte order */
628 for (i = 0; i < N(ses->ses_key); i++)
629 ses->ses_key[i] = htole32(ses->ses_key[i]);
630 }
631
632 static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
634 int klen)
635 {
636 SHA1_CTX sha1ctx;
637 int i;
638
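	/*
	 * Precompute the HMAC inner and outer digest state from the key;
	 * the packet engine resumes hashing from these intermediate values.
	 */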
639 hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
640 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
641
642 hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
643 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
644
645 explicit_bzero(&sha1ctx, sizeof(sha1ctx));
646
	/* PE is little-endian, ensure proper byte order */
648 for (i = 0; i < N(ses->ses_hminner); i++) {
649 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
650 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
651 }
652 }
653 #undef N
654
655 static bool
safe_auth_supported(struct safe_softc *sc,
657 const struct crypto_session_params *csp)
658 {
659
660 switch (csp->csp_auth_alg) {
661 case CRYPTO_SHA1_HMAC:
662 if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
663 return (false);
664 break;
665 default:
666 return (false);
667 }
668 return (true);
669 }
670
671 static bool
safe_cipher_supported(struct safe_softc *sc,
673 const struct crypto_session_params *csp)
674 {
675
676 switch (csp->csp_cipher_alg) {
677 case CRYPTO_AES_CBC:
678 if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
679 return (false);
680 if (csp->csp_ivlen != 16)
681 return (false);
682 if (csp->csp_cipher_klen != 16 &&
683 csp->csp_cipher_klen != 24 &&
684 csp->csp_cipher_klen != 32)
685 return (false);
686 break;
687 }
688 return (true);
689 }
690
691 static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
693 {
694 struct safe_softc *sc = device_get_softc(dev);
695
696 if (csp->csp_flags != 0)
697 return (EINVAL);
698 switch (csp->csp_mode) {
699 case CSP_MODE_DIGEST:
700 if (!safe_auth_supported(sc, csp))
701 return (EINVAL);
702 break;
703 case CSP_MODE_CIPHER:
704 if (!safe_cipher_supported(sc, csp))
705 return (EINVAL);
706 break;
707 case CSP_MODE_ETA:
708 if (!safe_auth_supported(sc, csp) ||
709 !safe_cipher_supported(sc, csp))
710 return (EINVAL);
711 break;
712 default:
713 return (EINVAL);
714 }
715
716 return (CRYPTODEV_PROBE_HARDWARE);
717 }
718
719 /*
720 * Allocate a new 'session'.
721 */
722 static int
safe_newsession(device_t dev, crypto_session_t cses,
724 const struct crypto_session_params *csp)
725 {
726 struct safe_session *ses;
727
728 ses = crypto_get_driver_session(cses);
729 if (csp->csp_cipher_alg != 0) {
730 ses->ses_klen = csp->csp_cipher_klen;
731 if (csp->csp_cipher_key != NULL)
732 safe_setup_enckey(ses, csp->csp_cipher_key);
733 }
734
735 if (csp->csp_auth_alg != 0) {
736 ses->ses_mlen = csp->csp_auth_mlen;
737 if (ses->ses_mlen == 0) {
738 ses->ses_mlen = SHA1_HASH_LEN;
739 }
740
741 if (csp->csp_auth_key != NULL) {
742 safe_setup_mackey(ses, csp->csp_auth_alg,
743 csp->csp_auth_key, csp->csp_auth_klen);
744 }
745 }
746
747 return (0);
748 }
749
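/*
 * bus_dma callback: record the segment count and segment list for an
 * operand (source or destination) of a crypto request.
 */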
750 static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
752 {
753 struct safe_operand *op = arg;
754
755 DPRINTF(("%s: nsegs %d error %d\n", __func__,
756 nsegs, error));
757 if (error != 0)
758 return;
759 op->nsegs = nsegs;
760 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
761 }
762
763 static int
safe_process(device_t dev, struct cryptop *crp, int hint)
765 {
766 struct safe_softc *sc = device_get_softc(dev);
767 const struct crypto_session_params *csp;
768 int err = 0, i, nicealign, uniform;
769 int bypass, oplen;
770 int16_t coffset;
771 struct safe_session *ses;
772 struct safe_ringentry *re;
773 struct safe_sarec *sa;
774 struct safe_pdesc *pd;
775 u_int32_t cmd0, cmd1, staterec;
776
777 mtx_lock(&sc->sc_ringmtx);
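	/*
	 * With work outstanding, front == back means the ring has wrapped
	 * and is full; with sc_nqchip == 0 the same condition means empty.
	 */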
778 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
779 safestats.st_ringfull++;
780 sc->sc_needwakeup |= CRYPTO_SYMQ;
781 mtx_unlock(&sc->sc_ringmtx);
782 return (ERESTART);
783 }
784 re = sc->sc_front;
785
786 staterec = re->re_sa.sa_staterec; /* save */
787 /* NB: zero everything but the PE descriptor */
788 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
789 re->re_sa.sa_staterec = staterec; /* restore */
790
791 re->re_crp = crp;
792
793 sa = &re->re_sa;
794 ses = crypto_get_driver_session(crp->crp_session);
795 csp = crypto_get_params(crp->crp_session);
796
797 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
798 cmd1 = 0;
799 switch (csp->csp_mode) {
800 case CSP_MODE_DIGEST:
801 cmd0 |= SAFE_SA_CMD0_OP_HASH;
802 break;
803 case CSP_MODE_CIPHER:
804 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
805 break;
806 case CSP_MODE_ETA:
807 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
808 break;
809 }
810
811 if (csp->csp_cipher_alg != 0) {
812 if (crp->crp_cipher_key != NULL)
813 safe_setup_enckey(ses, crp->crp_cipher_key);
814
815 switch (csp->csp_cipher_alg) {
816 case CRYPTO_AES_CBC:
817 cmd0 |= SAFE_SA_CMD0_AES;
818 cmd1 |= SAFE_SA_CMD1_CBC;
819 if (ses->ses_klen * 8 == 128)
820 cmd1 |= SAFE_SA_CMD1_AES128;
821 else if (ses->ses_klen * 8 == 192)
822 cmd1 |= SAFE_SA_CMD1_AES192;
823 else
824 cmd1 |= SAFE_SA_CMD1_AES256;
825 }
826
827 /*
828 * Setup encrypt/decrypt state. When using basic ops
829 * we can't use an inline IV because hash/crypt offset
830 * must be from the end of the IV to the start of the
831 * crypt data and this leaves out the preceding header
832 * from the hash calculation. Instead we place the IV
833 * in the state record and set the hash/crypt offset to
834 * copy both the header+IV.
835 */
836 crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
837 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
838
839 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
840 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
841
842 /*
843 * XXX: I suspect we don't need this since we
844 * don't save the returned IV.
845 */
846 cmd0 |= SAFE_SA_CMD0_SAVEIV;
847 } else {
848 cmd0 |= SAFE_SA_CMD0_INBOUND;
849 }
850 /*
851 * For basic encryption use the zero pad algorithm.
852 * This pads results to an 8-byte boundary and
853 * suppresses padding verification for inbound (i.e.
854 * decrypt) operations.
855 *
856 * NB: Not sure if the 8-byte pad boundary is a problem.
857 */
858 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
859
860 /* XXX assert key bufs have the same size */
861 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
862 }
863
864 if (csp->csp_auth_alg != 0) {
865 if (crp->crp_auth_key != NULL) {
866 safe_setup_mackey(ses, csp->csp_auth_alg,
867 crp->crp_auth_key, csp->csp_auth_klen);
868 }
869
870 switch (csp->csp_auth_alg) {
871 case CRYPTO_SHA1_HMAC:
872 cmd0 |= SAFE_SA_CMD0_SHA1;
873 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
874 break;
875 }
876
877 /*
878 * Digest data is loaded from the SA and the hash
879 * result is saved to the state block where we
880 * retrieve it for return to the caller.
881 */
882 /* XXX assert digest bufs have the same size */
883 bcopy(ses->ses_hminner, sa->sa_indigest,
884 sizeof(sa->sa_indigest));
885 bcopy(ses->ses_hmouter, sa->sa_outdigest,
886 sizeof(sa->sa_outdigest));
887
888 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
889 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
890 }
891
892 if (csp->csp_mode == CSP_MODE_ETA) {
893 /*
894 * The driver only supports ETA requests where there
895 * is no gap between the AAD and payload.
896 */
897 if (crp->crp_aad_length != 0 &&
898 crp->crp_aad_start + crp->crp_aad_length !=
899 crp->crp_payload_start) {
900 safestats.st_lenmismatch++;
901 err = EINVAL;
902 goto errout;
903 }
904 if (crp->crp_aad_length != 0)
905 bypass = crp->crp_aad_start;
906 else
907 bypass = crp->crp_payload_start;
908 coffset = crp->crp_aad_length;
909 oplen = crp->crp_payload_start + crp->crp_payload_length;
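		/*
		 * bypass counts bytes the engine skips at the front of the
		 * buffer, coffset is the hash-start to crypt-start distance
		 * (the AAD length), and oplen marks the end of the data
		 * handed to the engine.
		 */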
910 #ifdef SAFE_DEBUG
911 if (safe_debug) {
912 printf("AAD: skip %d, len %d, digest %d\n",
913 crp->crp_aad_start, crp->crp_aad_length,
914 crp->crp_digest_start);
915 printf("payload: skip %d, len %d, IV %d\n",
916 crp->crp_payload_start, crp->crp_payload_length,
917 crp->crp_iv_start);
918 printf("bypass %d coffset %d oplen %d\n",
919 bypass, coffset, oplen);
920 }
921 #endif
922 if (coffset & 3) { /* offset must be 32-bit aligned */
923 DPRINTF(("%s: coffset %u misaligned\n",
924 __func__, coffset));
925 safestats.st_coffmisaligned++;
926 err = EINVAL;
927 goto errout;
928 }
929 coffset >>= 2;
930 if (coffset > 255) { /* offset must be <256 dwords */
931 DPRINTF(("%s: coffset %u too big\n",
932 __func__, coffset));
933 safestats.st_cofftoobig++;
934 err = EINVAL;
935 goto errout;
936 }
937 /*
938 * Tell the hardware to copy the header to the output.
939 * The header is defined as the data from the end of
940 * the bypass to the start of data to be encrypted.
941 * Typically this is the inline IV. Note that you need
942 * to do this even if src+dst are the same; it appears
943 * that w/o this bit the crypted data is written
944 * immediately after the bypass data.
945 */
946 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
947 /*
948 * Disable IP header mutable bit handling. This is
949 * needed to get correct HMAC calculations.
950 */
951 cmd1 |= SAFE_SA_CMD1_MUTABLE;
952 } else {
953 bypass = crp->crp_payload_start;
954 oplen = bypass + crp->crp_payload_length;
955 coffset = 0;
956 }
957 /* XXX verify multiple of 4 when using s/g */
958 if (bypass > 96) { /* bypass offset must be <= 96 bytes */
959 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
960 safestats.st_bypasstoobig++;
961 err = EINVAL;
962 goto errout;
963 }
964
965 if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
966 safestats.st_nomap++;
967 err = ENOMEM;
968 goto errout;
969 }
970 if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
971 &re->re_src, BUS_DMA_NOWAIT) != 0) {
972 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
973 re->re_src_map = NULL;
974 safestats.st_noload++;
975 err = ENOMEM;
976 goto errout;
977 }
978 re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf);
979 nicealign = safe_dmamap_aligned(&re->re_src);
980 uniform = safe_dmamap_uniform(&re->re_src);
981
982 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
983 nicealign, uniform, re->re_src.nsegs));
984 if (re->re_src.nsegs > 1) {
985 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
986 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
987 for (i = 0; i < re->re_src_nsegs; i++) {
988 /* NB: no need to check if there's space */
989 pd = sc->sc_spfree;
990 if (++(sc->sc_spfree) == sc->sc_springtop)
991 sc->sc_spfree = sc->sc_spring;
992
993 KASSERT((pd->pd_flags&3) == 0 ||
994 (pd->pd_flags&3) == SAFE_PD_DONE,
995 ("bogus source particle descriptor; flags %x",
996 pd->pd_flags));
997 pd->pd_addr = re->re_src_segs[i].ds_addr;
998 pd->pd_size = re->re_src_segs[i].ds_len;
999 pd->pd_flags = SAFE_PD_READY;
1000 }
1001 cmd0 |= SAFE_SA_CMD0_IGATHER;
1002 } else {
1003 /*
1004 * No need for gather, reference the operand directly.
1005 */
1006 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
1007 }
1008
1009 if (csp->csp_mode == CSP_MODE_DIGEST) {
1010 /*
1011 * Hash op; no destination needed.
1012 */
1013 } else {
1014 if (nicealign && uniform == 1) {
1015 /*
1016 * Source layout is suitable for direct
1017 * sharing of the DMA map and segment list.
1018 */
1019 re->re_dst = re->re_src;
1020 } else if (nicealign && uniform == 2) {
1021 /*
1022 * The source is properly aligned but requires a
1023 * different particle list to handle DMA of the
1024 * result. Create a new map and do the load to
1025 * create the segment list. The particle
1026 * descriptor setup code below will handle the
1027 * rest.
1028 */
1029 if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
1030 &re->re_dst_map)) {
1031 safestats.st_nomap++;
1032 err = ENOMEM;
1033 goto errout;
1034 }
1035 if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
1036 crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
1037 0) {
1038 bus_dmamap_destroy(sc->sc_dstdmat,
1039 re->re_dst_map);
1040 re->re_dst_map = NULL;
1041 safestats.st_noload++;
1042 err = ENOMEM;
1043 goto errout;
1044 }
1045 } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
1046 int totlen, len;
1047 struct mbuf *m, *top, **mp;
1048
1049 /*
1050 * DMA constraints require that we allocate a
1051 * new mbuf chain for the destination. We
1052 * allocate an entire new set of mbufs of
1053 * optimal/required size and then tell the
1054 * hardware to copy any bits that are not
1055 * created as a byproduct of the operation.
1056 */
1057 if (!nicealign)
1058 safestats.st_unaligned++;
1059 if (!uniform)
1060 safestats.st_notuniform++;
1061 totlen = re->re_src_mapsize;
1062 if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
1063 len = MHLEN;
1064 MGETHDR(m, M_NOWAIT, MT_DATA);
1065 if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf,
1066 M_NOWAIT)) {
1067 m_free(m);
1068 m = NULL;
1069 }
1070 } else {
1071 len = MLEN;
1072 MGET(m, M_NOWAIT, MT_DATA);
1073 }
1074 if (m == NULL) {
1075 safestats.st_nombuf++;
1076 err = sc->sc_nqchip ? ERESTART : ENOMEM;
1077 goto errout;
1078 }
1079 if (totlen >= MINCLSIZE) {
1080 if (!(MCLGET(m, M_NOWAIT))) {
1081 m_free(m);
1082 safestats.st_nomcl++;
1083 err = sc->sc_nqchip ?
1084 ERESTART : ENOMEM;
1085 goto errout;
1086 }
1087 len = MCLBYTES;
1088 }
1089 m->m_len = len;
1090 top = NULL;
	mp = &top;
1092
1093 while (totlen > 0) {
1094 if (top) {
1095 MGET(m, M_NOWAIT, MT_DATA);
1096 if (m == NULL) {
1097 m_freem(top);
1098 safestats.st_nombuf++;
1099 err = sc->sc_nqchip ?
1100 ERESTART : ENOMEM;
1101 goto errout;
1102 }
1103 len = MLEN;
1104 }
1105 if (top && totlen >= MINCLSIZE) {
1106 if (!(MCLGET(m, M_NOWAIT))) {
1107 *mp = m;
1108 m_freem(top);
1109 safestats.st_nomcl++;
1110 err = sc->sc_nqchip ?
1111 ERESTART : ENOMEM;
1112 goto errout;
1113 }
1114 len = MCLBYTES;
1115 }
1116 m->m_len = len = min(totlen, len);
1117 totlen -= len;
1118 *mp = m;
1119 mp = &m->m_next;
1120 }
1121 re->re_dst_m = top;
1122 if (bus_dmamap_create(sc->sc_dstdmat,
1123 BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
1124 safestats.st_nomap++;
1125 err = ENOMEM;
1126 goto errout;
1127 }
1128 if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
1129 re->re_dst_map, top, re->re_dst_segs,
1130 &re->re_dst_nsegs, 0) != 0) {
1131 bus_dmamap_destroy(sc->sc_dstdmat,
1132 re->re_dst_map);
1133 re->re_dst_map = NULL;
1134 safestats.st_noload++;
1135 err = ENOMEM;
1136 goto errout;
1137 }
1138 re->re_dst_mapsize = re->re_src_mapsize;
1139 if (re->re_src.mapsize > oplen) {
1140 /*
1141 * There's data following what the
1142 * hardware will copy for us. If this
1143 * isn't just the ICV (that's going to
1144 * be written on completion), copy it
1145 * to the new mbufs
1146 */
1147 if (!(csp->csp_mode == CSP_MODE_ETA &&
1148 (re->re_src.mapsize-oplen) == ses->ses_mlen &&
1149 crp->crp_digest_start == oplen))
1150 safe_mcopy(crp->crp_buf.cb_mbuf,
1151 re->re_dst_m, oplen);
1152 else
1153 safestats.st_noicvcopy++;
1154 }
1155 } else {
1156 if (!nicealign) {
1157 safestats.st_iovmisaligned++;
1158 err = EINVAL;
1159 goto errout;
1160 } else {
1161 /*
1162 * There's no way to handle the DMA
1163 * requirements with this uio. We
1164 * could create a separate DMA area for
1165 * the result and then copy it back,
1166 * but for now we just bail and return
1167 * an error. Note that uio requests
1168 * > SAFE_MAX_DSIZE are handled because
1169 * the DMA map and segment list for the
				 * destination will result in a
1171 * destination particle list that does
1172 * the necessary scatter DMA.
1173 */
1174 safestats.st_iovnotuniform++;
1175 err = EINVAL;
1176 goto errout;
1177 }
1178 }
1179
1180 if (re->re_dst.nsegs > 1) {
1181 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
1182 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
1183 for (i = 0; i < re->re_dst_nsegs; i++) {
1184 pd = sc->sc_dpfree;
1185 KASSERT((pd->pd_flags&3) == 0 ||
1186 (pd->pd_flags&3) == SAFE_PD_DONE,
1187 ("bogus dest particle descriptor; flags %x",
1188 pd->pd_flags));
1189 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
1190 sc->sc_dpfree = sc->sc_dpring;
1191 pd->pd_addr = re->re_dst_segs[i].ds_addr;
1192 pd->pd_flags = SAFE_PD_READY;
1193 }
1194 cmd0 |= SAFE_SA_CMD0_OSCATTER;
1195 } else {
1196 /*
1197 * No need for scatter, reference the operand directly.
1198 */
1199 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
1200 }
1201 }
1202
1203 /*
1204 * All done with setup; fillin the SA command words
1205 * and the packet engine descriptor. The operation
1206 * is now ready for submission to the hardware.
1207 */
1208 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
1209 sa->sa_cmd1 = cmd1
1210 | (coffset << SAFE_SA_CMD1_OFFSET_S)
1211 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
1212 | SAFE_SA_CMD1_SRPCI
1213 ;
1214 /*
1215 * NB: the order of writes is important here. In case the
1216 * chip is scanning the ring because of an outstanding request
1217 * it might nab this one too. In that case we need to make
1218 * sure the setup is complete before we write the length
1219 * field of the descriptor as it signals the descriptor is
1220 * ready for processing.
1221 */
1222 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
1223 if (csp->csp_auth_alg != 0)
1224 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
1225 re->re_desc.d_len = oplen
1226 | SAFE_PE_LEN_READY
1227 | (bypass << SAFE_PE_LEN_BYPASS_S)
1228 ;
1229
1230 safestats.st_ipackets++;
1231 safestats.st_ibytes += oplen;
1232
1233 if (++(sc->sc_front) == sc->sc_ringtop)
1234 sc->sc_front = sc->sc_ring;
1235
1236 /* XXX honor batching */
1237 safe_feed(sc, re);
1238 mtx_unlock(&sc->sc_ringmtx);
1239 return (0);
1240
1241 errout:
1242 if (re->re_dst_m != NULL)
1243 m_freem(re->re_dst_m);
1244
1245 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1246 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1247 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1248 }
1249 if (re->re_src_map != NULL) {
1250 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1251 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1252 }
1253 mtx_unlock(&sc->sc_ringmtx);
1254 if (err != ERESTART) {
1255 crp->crp_etype = err;
1256 crypto_done(crp);
1257 err = 0;
1258 } else {
1259 sc->sc_needwakeup |= CRYPTO_SYMQ;
1260 }
1261 return (err);
1262 }
1263
1264 static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1266 {
1267 const struct crypto_session_params *csp;
1268 struct cryptop *crp = (struct cryptop *)re->re_crp;
1269 struct safe_session *ses;
1270 uint8_t hash[HASH_MAX_LEN];
1271
1272 ses = crypto_get_driver_session(crp->crp_session);
1273 csp = crypto_get_params(crp->crp_session);
1274
1275 safestats.st_opackets++;
1276 safestats.st_obytes += re->re_dst.mapsize;
1277
1278 safe_dma_sync(&sc->sc_ringalloc,
1279 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1280 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1281 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1282 re->re_desc.d_csr,
1283 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1284 safestats.st_peoperr++;
1285 crp->crp_etype = EIO; /* something more meaningful? */
1286 }
1287
1288 /*
1289 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if
1290 * it is non-NULL?
1291 */
1292
1293 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1294 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
1295 BUS_DMASYNC_POSTREAD);
1296 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1297 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1298 }
1299 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
1300 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1301 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1302
1303 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1304 if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
1305 /*
1306 * SHA-1 ICV's are byte-swapped; fix 'em up
1307 * before copying them to their destination.
1308 */
1309 re->re_sastate.sa_saved_indigest[0] =
1310 bswap32(re->re_sastate.sa_saved_indigest[0]);
1311 re->re_sastate.sa_saved_indigest[1] =
1312 bswap32(re->re_sastate.sa_saved_indigest[1]);
1313 re->re_sastate.sa_saved_indigest[2] =
1314 bswap32(re->re_sastate.sa_saved_indigest[2]);
1315 }
1316
1317 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
1318 crypto_copydata(crp, crp->crp_digest_start,
1319 ses->ses_mlen, hash);
1320 if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
1321 hash, ses->ses_mlen) != 0)
1322 crp->crp_etype = EBADMSG;
1323 } else
1324 crypto_copyback(crp, crp->crp_digest_start,
1325 ses->ses_mlen, re->re_sastate.sa_saved_indigest);
1326 }
1327 crypto_done(crp);
1328 }
1329
1330 /*
1331 * Copy all data past offset from srcm to dstm.
1332 */
1333 static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
1335 {
1336 u_int j, dlen, slen;
1337 caddr_t dptr, sptr;
1338
1339 /*
1340 * Advance src and dst to offset.
1341 */
1342 j = offset;
1343 while (j >= srcm->m_len) {
1344 j -= srcm->m_len;
1345 srcm = srcm->m_next;
1346 if (srcm == NULL)
1347 return;
1348 }
1349 sptr = mtod(srcm, caddr_t) + j;
1350 slen = srcm->m_len - j;
1351
1352 j = offset;
1353 while (j >= dstm->m_len) {
1354 j -= dstm->m_len;
1355 dstm = dstm->m_next;
1356 if (dstm == NULL)
1357 return;
1358 }
1359 dptr = mtod(dstm, caddr_t) + j;
1360 dlen = dstm->m_len - j;
1361
1362 /*
1363 * Copy everything that remains.
1364 */
1365 for (;;) {
1366 j = min(slen, dlen);
1367 bcopy(sptr, dptr, j);
1368 if (slen == j) {
1369 srcm = srcm->m_next;
1370 if (srcm == NULL)
1371 return;
1372 sptr = srcm->m_data;
1373 slen = srcm->m_len;
1374 } else
1375 sptr += j, slen -= j;
1376 if (dlen == j) {
1377 dstm = dstm->m_next;
1378 if (dstm == NULL)
1379 return;
1380 dptr = dstm->m_data;
1381 dlen = dstm->m_len;
1382 } else
1383 dptr += j, dlen -= j;
1384 }
1385 }
1386
1387 #ifndef SAFE_NO_RNG
1388 #define SAFE_RNG_MAXWAIT 1000
1389
1390 static void
safe_rng_init(struct safe_softc *sc)
1392 {
1393 u_int32_t w, v;
1394 int i;
1395
1396 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1397 /* use default value according to the manual */
1398 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
1399 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1400
1401 /*
1402 * There is a bug in rev 1.0 of the 1140 that when the RNG
1403 * is brought out of reset the ready status flag does not
1404 * work until the RNG has finished its internal initialization.
1405 *
1406 * So in order to determine the device is through its
1407 * initialization we must read the data register, using the
1408 * status reg in the read in case it is initialized. Then read
1409 * the data register until it changes from the first read.
1410 * Once it changes read the data register until it changes
1411 * again. At this time the RNG is considered initialized.
	 * This can take between 750ms and 1000ms.
1413 */
1414 i = 0;
1415 w = READ_REG(sc, SAFE_RNG_OUT);
1416 do {
1417 v = READ_REG(sc, SAFE_RNG_OUT);
1418 if (v != w) {
1419 w = v;
1420 break;
1421 }
1422 DELAY(10);
1423 } while (++i < SAFE_RNG_MAXWAIT);
1424
	/* Wait until data changes again */
1426 i = 0;
1427 do {
1428 v = READ_REG(sc, SAFE_RNG_OUT);
1429 if (v != w)
1430 break;
1431 DELAY(10);
1432 } while (++i < SAFE_RNG_MAXWAIT);
1433 }
1434
1435 static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
1437 {
1438 WRITE_REG(sc, SAFE_RNG_CTRL,
1439 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
1440 }
1441
1442 static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
1444 {
1445 WRITE_REG(sc, SAFE_RNG_CTRL,
1446 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
1447 }
1448
1449 static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
1451 {
1452 int i;
1453
1454 i = 0;
1455 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1456 ;
1457 return READ_REG(sc, SAFE_RNG_OUT);
1458 }
1459
1460 static void
safe_rng(void *arg)
1462 {
1463 struct safe_softc *sc = arg;
1464 u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */
1465 u_int maxwords;
1466 int i;
1467
1468 safestats.st_rng++;
1469 /*
1470 * Fetch the next block of data.
1471 */
1472 maxwords = safe_rngbufsize;
1473 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1474 maxwords = SAFE_RNG_MAXBUFSIZ;
1475 retry:
1476 for (i = 0; i < maxwords; i++)
1477 buf[i] = safe_rng_read(sc);
1478 /*
1479 * Check the comparator alarm count and reset the h/w if
1480 * it exceeds our threshold. This guards against the
1481 * hardware oscillators resonating with external signals.
1482 */
1483 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1484 u_int32_t freq_inc, w;
1485
1486 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1487 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1488 safestats.st_rngalarm++;
1489 safe_rng_enable_short_cycle(sc);
1490 freq_inc = 18;
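		/*
		 * Step the oscillator frequency setting (the low bits of
		 * SAFE_RNG_CNFG) and retest until a pass leaves the alarm
		 * count at zero.
		 */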
1491 for (i = 0; i < 64; i++) {
1492 w = READ_REG(sc, SAFE_RNG_CNFG);
1493 freq_inc = ((w + freq_inc) & 0x3fL);
1494 w = ((w & ~0x3fL) | freq_inc);
1495 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1496
1497 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1498
1499 (void) safe_rng_read(sc);
1500 DELAY(25);
1501
1502 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1503 safe_rng_disable_short_cycle(sc);
1504 goto retry;
1505 }
1506 freq_inc = 1;
1507 }
1508 safe_rng_disable_short_cycle(sc);
1509 } else
1510 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1511
1512 (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
1513 callout_reset(&sc->sc_rngto,
1514 hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
1515 }
1516 #endif /* SAFE_NO_RNG */
1517
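/*
 * bus_dma callback used by safe_dma_malloc() to capture the bus address
 * of its single-segment mapping.
 */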
1518 static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1520 {
1521 bus_addr_t *paddr = (bus_addr_t*) arg;
1522 *paddr = segs->ds_addr;
1523 }
1524
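/*
 * Allocate a DMA-able region of the requested size as a single segment
 * and record both the kernel virtual address and the bus address in *dma.
 */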
1525 static int
safe_dma_malloc(
1527 struct safe_softc *sc,
1528 bus_size_t size,
1529 struct safe_dma_alloc *dma,
1530 int mapflags
1531 )
1532 {
1533 int r;
1534
1535 r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1536 sizeof(u_int32_t), 0, /* alignment, bounds */
1537 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1538 BUS_SPACE_MAXADDR, /* highaddr */
1539 NULL, NULL, /* filter, filterarg */
1540 size, /* maxsize */
1541 1, /* nsegments */
1542 size, /* maxsegsize */
1543 BUS_DMA_ALLOCNOW, /* flags */
1544 NULL, NULL, /* locking */
1545 &dma->dma_tag);
1546 if (r != 0) {
1547 device_printf(sc->sc_dev, "safe_dma_malloc: "
1548 "bus_dma_tag_create failed; error %u\n", r);
1549 goto fail_0;
1550 }
1551
1552 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1553 BUS_DMA_NOWAIT, &dma->dma_map);
1554 if (r != 0) {
1555 device_printf(sc->sc_dev, "safe_dma_malloc: "
1556 "bus_dmammem_alloc failed; size %ju, error %u\n",
1557 (uintmax_t)size, r);
1558 goto fail_1;
1559 }
1560
1561 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1562 size,
1563 safe_dmamap_cb,
1564 &dma->dma_paddr,
1565 mapflags | BUS_DMA_NOWAIT);
1566 if (r != 0) {
1567 device_printf(sc->sc_dev, "safe_dma_malloc: "
1568 "bus_dmamap_load failed; error %u\n", r);
1569 goto fail_2;
1570 }
1571
1572 dma->dma_size = size;
1573 return (0);
1574
1575 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1576 fail_2:
1577 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1578 fail_1:
1579 bus_dma_tag_destroy(dma->dma_tag);
1580 fail_0:
1581 dma->dma_tag = NULL;
1582 return (r);
1583 }
1584
1585 static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
1587 {
1588 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1589 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1590 bus_dma_tag_destroy(dma->dma_tag);
1591 }
1592
1593 /*
 * Resets the board. Values in the registers are left as is
1595 * from the reset (i.e. initial values are assigned elsewhere).
1596 */
1597 static void
safe_reset_board(struct safe_softc *sc)
1599 {
1600 u_int32_t v;
1601 /*
1602 * Reset the device. The manual says no delay
1603 * is needed between marking and clearing reset.
1604 */
1605 v = READ_REG(sc, SAFE_PE_DMACFG) &~
1606 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
1607 SAFE_PE_DMACFG_SGRESET);
1608 WRITE_REG(sc, SAFE_PE_DMACFG, v
1609 | SAFE_PE_DMACFG_PERESET
1610 | SAFE_PE_DMACFG_PDRRESET
1611 | SAFE_PE_DMACFG_SGRESET);
1612 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1613 }
1614
1615 /*
1616 * Initialize registers we need to touch only once.
1617 */
1618 static void
safe_init_board(struct safe_softc *sc)
1620 {
1621 u_int32_t v, dwords;
1622
1623 v = READ_REG(sc, SAFE_PE_DMACFG);
1624 v &=~ SAFE_PE_DMACFG_PEMODE;
1625 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1626 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1627 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1628 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1629 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1630 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1631 ;
1632 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1633 #if 0
1634 /* XXX select byte swap based on host byte order */
1635 WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
1636 #endif
1637 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1638 /*
1639 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1640 * "target mode transfers" done while the chip is DMA'ing
1641 * >1020 bytes cause the hardware to lockup. To avoid this
1642 * we reduce the max PCI transfer size and use small source
1643 * particle descriptors (<= 256 bytes).
1644 */
1645 WRITE_REG(sc, SAFE_DMA_CFG, 256);
1646 device_printf(sc->sc_dev,
1647 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1648 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1649 SAFE_REV_MAJ(sc->sc_chiprev),
1650 SAFE_REV_MIN(sc->sc_chiprev));
1651 }
1652
1653 /* NB: operands+results are overlaid */
1654 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1655 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1656 /*
1657 * Configure ring entry size and number of items in the ring.
1658 */
1659 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1660 ("PE ring entry not 32-bit aligned!"));
1661 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1662 WRITE_REG(sc, SAFE_PE_RINGCFG,
1663 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
1664 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1665
1666 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1667 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1668 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1669 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1670 /*
1671 * NB: destination particles are fixed size. We use
1672 * an mbuf cluster and require all results go to
1673 * clusters or smaller.
1674 */
1675 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1676
1677 /* it's now safe to enable PE mode, do it */
1678 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1679
1680 /*
1681 * Configure hardware to use level-triggered interrupts and
1682 * to interrupt after each descriptor is processed.
1683 */
1684 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1685 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1686 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1687 }
1688
1689 /*
1690 * Init PCI registers
1691 */
1692 static void
safe_init_pciregs(device_t dev)
1694 {
1695 }
1696
1697 /*
1698 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
1700 */
1701 static void
safe_cleanchip(struct safe_softc *sc)
1703 {
1704
1705 if (sc->sc_nqchip != 0) {
1706 struct safe_ringentry *re = sc->sc_back;
1707
1708 while (re != sc->sc_front) {
1709 if (re->re_desc.d_csr != 0)
1710 safe_free_entry(sc, re);
1711 if (++re == sc->sc_ringtop)
1712 re = sc->sc_ring;
1713 }
1714 sc->sc_back = re;
1715 sc->sc_nqchip = 0;
1716 }
1717 }
1718
1719 /*
1720 * free a safe_q
1721 * It is assumed that the caller is within splimp().
1722 */
1723 static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1725 {
1726 struct cryptop *crp;
1727
1728 /*
1729 * Free header MCR
1730 */
1731 if (re->re_dst_m != NULL)
1732 m_freem(re->re_dst_m);
1733
1734 crp = (struct cryptop *)re->re_crp;
1735
1736 re->re_desc.d_csr = 0;
1737
1738 crp->crp_etype = EFAULT;
1739 crypto_done(crp);
1740 return(0);
1741 }
1742
1743 /*
1744 * Routine to reset the chip and clean up.
1745 * It is assumed that the caller is in splimp()
1746 */
1747 static void
safe_totalreset(struct safe_softc *sc)
1749 {
1750 safe_reset_board(sc);
1751 safe_init_board(sc);
1752 safe_cleanchip(sc);
1753 }
1754
1755 /*
 * Is the operand suitably aligned for direct DMA. Each
1757 * segment must be aligned on a 32-bit boundary and all
1758 * but the last segment must be a multiple of 4 bytes.
1759 */
1760 static int
safe_dmamap_aligned(const struct safe_operand *op)
1762 {
1763 int i;
1764
1765 for (i = 0; i < op->nsegs; i++) {
1766 if (op->segs[i].ds_addr & 3)
1767 return (0);
1768 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
1769 return (0);
1770 }
1771 return (1);
1772 }
1773
1774 /*
1775 * Is the operand suitable for direct DMA as the destination
1776 * of an operation. The hardware requires that each ``particle''
1777 * but the last in an operation result have the same size. We
1778 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
1779 * 0 if some segment is not a multiple of this size, 1 if all
1780 * segments are exactly this size, or 2 if segments are at worst
1781 * a multiple of this size.
1782 */
1783 static int
safe_dmamap_uniform(const struct safe_operand *op)
1785 {
1786 int result = 1;
1787
1788 if (op->nsegs > 0) {
1789 int i;
1790
1791 for (i = 0; i < op->nsegs-1; i++) {
1792 if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
1793 return (0);
1794 if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
1795 result = 2;
1796 }
1797 }
1798 return (result);
1799 }
1800
1801 #ifdef SAFE_DEBUG
1802 static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
1804 {
1805 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
1806 , tag
1807 , READ_REG(sc, SAFE_DMA_ENDIAN)
1808 , READ_REG(sc, SAFE_DMA_SRCADDR)
1809 , READ_REG(sc, SAFE_DMA_DSTADDR)
1810 , READ_REG(sc, SAFE_DMA_STAT)
1811 );
1812 }
1813
1814 static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
1816 {
1817 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
1818 , tag
1819 , READ_REG(sc, SAFE_HI_CFG)
1820 , READ_REG(sc, SAFE_HI_MASK)
1821 , READ_REG(sc, SAFE_HI_DESC_CNT)
1822 , READ_REG(sc, SAFE_HU_STAT)
1823 , READ_REG(sc, SAFE_HM_STAT)
1824 );
1825 }
1826
1827 static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
1829 {
1830 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
1831
1832 /* NB: assume caller has lock on ring */
1833 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
1834 tag,
1835 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
1836 (unsigned long)(sc->sc_back - sc->sc_ring),
1837 (unsigned long)(sc->sc_front - sc->sc_ring));
1838 }
1839
1840 static void
safe_dump_request(struct safe_softc *sc, const char *tag, struct safe_ringentry *re)
1842 {
1843 int ix, nsegs;
1844
1845 ix = re - sc->sc_ring;
1846 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
1847 , tag
1848 , re, ix
1849 , re->re_desc.d_csr
1850 , re->re_desc.d_src
1851 , re->re_desc.d_dst
1852 , re->re_desc.d_sa
1853 , re->re_desc.d_len
1854 );
1855 if (re->re_src.nsegs > 1) {
1856 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
1857 sizeof(struct safe_pdesc);
1858 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
1859 printf(" spd[%u] %p: %p size %u flags %x"
1860 , ix, &sc->sc_spring[ix]
1861 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
1862 , sc->sc_spring[ix].pd_size
1863 , sc->sc_spring[ix].pd_flags
1864 );
1865 if (sc->sc_spring[ix].pd_size == 0)
1866 printf(" (zero!)");
1867 printf("\n");
1868 if (++ix == SAFE_TOTAL_SPART)
1869 ix = 0;
1870 }
1871 }
1872 if (re->re_dst.nsegs > 1) {
1873 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
1874 sizeof(struct safe_pdesc);
1875 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
1876 printf(" dpd[%u] %p: %p flags %x\n"
1877 , ix, &sc->sc_dpring[ix]
1878 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
1879 , sc->sc_dpring[ix].pd_flags
1880 );
1881 if (++ix == SAFE_TOTAL_DPART)
1882 ix = 0;
1883 }
1884 }
1885 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
1886 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
1887 printf("sa: key %x %x %x %x %x %x %x %x\n"
1888 , re->re_sa.sa_key[0]
1889 , re->re_sa.sa_key[1]
1890 , re->re_sa.sa_key[2]
1891 , re->re_sa.sa_key[3]
1892 , re->re_sa.sa_key[4]
1893 , re->re_sa.sa_key[5]
1894 , re->re_sa.sa_key[6]
1895 , re->re_sa.sa_key[7]
1896 );
1897 printf("sa: indigest %x %x %x %x %x\n"
1898 , re->re_sa.sa_indigest[0]
1899 , re->re_sa.sa_indigest[1]
1900 , re->re_sa.sa_indigest[2]
1901 , re->re_sa.sa_indigest[3]
1902 , re->re_sa.sa_indigest[4]
1903 );
1904 printf("sa: outdigest %x %x %x %x %x\n"
1905 , re->re_sa.sa_outdigest[0]
1906 , re->re_sa.sa_outdigest[1]
1907 , re->re_sa.sa_outdigest[2]
1908 , re->re_sa.sa_outdigest[3]
1909 , re->re_sa.sa_outdigest[4]
1910 );
1911 printf("sr: iv %x %x %x %x\n"
1912 , re->re_sastate.sa_saved_iv[0]
1913 , re->re_sastate.sa_saved_iv[1]
1914 , re->re_sastate.sa_saved_iv[2]
1915 , re->re_sastate.sa_saved_iv[3]
1916 );
1917 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
1918 , re->re_sastate.sa_saved_hashbc
1919 , re->re_sastate.sa_saved_indigest[0]
1920 , re->re_sastate.sa_saved_indigest[1]
1921 , re->re_sastate.sa_saved_indigest[2]
1922 , re->re_sastate.sa_saved_indigest[3]
1923 , re->re_sastate.sa_saved_indigest[4]
1924 );
1925 }
1926
1927 static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
1929 {
1930 mtx_lock(&sc->sc_ringmtx);
1931 printf("\nSafeNet Ring State:\n");
1932 safe_dump_intrstate(sc, tag);
1933 safe_dump_dmastatus(sc, tag);
1934 safe_dump_ringstate(sc, tag);
1935 if (sc->sc_nqchip) {
1936 struct safe_ringentry *re = sc->sc_back;
1937 do {
1938 safe_dump_request(sc, tag, re);
1939 if (++re == sc->sc_ringtop)
1940 re = sc->sc_ring;
1941 } while (re != sc->sc_front);
1942 }
1943 mtx_unlock(&sc->sc_ringmtx);
1944 }
1945
1946 static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
1948 {
1949 char dmode[64];
1950 int error;
1951
1952 strncpy(dmode, "", sizeof(dmode) - 1);
1953 dmode[sizeof(dmode) - 1] = '\0';
1954 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);
1955
1956 if (error == 0 && req->newptr != NULL) {
1957 struct safe_softc *sc = safec;
1958
1959 if (!sc)
1960 return EINVAL;
1961 if (strncmp(dmode, "dma", 3) == 0)
1962 safe_dump_dmastatus(sc, "safe0");
1963 else if (strncmp(dmode, "int", 3) == 0)
1964 safe_dump_intrstate(sc, "safe0");
1965 else if (strncmp(dmode, "ring", 4) == 0)
1966 safe_dump_ring(sc, "safe0");
1967 else
1968 return EINVAL;
1969 }
1970 return error;
1971 }
1972 SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
1973 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
1974 sysctl_hw_safe_dump, "A",
1975 "Dump driver state");
1976 #endif /* SAFE_DEBUG */
1977