/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for garbage random_source API, which lacks any
 * context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
	{ 0x15df1022, "AMD CCP-5a" },
};

static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
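 * Each entry in the resulting sglist(9) list describes a physically
 * contiguous segment of the buffer.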
 */
static int
ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	return (0);
}

static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	mtx_destroy(&sc->lock);
	return (0);
}

static void
ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
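	 * This is the standard HMAC treatment of long keys (RFC 2104).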
	 */
	axf = s->hmac.auth_hash;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

static bool
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen * 8) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (false);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	if (alg == CRYPTO_AES_XTS)
		kbits = (klen / 2) * 8;
	else
		kbits = klen * 8;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

static bool
ccp_auth_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_SHA) == 0)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
ccp_cipher_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_AES) == 0)
		return (false);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}
	return (ccp_aes_check_keylen(csp->csp_cipher_alg,
	    csp->csp_cipher_klen));
}

static int
ccp_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;

	if (csp->csp_flags != 0)
		return (EINVAL);
	sc = device_get_softc(dev);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!ccp_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (EINVAL);
			if (csp->csp_auth_mlen < 0 ||
			    csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
				return (EINVAL);
			if ((sc->hw_features & VERSION_CAP_AES) == 0)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!ccp_auth_supported(sc, csp) ||
		    !ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

static int
ccp_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode;
	unsigned q;

	/* XXX reconcile auth_mode with use by ccp_sha */
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_hash = &auth_hash_hmac_sha1;
		auth_mode = SHA1;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_hash = &auth_hash_hmac_sha2_256;
		auth_mode = SHA2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_hash = &auth_hash_hmac_sha2_384;
		auth_mode = SHA2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		auth_hash = &auth_hash_hmac_sha2_512;
		auth_mode = SHA2_512;
		break;
	default:
		auth_hash = NULL;
		auth_mode = 0;
		break;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		cipher_mode = CCP_AES_MODE_CBC;
		break;
	case CRYPTO_AES_ICM:
		cipher_mode = CCP_AES_MODE_CTR;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		cipher_mode = CCP_AES_MODE_GCTR;
		break;
	case CRYPTO_AES_XTS:
		cipher_mode = CCP_AES_MODE_XTS;
		break;
	default:
		cipher_mode = CCP_AES_MODE_ECB;
		break;
	}

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
		s->mode = GCM;
		break;
	case CSP_MODE_ETA:
		s->mode = AUTHENC;
		break;
	case CSP_MODE_DIGEST:
		s->mode = HMAC;
		break;
	case CSP_MODE_CIPHER:
		s->mode = BLKCIPHER;
		break;
	}

	if (s->mode == GCM) {
		if (csp->csp_auth_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = csp->csp_auth_mlen;
	} else if (auth_hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		if (csp->csp_auth_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = csp->csp_auth_mlen;
		ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen);
	}
	if (cipher_mode != CCP_AES_MODE_ECB) {
		s->blkcipher.cipher_mode = cipher_mode;
		if (csp->csp_cipher_key != NULL)
			ccp_aes_setkey(s, csp->csp_cipher_alg,
			    csp->csp_cipher_key, csp->csp_cipher_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	return (0);
}

static void
ccp_freesession(device_t dev, crypto_session_t cses)
{
	struct ccp_session *s;

	s = crypto_get_driver_session(cses);

	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
}

static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;

	csp = crypto_get_params(crp->crp_session);
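	/*
	 * Find the queue assigned to this session and reserve ring space
	 * before constructing the request.
	 */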
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf);
	if (error != 0)
		goto out;

	if (crp->crp_auth_key != NULL) {
		KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC"));
		ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen);
	}
	if (crp->crp_cipher_key != NULL)
		ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	switch (s->mode) {
	case HMAC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_authenc(qp, s, crp);
		break;
	case GCM:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_gcm(qp, s, crp);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe, ccp_probe),
	DEVMETHOD(device_attach, ccp_attach),
	DEVMETHOD(device_detach, ccp_detach),

	DEVMETHOD(cryptodev_probesession, ccp_probesession),
	DEVMETHOD(cryptodev_newsession, ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process, ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
#if 0	/* There are enough known issues that we shouldn't load automatically */
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids,
    nitems(ccp_ids));
#endif

static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
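	/* Ring the doorbell only if new descriptors were queued. */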
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}

void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */