/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

/*
 * The data layout description when integrity verification is configured.
 *
 * One of the most important assumptions here is that authenticated data and
 * its HMAC have to be stored in the same place (namely in the same sector)
 * to make it work reliably.
 * The problem is that file systems work only with sector sizes that are
 * multiples of 512 bytes and powers of two.
 * My idea is to implement it as follows.
 * Let's store the HMAC in the sector. This is a must. This leaves us 480
 * bytes for data. We cannot use that directly (i.e. we cannot create a
 * provider with a 480-byte sector size). We need another sector from which
 * we take only 32 bytes of data and store the HMAC of this data as well.
 * This takes two sectors from the original provider at the input and leaves
 * us one sector of authenticated data at the output. Not very efficient,
 * but you get the idea.
 * Now, let's assume we want to create a provider with 4096-byte sectors.
 * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we
 * need nine 512-byte sectors at the input to get one 4096-byte sector at the
 * output. That's better. With 4096-byte sectors we can use 89% of the size
 * of the original provider. I find that an acceptable cost.
 * The reliability comes from the fact that every HMAC stored inside the
 * sector is calculated only for the data in the same sector, so it is
 * impossible to write new data and leave the old HMAC, or vice versa.
 *
 * And here is the picture:
 *
 * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b |
 *      |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data |
 *      +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes |
 *      +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused|
 *                                                                                                      +----------+
 * da0.eli: +----+----+----+----+----+----+----+----+----+
 *          |480b|480b|480b|480b|480b|480b|480b|480b|256b|
 *          +----+----+----+----+----+----+----+----+----+
 *          |                 4096 bytes                 |
 *          +--------------------------------------------+
 *
 * PS. You can use any sector size with geli(8). My example uses 4kB, because
 * it's the most efficient. For 8kB sectors you need two extra sectors, so
 * the cost is the same as for 4kB sectors.
 */
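
/*
 * Illustrative sketch (not part of the original code): the sector arithmetic
 * described above.  The helper below and its name are hypothetical; in the
 * driver the corresponding values are precomputed into sc_data_per_sector
 * and sc_bytes_per_sector when the provider is configured.
 */
static __inline u_int
g_eli_auth_sectors_needed(u_int decr_secsize, u_int encr_secsize, u_int alen)
{
	u_int data_secsize;

	/* Payload bytes stored next to the HMAC in one raw sector. */
	data_secsize = encr_secsize - alen;
	/* Round up: the last raw sector of each group is only partially filled. */
	return (howmany(decr_secsize, data_secsize));
}
/*
 * For example, with 512-byte raw sectors and a 32-byte HMAC,
 * g_eli_auth_sectors_needed(4096, 512, 32) == 9, so one 4096-byte sector of
 * the .eli provider occupies 9 * 512 = 4608 bytes (~89% efficiency).
 */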

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

MALLOC_DECLARE(M_ELI);

/*
 * Here we generate the key for the HMAC. Every sector has its own HMAC key,
 * so it is not possible to copy sectors.
 * We cannot depend on the fact that every sector has its own IV, because a
 * different IV doesn't change the HMAC when the encrypt-then-authenticate
 * method is used.
 */
static void
g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key)
{
	SHA256_CTX ctx;

	/* Copy precalculated SHA256 context. */
	bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx));
	SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	SHA256_Final(key, &ctx);
}

/*
 * This function is called after we read and decrypt data.
 *
 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
 */
static int
g_eli_auth_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		bp->bio_completed += crp->crp_olen;
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).",
		    bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen, (intmax_t)bp->bio_completed);
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	g_eli_key_drop(sc, crp->crp_desc->crd_next->crd_key);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error == 0) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		u_char *srcdata, *dstdata, *auth;
		off_t coroff, corsize;

		/*
		 * Verify data integrity based on calculated and read HMACs.
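		 *
		 * For BIO_READ, g_eli_auth_run() saves each sector's on-disk
		 * HMAC in the auth array and the crypto layer stores the
		 * freshly computed HMAC in the first sc_alen bytes of the
		 * sector (crd_inject = 0), so the bcmp() below compares the
		 * computed HMAC with the one read from disk.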
166 */ 167 /* Sectorsize of decrypted provider eg. 4096. */ 168 decr_secsize = bp->bio_to->sectorsize; 169 /* The real sectorsize of encrypted provider, eg. 512. */ 170 encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 171 /* Number of data bytes in one encrypted sector, eg. 480. */ 172 data_secsize = sc->sc_data_per_sector; 173 /* Number of sectors from decrypted provider, eg. 2. */ 174 nsec = bp->bio_length / decr_secsize; 175 /* Number of sectors from encrypted provider, eg. 18. */ 176 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 177 /* Last sector number in every big sector, eg. 9. */ 178 lsec = sc->sc_bytes_per_sector / encr_secsize; 179 180 srcdata = bp->bio_driver2; 181 dstdata = bp->bio_data; 182 auth = srcdata + encr_secsize * nsec; 183 coroff = -1; 184 corsize = 0; 185 186 for (i = 1; i <= nsec; i++) { 187 data_secsize = sc->sc_data_per_sector; 188 if ((i % lsec) == 0) 189 data_secsize = decr_secsize % data_secsize; 190 if (bcmp(srcdata, auth, sc->sc_alen) != 0) { 191 /* 192 * Curruption detected, remember the offset if 193 * this is the first corrupted sector and 194 * increase size. 195 */ 196 if (bp->bio_error == 0) 197 bp->bio_error = -1; 198 if (coroff == -1) { 199 coroff = bp->bio_offset + 200 (dstdata - (u_char *)bp->bio_data); 201 } 202 corsize += data_secsize; 203 } else { 204 /* 205 * No curruption, good. 206 * Report previous corruption if there was one. 207 */ 208 if (coroff != -1) { 209 G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " 210 "bytes of data at offset %jd.", 211 sc->sc_name, (intmax_t)corsize, 212 (intmax_t)coroff); 213 coroff = -1; 214 corsize = 0; 215 } 216 bcopy(srcdata + sc->sc_alen, dstdata, 217 data_secsize); 218 } 219 srcdata += encr_secsize; 220 dstdata += data_secsize; 221 auth += sc->sc_alen; 222 } 223 /* Report previous corruption if there was one. */ 224 if (coroff != -1) { 225 G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " 226 "bytes of data at offset %jd.", 227 sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); 228 } 229 } 230 free(bp->bio_driver2, M_ELI); 231 bp->bio_driver2 = NULL; 232 if (bp->bio_error != 0) { 233 if (bp->bio_error == -1) 234 bp->bio_error = EINVAL; 235 else { 236 G_ELI_LOGREQ(0, bp, 237 "Crypto READ request failed (error=%d).", 238 bp->bio_error); 239 } 240 bp->bio_completed = 0; 241 } 242 /* 243 * Read is finished, send it up. 244 */ 245 g_io_deliver(bp, bp->bio_error); 246 atomic_subtract_int(&sc->sc_inflight, 1); 247 return (0); 248 } 249 250 /* 251 * The function is called after data encryption. 252 * 253 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver 254 */ 255 static int 256 g_eli_auth_write_done(struct cryptop *crp) 257 { 258 struct g_eli_softc *sc; 259 struct g_consumer *cp; 260 struct bio *bp, *cbp, *cbp2; 261 u_int nsec; 262 263 if (crp->crp_etype == EAGAIN) { 264 if (g_eli_crypto_rerun(crp) == 0) 265 return (0); 266 } 267 bp = (struct bio *)crp->crp_opaque; 268 bp->bio_inbed++; 269 if (crp->crp_etype == 0) { 270 G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", 271 bp->bio_inbed, bp->bio_children); 272 } else { 273 G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", 274 bp->bio_inbed, bp->bio_children, crp->crp_etype); 275 if (bp->bio_error == 0) 276 bp->bio_error = crp->crp_etype; 277 } 278 sc = bp->bio_to->geom->softc; 279 g_eli_key_drop(sc, crp->crp_desc->crd_key); 280 /* 281 * All sectors are already encrypted? 
282 */ 283 if (bp->bio_inbed < bp->bio_children) 284 return (0); 285 if (bp->bio_error != 0) { 286 G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", 287 bp->bio_error); 288 free(bp->bio_driver2, M_ELI); 289 bp->bio_driver2 = NULL; 290 cbp = bp->bio_driver1; 291 bp->bio_driver1 = NULL; 292 g_destroy_bio(cbp); 293 g_io_deliver(bp, bp->bio_error); 294 atomic_subtract_int(&sc->sc_inflight, 1); 295 return (0); 296 } 297 cp = LIST_FIRST(&sc->sc_geom->consumer); 298 cbp = bp->bio_driver1; 299 bp->bio_driver1 = NULL; 300 cbp->bio_to = cp->provider; 301 cbp->bio_done = g_eli_write_done; 302 303 /* Number of sectors from decrypted provider, eg. 1. */ 304 nsec = bp->bio_length / bp->bio_to->sectorsize; 305 /* Number of sectors from encrypted provider, eg. 9. */ 306 nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; 307 308 cbp->bio_length = cp->provider->sectorsize * nsec; 309 cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 310 cbp->bio_data = bp->bio_driver2; 311 312 /* 313 * We write more than what is requested, so we have to be ready to write 314 * more than MAXPHYS. 315 */ 316 cbp2 = NULL; 317 if (cbp->bio_length > MAXPHYS) { 318 cbp2 = g_duplicate_bio(bp); 319 cbp2->bio_length = cbp->bio_length - MAXPHYS; 320 cbp2->bio_data = cbp->bio_data + MAXPHYS; 321 cbp2->bio_offset = cbp->bio_offset + MAXPHYS; 322 cbp2->bio_to = cp->provider; 323 cbp2->bio_done = g_eli_write_done; 324 cbp->bio_length = MAXPHYS; 325 } 326 /* 327 * Send encrypted data to the provider. 328 */ 329 G_ELI_LOGREQ(2, cbp, "Sending request."); 330 bp->bio_inbed = 0; 331 bp->bio_children = (cbp2 != NULL ? 2 : 1); 332 g_io_request(cbp, cp); 333 if (cbp2 != NULL) { 334 G_ELI_LOGREQ(2, cbp2, "Sending request."); 335 g_io_request(cbp2, cp); 336 } 337 return (0); 338 } 339 340 void 341 g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) 342 { 343 struct g_consumer *cp; 344 struct bio *cbp, *cbp2; 345 size_t size; 346 off_t nsec; 347 348 bp->bio_pflags = 0; 349 350 cp = LIST_FIRST(&sc->sc_geom->consumer); 351 cbp = bp->bio_driver1; 352 bp->bio_driver1 = NULL; 353 cbp->bio_to = cp->provider; 354 cbp->bio_done = g_eli_read_done; 355 356 /* Number of sectors from decrypted provider, eg. 1. */ 357 nsec = bp->bio_length / bp->bio_to->sectorsize; 358 /* Number of sectors from encrypted provider, eg. 9. */ 359 nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; 360 361 cbp->bio_length = cp->provider->sectorsize * nsec; 362 size = cbp->bio_length; 363 size += sc->sc_alen * nsec; 364 size += sizeof(struct cryptop) * nsec; 365 size += sizeof(struct cryptodesc) * nsec * 2; 366 size += G_ELI_AUTH_SECKEYLEN * nsec; 367 cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 368 bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK); 369 cbp->bio_data = bp->bio_driver2; 370 371 /* 372 * We read more than what is requested, so we have to be ready to read 373 * more than MAXPHYS. 374 */ 375 cbp2 = NULL; 376 if (cbp->bio_length > MAXPHYS) { 377 cbp2 = g_duplicate_bio(bp); 378 cbp2->bio_length = cbp->bio_length - MAXPHYS; 379 cbp2->bio_data = cbp->bio_data + MAXPHYS; 380 cbp2->bio_offset = cbp->bio_offset + MAXPHYS; 381 cbp2->bio_to = cp->provider; 382 cbp2->bio_done = g_eli_read_done; 383 cbp->bio_length = MAXPHYS; 384 } 385 /* 386 * Read encrypted data from provider. 
387 */ 388 G_ELI_LOGREQ(2, cbp, "Sending request."); 389 g_io_request(cbp, cp); 390 if (cbp2 != NULL) { 391 G_ELI_LOGREQ(2, cbp2, "Sending request."); 392 g_io_request(cbp2, cp); 393 } 394 } 395 396 /* 397 * This is the main function responsible for cryptography (ie. communication 398 * with crypto(9) subsystem). 399 * 400 * BIO_READ: 401 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver 402 * BIO_WRITE: 403 * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 404 */ 405 void 406 g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) 407 { 408 struct g_eli_softc *sc; 409 struct cryptop *crp; 410 struct cryptodesc *crde, *crda; 411 u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; 412 off_t dstoff; 413 u_char *p, *data, *auth, *authkey, *plaindata; 414 int error; 415 416 G_ELI_LOGREQ(3, bp, "%s", __func__); 417 418 bp->bio_pflags = wr->w_number; 419 sc = wr->w_softc; 420 /* Sectorsize of decrypted provider eg. 4096. */ 421 decr_secsize = bp->bio_to->sectorsize; 422 /* The real sectorsize of encrypted provider, eg. 512. */ 423 encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 424 /* Number of data bytes in one encrypted sector, eg. 480. */ 425 data_secsize = sc->sc_data_per_sector; 426 /* Number of sectors from decrypted provider, eg. 2. */ 427 nsec = bp->bio_length / decr_secsize; 428 /* Number of sectors from encrypted provider, eg. 18. */ 429 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 430 /* Last sector number in every big sector, eg. 9. */ 431 lsec = sc->sc_bytes_per_sector / encr_secsize; 432 /* Destination offset, used for IV generation. */ 433 dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 434 435 auth = NULL; /* Silence compiler warning. */ 436 plaindata = bp->bio_data; 437 if (bp->bio_cmd == BIO_READ) { 438 data = bp->bio_driver2; 439 auth = data + encr_secsize * nsec; 440 p = auth + sc->sc_alen * nsec; 441 } else { 442 size_t size; 443 444 size = encr_secsize * nsec; 445 size += sizeof(*crp) * nsec; 446 size += sizeof(*crde) * nsec; 447 size += sizeof(*crda) * nsec; 448 size += G_ELI_AUTH_SECKEYLEN * nsec; 449 size += sizeof(uintptr_t); /* Space for alignment. */ 450 data = malloc(size, M_ELI, M_WAITOK); 451 bp->bio_driver2 = data; 452 p = data + encr_secsize * nsec; 453 } 454 bp->bio_inbed = 0; 455 bp->bio_children = nsec; 456 457 #if defined(__mips_n64) || defined(__mips_o64) 458 p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t)); 459 #endif 460 461 for (i = 1; i <= nsec; i++, dstoff += encr_secsize) { 462 crp = (struct cryptop *)p; p += sizeof(*crp); 463 crde = (struct cryptodesc *)p; p += sizeof(*crde); 464 crda = (struct cryptodesc *)p; p += sizeof(*crda); 465 authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN; 466 467 data_secsize = sc->sc_data_per_sector; 468 if ((i % lsec) == 0) { 469 data_secsize = decr_secsize % data_secsize; 470 /* 471 * Last encrypted sector of each decrypted sector is 472 * only partially filled. 473 */ 474 if (bp->bio_cmd == BIO_WRITE) 475 memset(data + sc->sc_alen + data_secsize, 0, 476 encr_secsize - sc->sc_alen - data_secsize); 477 } 478 479 if (bp->bio_cmd == BIO_READ) { 480 /* Remember read HMAC. */ 481 bcopy(data, auth, sc->sc_alen); 482 auth += sc->sc_alen; 483 /* TODO: bzero(9) can be commented out later. 
			/* TODO: bzero(9) can be commented out later. */
			bzero(data, sc->sc_alen);
		} else {
			bcopy(plaindata, data + sc->sc_alen, data_secsize);
			plaindata += data_secsize;
		}

		crp->crp_session = wr->w_sid;
		crp->crp_ilen = sc->sc_alen + data_secsize;
		crp->crp_olen = data_secsize;
		crp->crp_opaque = (void *)bp;
		crp->crp_buf = (void *)data;
		data += encr_secsize;
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		if (g_eli_batch)
			crp->crp_flags |= CRYPTO_F_BATCH;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_callback = g_eli_auth_write_done;
			crp->crp_desc = crde;
			crde->crd_next = crda;
			crda->crd_next = NULL;
		} else {
			crp->crp_callback = g_eli_auth_read_done;
			crp->crp_desc = crda;
			crda->crd_next = crde;
			crde->crd_next = NULL;
		}

		crde->crd_skip = sc->sc_alen;
		crde->crd_len = data_secsize;
		crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0)
			crde->crd_flags |= CRD_F_KEY_EXPLICIT;
		if (bp->bio_cmd == BIO_WRITE)
			crde->crd_flags |= CRD_F_ENCRYPT;
		crde->crd_alg = sc->sc_ealgo;
		crde->crd_key = g_eli_key_hold(sc, dstoff, encr_secsize);
		crde->crd_klen = sc->sc_ekeylen;
		if (sc->sc_ealgo == CRYPTO_AES_XTS)
			crde->crd_klen <<= 1;
		g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv,
		    sizeof(crde->crd_iv));

		crda->crd_skip = sc->sc_alen;
		crda->crd_len = data_secsize;
		crda->crd_inject = 0;
		crda->crd_flags = CRD_F_KEY_EXPLICIT;
		crda->crd_alg = sc->sc_aalgo;
		g_eli_auth_keygen(sc, dstoff, authkey);
		crda->crd_key = authkey;
		crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8;

		crp->crp_etype = 0;
		error = crypto_dispatch(crp);
		KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)",
		    error));
	}
}
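
/*
 * Illustrative sketch (not part of the original code): how a request on the
 * decrypted provider is mapped onto the encrypted provider, mirroring the
 * calculations in g_eli_auth_read() and g_eli_auth_write_done() above.  The
 * helper and its parameter names are hypothetical; the real code works on
 * bp->bio_offset, bp->bio_length, bp->bio_to->sectorsize and
 * sc->sc_bytes_per_sector.
 */
static __inline void
g_eli_auth_map_request(off_t offset, off_t length, u_int decr_secsize,
    u_int encr_secsize, u_int bytes_per_sector, off_t *enc_offset,
    off_t *enc_length)
{
	off_t nsec;

	/* Number of sectors of the decrypted provider covered by the request. */
	nsec = length / decr_secsize;
	/* Each of them expands to bytes_per_sector / encr_secsize raw sectors. */
	nsec = (nsec * bytes_per_sector) / encr_secsize;
	*enc_length = (off_t)encr_secsize * nsec;
	/* Offsets scale by the same per-sector expansion factor. */
	*enc_offset = (offset / decr_secsize) * bytes_per_sector;
}
/*
 * For example, with decr_secsize = 4096, encr_secsize = 512 and
 * bytes_per_sector = 4608, an 8192-byte request at offset 8192 maps to a
 * 9216-byte request at offset 9216 on the encrypted provider.
 */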