/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

/* loop bound while polling device registers for reset/restart completion */
#define TALITOS_TIMEOUT 100000
/* largest single request the hardware will accept (bytes) */
#define TALITOS_MAX_DATA_LEN 65535

/*
 * field extractors for the (big-endian) descriptor header word:
 * descriptor type, primary and secondary execution unit selectors
 */
#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)

/* descriptor pointer entry */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address */
	__be32 ptr;	/* address */
};

/* descriptor: hardware-defined layout, consumed by the SEC via DMA */
struct talitos_desc {
	__be32 hdr;			/* header high bits */
	__be32 hdr_lo;			/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};

/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
			  void *context, int error);
	void *context;
};

/* per-device driver state */
struct talitos_private {
	struct device *dev;
	struct of_device *ofdev;
	void __iomem *reg;	/* mapped SEC register block */
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* SEC Compatibility info (TALITOS_FTR_* bits) */
	unsigned long features;

	/* next channel to be assigned next incoming descriptor */
	atomic_t last_chan;

	/* per-channel number of requests pending in channel h/w fifo */
	atomic_t *submit_count;

	/* per-channel request fifo */
	struct talitos_request **fifo;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	/* per-channel index to next free descriptor request */
	int *head;

	/* per-channel index to next in-progress/done descriptor request */
	int *tail;

	/* per-channel
	   request submission (head) and release (tail) locks */
	spinlock_t *head_lock;
	spinlock_t *tail_lock;

	/* request callback tasklet */
	struct tasklet_struct done_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};

/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	/* hardware consumes big-endian length/address fields */
	talitos_ptr->len = cpu_to_be16(len);
	talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
	talitos_ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}

/*
 * reset one channel and re-enable its done-writeback and done-IRQ bits;
 * returns -EIO if the reset bit never self-clears
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);

	/* hardware clears RESET when the channel reset completes */
	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set done writeback and IRQ */
	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
		  TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

/*
 * software-reset the whole SEC; returns -EIO if SWR never self-clears
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/* emulate SEC's round-robin channel fifo polling scheme */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->head_lock[ch], flags);

	if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
		return -EAGAIN;
	}

	head = priv->head[ch];
	request = &priv->fifo[ch][head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);

	/*
	 * publish the request fields before setting desc non-NULL:
	 * flush_channel() treats a non-NULL ->desc as "slot occupied"
	 */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);

	spin_unlock_irqrestore(&priv->head_lock[ch], flags);

	return -EINPROGRESS;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->tail_lock[ch], flags);

	tail = priv->tail[ch];
	/* a non-NULL desc marks an occupied fifo slot (see talitos_submit) */
	while (priv->fifo[ch][tail].desc) {
		request = &priv->fifo[ch][tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->tail_lock[ch], flags);

		atomic_dec(&priv->submit_count[ch]);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->tail_lock[ch], flags);
		tail = priv->tail[ch];
	}

	spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
}

/*
 * process completed requests for channels that have done status
 * (tasklet body; data is the struct device pointer)
 */
static void talitos_done(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch;

	for (ch = 0; ch < priv->num_channels; ch++)
		flush_channel(dev, ch, 0, 0);

	/* At this point, all completed channels have been processed.
	 * Unmask done interrupts for channels completed later on.
	 */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
}

/*
 * locate current (offending) descriptor by walking the channel fifo
 * until the entry whose bus address matches the channel's CDPR_LO
 */
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->tail[ch];
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));

	while (priv->fifo[ch][tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->tail[ch]) {
			/* wrapped all the way around without a match */
			dev_err(dev, "couldn't locate current descriptor\n");
			return NULL;
		}
	}

	return priv->fifo[ch][tail].desc;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch,
			    struct talitos_desc *desc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* dump the interrupt status of the primary EU selected in the header */
	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	/* and the secondary EU, if one is selected */
	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	/* dump the channel's descriptor buffer (8 dwords) */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	/*
	 * NOTE(review): timeout is shared across all channels in the loop
	 * below, so a slow restart on one channel shortens (or exhausts)
	 * the budget for subsequent ones — confirm whether this is intended.
	 */
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* nudge the channel to continue past the error */
			setbits32(priv->reg + TALITOS_CCCR(ch),
				  TALITOS_CCCR_CONT);
			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
		        "ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

static
irqreturn_t talitos_interrupt(int irq, void *data)
{
	struct device *dev = data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 isr, isr_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
	/* Acknowledge interrupt */
	out_be32(priv->reg + TALITOS_ICR, isr);
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

	/* anything beyond channel-done bits is an error condition */
	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
		talitos_error((unsigned long)data, isr, isr_lo);
	else
		if (likely(isr & TALITOS_ISR_CHDONE)) {
			/* mask further done interrupts. */
			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
			/* done_task will unmask done interrupts at exit */
			tasklet_schedule(&priv->done_task);
		}

	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	/* poll the output FIFO level; up to 20 x 10us when asked to wait */
	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

/*
 * reset the RNGU and start it generating; -ENODEV if it never reports ready
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
&& --timeout) 639 cpu_relax(); 640 if (timeout == 0) { 641 dev_err(dev, "failed to reset rng hw\n"); 642 return -ENODEV; 643 } 644 645 /* start generating */ 646 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); 647 648 return 0; 649 } 650 651 static int talitos_register_rng(struct device *dev) 652 { 653 struct talitos_private *priv = dev_get_drvdata(dev); 654 655 priv->rng.name = dev_driver_string(dev), 656 priv->rng.init = talitos_rng_init, 657 priv->rng.data_present = talitos_rng_data_present, 658 priv->rng.data_read = talitos_rng_data_read, 659 priv->rng.priv = (unsigned long)dev; 660 661 return hwrng_register(&priv->rng); 662 } 663 664 static void talitos_unregister_rng(struct device *dev) 665 { 666 struct talitos_private *priv = dev_get_drvdata(dev); 667 668 hwrng_unregister(&priv->rng); 669 } 670 671 /* 672 * crypto alg 673 */ 674 #define TALITOS_CRA_PRIORITY 3000 675 #define TALITOS_MAX_KEY_SIZE 64 676 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 677 678 #define MD5_DIGEST_SIZE 16 679 680 struct talitos_ctx { 681 struct device *dev; 682 __be32 desc_hdr_template; 683 u8 key[TALITOS_MAX_KEY_SIZE]; 684 u8 iv[TALITOS_MAX_IV_LENGTH]; 685 unsigned int keylen; 686 unsigned int enckeylen; 687 unsigned int authkeylen; 688 unsigned int authsize; 689 }; 690 691 static int aead_setauthsize(struct crypto_aead *authenc, 692 unsigned int authsize) 693 { 694 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 695 696 ctx->authsize = authsize; 697 698 return 0; 699 } 700 701 static int aead_setkey(struct crypto_aead *authenc, 702 const u8 *key, unsigned int keylen) 703 { 704 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 705 struct rtattr *rta = (void *)key; 706 struct crypto_authenc_key_param *param; 707 unsigned int authkeylen; 708 unsigned int enckeylen; 709 710 if (!RTA_OK(rta, keylen)) 711 goto badkey; 712 713 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 714 goto badkey; 715 716 if (RTA_PAYLOAD(rta) < sizeof(*param)) 717 goto 
		badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	/* key blob is auth key followed by enc key */
	authkeylen = keylen - enckeylen;

	/* combined key must fit the fixed ctx->key buffer */
	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	int src_is_chained;
	int dst_is_chained;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};

/*
 * map a scatterlist for DMA; chained lists are mapped one entry at a
 * time since dma_map_sg() cannot follow chain links
 */
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  int chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = scatterwalk_sg_next(sg);
	}
}

/*
 * unmap src/dst scatterlists, choosing direction by whether they alias
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_is_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (edesc->dst_is_chained)
			talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
		else
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else
		if (edesc->src_is_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

/*
 * undo all DMA mappings set up for an ipsec_esp descriptor
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		icvdata =
			&edesc->link_tbl[edesc->src_nents +
					 edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

/*
 * decrypt-done callback with ICV verified in software (memcmp)
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * decrypt-done callback when the hardware performed the ICV check;
 * the pass/fail result is read back from the descriptor header
 */
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u8 *giv, u64 seq,
		     void (*callback) (struct device *dev,
				       struct talitos_desc *desc,
				       void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead); 969 struct device *dev = ctx->dev; 970 struct talitos_desc *desc = &edesc->desc; 971 unsigned int cryptlen = areq->cryptlen; 972 unsigned int authsize = ctx->authsize; 973 unsigned int ivsize; 974 int sg_count, ret; 975 int sg_link_tbl_len; 976 977 /* hmac key */ 978 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 979 0, DMA_TO_DEVICE); 980 /* hmac data */ 981 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - 982 sg_virt(areq->assoc), sg_virt(areq->assoc), 0, 983 DMA_TO_DEVICE); 984 /* cipher iv */ 985 ivsize = crypto_aead_ivsize(aead); 986 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, 987 DMA_TO_DEVICE); 988 989 /* cipher key */ 990 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, 991 (char *)&ctx->key + ctx->authkeylen, 0, 992 DMA_TO_DEVICE); 993 994 /* 995 * cipher in 996 * map and adjust cipher len to aead request cryptlen. 997 * extent is bytes of HMAC postpended to ciphertext, 998 * typically 12 for ipsec 999 */ 1000 desc->ptr[4].len = cpu_to_be16(cryptlen); 1001 desc->ptr[4].j_extent = authsize; 1002 1003 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, 1004 (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL 1005 : DMA_TO_DEVICE, 1006 edesc->src_is_chained); 1007 1008 if (sg_count == 1) { 1009 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1010 } else { 1011 sg_link_tbl_len = cryptlen; 1012 1013 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1014 sg_link_tbl_len = cryptlen + authsize; 1015 1016 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, 1017 &edesc->link_tbl[0]); 1018 if (sg_count > 1) { 1019 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1020 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1021 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1022 edesc->dma_len, 1023 DMA_BIDIRECTIONAL); 1024 } else { 1025 /* Only one segment now, so no link tbl needed */ 1026 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> 1027 src)); 1028 } 1029 } 1030 1031 /* cipher out */ 1032 desc->ptr[5].len = cpu_to_be16(cryptlen); 1033 desc->ptr[5].j_extent = authsize; 1034 1035 if (areq->src != areq->dst) 1036 sg_count = talitos_map_sg(dev, areq->dst, 1037 edesc->dst_nents ? 
: 1, 1038 DMA_FROM_DEVICE, 1039 edesc->dst_is_chained); 1040 1041 if (sg_count == 1) { 1042 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1043 } else { 1044 struct talitos_ptr *link_tbl_ptr = 1045 &edesc->link_tbl[edesc->src_nents + 1]; 1046 1047 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 1048 edesc->dma_link_tbl + 1049 edesc->src_nents + 1); 1050 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1051 link_tbl_ptr); 1052 1053 /* Add an entry to the link table for ICV data */ 1054 link_tbl_ptr += sg_count - 1; 1055 link_tbl_ptr->j_extent = 0; 1056 sg_count++; 1057 link_tbl_ptr++; 1058 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; 1059 link_tbl_ptr->len = cpu_to_be16(authsize); 1060 1061 /* icv data follows link tables */ 1062 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 1063 edesc->dma_link_tbl + 1064 edesc->src_nents + 1065 edesc->dst_nents + 2); 1066 1067 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 1068 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1069 edesc->dma_len, DMA_BIDIRECTIONAL); 1070 } 1071 1072 /* iv out */ 1073 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, 1074 DMA_FROM_DEVICE); 1075 1076 ret = talitos_submit(dev, desc, callback, areq); 1077 if (ret != -EINPROGRESS) { 1078 ipsec_esp_unmap(dev, edesc, areq); 1079 kfree(edesc); 1080 } 1081 return ret; 1082 } 1083 1084 /* 1085 * derive number of elements in scatterlist 1086 */ 1087 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) 1088 { 1089 struct scatterlist *sg = sg_list; 1090 int sg_nents = 0; 1091 1092 *chained = 0; 1093 while (nbytes > 0) { 1094 sg_nents++; 1095 nbytes -= sg->length; 1096 if (!sg_is_last(sg) && (sg + 1)->length == 0) 1097 *chained = 1; 1098 sg = scatterwalk_sg_next(sg); 1099 } 1100 1101 return sg_nents; 1102 } 1103 1104 /* 1105 * allocate and map the extended descriptor 1106 */ 1107 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, 1108 struct scatterlist *src, 
						 struct scatterlist *dst,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 int icv_stashing,
						 u32 cryptoflags)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	int src_chained, dst_chained = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	/* nents == 0 means "single segment, no link table needed" */
	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
	src_nents = (src_nents == 1) ? 0 : src_nents;

	if (dst == src) {
		/* NOTE(review): dst_chained stays 0 here even if src is
		 * chained -- confirm the unmap path tolerates this */
		dst_nents = src_nents;
	} else {
		dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2) *
				 sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		/* still need room to stash the incoming ICV for sw check */
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->src_is_chained = src_chained;
	edesc->dst_is_chained = dst_chained;
	edesc->dma_len = dma_len;
	/* NOTE(review): dma_map_single() is called even when dma_len == 0
	 * (no link table) -- confirm a zero-length mapping is harmless on
	 * the supported platforms */
	edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
					     edesc->dma_len, DMA_BIDIRECTIONAL);

	return edesc;
}

/* allocate an edesc sized for an AEAD request */
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
					      int icv_stashing)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->cryptlen, ctx->authsize, icv_stashing,
				   areq->base.flags);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}

/*
 * AEAD decrypt: prefer the hardware ICV check when the SEC supports it
 * and the request layout allows; otherwise stash the incoming ICV and
 * compare in software from the completion callback.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the trailing authsize bytes are the ICV, not ciphertext */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, 1);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, NULL, 0,
				 ipsec_esp_decrypt_hwauth_done);

	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
}

/* encrypt with driver-generated IV (derived from ctx->iv and seq) */
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->giv, req->seq,
			 ipsec_esp_encrypt_done);
}

/* validate and store an ablkcipher key in the context */
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);

	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (keylen < alg->min_keysize || keylen > alg->max_keysize)
		goto badkey;

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;

badkey:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* undo all DMA mappings set up by common_nonsnoop() */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/* completion callback for ablkcipher requests */
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * fill in and submit a common_nonsnoop (plain cipher) descriptor
 *
 * Pointer slot usage: ptr[0] unused, ptr[1] iv in, ptr[2] key,
 * ptr[3] cipher in, ptr[4] cipher out, ptr[5] iv out, ptr[6] unused.
 * On submit failure the edesc is unmapped and freed here.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   u8 *giv,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize;
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0].len = 0;
	desc->ptr[0].ptr = 0;
	desc->ptr[0].j_extent = 0;

	/* cipher iv */
	ivsize = crypto_ablkcipher_ivsize(cipher);
	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
			       DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, 0, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	desc->ptr[3].len = cpu_to_be16(cryptlen);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_is_chained);

	if (sg_count == 1) {
		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
	} else {
		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
								      src));
		}
	}

	/* cipher out */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = 0;

	/* NOTE(review): when src == dst, sg_count from the src mapping is
	 * reused below -- same s/g list, presumably intentional; confirm */
	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE,
					  edesc->dst_is_chained);

	if (sg_count == 1) {
		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
	} else {
		/* dst link table sits just past the src entries */
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
					       edesc->dma_link_tbl +
					       edesc->src_nents + 1);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  link_tbl_ptr);
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6].len = 0;
	desc->ptr[6].ptr = 0;
	desc->ptr[6].j_extent = 0;

	ret = talitos_submit(dev, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/* allocate an edesc sized for an ablkcipher request (no ICV space) */
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
				   0, 0, areq->base.flags);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}

static int
ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set decrypt (inbound direction) */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}

/* template pairing a crypto_alg with its SEC descriptor header value */
struct talitos_alg_template {
	struct crypto_alg alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{
		.alg = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{
		.alg = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_aead_type,
			.cra_aead = {
				.setkey = aead_setkey,
				.setauthsize = aead_setauthsize,
				.encrypt = aead_encrypt,
				.decrypt = aead_decrypt,
				.givencrypt = aead_givencrypt,
				.geniv = "<built-in>",
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{
		.alg = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_ablkcipher_type,
			.cra_ablkcipher = {
				.setkey = ablkcipher_setkey,
				.encrypt = ablkcipher_encrypt,
				.decrypt = ablkcipher_decrypt,
				.geniv = "eseqiv",
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{
		.alg = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_type = &crypto_ablkcipher_type,
			.cra_ablkcipher = {
				.setkey = ablkcipher_setkey,
				.encrypt = ablkcipher_encrypt,
				.decrypt = ablkcipher_decrypt,
				.geniv = "eseqiv",
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	}
};

/* registered-algorithm bookkeeping: one per driver_algs entry */
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	__be32 desc_hdr_template;
	struct crypto_alg crypto_alg;
};

/* per-tfm init: bind the tfm context to the device and its template */
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* copy descriptor header template value */
	ctx->desc_hdr_template =
talitos_alg->desc_hdr_template;

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

/*
 * Tear down the device.  Also used as talitos_probe()'s error path, so
 * it must tolerate a partially-initialized priv (kfree(NULL) is a
 * no-op, priv->irq is NO_IRQ until mapped, etc.).
 */
static int talitos_remove(struct of_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	kfree(priv->submit_count);
	kfree(priv->tail);
	kfree(priv->head);

	if (priv->fifo)
		for (i = 0; i < priv->num_channels; i++)
			kfree(priv->fifo[i]);

	kfree(priv->fifo);
	kfree(priv->head_lock);
	kfree(priv->tail_lock);

	if (priv->irq != NO_IRQ) {
		free_irq(priv->irq, dev);
		irq_dispose_mapping(priv->irq);
	}

	tasklet_kill(&priv->done_task);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}

/* build a talitos_crypto_alg from one driver_algs template entry */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;
	*alg = template->alg;

	alg->cra_module = THIS_MODULE;
	alg->cra_init = talitos_cra_init;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);

	t_alg->desc_hdr_template = template->desc_hdr_template;
	t_alg->dev = dev;

	return t_alg;
}

/*
 * probe: map irq and registers, read SEC geometry/capabilities from the
 * device tree, allocate per-channel fifos/locks/indices, reset the h/w,
 * then register the RNG and every algorithm the device supports.
 * All error paths funnel through talitos_remove() for cleanup.
 */
static int talitos_probe(struct of_device *ofdev,
			 const struct of_device_id *match)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);

	INIT_LIST_HEAD(&priv->alg_list);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		/* mark unmapped so talitos_remove() skips the irq teardown */
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;

	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	if (!priv->head_lock || !priv->tail_lock) {
		dev_err(dev, "failed to allocate fifo locks\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->head_lock[i]);
		spin_lock_init(&priv->tail_lock[i]);
	}

	priv->fifo = kmalloc(sizeof(struct talitos_request *) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->fifo) {
		dev_err(dev, "failed to allocate request fifo\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* round up to a power of 2 so head/tail can wrap with a mask */
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
					priv->fifo_len, GFP_KERNEL);
		if (!priv->fifo[i]) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
				     GFP_KERNEL);
	if (!priv->submit_count) {
		dev_err(dev, "failed to allocate fifo submit count space\n");
		err = -ENOMEM;
		goto err_out;
	}
	/* counts up from -(chfifo_len - 1); 0 means the h/w fifo is full */
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));

	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	if (!priv->head || !priv->tail) {
		dev_err(dev, "failed to allocate request index space\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			/* registration failure is non-fatal: skip the alg */
			err = crypto_register_alg(&t_alg->crypto_alg);
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					t_alg->crypto_alg.cra_driver_name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n",
					 t_alg->crypto_alg.cra_driver_name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct of_platform_driver talitos_driver = {
	.name = "talitos",
	.match_table = talitos_match,
	.probe = talitos_probe,
	.remove = talitos_remove,
};

static int __init talitos_init(void)
{
	return of_register_platform_driver(&talitos_driver);
}
module_init(talitos_init);

static void __exit talitos_exit(void)
{
	of_unregister_platform_driver(&talitos_driver);
}
module_exit(talitos_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");