/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>

#include "talitos.h"

#define TALITOS_TIMEOUT 100000
#define TALITOS_MAX_DATA_LEN 65535

#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)

/* descriptor pointer entry */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address */
	__be32 ptr;	/* address */
};

/* descriptor */
struct talitos_desc {
	__be32 hdr;	/* header high bits */
	__be32 hdr_lo;	/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};
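
/*
 * Reading a header template (editorial note, illustrative only): for the
 * authenc(hmac(sha1),cbc(aes)) template registered in driver_algs[] below,
 * DESC_TYPE() extracts the single-pass ipsec_esp descriptor type,
 * PRIMARY_EU() the AES unit selection (DESC_HDR_SEL0_AESU) and
 * SECONDARY_EU() the message digest unit (DESC_HDR_SEL1_MDEUA);
 * hw_supports() turns each value into a bit and tests it against the
 * desc_types/exec_units capability masks read from the device tree.
 */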

/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
			  void *context, int error);
	void *context;
};

struct talitos_private {
	struct device *dev;
	struct of_device *ofdev;
	void __iomem *reg;
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* next channel to be assigned next incoming descriptor */
	atomic_t last_chan;

	/* per-channel request fifo */
	struct talitos_request **fifo;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	/* per-channel index to next free descriptor request */
	int *head;

	/* per-channel index to next in-progress/done descriptor request */
	int *tail;

	/* per-channel request submission (head) and release (tail) locks */
	spinlock_t *head_lock;
	spinlock_t *tail_lock;

	/* request callback tasklet */
	struct tasklet_struct done_task;
	struct tasklet_struct error_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};
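
/*
 * Worked example of the power-of-two wrap mentioned above (editorial,
 * values illustrative only): if the device tree reported chfifo_len = 24,
 * fifo_len would be rounded up to 32, so head and tail advance with
 *
 *	next = (idx + 1) & (fifo_len - 1);
 *
 * and index 31 wraps back to 0 without a divide or compare.
 */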

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	talitos_ptr->len = cpu_to_be16(len);
	talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
	talitos_ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);

	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set done writeback and IRQ */
	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
		  TALITOS_CCCR_LO_CDIE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/* emulate SEC's round-robin channel fifo polling scheme */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->head_lock[ch], flags);

	head = priv->head[ch];
	request = &priv->fifo[ch][head];

	if (request->desc) {
		/* request queue is full */
		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
		return -EAGAIN;
	}

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);

	spin_unlock_irqrestore(&priv->head_lock[ch], flags);

	return -EINPROGRESS;
}
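
/*
 * Typical caller pattern (an editorial sketch; my_done/my_request are
 * placeholders and ipsec_esp() below is the in-tree user of this interface):
 *
 *	desc->hdr = <descriptor type and execution unit selection bits>;
 *	map_single_talitos_ptr(dev, &desc->ptr[n], len, buf, 0, dir);
 *	...
 *	ret = talitos_submit(dev, desc, my_done, my_request);
 *	if (ret != -EINPROGRESS)
 *		... -EAGAIN means the channel fifo is full, retry later ...
 *
 * my_done() is then invoked (from the channel-done tasklet, or from the
 * error path) with the error code derived by flush_channel(); it should
 * inspect DESC_HDR_DONE and the header feedback bits, unmap its buffers
 * and complete the request.
 */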

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->tail_lock[ch], flags);

	tail = priv->tail[ch];
	while (priv->fifo[ch][tail].desc) {
		request = &priv->fifo[ch][tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->tail_lock[ch], flags);
		tail = priv->tail[ch];
	}

	spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
}
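
/*
 * Note on fifo slot ownership (editorial): request->desc doubles as the
 * "slot busy" flag.  talitos_submit() fills in a slot and publishes desc
 * last, behind smp_wmb(); flush_channel() copies the entry aside, clears
 * desc (also behind smp_wmb()) and advances tail, then drops tail_lock
 * before invoking the callback, so callbacks never run under the lock.
 */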

/*
 * process completed requests for channels that have done status
 */
static void talitos_done(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch;

	for (ch = 0; ch < priv->num_channels; ch++)
		flush_channel(dev, ch, 0, 0);
}

/*
 * locate current (offending) descriptor
 */
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->tail[ch];
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));

	while (priv->fifo[ch][tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->tail[ch]) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return NULL;
		}
	}

	return priv->fifo[ch][tail].desc;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 isr, isr_lo, v, v_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->reg + TALITOS_CCCR(ch),
				  TALITOS_CCCR_CONT);
			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

static irqreturn_t talitos_interrupt(int irq, void *data)
{
	struct device *dev = data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 isr, isr_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);

	/* ack */
	out_be32(priv->reg + TALITOS_ICR, isr);
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
		talitos_error((unsigned long)data);
	else
		if (likely(isr & TALITOS_ISR_CHDONE))
			tasklet_schedule(&priv->done_task);

	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}
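
/*
 * Editorial note: once hwrng_register() succeeds, the hw_random core drives
 * the hooks above on behalf of readers of /dev/hwrng, typically polling
 * data_present() before each data_read().
 */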

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		64
#define TALITOS_MAX_AUTH_SIZE		20
#define TALITOS_AES_MIN_BLOCK_SIZE	16
#define TALITOS_AES_IV_LENGTH		16

struct talitos_ctx {
	struct device *dev;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_AES_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

static int aes_cbc_sha1_hmac_authenc_setauthsize(struct crypto_aead *authenc,
						 unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aes_cbc_sha1_hmac_authenc_setkey(struct crypto_aead *authenc,
					    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
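
/*
 * Editorial note on the key blob parsed by the setkey above: the generic
 * authenc() template packs the key as
 *
 *	struct rtattr             (rta_type == CRYPTO_AUTHENC_KEYA_PARAM)
 *	struct crypto_authenc_key_param { __be32 enckeylen; }
 *	<authentication (HMAC-SHA1) key>   authkeylen = keylen - enckeylen
 *	<encryption (AES) key>             enckeylen bytes
 *
 * so after the rtattr header is skipped, ctx->key holds the auth key
 * immediately followed by the cipher key, which is how ipsec_esp() points
 * ptr[0] (hmac key) at ctx->key and ptr[3] (cipher key) at
 * ctx->key + authkeylen.
 */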

/*
 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct ipsec_esp_edesc {
	int src_nents;
	int dst_nents;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};

static void ipsec_esp_unmap(struct device *dev,
			    struct ipsec_esp_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	if (areq->src != areq->dst) {
		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
			     DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
			     DMA_BIDIRECTIONAL);
	}

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct ipsec_esp_edesc *edesc =
		 container_of(desc, struct ipsec_esp_edesc, desc);
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct scatterlist *sg;
	void *icvdata;

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 1];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *req = context;
	struct ipsec_esp_edesc *edesc =
		 container_of(desc, struct ipsec_esp_edesc, desc);
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct scatterlist *sg;
	void *icvdata;

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 1];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
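
/*
 * Editorial sketch of the DMA area behind an edesc when link tables are in
 * use (dma_len != 0), matching the arithmetic in ipsec_esp_edesc_alloc():
 *
 *	link_tbl[0 .. src_nents-1]                   gather (input) table
 *	link_tbl[src_nents .. src_nents+dst_nents]   scatter (output) table,
 *	                                             including one extra entry
 *	                                             pointing at the ICV
 *	link_tbl[src_nents+dst_nents+1]              authsize bytes of ICV data
 *
 * which is why the callbacks above and the decrypt path below address the
 * ICV as &edesc->link_tbl[src_nents + dst_nents + 1].
 */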

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	while (cryptlen > 0) {
		link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last entry's len to cryptlen */
	link_tbl_ptr--;
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
}
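
/*
 * Worked example (editorial, values illustrative): with cryptlen = 1500 and
 * two 1024-byte DMA segments, the loop emits entries of 1024 and 1024 and
 * leaves cryptlen at -548; the second entry is then trimmed to
 * 1024 + (-548) = 476 bytes and tagged DESC_PTR_LNKTBL_RETURN, so the SEC
 * stops walking the table after exactly 1500 bytes.
 */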

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
		     u8 *giv, u64 seq,
		     void (*callback) (struct device *dev,
				       struct talitos_desc *desc,
				       void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize;
	int sg_count;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       0, DMA_TO_DEVICE);
	/* hmac data */
	map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
			       sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
			       DMA_TO_DEVICE);
	/* cipher iv */
	ivsize = crypto_aead_ivsize(aead);
	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
			       DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen, 0,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	if (areq->src == areq->dst)
		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				      DMA_BIDIRECTIONAL);
	else
		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				      DMA_TO_DEVICE);

	if (sg_count == 1) {
		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
	} else {
		sg_to_link_tbl(areq->src, sg_count, cryptlen,
			       &edesc->link_tbl[0]);
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst) {
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);
	}

	if (sg_count == 1) {
		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents];
		struct scatterlist *sg;

		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
					       edesc->dma_link_tbl +
					       edesc->src_nents);
		if (areq->src == areq->dst) {
			memcpy(link_tbl_ptr, &edesc->link_tbl[0],
			       edesc->src_nents * sizeof(struct talitos_ptr));
		} else {
			sg_to_link_tbl(areq->dst, sg_count, cryptlen,
				       link_tbl_ptr);
		}
		link_tbl_ptr += sg_count - 1;

		/* handle case where sg_last contains the ICV exclusively */
		sg = sg_last(areq->dst, edesc->dst_nents);
		if (sg->length == ctx->authsize)
			link_tbl_ptr--;

		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		link_tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
						edesc->dma_link_tbl +
						edesc->src_nents +
						edesc->dst_nents + 1);

		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	return talitos_submit(dev, desc, callback, areq);
}
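
/*
 * Summary of the descriptor pointer assignments made above for the
 * single-pass ipsec_esp descriptor type (editorial quick reference):
 *
 *	ptr[0]  hmac key            ptr[4]  cipher in (data + ICV extent)
 *	ptr[1]  hmac data (assoc)   ptr[5]  cipher out (data + ICV extent)
 *	ptr[2]  cipher iv in        ptr[6]  cipher iv out
 *	ptr[3]  cipher key
 */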

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes) {
		sg_nents++;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the ipsec_esp extended descriptor
 */
static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
						     int icv_stashing)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct ipsec_esp_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;

	if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
	src_nents = (src_nents == 1) ? 0 : src_nents;

	if (areq->dst == areq->src) {
		dst_nents = src_nents;
	} else {
		dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for a separate entry for the generated ICV (+ 1),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct ipsec_esp_edesc);
	if (src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 1) *
			  sizeof(struct talitos_ptr) + ctx->authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? ctx->authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA);
	if (!edesc) {
		dev_err(ctx->dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->dma_len = dma_len;
	edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
					     edesc->dma_len, DMA_BIDIRECTIONAL);

	return edesc;
}

static int aes_cbc_sha1_hmac_authenc_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct ipsec_esp_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(req, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_AESU_ENC;

	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}

static int aes_cbc_sha1_hmac_authenc_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct ipsec_esp_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(req, 1);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 1];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	/* decrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
}

static int aes_cbc_sha1_hmac_authenc_givencrypt(
	struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct ipsec_esp_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(areq, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_AESU_ENC;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));

	return ipsec_esp(edesc, areq, req->giv, req->seq,
			 ipsec_esp_encrypt_done);
}

struct talitos_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct aead_alg aead;
	struct device *dev;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc(hmac(sha1-talitos),cbc(aes-talitos))",
		.blocksize = TALITOS_AES_MIN_BLOCK_SIZE,
		.aead = {
			.setkey = aes_cbc_sha1_hmac_authenc_setkey,
			.setauthsize = aes_cbc_sha1_hmac_authenc_setauthsize,
			.encrypt = aes_cbc_sha1_hmac_authenc_encrypt,
			.decrypt = aes_cbc_sha1_hmac_authenc_decrypt,
			.givencrypt = aes_cbc_sha1_hmac_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = TALITOS_AES_IV_LENGTH,
			.maxauthsize = TALITOS_MAX_AUTH_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	}
};
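
/*
 * How a kernel consumer would reach the algorithm above (editorial sketch,
 * not part of this driver; the IPsec stack does the equivalent through the
 * authenc() template):
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 *	crypto_aead_setkey(tfm, keyblob, keyblob_len); // authenc() blob,
 *						       // see setkey above
 *	crypto_aead_setauthsize(tfm, 12);              // ESP's usual 96-bit ICV
 *
 * when this driver's cra_priority (TALITOS_CRA_PRIORITY) wins, requests on
 * tfm are routed to the aead ops registered from driver_algs[].
 */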
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	__be32 desc_hdr_template;
	struct crypto_alg crypto_alg;
};

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg =
		container_of(alg, struct talitos_crypto_alg, crypto_alg);
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;
	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->desc_hdr_template;

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_AES_IV_LENGTH);

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
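/*
 * For the driver_algs[] entry above this means the device tree node must
 * advertise the IPSEC_ESP descriptor type in "fsl,descriptor-types-mask"
 * and both the AES unit (primary) and the message digest unit (secondary)
 * in "fsl,exec-units-mask".
 */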
static int __devexit talitos_remove(struct of_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	kfree(priv->tail);
	kfree(priv->head);

	if (priv->fifo)
		for (i = 0; i < priv->num_channels; i++)
			kfree(priv->fifo[i]);

	kfree(priv->fifo);
	kfree(priv->head_lock);
	kfree(priv->tail_lock);

	if (priv->irq != NO_IRQ) {
		free_irq(priv->irq, dev);
		irq_dispose_mapping(priv->irq);
	}

	tasklet_kill(&priv->done_task);
	tasklet_kill(&priv->error_task);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = talitos_cra_init;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_aead_type;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_u.aead = template->aead;

	t_alg->desc_hdr_template = template->desc_hdr_template;
	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe(struct of_device *ofdev,
			 const struct of_device_id *match)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
	tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;
	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	of_node_put(np);
	np = NULL;

	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	if (!priv->head_lock || !priv->tail_lock) {
		dev_err(dev, "failed to allocate fifo locks\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->head_lock[i]);
		spin_lock_init(&priv->tail_lock[i]);
	}

	priv->fifo = kmalloc(sizeof(struct talitos_request *) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->fifo) {
		dev_err(dev, "failed to allocate request fifo\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
					priv->fifo_len, GFP_KERNEL);
		if (!priv->fifo[i]) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	if (!priv->head || !priv->tail) {
		dev_err(dev, "failed to allocate request index space\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	INIT_LIST_HEAD(&priv->alg_list);

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			err = crypto_register_alg(&t_alg->crypto_alg);
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					t_alg->crypto_alg.cra_driver_name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n",
					 t_alg->crypto_alg.cra_driver_name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);
	if (np)
		of_node_put(np);

	return err;
}

static struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
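/*
 * Illustrative device tree node consumed by talitos_probe(); the property
 * names match what the probe routine reads, but the values shown are
 * examples only and come from the board's device tree in practice:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */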
static struct of_platform_driver talitos_driver = {
	.name = "talitos",
	.match_table = talitos_match,
	.probe = talitos_probe,
	.remove = __devexit_p(talitos_remove),
};

static int __init talitos_init(void)
{
	return of_register_platform_driver(&talitos_driver);
}
module_init(talitos_init);

static void __exit talitos_exit(void)
{
	of_unregister_platform_driver(&talitos_driver);
}
module_exit(talitos_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
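/*
 * Consumers reach this offload through the normal kernel crypto API.  A
 * minimal sketch, with error handling and the authenc() key encoding
 * omitted and all local names purely illustrative:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setauthsize(tfm, 12);	(e.g. ESP's SHA1-96)
 *	crypto_aead_setkey(tfm, key, keylen);	(key in authenc() format)
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, my_complete, my_ctx);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * The crypto core prefers this driver over the generic authenc() software
 * template whenever TALITOS_CRA_PRIORITY is the higher cra_priority.
 */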