// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019, 2023 NXP
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

struct jr_driver_data {
	/* List of Physical JobR's with the Driver */
	struct list_head jr_list;
	spinlock_t jr_alloc_lock; /* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void register_algs(struct caam_drv_private_jr *jrpriv,
			  struct device *dev)
{
	mutex_lock(&algs_lock);

	if (++active_devs != 1)
		goto algs_unlock;

	caam_algapi_init(dev);
	caam_algapi_hash_init(dev);
	caam_pkc_init(dev);
	jrpriv->hwrng = !caam_rng_init(dev);
	caam_prng_register(dev);
	caam_qi_algapi_init(dev);

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
	mutex_lock(&algs_lock);

	if (--active_devs != 0)
		goto algs_unlock;

	caam_qi_algapi_exit();
	caam_prng_unregister(NULL);
	caam_pkc_exit();
	caam_algapi_hash_exit();
	caam_algapi_exit();

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void caam_jr_crypto_engine_exit(void *data)
{
	struct device *jrdev = data;
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);

	/* Free the resources of crypto-engine */
	crypto_engine_exit(jrpriv->engine);
}

/*
 * Put the CAAM in quiesce, i.e. stop processing.
 *
 * Must be called with interrupts (itr) disabled.
 */
static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/* Check the current status */
	if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
		goto wait_quiesce_completion;

	/* Reset the field */
	clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);

	/* initiate flush / park (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);

wait_quiesce_completion:
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	return 0;
}
/*
 * Flush the job ring: jobs already running are stopped, jobs still queued
 * are invalidated, and the CAAM no longer fetches from the input ring.
 *
 * Must be called with interrupts (itr) disabled.
 */
static int caam_jr_flush(struct device *dev)
{
	return caam_jr_stop_processing(dev, JRCR_RESET);
}

/* The resume can be used after a park or a flush if CAAM has not been reset */
static int caam_jr_restart_processing(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
			  JRINT_ERR_HALT_MASK;

	/* Check that the flush/park is completed */
	if (halt_status != JRINT_ERR_HALT_COMPLETE)
		return -1;

	/* Resume processing of jobs */
	clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);

	return 0;
}
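/*
 * A minimal usage sketch (illustrative, not part of the driver):
 * caam_jr_flush() and caam_jr_restart_processing() pair up when the ring
 * must be quiesced without a CAAM reset, mirroring the suspend/resume path
 * below. Error handling is elided:
 *
 *	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); // mask itr
 *	err = caam_jr_flush(dev);              // flush; ring is quiescent
 *	...                                    // e.g. save/restore state
 *	err = caam_jr_restart_processing(dev); // valid only if not reset
 *	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); // unmask itr
 */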
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;
	int err;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
	err = caam_jr_flush(dev);
	if (err)
		return err;

	/* initiate reset */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	return ret;
}

static void caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	if (jrpriv->hwrng)
		caam_rng_exit(jrdev->parent);

	/*
	 * If a job ring is still allocated there is trouble ahead. Once
	 * caam_jr_remove() returns, jrpriv will be freed and the registers
	 * will get unmapped. So any user of such a job ring will probably
	 * crash.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_alert(jrdev, "Device is busy; consumers might start to crash\n");
		return;
	}

	/* Unregister JR-based RNG & crypto algorithms */
	unregister_algs();

	/* Remove the node from the physical JobR list maintained by the driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
}

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!(irqstate & JRINT_JR_INT))
		return IRQ_NONE;

	/*
	 * If there is a JobR error, we have more development work to do.
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct caam_jr_dequeue_params *params = (void *)devarg;
	struct device *dev = params->dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	u32 outring_used = 0;

	while (outring_used ||
	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {

		head = READ_ONCE(jrp->head);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jr_outentry_desc(jrp->outring, hw_idx) ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
								  hw_idx)),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
								hw_idx));

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
		outring_used--;
	}

	if (params->enable_itr)
		/* reenable / unmask IRQs */
		clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
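/*
 * Worked example for the out-of-order tail logic above (illustrative):
 * with JOBR_DEPTH = 8, tail = 1, head = 4 and jobs at software indices
 * 1, 2 and 3 outstanding, a completion of job 2 first leaves tail at 1 and
 * zeroes entinfo[2].desc_addr_dma. When job 1 then completes, sw_idx ==
 * tail, so the loop advances tail past index 1 and the already-completed
 * index 2, stopping at the still-pending index 3.
 */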
/**
 * caam_jr_alloc() - Allocate a job ring for someone to use as needed.
 *
 * Return: pointer to the device of the newly allocated (least-loaded)
 *         physical JobR on success, ERR_PTR(-ENODEV) if no job ring is
 *         available.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt = INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);

/**
 * caam_jr_free() - Free the Job Ring
 * @rdev: points to the dev that identifies the Job ring to
 *        be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
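/*
 * A minimal usage sketch (illustrative, not part of the driver): a consumer
 * typically allocates the least-loaded ring once (e.g. at tfm init) and
 * releases it when done, keeping alloc/free balanced so tfm_count stays
 * accurate:
 *
 *	struct device *jrdev = caam_jr_alloc();
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);	// no job ring registered/available
 *	...				// submit jobs via caam_jr_enqueue()
 *	caam_jr_free(jrdev);		// drop this user's tfm_count reference
 */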
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
 * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  struct device of the job ring to be used
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        dev:    contains the job ring device that processed this
 *                response.
 *        desc:   descriptor that initiated the request, same as
 *                "desc" being passed to caam_jr_enqueue().
 *        status: untranslated status received from CAAM. See the
 *                reference manual for a detailed description of
 *                error meaning, or see the JRSTA definitions in the
 *                register header file.
 *        areq:   optional pointer to an argument passed with the
 *                original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	if (!jrp->inpring_avail ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -ENOSPC;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 *
	 * Under heavy DDR load, smp_wmb() or dma_wmb() fail to make the input
	 * ring be updated before the CAAM starts reading it. So, CAAM will
	 * process, again, an old descriptor address and will put it in the
	 * output ring. This will make caam_jr_dequeue() fail, since this
	 * old descriptor is not in the software ring.
	 * To fix this, use wmb() which works on the full system instead of
	 * inner/outer shareable domains.
	 */
	wmb();

	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring
	 * using a memory barrier. The wr_reg32() uses the iowrite32()
	 * API, which issues a memory barrier before the write operation.
	 */

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	jrp->inpring_avail--;
	if (!jrp->inpring_avail)
		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

	spin_unlock_bh(&jrp->inplock);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
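/*
 * A minimal usage sketch (illustrative, not part of the driver); "my_done",
 * "my_req" and "handle_error" are hypothetical caller-side names:
 *
 *	static void my_done(struct device *dev, u32 *desc, u32 status,
 *			    void *areq)
 *	{
 *		struct my_req *req = areq;
 *
 *		if (status)
 *			caam_jr_strstatus(dev, status);	// decode JRSTA status
 *		complete(&req->done);			// wake the submitter
 *	}
 *
 *	ret = caam_jr_enqueue(jrdev, desc, my_done, req);
 *	if (ret != -EINPROGRESS)
 *		handle_error(ret);	// -ENOSPC: ring full, -EIO: map failed
 */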
static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
			    dma_addr_t outbusaddr)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
}

static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
{
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;
}

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
					   JOBR_DEPTH, &inpbusaddr,
					   GFP_KERNEL);
	if (!jrp->inpring)
		return -ENOMEM;

	jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
					   JOBR_DEPTH, &outbusaddr,
					   GFP_KERNEL);
	if (!jrp->outring)
		return -ENOMEM;

	jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
				    GFP_KERNEL);
	if (!jrp->entinfo)
		return -ENOMEM;

	/* mark slots unused; 0 denotes a completed entry in caam_jr_dequeue() */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	caam_jr_reset_index(jrp);
	jrp->inpring_avail = JOBR_DEPTH;
	caam_jr_init_hw(dev, inpbusaddr, outbusaddr);

	spin_lock_init(&jrp->inplock);

	jrp->tasklet_params.dev = dev;
	jrp->tasklet_params.enable_itr = 1;
	tasklet_init(&jrp->irqtask, caam_jr_dequeue,
		     (unsigned long)&jrp->tasklet_params);

	/* Connect job ring interrupt handler. */
	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
				 dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		tasklet_kill(&jrp->irqtask);
	}

	return error;
}

static void caam_jr_irq_dispose_mapping(void *data)
{
	irq_dispose_mapping((unsigned long)data);
}
/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	struct resource *r;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(jrdev, "platform_get_resource() failed\n");
		return -ENOMEM;
	}

	ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
	if (!ctrl) {
		dev_err(jrdev, "devm_ioremap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		return error;
	}

	/* Initialize crypto engine */
	jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
							  false,
							  CRYPTO_ENGINE_MAX_QLEN);
	if (!jrpriv->engine) {
		dev_err(jrdev, "Could not init crypto-engine\n");
		return -ENOMEM;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
					 jrdev);
	if (error)
		return error;

	/* Start crypto engine */
	error = crypto_engine_start(jrpriv->engine);
	if (error) {
		dev_err(jrdev, "Could not start crypto-engine\n");
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
	if (!jrpriv->irq) {
		dev_err(jrdev, "irq_of_parse_and_map failed\n");
		return -EINVAL;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
					 (void *)(unsigned long)jrpriv->irq);
	if (error)
		return error;

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error)
		return error;

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	device_init_wakeup(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, false);

	register_algs(jrpriv, jrdev->parent);

	return 0;
}

static void caam_jr_get_hw_state(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);

	jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
}

static int caam_jr_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
	struct caam_jr_dequeue_params suspend_params = {
		.dev = dev,
		.enable_itr = 0,
	};

	/* Remove the node from the physical JobR list maintained by the driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	if (jrpriv->hwrng)
		caam_rng_exit(dev->parent);

	if (ctrlpriv->caam_off_during_pm) {
		int err;

		tasklet_disable(&jrpriv->irqtask);

		/* mask itr to call flush */
		clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);

		/* Invalidate jobs in process */
		err = caam_jr_flush(dev);
		if (err) {
			dev_err(dev, "Failed to flush\n");
			return err;
		}

		/* Dequeue the flushed jobs */
		caam_jr_dequeue((unsigned long)&suspend_params);

		/* Save state */
		caam_jr_get_hw_state(dev);
	} else if (device_may_wakeup(&pdev->dev)) {
		enable_irq_wake(jrpriv->irq);
	}

	return 0;
}

static int caam_jr_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);

	if (ctrlpriv->caam_off_during_pm) {
		u64 inp_addr;
		int err;

		/*
		 * Check if the CAAM has been reset by checking the address
		 * of the input ring.
		 */
		inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
		if (inp_addr != 0) {
			/* JR still has some configuration */
			if (inp_addr == jrpriv->state.inpbusaddr) {
				/* JR has not been reset */
				err = caam_jr_restart_processing(dev);
				if (err) {
					dev_err(dev,
						"Restart processing failed\n");
					return err;
				}

				tasklet_enable(&jrpriv->irqtask);

				clrsetbits_32(&jrpriv->rregs->rconfig_lo,
					      JRCFG_IMSK, 0);

				goto add_jr;
			} else if (ctrlpriv->optee_en) {
				/* JR has been used by OPTEE, reset it */
				err = caam_reset_hw_jr(dev);
				if (err) {
					dev_err(dev, "Failed to reset JR\n");
					return err;
				}
			} else {
				/* No explanation, return error */
				return -EIO;
			}
		}

		caam_jr_reset_index(jrpriv);
		caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
				jrpriv->state.outbusaddr);

		tasklet_enable(&jrpriv->irqtask);
	} else if (device_may_wakeup(&pdev->dev)) {
		disable_irq_wake(jrpriv->irq);
	}

add_jr:
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	if (jrpriv->hwrng)
		jrpriv->hwrng = !caam_rng_init(dev->parent);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);

static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
		.pm = pm_ptr(&caam_jr_pm_ops),
	},
	.probe = caam_jr_probe,
	.remove = caam_jr_remove,
	.shutdown = caam_jr_remove,
};

static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");