// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out-of-memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP))

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(&bo->base);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
	drm_gem_object_put_unlocked(&bo->base);
}
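/* Lifetime of the overflow BO handed to the binner above: the
 * reference taken before list_add_tail() is what keeps the BO alive
 * once the hardware has been pointed at it, while the creation
 * reference is always dropped at "out:".  Entries on the bin job's
 * unref_list are then expected to be released by the job cleanup code
 * elsewhere in the driver, so each overflow allocation lives roughly
 * as long as the job that consumed it.
 */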
static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		v3d->queue[V3D_BIN].finished_seqno++;
		dma_fence_signal(v3d->bin_job->bin.done_fence);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		v3d->queue[V3D_RENDER].finished_seqno++;
		dma_fence_signal(v3d->render_job->render.done_fence);

		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	return status;
}
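/* v3d_irq() above services the per-core interrupts and is only wired
 * up for core 0, matching the single "v3d_core0" IRQ requested in
 * v3d_irq_init() below.  v3d_hub_irq() below handles the hub-level
 * MMU interrupts, which are presumably shared by all cores behind the
 * hub.
 */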
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
	int ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
			       v3d_hub_irq, IRQF_SHARED,
			       "v3d_hub", v3d);
	if (ret) {
		dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);
		return;
	}

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
			       v3d_irq, IRQF_SHARED,
			       "v3d_core0", v3d);
	if (ret) {
		dev_err(v3d->dev, "core IRQ setup failed: %d\n", ret);
		return;
	}

	v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}
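/* The mask registers appear to follow a set/clear convention: writing
 * a bit to V3D_CTL_INT_MSK_SET (or V3D_HUB_INT_MSK_SET) masks that
 * interrupt off, and writing it to the corresponding MSK_CLR register
 * unmasks it.  That is why v3d_irq_enable() above masks ~V3D_CORE_IRQS
 * and unmasks V3D_CORE_IRQS, while v3d_irq_disable() below simply
 * masks everything with ~0.
 */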
void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}
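/* A rough sketch of how these entry points are expected to be used by
 * the rest of the driver (the callers and ordering here are
 * illustrative, not taken from this file):
 *
 *	v3d_irq_init(v3d);     // at probe time, once registers are mapped
 *	...
 *	v3d_irq_disable(v3d);  // quiesce IRQs before a GPU reset
 *	// ... perform the reset ...
 *	v3d_irq_reset(v3d);    // re-enable our interrupts afterwards
 */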