ivpu_mmu.c (1ac731c529cd4d6adbce134754b51ff7d822b145) ivpu_mmu.c (beaf3ebf2924a111a64b2eec12f50104367ce0a0)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2020-2023 Intel Corporation
4 */
5
6#include <linux/circ_buf.h>
7#include <linux/highmem.h>
8
9#include "ivpu_drv.h"
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2020-2023 Intel Corporation
4 */
5
6#include <linux/circ_buf.h>
7#include <linux/highmem.h>
8
9#include "ivpu_drv.h"
10#include "ivpu_hw_mtl_reg.h"
11#include "ivpu_hw_reg_io.h"
12#include "ivpu_mmu.h"
13#include "ivpu_mmu_context.h"
14#include "ivpu_pm.h"
15
10#include "ivpu_hw_reg_io.h"
11#include "ivpu_mmu.h"
12#include "ivpu_mmu_context.h"
13#include "ivpu_pm.h"
14
15#define IVPU_MMU_REG_IDR0 0x00200000u
16#define IVPU_MMU_REG_IDR1 0x00200004u
17#define IVPU_MMU_REG_IDR3 0x0020000cu
18#define IVPU_MMU_REG_IDR5 0x00200014u
19#define IVPU_MMU_REG_CR0 0x00200020u
20#define IVPU_MMU_REG_CR0ACK 0x00200024u
21#define IVPU_MMU_REG_CR1 0x00200028u
22#define IVPU_MMU_REG_CR2 0x0020002cu
23#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
24#define IVPU_MMU_REG_IRQ_CTRLACK 0x00200054u
25
26#define IVPU_MMU_REG_GERROR 0x00200060u
27#define IVPU_MMU_REG_GERROR_CMDQ_MASK BIT_MASK(0)
28#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
29#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
30#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
31#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
32#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
33#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK BIT_MASK(7)
34
35#define IVPU_MMU_REG_GERRORN 0x00200064u
36
37#define IVPU_MMU_REG_STRTAB_BASE 0x00200080u
38#define IVPU_MMU_REG_STRTAB_BASE_CFG 0x00200088u
39#define IVPU_MMU_REG_CMDQ_BASE 0x00200090u
40#define IVPU_MMU_REG_CMDQ_PROD 0x00200098u
41#define IVPU_MMU_REG_CMDQ_CONS 0x0020009cu
42#define IVPU_MMU_REG_EVTQ_BASE 0x002000a0u
43#define IVPU_MMU_REG_EVTQ_PROD 0x002000a8u
44#define IVPU_MMU_REG_EVTQ_CONS 0x002000acu
45#define IVPU_MMU_REG_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
46#define IVPU_MMU_REG_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
47#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK GENMASK(30, 24)
48
16#define IVPU_MMU_IDR0_REF 0x080f3e0f
17#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
18#define IVPU_MMU_IDR1_REF 0x0e739d18
19#define IVPU_MMU_IDR3_REF 0x0000003c
20#define IVPU_MMU_IDR5_REF 0x00040070
21#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
22#define IVPU_MMU_IDR5_REF_FPGA 0x00800075
23

--- 114 unchanged lines hidden (view full) ---

138#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38)
139#define IVPU_MMU_CD_0_AA64 BIT(41)
140#define IVPU_MMU_CD_0_S BIT(44)
141#define IVPU_MMU_CD_0_R BIT(45)
142#define IVPU_MMU_CD_0_A BIT(46)
143#define IVPU_MMU_CD_0_ASET BIT(47)
144#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
145
49#define IVPU_MMU_IDR0_REF 0x080f3e0f
50#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
51#define IVPU_MMU_IDR1_REF 0x0e739d18
52#define IVPU_MMU_IDR3_REF 0x0000003c
53#define IVPU_MMU_IDR5_REF 0x00040070
54#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
55#define IVPU_MMU_IDR5_REF_FPGA 0x00800075
56

--- 114 unchanged lines hidden (view full) ---

171#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38)
172#define IVPU_MMU_CD_0_AA64 BIT(41)
173#define IVPU_MMU_CD_0_S BIT(44)
174#define IVPU_MMU_CD_0_R BIT(45)
175#define IVPU_MMU_CD_0_A BIT(46)
176#define IVPU_MMU_CD_0_ASET BIT(47)
177#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
178
179#define IVPU_MMU_T0SZ_48BIT 16
180#define IVPU_MMU_T0SZ_38BIT 26
181
182#define IVPU_MMU_IPS_48BIT 5
183#define IVPU_MMU_IPS_44BIT 4
184#define IVPU_MMU_IPS_42BIT 3
185#define IVPU_MMU_IPS_40BIT 2
186#define IVPU_MMU_IPS_36BIT 1
187#define IVPU_MMU_IPS_32BIT 0
188
146#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
147
148#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
149#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4)
150#define IVPU_MMU_STE_0_S1FMT_LINEAR 0
151#define IVPU_MMU_STE_DWORDS 8
152#define IVPU_MMU_STE_0_CFG_S1_TRANS 5
153#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1)

--- 17 unchanged lines hidden (view full) ---

171#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
172#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
173#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
174#define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0
175
176#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
177#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
178
189#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
190
191#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
192#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4)
193#define IVPU_MMU_STE_0_S1FMT_LINEAR 0
194#define IVPU_MMU_STE_DWORDS 8
195#define IVPU_MMU_STE_0_CFG_S1_TRANS 5
196#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1)

--- 17 unchanged lines hidden (view full) ---

214#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
215#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
216#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
217#define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0
218
219#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
220#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
221
179#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
180 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
181 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
182 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
183 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
184 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
185 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
222#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
223 (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
224 (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
225 (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
226 (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
227 (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
228 (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
186
187static char *ivpu_mmu_event_to_str(u32 cmd)
188{
189 switch (cmd) {
190 case IVPU_MMU_EVT_F_UUT:
191 return "Unsupported Upstream Transaction";
192 case IVPU_MMU_EVT_C_BAD_STREAMID:
193 return "Transaction StreamID out of range";

--- 41 unchanged lines hidden (view full) ---

235 u32 val_ref;
236 u32 val;
237
238 if (ivpu_is_simics(vdev))
239 val_ref = IVPU_MMU_IDR0_REF_SIMICS;
240 else
241 val_ref = IVPU_MMU_IDR0_REF;
242
229
230static char *ivpu_mmu_event_to_str(u32 cmd)
231{
232 switch (cmd) {
233 case IVPU_MMU_EVT_F_UUT:
234 return "Unsupported Upstream Transaction";
235 case IVPU_MMU_EVT_C_BAD_STREAMID:
236 return "Transaction StreamID out of range";

--- 41 unchanged lines hidden (view full) ---

278 u32 val_ref;
279 u32 val;
280
281 if (ivpu_is_simics(vdev))
282 val_ref = IVPU_MMU_IDR0_REF_SIMICS;
283 else
284 val_ref = IVPU_MMU_IDR0_REF;
285
243 val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
286 val = REGV_RD32(IVPU_MMU_REG_IDR0);
244 if (val != val_ref)
245 ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
246
287 if (val != val_ref)
288 ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
289
247 val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
290 val = REGV_RD32(IVPU_MMU_REG_IDR1);
248 if (val != IVPU_MMU_IDR1_REF)
249 ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
250
291 if (val != IVPU_MMU_IDR1_REF)
292 ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
293
251 val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
294 val = REGV_RD32(IVPU_MMU_REG_IDR3);
252 if (val != IVPU_MMU_IDR3_REF)
253 ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
254
255 if (ivpu_is_simics(vdev))
256 val_ref = IVPU_MMU_IDR5_REF_SIMICS;
257 else if (ivpu_is_fpga(vdev))
258 val_ref = IVPU_MMU_IDR5_REF_FPGA;
259 else
260 val_ref = IVPU_MMU_IDR5_REF;
261
295 if (val != IVPU_MMU_IDR3_REF)
296 ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
297
298 if (ivpu_is_simics(vdev))
299 val_ref = IVPU_MMU_IDR5_REF_SIMICS;
300 else if (ivpu_is_fpga(vdev))
301 val_ref = IVPU_MMU_IDR5_REF_FPGA;
302 else
303 val_ref = IVPU_MMU_IDR5_REF;
304
262 val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
305 val = REGV_RD32(IVPU_MMU_REG_IDR5);
263 if (val != val_ref)
264 ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
265}
266
267static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
268{
269 struct ivpu_mmu_info *mmu = vdev->mmu;
270 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;

--- 110 unchanged lines hidden (view full) ---

381 return ret;
382}
383
384static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
385{
386 u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
387 int ret;
388
306 if (val != val_ref)
307 ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
308}
309
310static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
311{
312 struct ivpu_mmu_info *mmu = vdev->mmu;
313 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;

--- 110 unchanged lines hidden (view full) ---

424 return ret;
425}
426
427static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
428{
429 u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
430 int ret;
431
389 ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
432 ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, 0);
390 if (ret)
391 return ret;
392
433 if (ret)
434 return ret;
435
393 return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
436 return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, irq_ctrl);
394}
395
396static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
397{
398 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
399
437}
438
439static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
440{
441 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
442
400 return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
443 return REGV_POLL(IVPU_MMU_REG_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
401 IVPU_MMU_QUEUE_TIMEOUT_US);
402}
403
404static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
405{
406 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
407 u64 *queue_buffer = q->base;
408 int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

--- 23 unchanged lines hidden (view full) ---

432 FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
433 FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
434
435 ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
436 if (ret)
437 return ret;
438
439 clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
444 IVPU_MMU_QUEUE_TIMEOUT_US);
445}
446
447static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
448{
449 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
450 u64 *queue_buffer = q->base;
451 int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

--- 23 unchanged lines hidden (view full) ---

475 FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
476 FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
477
478 ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
479 if (ret)
480 return ret;
481
482 clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
440 REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
483 REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
441
442 ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
443 if (ret)
444 ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
445
446 return ret;
447}
448

--- 31 unchanged lines hidden (view full) ---

480 mmu->cmdq.prod = 0;
481 mmu->cmdq.cons = 0;
482
483 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
484 clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
485 mmu->evtq.prod = 0;
486 mmu->evtq.cons = 0;
487
484
485 ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
486 if (ret)
487 ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
488
489 return ret;
490}
491

--- 31 unchanged lines hidden (view full) ---

523 mmu->cmdq.prod = 0;
524 mmu->cmdq.cons = 0;
525
526 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
527 clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
528 mmu->evtq.prod = 0;
529 mmu->evtq.cons = 0;
530
488 ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
531 ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, 0);
489 if (ret)
490 return ret;
491
492 val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
493 FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
494 FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
495 FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
496 FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
497 FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
532 if (ret)
533 return ret;
534
535 val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
536 FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
537 FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
538 FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
539 FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
540 FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
498 REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
541 REGV_WR32(IVPU_MMU_REG_CR1, val);
499
542
500 REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
501 REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
543 REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
544 REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
502
545
503 REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
504 REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
505 REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
546 REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
547 REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
548 REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
506
507 val = IVPU_MMU_CR0_CMDQEN;
549
550 val = IVPU_MMU_CR0_CMDQEN;
508 ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
551 ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
509 if (ret)
510 return ret;
511
512 ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
513 if (ret)
514 return ret;
515
516 ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
517 if (ret)
518 return ret;
519
520 ret = ivpu_mmu_cmdq_sync(vdev);
521 if (ret)
522 return ret;
523
552 if (ret)
553 return ret;
554
555 ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
556 if (ret)
557 return ret;
558
559 ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
560 if (ret)
561 return ret;
562
563 ret = ivpu_mmu_cmdq_sync(vdev);
564 if (ret)
565 return ret;
566
524 REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
525 REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
526 REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
567 REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
568 REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
569 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
527
528 val |= IVPU_MMU_CR0_EVTQEN;
570
571 val |= IVPU_MMU_CR0_EVTQEN;
529 ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
572 ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
530 if (ret)
531 return ret;
532
533 val |= IVPU_MMU_CR0_ATSCHK;
573 if (ret)
574 return ret;
575
576 val |= IVPU_MMU_CR0_ATSCHK;
534 ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
577 ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
535 if (ret)
536 return ret;
537
538 ret = ivpu_mmu_irqs_setup(vdev);
539 if (ret)
540 return ret;
541
542 val |= IVPU_MMU_CR0_SMMUEN;
578 if (ret)
579 return ret;
580
581 ret = ivpu_mmu_irqs_setup(vdev);
582 if (ret)
583 return ret;
584
585 val |= IVPU_MMU_CR0_SMMUEN;
543 return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
586 return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
544}
545
546static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
547{
548 struct ivpu_mmu_info *mmu = vdev->mmu;
549 struct ivpu_mmu_strtab *strtab = &mmu->strtab;
550 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
551 u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);

--- 60 unchanged lines hidden (view full) ---

612 int ret = 0;
613
614 if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
615 return -EINVAL;
616
617 entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
618
619 if (cd_dma != 0) {
587}
588
589static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
590{
591 struct ivpu_mmu_info *mmu = vdev->mmu;
592 struct ivpu_mmu_strtab *strtab = &mmu->strtab;
593 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
594 u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);

--- 60 unchanged lines hidden (view full) ---

655 int ret = 0;
656
657 if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
658 return -EINVAL;
659
660 entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
661
662 if (cd_dma != 0) {
620 cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
663 cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
621 FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
622 FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
623 FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
624 FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
664 FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
665 FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
666 FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
667 FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
625 FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
668 FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
626 FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
627 IVPU_MMU_CD_0_TCR_EPD1 |
628 IVPU_MMU_CD_0_AA64 |
629 IVPU_MMU_CD_0_R |
630 IVPU_MMU_CD_0_ASET |
631 IVPU_MMU_CD_0_V;
632 cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
633 cd[2] = 0;

--- 152 unchanged lines hidden (view full) ---

786}
787
788static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
789{
790 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
791 u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
792 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
793
669 FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
670 IVPU_MMU_CD_0_TCR_EPD1 |
671 IVPU_MMU_CD_0_AA64 |
672 IVPU_MMU_CD_0_R |
673 IVPU_MMU_CD_0_ASET |
674 IVPU_MMU_CD_0_V;
675 cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
676 cd[2] = 0;

--- 152 unchanged lines hidden (view full) ---

829}
830
831static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
832{
833 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
834 u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
835 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
836
794 evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
837 evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
795 if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
796 return NULL;
797
798 clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
799
800 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
838 if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
839 return NULL;
840
841 clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
842
843 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
801 REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
844 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
802
803 return evt;
804}
805
806void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
807{
808 bool schedule_recovery = false;
809 u32 *event;

--- 16 unchanged lines hidden (view full) ---

826}
827
828void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
829{
830 u32 gerror_val, gerrorn_val, active;
831
832 ivpu_dbg(vdev, IRQ, "MMU error\n");
833
845
846 return evt;
847}
848
849void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
850{
851 bool schedule_recovery = false;
852 u32 *event;

--- 16 unchanged lines hidden (view full) ---

869}
870
871void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
872{
873 u32 gerror_val, gerrorn_val, active;
874
875 ivpu_dbg(vdev, IRQ, "MMU error\n");
876
834 gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
835 gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
877 gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
878 gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);
836
837 active = gerror_val ^ gerrorn_val;
838 if (!(active & IVPU_MMU_GERROR_ERR_MASK))
839 return;
840
879
880 active = gerror_val ^ gerrorn_val;
881 if (!(active & IVPU_MMU_GERROR_ERR_MASK))
882 return;
883
841 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
884 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
842 ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
843
885 ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
886
844 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
887 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
845 ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
846
888 ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
889
847 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
890 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
848 ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
849
891 ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
892
850 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
893 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
851 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
852
894 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
895
853 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
896 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
854 ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
855
897 ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
898
856 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
899 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
857 ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
858
900 ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
901
859 if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
902 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
860 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
861
903 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
904
862 REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
905 REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
863}
864
865int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
866{
867 return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
868}
869
/*
 * Detach a context's page tables from the MMU for @ssid.
 *
 * Passing a zero DMA address to ivpu_mmu_cd_add_user() clears the context
 * descriptor entry (see the helper's handling of cd_dma == 0). The return
 * value is intentionally ignored: this is a best-effort teardown path.
 */
void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}
906}
907
908int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
909{
910 return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
911}
912
/*
 * Detach a context's page tables from the MMU for @ssid.
 *
 * Passing a zero DMA address to ivpu_mmu_cd_add_user() clears the context
 * descriptor entry (see the helper's handling of cd_dma == 0). The return
 * value is intentionally ignored: this is a best-effort teardown path.
 */
void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}