--- ivpu_mmu.c (a940daa52167e9db8ecce82213813b735a9d9f23)
+++ ivpu_mmu.c (74ce0f3873821f12391bcf5469d81583d34f4c6c)
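
Between these two revisions, ivpu_mmu.c stops including the generation-specific ivpu_hw_37xx_reg.h header and instead carries file-local IVPU_MMU_REG_* offset and field definitions for the VPU's MMU (an SMMUv3-style block); the generic ivpu_mmu_reg_write() helper is split into dedicated ivpu_mmu_reg_write_cr0() and ivpu_mmu_reg_write_irq_ctrl() helpers built on REGV_POLL_FLD(), and ivpu_mmu_cmdq_wait_for_cons() is reworked to poll the CONS register's VAL field. A unified view of the change follows; "--- N unchanged lines hidden ---" marks elided context.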
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2020-2023 Intel Corporation
  */

 #include <linux/circ_buf.h>
 #include <linux/highmem.h>

 #include "ivpu_drv.h"
-#include "ivpu_hw_37xx_reg.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 #include "ivpu_pm.h"

+#define IVPU_MMU_REG_IDR0 0x00200000u
+#define IVPU_MMU_REG_IDR1 0x00200004u
+#define IVPU_MMU_REG_IDR3 0x0020000cu
+#define IVPU_MMU_REG_IDR5 0x00200014u
+#define IVPU_MMU_REG_CR0 0x00200020u
+#define IVPU_MMU_REG_CR0ACK 0x00200024u
+#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR1 0x00200028u
+#define IVPU_MMU_REG_CR2 0x0020002cu
+#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
+#define IVPU_MMU_REG_IRQ_CTRLACK 0x00200054u
+#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK GENMASK(31, 0)
+
+#define IVPU_MMU_REG_GERROR 0x00200060u
+#define IVPU_MMU_REG_GERROR_CMDQ_MASK BIT_MASK(0)
+#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
+#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
+#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK BIT_MASK(7)
+
+#define IVPU_MMU_REG_GERRORN 0x00200064u
+
+#define IVPU_MMU_REG_STRTAB_BASE 0x00200080u
+#define IVPU_MMU_REG_STRTAB_BASE_CFG 0x00200088u
+#define IVPU_MMU_REG_CMDQ_BASE 0x00200090u
+#define IVPU_MMU_REG_CMDQ_PROD 0x00200098u
+#define IVPU_MMU_REG_CMDQ_CONS 0x0020009cu
+#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK GENMASK(23, 0)
+#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK GENMASK(30, 24)
+#define IVPU_MMU_REG_EVTQ_BASE 0x002000a0u
+#define IVPU_MMU_REG_EVTQ_PROD 0x002000a8u
+#define IVPU_MMU_REG_EVTQ_CONS 0x002000acu
+#define IVPU_MMU_REG_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
+#define IVPU_MMU_REG_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
+
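
The new offsets address the MMU block at 0x00200000 in the VPU's register space directly, so this file no longer depends on the 37xx-specific register header; the *_VAL_MASK and GERROR_*_MASK field definitions feed the REG_FLD()/REGV_POLL_FLD() helpers used further down.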
 #define IVPU_MMU_IDR0_REF 0x080f3e0f
 #define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
 #define IVPU_MMU_IDR1_REF 0x0e739d18
 #define IVPU_MMU_IDR3_REF 0x0000003c
 #define IVPU_MMU_IDR5_REF 0x00040070
 #define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
 #define IVPU_MMU_IDR5_REF_FPGA 0x00800075

--- 157 unchanged lines hidden ---

 #define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
 #define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
 #define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
 #define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0

 #define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
 #define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)

-#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
-                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
+                                  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
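
The global-error mask is unchanged in substance; it is simply rebuilt from the file-local IVPU_MMU_REG_GERROR field definitions instead of the 37xx header's register fields.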

 static char *ivpu_mmu_event_to_str(u32 cmd)
 {
         switch (cmd) {
         case IVPU_MMU_EVT_F_UUT:
                 return "Unsupported Upstream Transaction";
         case IVPU_MMU_EVT_C_BAD_STREAMID:
                 return "Transaction StreamID out of range";

--- 41 unchanged lines hidden ---

         u32 val_ref;
         u32 val;

         if (ivpu_is_simics(vdev))
                 val_ref = IVPU_MMU_IDR0_REF_SIMICS;
         else
                 val_ref = IVPU_MMU_IDR0_REF;

-        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
+        val = REGV_RD32(IVPU_MMU_REG_IDR0);
         if (val != val_ref)
                 ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

-        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
+        val = REGV_RD32(IVPU_MMU_REG_IDR1);
         if (val != IVPU_MMU_IDR1_REF)
                 ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

-        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
+        val = REGV_RD32(IVPU_MMU_REG_IDR3);
         if (val != IVPU_MMU_IDR3_REF)
                 ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

         if (ivpu_is_simics(vdev))
                 val_ref = IVPU_MMU_IDR5_REF_SIMICS;
         else if (ivpu_is_fpga(vdev))
                 val_ref = IVPU_MMU_IDR5_REF_FPGA;
         else
                 val_ref = IVPU_MMU_IDR5_REF;

-        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
+        val = REGV_RD32(IVPU_MMU_REG_IDR5);
         if (val != val_ref)
                 ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
 }

 static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
 {
         struct ivpu_mmu_info *mmu = vdev->mmu;
         struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;

--- 90 unchanged lines hidden ---

         ret = ivpu_mmu_evtq_alloc(vdev);
         if (ret)
                 ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

         return ret;
 }

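These reads sanity-check the SMMU identification registers (IDR0/1/3/5) against the per-platform reference values defined at the top of the file; a mismatch is reported only through ivpu_dbg(), presumably because the Simics and FPGA models legitimately advertise different capabilities than silicon.
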
-static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
+static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
 {
-        u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
-        u32 val_ack;
-        int ret;
-
-        REGV_WR32(reg, val);
+        REGV_WR32(IVPU_MMU_REG_CR0, val);

-        ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
-        if (ret)
-                ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
-
-        return ret;
+        return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
+
+static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
+{
+        REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);
+
+        return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
 }

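The old helper assumed every ACK register sits 4 bytes after its base register and polled it with REGV_POLL(); the new per-register helpers name the CR0/CR0ACK and IRQ_CTRL/IRQ_CTRLACK pairs explicitly and use REGV_POLL_FLD() to wait until the ACK register's VAL field reflects the written value. A minimal, self-contained sketch of this write-then-poll-ack pattern; smmu_write_acked() and its parameters are illustrative stand-ins, not driver API:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Write a control value, then wait for the hardware to mirror it in the
 * matching ACK register - the same handshake the helpers above perform
 * through the REGV_* macros. */
static int smmu_write_acked(void __iomem *reg, void __iomem *ack_reg,
                            u32 val, unsigned int timeout_us)
{
        writel(val, reg);                       /* request the new value */

        while (timeout_us--) {
                if (readl(ack_reg) == val)
                        return 0;               /* update acknowledged */
                udelay(1);
        }

        return -ETIMEDOUT;
}
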
 static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
 {
         u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
         int ret;

-        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
+        ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
         if (ret)
                 return ret;

-        return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
+        return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
 }

 static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
 {
         struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+        int ret;

-        return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
-                         IVPU_MMU_QUEUE_TIMEOUT_US);
+        ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
+                            IVPU_MMU_QUEUE_TIMEOUT_US);
+        if (ret)
+                return ret;
+
+        cmdq->cons = cmdq->prod;
+
+        return 0;
 }
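
This is a behavioural change, not just a rename: instead of reading the hardware consumer index back into cmdq->cons and comparing it against prod inside the poll macro, the new version polls the CONS register's VAL field (GENMASK(23, 0), excluding the error bits in 30:24) until it equals the locally tracked prod, and only then advances the cached cmdq->cons.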

 static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
 {
         struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
         u64 *queue_buffer = q->base;
         int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

--- 22 unchanged lines hidden ---

               FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
               FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);

         ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
         if (ret)
                 return ret;

         clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
-        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
+        REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

         ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
         if (ret)
                 ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);

         return ret;
 }
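
This is the command-queue handshake: build a CMD_SYNC at the producer slot, flush the cache lines backing the queue (the explicit clflush_cache_range() suggests the MMU reads the queue memory non-coherently), publish the new producer index via CMDQ_PROD, then wait for the hardware consumer to catch up.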

--- 31 unchanged lines hidden ---

         mmu->cmdq.prod = 0;
         mmu->cmdq.cons = 0;

         memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
         clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
         mmu->evtq.prod = 0;
         mmu->evtq.cons = 0;

-        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
+        ret = ivpu_mmu_reg_write_cr0(vdev, 0);
         if (ret)
                 return ret;

         val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
               FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
-        REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
+        REGV_WR32(IVPU_MMU_REG_CR1, val);

-        REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
-        REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+        REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
+        REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

-        REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
-        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
-        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
+        REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
+        REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
+        REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);

         val = IVPU_MMU_CR0_CMDQEN;
-        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;

         ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
         if (ret)
                 return ret;

         ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
         if (ret)
                 return ret;

         ret = ivpu_mmu_cmdq_sync(vdev);
         if (ret)
                 return ret;

-        REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
-        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
-        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
+        REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
+        REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
+        REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);

         val |= IVPU_MMU_CR0_EVTQEN;
-        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;

         val |= IVPU_MMU_CR0_ATSCHK;
-        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;

         ret = ivpu_mmu_irqs_setup(vdev);
         if (ret)
                 return ret;

         val |= IVPU_MMU_CR0_SMMUEN;
-        return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+        return ivpu_mmu_reg_write_cr0(vdev, val);
 }
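
Note the staged bring-up, unchanged in structure: every CR0 update (CMDQEN first, then EVTQEN, ATSCHK and finally SMMUEN) now goes through ivpu_mmu_reg_write_cr0(), so each enable bit is acknowledged via CR0ACK before the next stage proceeds.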

 static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
 {
         struct ivpu_mmu_info *mmu = vdev->mmu;
         struct ivpu_mmu_strtab *strtab = &mmu->strtab;
         struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
         u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);

--- 234 unchanged lines hidden ---

 }

 static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
 {
         struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
         u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
         u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

-        evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
+        evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
         if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
                 return NULL;

         clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);

         evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
-        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+        REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);

         return evt;
 }
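
The event-queue counters appear to carry a wrap bit above the index bits, which would explain why the code strips them with IVPU_MMU_Q_IDX() before calling CIRC_CNT() yet advances cons under the wider IVPU_MMU_Q_WRAP_MASK. A rough sketch of that gating logic, with made-up QUEUE_* constants standing in for the driver's values:

#include <linux/circ_buf.h>
#include <linux/types.h>

#define QUEUE_COUNT     16U                       /* entries, power of two */
#define QUEUE_IDX_MASK  (QUEUE_COUNT - 1)         /* low bits: entry index */
#define QUEUE_WRAP_MASK ((QUEUE_COUNT << 1) - 1)  /* index bits + wrap bit */

/* True when the producer has published entries the consumer has not seen. */
static bool queue_has_event(u32 prod, u32 cons)
{
        return CIRC_CNT(prod & QUEUE_IDX_MASK, cons & QUEUE_IDX_MASK,
                        QUEUE_COUNT) != 0;
}

/* Consume one entry, preserving the wrap bit in the new consumer value. */
static u32 queue_advance_cons(u32 cons)
{
        return (cons + 1) & QUEUE_WRAP_MASK;
}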

 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
         bool schedule_recovery = false;
         u32 *event;

--- 16 unchanged lines hidden ---

 }

 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
 {
         u32 gerror_val, gerrorn_val, active;

         ivpu_dbg(vdev, IRQ, "MMU error\n");

-        gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
-        gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
+        gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
+        gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);

         active = gerror_val ^ gerrorn_val;
         if (!(active & IVPU_MMU_GERROR_ERR_MASK))
                 return;

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
                 ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
                 ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
                 ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
                 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
                 ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
                 ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

-        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
+        if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
                 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

-        REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
+        REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
 }
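
In the SMMUv3-style error scheme this handler follows, a GERROR bit that differs from its counterpart in GERRORN marks an active, unacknowledged error (hence active = gerror_val ^ gerrorn_val), and writing the observed GERROR value back to GERRORN acknowledges everything reported so far.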

 int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
 {
         return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
 }

 void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
 {
         ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
 }