--- ivpu_mmu.c (a1c613ae4c322ddd58d5a8539dbfba2a0380a8c0)
+++ ivpu_mmu.c (e013aa9ab01b400cccc6c3e8b969b8e7f10bc6cb)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2020-2023 Intel Corporation
  */
 
 #include <linux/circ_buf.h>
 #include <linux/highmem.h>
 
[... 4 unchanged lines hidden ...]
 
 #include "ivpu_pm.h"
 
 #define IVPU_MMU_REG_IDR0                      0x00200000u
 #define IVPU_MMU_REG_IDR1                      0x00200004u
 #define IVPU_MMU_REG_IDR3                      0x0020000cu
 #define IVPU_MMU_REG_IDR5                      0x00200014u
 #define IVPU_MMU_REG_CR0                       0x00200020u
 #define IVPU_MMU_REG_CR0ACK                    0x00200024u
+#define IVPU_MMU_REG_CR0ACK_VAL_MASK           GENMASK(31, 0)
 #define IVPU_MMU_REG_CR1                       0x00200028u
 #define IVPU_MMU_REG_CR2                       0x0020002cu
 #define IVPU_MMU_REG_IRQ_CTRL                  0x00200050u
 #define IVPU_MMU_REG_IRQ_CTRLACK               0x00200054u
+#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK      GENMASK(31, 0)
 
 #define IVPU_MMU_REG_GERROR                    0x00200060u
 #define IVPU_MMU_REG_GERROR_CMDQ_MASK          BIT_MASK(0)
 #define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK      BIT_MASK(2)
 #define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK      BIT_MASK(3)
 #define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK  BIT_MASK(4)
 #define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK  BIT_MASK(5)
 #define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK  BIT_MASK(6)
 #define IVPU_MMU_REG_GERROR_MSI_ABT_MASK       BIT_MASK(7)
 
 #define IVPU_MMU_REG_GERRORN                   0x00200064u
 
 #define IVPU_MMU_REG_STRTAB_BASE               0x00200080u
 #define IVPU_MMU_REG_STRTAB_BASE_CFG           0x00200088u
 #define IVPU_MMU_REG_CMDQ_BASE                 0x00200090u
 #define IVPU_MMU_REG_CMDQ_PROD                 0x00200098u
 #define IVPU_MMU_REG_CMDQ_CONS                 0x0020009cu
+#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK        GENMASK(23, 0)
+#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK        GENMASK(30, 24)
 #define IVPU_MMU_REG_EVTQ_BASE                 0x002000a0u
 #define IVPU_MMU_REG_EVTQ_PROD                 0x002000a8u
 #define IVPU_MMU_REG_EVTQ_CONS                 0x002000acu
 #define IVPU_MMU_REG_EVTQ_PROD_SEC             (0x002000a8u + SZ_64K)
 #define IVPU_MMU_REG_EVTQ_CONS_SEC             (0x002000acu + SZ_64K)
-#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK        GENMASK(30, 24)
 
 #define IVPU_MMU_IDR0_REF                      0x080f3e0f
 #define IVPU_MMU_IDR0_REF_SIMICS               0x080f3e1f
 #define IVPU_MMU_IDR1_REF                      0x0e739d18
 #define IVPU_MMU_IDR3_REF                      0x0000003c
 #define IVPU_MMU_IDR5_REF                      0x00040070
 #define IVPU_MMU_IDR5_REF_SIMICS               0x00000075
 #define IVPU_MMU_IDR5_REF_FPGA                 0x00800075
[... 166 unchanged lines hidden ...]
 #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
                                   (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
 
+#define IVPU_MMU_CERROR_NONE                   0x0
+#define IVPU_MMU_CERROR_ILL                    0x1
+#define IVPU_MMU_CERROR_ABT                    0x2
+#define IVPU_MMU_CERROR_ATC_INV_SYNC           0x3
+
-static char *ivpu_mmu_event_to_str(u32 cmd)
+static const char *ivpu_mmu_event_to_str(u32 cmd)
 {
         switch (cmd) {
         case IVPU_MMU_EVT_F_UUT:
                 return "Unsupported Upstream Transaction";
         case IVPU_MMU_EVT_C_BAD_STREAMID:
                 return "Transaction StreamID out of range";
         case IVPU_MMU_EVT_F_STE_FETCH:
                 return "Fetch of STE caused external abort";
[... 29 unchanged lines hidden ...]
                 return "Page request hint from a client device";
         case IVPU_MMU_EVT_F_VMS_FETCH:
                 return "Fetch of VMS caused external abort";
         default:
                 return "Unknown CMDQ command";
         }
 }
 
+static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
+{
+        switch (err) {
+        case IVPU_MMU_CERROR_NONE:
+                return "No CMDQ Error";
+        case IVPU_MMU_CERROR_ILL:
+                return "Illegal command";
+        case IVPU_MMU_CERROR_ABT:
+                return "External abort on CMDQ read";
+        case IVPU_MMU_CERROR_ATC_INV_SYNC:
+                return "Sync failed to complete ATS invalidation";
+        default:
+                return "Unknown CMDQ Error";
+        }
+}
+
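The IVPU_MMU_CERROR_* values added above are the CMDQ error codes reported in the ERR field (bits 30:24) of the CMDQ_CONS register, described by the new IVPU_MMU_REG_CMDQ_CONS_ERR_MASK. Later in this diff the driver extracts that field with its REG_GET_FLD() helper, which is defined in a header not shown here. A minimal standalone sketch of the same decode, using the generic FIELD_GET() from <linux/bitfield.h>; the example_ names are illustrative and not part of the driver:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: same layout as IVPU_MMU_REG_CMDQ_CONS_ERR_MASK above. */
#define EXAMPLE_CMDQ_CONS_ERR_MASK	GENMASK(30, 24)

/* Extract the CMDQ error code from a raw CMDQ_CONS register value. */
static inline u32 example_cmdq_cons_err(u32 cmdq_cons)
{
	return FIELD_GET(EXAMPLE_CMDQ_CONS_ERR_MASK, cmdq_cons);
}

In the timeout path further down, the raw CMDQ_CONS value is read back, the ERR field is decoded this way, and the resulting name from ivpu_mmu_cmdq_err_to_str() is included in the error message.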
 static void ivpu_mmu_config_check(struct ivpu_device *vdev)
 {
         u32 val_ref;
         u32 val;
 
         if (ivpu_is_simics(vdev))
                 val_ref = IVPU_MMU_IDR0_REF_SIMICS;
         else
[... 120 unchanged lines hidden ...]
 
         ret = ivpu_mmu_evtq_alloc(vdev);
         if (ret)
                 ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);
 
         return ret;
 }
 
-static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
+static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
 {
-        u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
-        u32 val_ack;
-        int ret;
+        REGV_WR32(IVPU_MMU_REG_CR0, val);
 
-        REGV_WR32(reg, val);
+        return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
 
-        ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
-        if (ret)
-                ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
+static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
+{
+        REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);
 
-        return ret;
+        return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
 }
 
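Both replacement helpers follow the same write-then-acknowledge pattern: write the control register, then poll the VAL field of the matching *ACK register until it reflects the value just written. REGV_WR32() and REGV_POLL_FLD() are driver macros defined outside this file; the following is a rough standalone sketch of the idea, assuming plain MMIO accessors and read_poll_timeout() from <linux/iopoll.h>, with illustrative names that are not the driver's:

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/*
 * Illustrative only: write @val to the register at offset @reg and wait until
 * the acknowledge register at offset @reg_ack reads back the same value, or
 * return -ETIMEDOUT after @timeout_us microseconds.
 */
static int example_write_and_wait_ack(void __iomem *base, u32 reg, u32 reg_ack,
				      u32 val, u32 timeout_us)
{
	u32 ack;

	writel(val, base + reg);

	return read_poll_timeout(readl, ack, ack == val, 10, timeout_us, false,
				 base + reg_ack);
}

Splitting the old generic ivpu_mmu_reg_write() into per-register helpers also drops its assumption that the ACK register always sits 4 bytes after the base register (the removed reg_ack = reg + 4 line above).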
 static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
 {
         u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
         int ret;
 
-        ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, 0);
+        ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
         if (ret)
                 return ret;
 
-        return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, irq_ctrl);
+        return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
 }
 
 static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
 {
         struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+        int ret;
 
-        return REGV_POLL(IVPU_MMU_REG_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
-                         IVPU_MMU_QUEUE_TIMEOUT_US);
+        ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
+                            IVPU_MMU_QUEUE_TIMEOUT_US);
+        if (ret)
+                return ret;
+
+        cmdq->cons = cmdq->prod;
+
+        return 0;
 }
 
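The reworked wait no longer compares the full CMDQ_CONS register against the cached consumer index; it polls only the VAL field (bits 23:0, per IVPU_MMU_REG_CMDQ_CONS_VAL_MASK above) until it catches up with the cached producer index, and updates cmdq->cons only after the poll succeeds. A rough equivalent without the REGV_POLL_FLD() macro, again with illustrative names and plain MMIO accessors rather than the driver's helpers:

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Illustrative only: same layout as IVPU_MMU_REG_CMDQ_CONS_VAL_MASK above. */
#define EXAMPLE_CMDQ_CONS_VAL_MASK	GENMASK(23, 0)

/*
 * Wait until the hardware consumer index reported in CMDQ_CONS.VAL reaches
 * @prod, or return -ETIMEDOUT after @timeout_us microseconds.
 */
static int example_cmdq_wait_for_cons(void __iomem *cmdq_cons_reg, u32 prod,
				      u32 timeout_us)
{
	u32 reg;

	return read_poll_timeout(readl, reg,
				 FIELD_GET(EXAMPLE_CMDQ_CONS_VAL_MASK, reg) == prod,
				 10, timeout_us, false, cmdq_cons_reg);
}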
 static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
 {
         struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
         u64 *queue_buffer = q->base;
         int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
 
[... 25 unchanged lines hidden ...]
         ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
         if (ret)
                 return ret;
 
         clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
         REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
 
         ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
-        if (ret)
-                ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
+        if (ret) {
+                u32 err;
 
+                val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
+                err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);
+
+                ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
+                         ivpu_mmu_cmdq_err_to_str(err));
+        }
+
         return ret;
 }
 
 static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
 {
         u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
         u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);
 
[... 26 unchanged lines hidden ...]
         mmu->cmdq.prod = 0;
         mmu->cmdq.cons = 0;
 
         memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
         clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
         mmu->evtq.prod = 0;
         mmu->evtq.cons = 0;
 
-        ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, 0);
+        ret = ivpu_mmu_reg_write_cr0(vdev, 0);
         if (ret)
                 return ret;
 
         val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
               FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
               FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
         REGV_WR32(IVPU_MMU_REG_CR1, val);
 
         REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
         REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
 
         REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
         REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
         REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
 
         val = IVPU_MMU_CR0_CMDQEN;
-        ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;
 
         ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
         if (ret)
                 return ret;
 
         ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
[... 4 unchanged lines hidden ...]
         if (ret)
                 return ret;
 
         REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
         REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
         REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
 
         val |= IVPU_MMU_CR0_EVTQEN;
-        ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;
 
         val |= IVPU_MMU_CR0_ATSCHK;
-        ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
+        ret = ivpu_mmu_reg_write_cr0(vdev, val);
         if (ret)
                 return ret;
 
         ret = ivpu_mmu_irqs_setup(vdev);
         if (ret)
                 return ret;
 
         val |= IVPU_MMU_CR0_SMMUEN;
-        return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
+        return ivpu_mmu_reg_write_cr0(vdev, val);
 }
 
 static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
 {
         struct ivpu_mmu_info *mmu = vdev->mmu;
         struct ivpu_mmu_strtab *strtab = &mmu->strtab;
         struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
         u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
[... 322 unchanged lines hidden ...]