Lines matching defs:vr
402 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
409 xe_assert(vr->xe, is_device_private_page(page));
410 xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
412 offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
413 dpa = vr->dpa_base + offset;
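
Matches 402-413 together outline the translation from a device-private page to its device physical address (dpa): assert the page really is device-private and above the region's host physical base, then rebase the offset onto the region's dpa base. A plausible reconstruction of the whole helper; the page_to_pfn() derivation and the return statement are assumptions, since the matches only show the asserts and the arithmetic:

static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
				      struct page *page)
{
	u64 pfn = page_to_pfn(page);	/* assumed: pfn source not in the matches */
	u64 offset;
	u64 dpa;

	xe_assert(vr->xe, is_device_private_page(page));
	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);

	/* Host physical offset within the region maps 1:1 to a dpa offset. */
	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
	dpa = vr->dpa_base + offset;

	return dpa;
}
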
481 struct xe_vram_region *vr = NULL;
515 if (!vr && spage) {
516 vr = page_to_vr(spage);
517 gt = xe_migrate_exec_queue(vr->migrate)->gt;
518 xe = vr->xe;
520 XE_WARN_ON(spage && page_to_vr(spage) != vr);
528 __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
536 xe_assert(vr->xe, match);
565 __fence = xe_migrate_from_vram(vr->migrate,
574 __fence = xe_migrate_to_vram(vr->migrate,
604 __fence = xe_migrate_from_vram(vr->migrate, 1,
611 __fence = xe_migrate_to_vram(vr->migrate, 1,
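
Matches 481-611 come from the copy path. Below is a condensed, illustrative sketch of its shape, assuming a flat page/dma_addr array and a boolean direction; only the identifiers that appear in the matches are the driver's own, and error handling, fence waiting, and the chunk-coalescing check behind the `match` assert at 536 are omitted:

static void svm_copy_sketch(struct page **pages, dma_addr_t *dma_addr,
			    unsigned long npages, bool to_vram)
{
	struct xe_vram_region *vr = NULL;
	struct xe_gt *gt __maybe_unused;
	struct xe_device *xe __maybe_unused;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *spage = pages[i];
		struct dma_fence *__fence;
		u64 __vram_addr;

		/* Bind the VRAM region lazily from the first device page. */
		if (!vr && spage) {
			vr = page_to_vr(spage);
			gt = xe_migrate_exec_queue(vr->migrate)->gt;
			xe = vr->xe;
		}
		/* Every page in one copy must belong to the same region. */
		XE_WARN_ON(spage && page_to_vr(spage) != vr);

		if (!spage)
			continue;

		__vram_addr = xe_vram_region_page_to_dpa(vr, spage);

		/* Direction selects the migrate entry point (1-page chunks
		 * shown, matching 604/611; the driver also batches larger
		 * contiguous runs, per 565/574). */
		__fence = to_vram ?
			xe_migrate_to_vram(vr->migrate, 1, &dma_addr[i],
					   __vram_addr) :
			xe_migrate_from_vram(vr->migrate, 1, __vram_addr,
					     &dma_addr[i]);
		/* The driver waits on these fences; dropped here for brevity. */
		if (!IS_ERR(__fence))
			dma_fence_put(__fence);
	}
}
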
674 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
676 return PHYS_PFN(offset + vr->hpa_base);
694 struct xe_vram_region *vr = block->private;
695 struct drm_buddy *buddy = vram_to_buddy(vr);
696 u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
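
Matches 674-696 show the reverse mapping: a drm_buddy block's offset is turned back into host pfns via the region's hpa_base. A short sketch of the expansion, assuming the caller supplies the pfn output array and a running index (both hypothetical):

static void block_to_pfns_sketch(struct drm_buddy_block *block,
				 unsigned long *pfn, unsigned long *j)
{
	struct xe_vram_region *vr = block->private;
	struct drm_buddy *buddy = vram_to_buddy(vr);
	u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
	u64 i;

	/* One pfn per PAGE_SIZE slice of the buddy block. */
	for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
		pfn[(*j)++] = block_pfn + i;
}
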
863 struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
864 struct xe_device *xe = vr->xe;
881 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
895 block->private = vr;
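
Matches 863-895 are from the allocation side: the region is recovered from its embedded drm_pagemap via container_of(), a backing BO is created with the flags shown at 881, and each buddy block is tagged with the region (895) so the pfn lookup above can use block->private. A sketch under those assumptions; the BO-creation call and the block-list access are guesses, not taken from the matches:

static struct xe_bo *devmem_alloc_sketch(struct drm_pagemap *dpagemap,
					 unsigned long size)
{
	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
	struct xe_device *xe = vr->xe;
	struct drm_buddy_block *block;
	struct list_head *blocks;
	struct xe_bo *bo;

	/* Hypothetical creation call; the match at 881 shows further flags
	 * OR'ed in, elided here. */
	bo = xe_bo_create_locked(xe, NULL, NULL, size, ttm_bo_type_device,
				 IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) :
				 XE_BO_FLAG_SYSTEM);
	if (IS_ERR(bo))
		return bo;

	/* Tag every buddy block so block_offset_to_pfn() can reach the region. */
	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
	list_for_each_entry(block, blocks, link)
		block->private = vr;

	return bo;
}
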
1462 * @vr: vram memory region to remap
1469 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1478 vr->usable_size);
1484 vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1485 vr->pagemap.range.start = res->start;
1486 vr->pagemap.range.end = res->end;
1487 vr->pagemap.nr_range = 1;
1488 vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1489 vr->pagemap.owner = xe_svm_devm_owner(xe);
1490 addr = devm_memremap_pages(dev, &vr->pagemap);
1492 vr->dpagemap.dev = dev;
1493 vr->dpagemap.ops = &xe_drm_pagemap_ops;
1502 vr->hpa_base = res->start;
1505 tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
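
Matches 1462-1505 cover most of xe_devm_add(): reserve a free host physical range, describe it as a MEMORY_DEVICE_PRIVATE pagemap, remap it with devm_memremap_pages(), and record hpa_base so the dpa translation above works. A reconstruction; the resource request, the error paths, and the exact log text are assumptions filled in around the matched lines:

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct device *dev = xe->drm.dev;
	struct resource *res;
	void *addr;

	/* Reserve a host physical range to back the device-private pages. */
	res = devm_request_free_mem_region(dev, &iomem_resource,
					   vr->usable_size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
	vr->pagemap.range.start = res->start;
	vr->pagemap.range.end = res->end;
	vr->pagemap.nr_range = 1;
	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
	vr->pagemap.owner = xe_svm_devm_owner(xe);
	addr = devm_memremap_pages(dev, &vr->pagemap);

	vr->dpagemap.dev = dev;
	vr->dpagemap.ops = &xe_drm_pagemap_ops;

	if (IS_ERR(addr)) {
		devm_release_mem_region(dev, res->start, resource_size(res));
		return PTR_ERR(addr);
	}
	vr->hpa_base = res->start;

	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
	return 0;
}
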
1516 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
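
The second definition at 1516 is presumably the stub compiled when device-private pagemap support is configured out; the config guard is not visible in the matches, so this shape is an assumption:

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;	/* no-op when device pages are not available */
}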