// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dsb_buffer.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"

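/* Return the GGTT address of the buffer's backing object. */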
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
	return xe_bo_ggtt_addr(dsb_buf->vma->bo);
}

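/*
 * Write one dword of the command buffer at dword index @idx through the
 * BO's CPU mapping, then flush the device L2 cache afterwards.
 */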
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;

	iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
	xe_device_l2_flush(xe);
}

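/* Read back one dword of the command buffer at dword index @idx. */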
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
	return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32);
}

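/*
 * Fill @size bytes of the command buffer, starting at dword index @idx, with
 * @val; warn if the range would run past the end of the buffer, and flush the
 * device L2 cache afterwards.
 */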
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;

	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));

	iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
	xe_device_l2_flush(xe);
}

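/*
 * Allocate the DSB command buffer: a pinned, GGTT-mapped BO with a CPU
 * mapping, placed in VRAM on discrete GPUs. The BO is wrapped in a
 * kzalloc'ed i915_vma, of which only the bo field is populated here.
 */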
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
{
	struct xe_device *xe = to_xe_device(crtc->base.dev);
	struct xe_bo *obj;
	struct i915_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return false;

	/* Set scanout flag for WC mapping */
	obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
				   NULL, PAGE_ALIGN(size),
				   ttm_bo_type_kernel,
				   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
				   XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
	if (IS_ERR(obj)) {
		kfree(vma);
		return false;
	}

	vma->bo = obj;
	dsb_buf->vma = vma;
	dsb_buf->buf_size = size;

	return true;
}

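/* Unpin and unmap the backing object and free the wrapper i915_vma. */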
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
	xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);
	kfree(dsb_buf->vma);
}

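/* Ensure earlier CPU writes to the DSB buffer are ordered before later MMIO. */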
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
	/*
	 * The memory barrier here is to ensure coherency of DSB vs MMIO,
	 * both for weak ordering archs and discrete cards.
	 */
	xe_device_wmb(dsb_buf->vma->bo->tile->xe);
}
83