xref: /linux/drivers/gpu/drm/xe/display/xe_dsb_buffer.c (revision c0d6f52f9b62479d61f8cd4faf9fb2f8bce6e301)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023, Intel Corporation.
4  */
5 
6 #include "intel_dsb_buffer.h"
7 #include "xe_bo.h"
8 #include "xe_device.h"
9 #include "xe_device_types.h"
10 
/*
 * Backing storage for a display DSB (Display State Buffer) command buffer.
 * Opaque to callers; accessed only through the intel_dsb_buffer_* helpers
 * below.
 */
struct intel_dsb_buffer {
	/*
	 * CPU pointer to the command words. NOTE(review): never assigned in
	 * this file — only used via sizeof(*cmd_buf) (== sizeof(u32)) in the
	 * memset bounds check; confirm whether it is set elsewhere or dead.
	 */
	u32 *cmd_buf;
	/* Pinned, GGTT-mapped BO holding the buffer contents. */
	struct xe_bo *bo;
	/* Requested buffer size in bytes (pre-PAGE_ALIGN value). */
	size_t buf_size;
};
16 
17 u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
18 {
19 	return xe_bo_ggtt_addr(dsb_buf->bo);
20 }
21 
22 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
23 {
24 	iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val);
25 }
26 
27 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
28 {
29 	return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32);
30 }
31 
32 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
33 {
34 	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
35 
36 	iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size);
37 }
38 
39 struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
40 {
41 	struct xe_device *xe = to_xe_device(drm);
42 	struct intel_dsb_buffer *dsb_buf;
43 	struct xe_bo *obj;
44 	int ret;
45 
46 	dsb_buf = kzalloc(sizeof(*dsb_buf), GFP_KERNEL);
47 	if (!dsb_buf)
48 		return ERR_PTR(-ENOMEM);
49 
50 	/* Set scanout flag for WC mapping */
51 	obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
52 					PAGE_ALIGN(size),
53 					ttm_bo_type_kernel,
54 					XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
55 					XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
56 	if (IS_ERR(obj)) {
57 		ret = PTR_ERR(obj);
58 		goto err_pin_map;
59 	}
60 
61 	dsb_buf->bo = obj;
62 	dsb_buf->buf_size = size;
63 
64 	return dsb_buf;
65 
66 err_pin_map:
67 	kfree(dsb_buf);
68 
69 	return ERR_PTR(ret);
70 }
71 
72 void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
73 {
74 	xe_bo_unpin_map_no_vm(dsb_buf->bo);
75 	kfree(dsb_buf);
76 }
77 
78 void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
79 {
80 	struct xe_device *xe = dsb_buf->bo->tile->xe;
81 
82 	/*
83 	 * The memory barrier here is to ensure coherency of DSB vs MMIO,
84 	 * both for weak ordering archs and discrete cards.
85 	 */
86 	xe_device_wmb(xe);
87 	xe_device_l2_flush(xe);
88 }
89