/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_MAP_H_
#define _XE_MAP_H_

#include <linux/iosys-map.h>

#include <xe_device.h>

/**
 * DOC: Map layer
 *
 * All access to any memory shared with a device (both sysmem and vram) in the
 * XE driver should go through this layer (xe_map). This layer is built on top
 * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
 * with extra hooks into the XE driver that allow adding asserts to memory
 * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
 */

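/*
 * Example usage (an illustrative sketch only; "bo->vmap" stands in for
 * whatever struct iosys_map the caller already holds for the object):
 *
 *	u32 val = xe_map_rd(xe, &bo->vmap, 0, u32);
 *
 *	xe_map_wr(xe, &bo->vmap, 0, u32, val + 1);
 *	xe_map_memcpy_to(xe, &bo->vmap, 0x40, buf, sizeof(buf));
 */

/**
 * xe_map_memcpy_to() - Copy a buffer into a device-shared mapping
 * @xe: xe device the mapping belongs to
 * @dst: destination mapping
 * @dst_offset: byte offset into @dst
 * @src: source buffer
 * @len: number of bytes to copy
 *
 * Asserts that the device memory may be accessed, then forwards to
 * iosys_map_memcpy_to(), which handles both iomem and sysmem mappings.
 */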
static inline void xe_map_memcpy_to(struct xe_device *xe, struct iosys_map *dst,
				    size_t dst_offset, const void *src,
				    size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_to(dst, dst_offset, src, len);
}

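/**
 * xe_map_memcpy_from() - Copy from a device-shared mapping into a buffer
 * @xe: xe device the mapping belongs to
 * @dst: destination buffer
 * @src: source mapping
 * @src_offset: byte offset into @src
 * @len: number of bytes to copy
 */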
static inline void xe_map_memcpy_from(struct xe_device *xe, void *dst,
				      const struct iosys_map *src,
				      size_t src_offset, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_from(dst, src, src_offset, len);
}

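/**
 * xe_map_memset() - Fill a range of a device-shared mapping with a byte value
 * @xe: xe device the mapping belongs to
 * @dst: mapping to fill
 * @offset: byte offset into @dst
 * @value: byte value to write
 * @len: number of bytes to set
 */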
static inline void xe_map_memset(struct xe_device *xe,
				 struct iosys_map *dst, size_t offset,
				 int value, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memset(dst, offset, value, len);
}

/* FIXME: We likely should kill these two functions sooner or later */
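/**
 * xe_map_read32() - Read the first 32 bits of a mapping
 * @xe: xe device the mapping belongs to
 * @map: mapping to read from
 *
 * Unlike xe_map_rd(), no offset is taken; the value at the very start of
 * @map is returned.
 */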
static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		return readl(map->vaddr_iomem);
	else
		return READ_ONCE(*(u32 *)map->vaddr);
}

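/**
 * xe_map_write32() - Write a 32-bit value at the very start of a mapping
 * @xe: xe device the mapping belongs to
 * @map: mapping to write to
 * @val: value to write
 */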
static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
				  u32 val)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		writel(val, map->vaddr_iomem);
	else
		*(u32 *)map->vaddr = val;
}

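/*
 * Typed accessor macros, mirroring the corresponding iosys_map helpers but
 * asserting first that the device memory may be accessed. xe_map_rd() and
 * xe_map_wr() access a single value of the given type at the given byte
 * offset, while the _field variants access one member of a structure placed
 * at struct_offset__ within the mapping.
 */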
#define xe_map_rd(xe__, map__, offset__, type__) ({			\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_rd(map__, offset__, type__);				\
})

#define xe_map_wr(xe__, map__, offset__, type__, val__) ({		\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_wr(map__, offset__, type__, val__);			\
})

#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({	\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_rd_field(map__, struct_offset__, struct_type__, field__);		\
})

#define xe_map_wr_field(xe__, map__, struct_offset__, struct_type__, field__, val__) ({	\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__);	\
})

#endif