xref: /linux/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"

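/*
 * Compat helpers implementing the i915 intel_uncore register access API on
 * top of xe_mmio, for code shared with i915 (such as the display code). All
 * accesses go through the root tile's MMIO region.
 */
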
static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
	return &to_xe_device(drm)->uncore;
}

static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_root_tile_mmio(xe);
}

static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_device_get_root_tile(xe);
}

static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}

static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
				      i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}

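/*
 * Read a 64-bit value exposed as two 32-bit registers: the upper half is
 * re-read until it is stable, so a carry between the two reads cannot
 * produce a torn value.
 */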
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
	struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	do {
		old_upper = upper;
		lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
		upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}

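/* Read the register back to flush previously posted writes; the value is discarded. */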
static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
					     i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write(struct intel_uncore *uncore,
				      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t i915_reg, u32 clear, u32 set)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}

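/*
 * The wait helpers below take their timeout in milliseconds, while
 * xe_mmio_wait32() expects microseconds, hence the conversion.
 */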
static inline int intel_wait_for_register(struct intel_uncore *uncore,
					  i915_reg_t i915_reg, u32 mask,
					  u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
					     i915_reg_t i915_reg, u32 mask,
					     u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
			  u32 mask, u32 value, unsigned int fast_timeout_us,
			  unsigned int slow_timeout_ms, u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
	bool atomic;

	/*
	 * Replicate the behavior from i915 here, in which sleep is not
	 * performed if slow_timeout_ms == 0. This is necessary because
	 * of some paths in display code where waits are done in atomic
	 * context.
	 */
	atomic = !slow_timeout_ms && fast_timeout_us > 0;

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      fast_timeout_us + 1000 * slow_timeout_ms,
			      out_value, atomic);
}

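/*
 * The _fw ("forcewake held by caller") variants map to plain MMIO accesses
 * here, since forcewake is not managed through this interface.
 */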
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
				       i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
					 i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

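/* The _notrace variants are likewise plain xe_mmio accesses. */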
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
					    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
					      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

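/* Forcewake and unclaimed-MMIO detection hooks are no-ops in this compat layer. */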
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)

#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)

#endif /* __INTEL_UNCORE_H__ */