/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_RESCTRL_H
#define _ASM_X86_RESCTRL_H

#ifdef CONFIG_X86_CPU_RESCTRL

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/resctrl_types.h>
#include <linux/sched.h>

#include <asm/msr.h>

/*
 * This value can never be a valid CLOSID, and is used when mapping a
 * (closid, rmid) pair to an index and back. On x86 only the RMID is
 * needed. The index is a software defined value.
 */
#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0)

/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:		The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them. This also
 * stores the user configured per cpu CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};

DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);

extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;

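/*
 * The keys below gate the scheduler hot path. rdt_enable_key counts
 * enables: it is incremented once when allocation is enabled and once
 * when monitoring is enabled, so resctrl_arch_sched_in() remains a
 * no-op until at least one of the two features is in use.
 */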
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

static inline bool resctrl_arch_alloc_capable(void)
{
	return rdt_alloc_capable;
}

static inline void resctrl_arch_enable_alloc(void)
{
	static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_alloc(void)
{
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

static inline bool resctrl_arch_mon_capable(void)
{
	return rdt_mon_capable;
}

static inline void resctrl_arch_enable_mon(void)
{
	static_branch_enable_cpuslocked(&rdt_mon_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_mon(void)
{
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

/*
 * __resctrl_sched_in() - Writes the task's CLOSid/RMID to MSR_IA32_PQR_ASSOC
 *
 * Following considerations are made so that this has minimal impact
 * on scheduler hot path:
 * - This will remain a no-op unless we are running on a CPU which
 *   supports resource control or monitoring and we enable by
 *   mounting the resctrl file system.
 * - Caches the per cpu CLOSid/RMID values and does the MSR write only
 *   when a task with a different CLOSid/RMID is scheduled in.
 * - We allocate RMIDs/CLOSids globally in order to keep this as
 *   simple as possible.
 *
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = READ_ONCE(state->default_closid);
	u32 rmid = READ_ONCE(state->default_rmid);
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}

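/*
 * Round a monitor value down to the hardware counting granularity,
 * e.g. with a scale of 64, a value of 1000 becomes 960 (15 * 64).
 */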
static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	val /= scale;
	return val * scale;
}

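/*
 * Set the CLOSID/RMID that tasks without an explicit assignment pick up
 * when scheduled in on @cpu. __resctrl_sched_in() reads these with
 * READ_ONCE(), hence the WRITE_ONCE() here.
 */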
static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid,
							    u32 rmid)
{
	WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
	WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
}

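/*
 * Assign @tsk's CLOSID/RMID; a value of 0 means "use the CPU default"
 * on the next context switch.
 */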
static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
						u32 closid, u32 rmid)
{
	WRITE_ONCE(tsk->closid, closid);
	WRITE_ONCE(tsk->rmid, rmid);
}

static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
	return READ_ONCE(tsk->closid) == closid;
}

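/*
 * RMIDs are allocated globally on x86, so the CLOSID is not needed to
 * identify a monitor group; the second argument is ignored.
 */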
static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
					   u32 rmid)
{
	return READ_ONCE(tsk->rmid) == rmid;
}

static inline void resctrl_arch_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}

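/*
 * On x86 the (closid, rmid) pair encodes to an index that is simply the
 * RMID; the CLOSID contributes nothing and decodes back as
 * X86_RESCTRL_EMPTY_CLOSID.
 */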
static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
	*rmid = idx;
	*closid = X86_RESCTRL_EMPTY_CLOSID;
}

static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
{
	return rmid;
}

/* x86 can always read an rmid, nothing needs allocating */
struct rdt_resource;
static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r,
					       enum resctrl_event_id evtid)
{
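	/* Callers must be able to sleep, even though x86 allocates nothing */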
	might_sleep();
	return NULL;
}

static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r,
					     enum resctrl_event_id evtid,
					     void *ctx) { }

void resctrl_cpu_detect(struct cpuinfo_x86 *c);

#else

static inline void resctrl_arch_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */