1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _RESCTRL_H
3 #define _RESCTRL_H
4
5 #include <linux/cacheinfo.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/pid.h>
9 #include <linux/resctrl_types.h>
10
11 #ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
12 #include <asm/resctrl.h>
13 #endif
14
15 /* CLOSID, RMID value used by the default control group */
16 #define RESCTRL_RESERVED_CLOSID 0
17 #define RESCTRL_RESERVED_RMID 0
18
19 #define RESCTRL_PICK_ANY_CPU -1
20
21 #ifdef CONFIG_PROC_CPU_RESCTRL
22
23 int proc_resctrl_show(struct seq_file *m,
24 struct pid_namespace *ns,
25 struct pid *pid,
26 struct task_struct *tsk);
27
28 #endif
29
30 /* max value for struct rdt_domain's mbps_val */
31 #define MBA_MAX_MBPS U32_MAX
32
/* Walk all possible resources, with variants for only controls or monitors. */
#define for_each_rdt_resource(_r)					      \
	for ((_r) = resctrl_arch_get_resource(0);			      \
	     (_r) && (_r)->rid < RDT_NUM_RESOURCES;			      \
	     (_r) = resctrl_arch_get_resource((_r)->rid + 1))

/* Walk the resources that are capable of allocation, monitoring, or both. */
#define for_each_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->alloc_capable || (r)->mon_capable)

/* Walk only the resources that are capable of allocation (control). */
#define for_each_alloc_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->alloc_capable)

/* Walk only the resources that are capable of monitoring. */
#define for_each_mon_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->mon_capable)
50
/*
 * Index of each resource resctrl may support. Used by
 * resctrl_arch_get_resource() and as struct rdt_resource::rid.
 * RDT_NUM_RESOURCES must remain the last entry.
 */
enum resctrl_res_level {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,
	RDT_RESOURCE_SMBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};

/**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
 * @CDP_CODE:	Configuration applies to instruction fetches.
 * @CDP_DATA:	Configuration applies to reads and writes.
 */
enum resctrl_conf_type {
	CDP_NONE,
	CDP_CODE,
	CDP_DATA,
};

/* Number of configuration types; relies on CDP_DATA being the last entry. */
#define CDP_NUM_TYPES (CDP_DATA + 1)
74
/**
 * struct pseudo_lock_region - pseudo-lock region information
 * @s:			Resctrl schema for the resource to which this
 *			pseudo-locked region belongs
 * @closid:		The closid that this pseudo-locked region uses
 * @d:			RDT domain to which this pseudo-locked region
 *			belongs
 * @cbm:		bitmask of the pseudo-locked region
 * @lock_thread_wq:	waitqueue used to wait on the pseudo-locking thread
 *			completion
 * @thread_done:	variable used by waitqueue to test if pseudo-locking
 *			thread completed
 * @cpu:		core associated with the cache on which the setup code
 *			will be run
 * @line_size:		size of the cache lines
 * @size:		size of pseudo-locked region in bytes
 * @kmem:		the kernel memory associated with pseudo-locked region
 * @minor:		minor number of character device associated with this
 *			region
 * @debugfs_dir:	pointer to this region's directory in the debugfs
 *			filesystem
 * @pm_reqs:		Power management QoS requests related to this region
 */
struct pseudo_lock_region {
	struct resctrl_schema	*s;
	u32			closid;
	struct rdt_ctrl_domain	*d;
	u32			cbm;
	wait_queue_head_t	lock_thread_wq;
	int			thread_done;
	int			cpu;
	unsigned int		line_size;
	unsigned int		size;
	void			*kmem;
	unsigned int		minor;
	struct dentry		*debugfs_dir;
	struct list_head	pm_reqs;
};
113
/**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	whether the user provided new_ctrl is valid
 */
struct resctrl_staged_config {
	u32			new_ctrl;
	bool			have_new_ctrl;
};

/* Discriminates the two domain flavours built around struct rdt_domain_hdr. */
enum resctrl_domain_type {
	RESCTRL_CTRL_DOMAIN,
	RESCTRL_MON_DOMAIN,
};

/**
 * struct rdt_domain_hdr - common header for different domain types
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @type:		type of this instance
 * @cpu_mask:		which CPUs share this resource
 */
struct rdt_domain_hdr {
	struct list_head		list;
	int				id;
	enum resctrl_domain_type	type;
	struct cpumask			cpu_mask;
};
142
/**
 * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
 * @hdr:		common header for different domain types
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied, indexed by
 *			enum resctrl_conf_type
 * @mbps_val:		When mba_sc is enabled, this holds the array of user
 *			specified control values for mba_sc in MBps, indexed
 *			by closid
 */
struct rdt_ctrl_domain {
	struct rdt_domain_hdr		hdr;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
	u32				*mbps_val;
};
158
/**
 * struct mbm_cntr_cfg - Assignable counter configuration.
 * @evtid:	MBM event to which the counter is assigned. Only valid
 *		if @rdtgrp is not NULL.
 * @rdtgrp:	resctrl group assigned to the counter. NULL if the
 *		counter is free.
 */
struct mbm_cntr_cfg {
	enum resctrl_event_id	evtid;
	struct rdtgroup		*rdtgrp;
};
170
/**
 * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
 * @hdr:		common header for different domain types
 * @ci_id:		cache info id for this domain
 * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
 * @mbm_states:		Per-event pointer to the MBM event's saved state.
 *			An MBM event's state is an array of struct mbm_state
 *			indexed by RMID on x86 or combined CLOSID, RMID on Arm.
 * @mbm_over:		worker to periodically read MBM h/w counters
 * @cqm_limbo:		worker to periodically read CQM h/w counters
 * @mbm_work_cpu:	worker CPU for MBM h/w counters
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 * @cntr_cfg:		array of assignable counters' configuration (indexed
 *			by counter ID)
 */
struct rdt_mon_domain {
	struct rdt_domain_hdr		hdr;
	unsigned int			ci_id;
	unsigned long			*rmid_busy_llc;
	struct mbm_state		*mbm_states[QOS_NUM_L3_MBM_EVENTS];
	struct delayed_work		mbm_over;
	struct delayed_work		cqm_limbo;
	int				mbm_work_cpu;
	int				cqm_work_cpu;
	struct mbm_cntr_cfg		*cntr_cfg;
};
197
/**
 * struct resctrl_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set.
 *			The value 0 means the architecture can support
 *			zero CBM.
 * @shareable_bits:	Bitmask of shareable resource with other
 *			executing entities
 * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 */
struct resctrl_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmasks;
	bool		arch_has_per_cpu_cfg;
};

/**
 * enum membw_throttle_mode - System's memory bandwidth throttling mode
 * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
 * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core
 *				always using smallest bandwidth percentage
 *				assigned to threads, aka "max throttling"
 * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
 */
enum membw_throttle_mode {
	THREAD_THROTTLE_UNDEFINED = 0,
	THREAD_THROTTLE_MAX,
	THREAD_THROTTLE_PER_THREAD,
};
231
/**
 * struct resctrl_membw - Memory bandwidth allocation related data
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @max_bw:		Maximum memory bandwidth value, used as the reset value
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @arch_needs_linear:	True if we can't configure non-linear resources
 * @throttle_mode:	Bandwidth throttling mode when threads request
 *			different memory bandwidths
 * @mba_sc:		True if MBA software controller(mba_sc) is enabled
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct resctrl_membw {
	u32				min_bw;
	u32				max_bw;
	u32				bw_gran;
	u32				delay_linear;
	bool				arch_needs_linear;
	enum membw_throttle_mode	throttle_mode;
	bool				mba_sc;
	u32				*mb_map;
};
254
struct resctrl_schema;

/*
 * The scope over which a resource's domains span: a cache level, or a NUMA
 * node for L3-scoped monitoring. The cache entries are given values equal
 * to the corresponding cache level.
 */
enum resctrl_scope {
	RESCTRL_L2_CACHE = 2,
	RESCTRL_L3_CACHE = 3,
	RESCTRL_L3_NODE,
};

/**
 * enum resctrl_schema_fmt - The format user-space provides for a schema.
 * @RESCTRL_SCHEMA_BITMAP:	The schema is a bitmap in hex.
 * @RESCTRL_SCHEMA_RANGE:	The schema is a decimal number.
 */
enum resctrl_schema_fmt {
	RESCTRL_SCHEMA_BITMAP,
	RESCTRL_SCHEMA_RANGE,
};
272
/**
 * struct resctrl_mon - Monitoring related data of a resctrl resource.
 * @num_rmid:		Number of RMIDs available.
 * @mbm_cfg_mask:	Memory transactions that can be tracked when bandwidth
 *			monitoring events can be configured.
 * @num_mbm_cntrs:	Number of assignable counters.
 * @mbm_cntr_assignable: Is system capable of supporting counter assignment?
 * @mbm_assign_on_mkdir: True if counters should automatically be assigned to MBM
 *			 events of monitor groups created via mkdir.
 */
struct resctrl_mon {
	int		num_rmid;
	unsigned int	mbm_cfg_mask;
	int		num_mbm_cntrs;
	bool		mbm_cntr_assignable;
	bool		mbm_assign_on_mkdir;
};
290
/**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @ctrl_scope:		Scope of this resource for control functions
 * @mon_scope:		Scope of this resource for monitor functions
 * @cache:		Cache allocation related data
 * @membw:		If the component has bandwidth controls, their properties.
 * @mon:		Monitoring related data.
 * @ctrl_domains:	RCU list of all control domains for this resource
 * @mon_domains:	RCU list of all monitor domains for this resource
 * @name:		Name to use in "schemata" file.
 * @schema_fmt:		Which format string and parser is used for this schema.
 * @cdp_capable:	Is the CDP feature available on this resource
 */
struct rdt_resource {
	int			rid;
	bool			alloc_capable;
	bool			mon_capable;
	enum resctrl_scope	ctrl_scope;
	enum resctrl_scope	mon_scope;
	struct resctrl_cache	cache;
	struct resctrl_membw	membw;
	struct resctrl_mon	mon;
	struct list_head	ctrl_domains;
	struct list_head	mon_domains;
	char			*name;
	enum resctrl_schema_fmt	schema_fmt;
	bool			cdp_capable;
};
322
/*
 * Get the resource that exists at this level. If the level is not supported
 * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
 * will return NULL.
 */
struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
329
/**
 * struct resctrl_schema - configuration abilities of a resource presented to
 *			   user-space
 * @list:	Member of resctrl_schema_all.
 * @name:	The name to use in the "schemata" file.
 * @fmt_str:	Format string to show domain value.
 * @conf_type:	Whether this schema is specific to code/data.
 * @res:	The resource structure exported by the architecture to describe
 *		the hardware that is configured by this schema.
 * @num_closid:	The number of closid that can be used with this schema. When
 *		features like CDP are enabled, this will be lower than the
 *		hardware supports for the resource.
 */
struct resctrl_schema {
	struct list_head		list;
	char				name[8];
	const char			*fmt_str;
	enum resctrl_conf_type		conf_type;
	struct rdt_resource		*res;
	u32				num_closid;
};
351
/**
 * struct resctrl_cpu_defaults - new CLOSID/RMID pair for a CPU's default
 *				 group, passed to resctrl_arch_sync_cpu_closid_rmid().
 * @closid:	The new default CLOSID for tasks running on this CPU.
 * @rmid:	The new default RMID for tasks running on this CPU.
 */
struct resctrl_cpu_defaults {
	u32 closid;
	u32 rmid;
};

/**
 * struct resctrl_mon_config_info - parameter block for the IPI helpers that
 *				    read/write an event's configuration.
 * @r:		The resource the event belongs to.
 * @d:		The monitor domain the configuration applies to.
 * @evtid:	The event being configured.
 * @mon_config:	The configuration value written to, or read from, hardware.
 */
struct resctrl_mon_config_info {
	struct rdt_resource	*r;
	struct rdt_mon_domain	*d;
	u32			evtid;
	u32			mon_config;
};
363
/**
 * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
 *					 Call via IPI.
 * @info:	If non-NULL, a pointer to a struct resctrl_cpu_defaults
 *		specifying the new CLOSID and RMID for tasks in the default
 *		resctrl ctrl and mon group when running on this CPU. If NULL,
 *		this CPU is not re-assigned to a different default group.
 *
 * Propagates reassignment of CPUs and/or tasks to different resctrl groups
 * when requested by the resctrl core code.
 *
 * This function records the per-cpu defaults specified by @info (if any),
 * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
 * execution based on @current, in the same way as during a task switch.
 */
void resctrl_arch_sync_cpu_closid_rmid(void *info);
380
/**
 * resctrl_get_default_ctrl() - Return the default control value for this
 *				resource.
 * @r:	The resource whose default control type is queried.
 *
 * Return: The "no restriction" control value for @r's schema format: a full
 *	   cache bitmap for bitmap schemata, or the maximum bandwidth value
 *	   for range schemata.
 */
static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
{
	switch (r->schema_fmt) {
	case RESCTRL_SCHEMA_BITMAP:
		return BIT_MASK(r->cache.cbm_len) - 1;
	case RESCTRL_SCHEMA_RANGE:
		return r->membw.max_bw;
	}

	/* Unreachable for a valid schema_fmt; warn once if it ever happens. */
	return WARN_ON_ONCE(1);
}
397
/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);

/* The number of RMID indexes available on this system. */
u32 resctrl_arch_system_num_rmid_idx(void);

/* Apply @closid's configuration to all of @r's domains. */
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);

/* Enable visibility of @eventid; query whether it is currently enabled. */
void resctrl_enable_mon_event(enum resctrl_event_id eventid);

bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);

/* True if the architecture allows @evt's configuration to be changed. */
bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
408
resctrl_is_mbm_event(enum resctrl_event_id eventid)409 static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
410 {
411 return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
412 eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
413 }
414
/* Current configuration value for monitor event @eventid. */
u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);

/* Iterate over all memory bandwidth events */
#define for_each_mbm_event_id(eventid)				\
	for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID;		\
	     eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)

/* Iterate over memory bandwidth arrays in domain structures */
#define for_each_mbm_idx(idx)					\
	for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
425
/**
 * resctrl_arch_mon_event_config_write() - Write the config for an event.
 * @config_info:	struct resctrl_mon_config_info describing the resource, domain
 *			and event.
 *
 * Reads resource, domain and eventid from @config_info and writes the
 * event config_info->mon_config into hardware.
 *
 * Called via IPI to reach a CPU that is a member of the specified domain.
 */
void resctrl_arch_mon_event_config_write(void *config_info);

/**
 * resctrl_arch_mon_event_config_read() - Read the config for an event.
 * @config_info:	struct resctrl_mon_config_info describing the resource, domain
 *			and event.
 *
 * Reads resource, domain and eventid from @config_info and reads the
 * hardware config value into config_info->mon_config.
 *
 * Called via IPI to reach a CPU that is a member of the specified domain.
 */
void resctrl_arch_mon_event_config_read(void *config_info);
449
450 /* For use by arch code to remap resctrl's smaller CDP CLOSID range */
resctrl_get_config_index(u32 closid,enum resctrl_conf_type type)451 static inline u32 resctrl_get_config_index(u32 closid,
452 enum resctrl_conf_type type)
453 {
454 switch (type) {
455 default:
456 case CDP_NONE:
457 return closid;
458 case CDP_CODE:
459 return closid * 2 + 1;
460 case CDP_DATA:
461 return closid * 2;
462 }
463 }
464
/* Query/set whether CDP is enabled for the resource at level @l. */
bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);

/**
 * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
 *					    mode is enabled.
 * @r:	Pointer to the resource structure.
 *
 * Return:
 * true if the assignment mode is enabled, false otherwise.
 */
bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);

/**
 * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
 * @r:		Pointer to the resource structure.
 * @enable:	Set to true to enable, false to disable the assignment mode.
 *
 * Return:
 * 0 on success, < 0 on error.
 */
int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
487
/*
 * Update the ctrl_val and apply this config right now.
 * Must be called on one of the domain's CPUs.
 */
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);

/* Read back the configuration for @closid/@type in domain @d. */
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type);

/* Hooks invoked as domains and CPUs come and go. */
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
503
/**
 * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
 *			      for this resource and domain.
 * @r:			resource that the counter should be read from.
 * @d:			domain that the counter should be read from.
 * @closid:		closid that matches the rmid. Depending on the architecture, the
 *			counter may match traffic of both @closid and @rmid, or @rmid
 *			only.
 * @rmid:		rmid of the counter to read.
 * @eventid:		eventid to read, e.g. L3 occupancy.
 * @val:		result of the counter read in bytes.
 * @arch_mon_ctx:	An architecture specific value from
 *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
 *			the hardware monitor allocated for this read request.
 *
 * Some architectures need to sleep when first programming some of the counters.
 * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
 * for a short period of time). Call from a non-migrateable process context on
 * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or
 * schedule_work_on(). This function can be called with interrupts masked,
 * e.g. using smp_call_function_any(), but may consistently return an error.
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val, void *arch_mon_ctx);
532
/**
 * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
 *
 * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
 * resctrl_arch_rmid_read() is called with preemption disabled.
 *
 * The contract with resctrl_arch_rmid_read() is that if interrupts
 * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
 * IPI, (and fail if the call needed to sleep), while most of the time
 * the work is scheduled, allowing the call to sleep.
 */
static inline void resctrl_arch_rmid_read_context_check(void)
{
	/* With interrupts masked the read is allowed to fail instead. */
	if (irqs_disabled())
		return;

	might_sleep();
}
549
/**
 * resctrl_find_domain() - Search for a domain id in a resource domain list.
 * @h:		The domain list to search.
 * @id:		The domain id to search for.
 * @pos:	A pointer to position in the list id should be inserted.
 *
 * Search the domain list to find the domain id. If the domain id is
 * found, return the domain. NULL otherwise. If the domain id is not
 * found (and NULL returned) then the first domain with id bigger than
 * the input id can be returned to the caller via @pos.
 */
struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
					   struct list_head **pos);
563
/**
 * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
 *			       and eventid.
 * @r:		The domain's resource.
 * @d:		The rmid's domain.
 * @closid:	closid that matches the rmid. Depending on the architecture, the
 *		counter may match traffic of both @closid and @rmid, or @rmid only.
 * @rmid:	The rmid whose counter values should be reset.
 * @eventid:	The eventid whose counter values should be reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid,
			     enum resctrl_event_id eventid);

/**
 * resctrl_arch_reset_rmid_all() - Reset all private state associated with
 *				   all rmids and eventids.
 * @r:		The resctrl resource.
 * @d:		The domain for which all architectural counter state will
 *		be cleared.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);

/**
 * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
 *				    default.
 * @r:		The resctrl resource to reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
599
/**
 * resctrl_arch_config_cntr() - Configure the counter with its new RMID
 *				and event details.
 * @r:		Resource structure.
 * @d:		The domain in which counter with ID @cntr_id should be configured.
 * @evtid:	Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
 *		or QOS_L3_MBM_LOCAL_EVENT_ID).
 * @rmid:	RMID.
 * @closid:	CLOSID.
 * @cntr_id:	Counter ID to configure.
 * @assign:	True to assign the counter or update an existing assignment,
 *		false to unassign the counter.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
			      enum resctrl_event_id evtid, u32 rmid, u32 closid,
			      u32 cntr_id, bool assign);

/**
 * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
 *			      assigned to the RMID, event pair for this resource
 *			      and domain.
 * @r:		Resource that the counter should be read from.
 * @d:		Domain that the counter should be read from.
 * @closid:	CLOSID that matches the RMID.
 * @rmid:	The RMID to which @cntr_id is assigned.
 * @cntr_id:	The counter to read.
 * @eventid:	The MBM event to which @cntr_id is assigned.
 * @val:	Result of the counter read in bytes.
 *
 * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
 * Called from a non-migrateable process context via smp_call_on_cpu() unless all
 * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, int cntr_id,
			   enum resctrl_event_id eventid, u64 *val);

/**
 * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
 * @r:		The domain's resource.
 * @d:		The counter ID's domain.
 * @closid:	CLOSID that matches the RMID.
 * @rmid:	The RMID to which @cntr_id is assigned.
 * @cntr_id:	The counter to reset.
 * @eventid:	The MBM event to which @cntr_id is assigned.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid, int cntr_id,
			     enum resctrl_event_id eventid);
656
/*
 * Occupancy threshold used when deciding whether a freed RMID is still busy
 * (see rmid_busy_llc), and the maximum value the threshold can take.
 * NOTE(review): exact tuning semantics live in the monitor code — confirm there.
 */
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;

/* Set up / tear down the resctrl filesystem interface. */
int resctrl_init(void);
void resctrl_exit(void);
662
#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
/* Pseudo-locking support provided by the architecture. */
u64 resctrl_arch_get_prefetch_disable_bits(void);
int resctrl_arch_pseudo_lock_fn(void *_plr);
int resctrl_arch_measure_cycles_lat_fn(void *_plr);
int resctrl_arch_measure_l2_residency(void *_plr);
int resctrl_arch_measure_l3_residency(void *_plr);
#else
/* Stubs when pseudo-locking is not compiled in: report no capability. */
static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
676 #endif /* _RESCTRL_H */
677