xref: /linux/arch/x86/kernel/cpu/resctrl/ctrlmondata.c (revision 59fff63cc2b75dcfe08f9eeb4b2187d73e53843d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
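/*
 * For example (illustrative values; the actual granularity and minimum are
 * reported in info/MB/bandwidth_gran and info/MB/min_bandwidth), with a
 * granularity of 10 and a minimum of 10, a requested value of 45 is rounded
 * up to the next control step, 50.
 */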
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}
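
/*
 * Note: when resctrl is mounted with the "mba_MBps" option, is_mba_sc() is
 * true and the parsed value is treated as a bandwidth in MBps for the
 * software controller instead of being staged for an MSR write. Illustrative
 * usage:
 *   # mount -t resctrl -o mba_MBps resctrl /sys/fs/resctrl
 */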

/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 *
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
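/*
 * For example, with an illustrative cbm_len of 12: 0x3f0 is always accepted;
 * 0xf0f is accepted only when the CPU reports support for non-contiguous
 * (sparse) bitmasks; 0x001 is rejected on hardware such as Haswell that
 * requires at least two consecutive bits.
 */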
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
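/*
 * For example (domain ids and masks are illustrative), the part of a
 * schemata line handed to parse_line() for an L3 resource with two cache
 * domains might look like:
 *	0=0ff;1=3ff
 */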
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode a valid CBM
				 * that should be pseudo-locked has just been
				 * parsed. Only one pseudo-locked region is
				 * allowed per resource group and domain, so
				 * do the required initialization for this
				 * single region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}

static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
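
/*
 * For example, with CDP enabled (resctrl mounted with the "cdp" or "cdpl2"
 * option), CLOSID 1 uses control register index 2 for its data mask and
 * index 3 for its code mask; without CDP the CLOSID is used directly.
 */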

static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask)
{
	struct rdt_domain *dom = &hw_dom->d_resctrl;

	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		hw_dom->ctrl_val[idx] = cfg->new_ctrl;

		return true;
	}

	return false;
}
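
/*
 * The control registers are scoped to a domain (e.g. an L3 cache instance),
 * so picking any one CPU from each changed domain is sufficient for the IPI
 * that performs the MSR writes.
 */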

int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(d, &msr_param, r);

	return 0;
}

int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	u32 idx;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
				continue;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	if (cpumask_empty(cpu_mask))
		goto done;

	/* Update resource control MSRs on the CPUs in cpu_mask. */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

done:
	free_cpumask_var(cpu_mask);

	return 0;
}

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to a pseudo-locked region are allowed. It has to be
	 * removed and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}
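
/*
 * Typical usage from user space (paths and values are illustrative).
 * Resources and domains that are not mentioned in the write keep their
 * current settings:
 *   # echo "L3:0=0ff" > /sys/fs/resctrl/grp1/schemata
 *   # echo "MB:0=50;1=100" > /sys/fs/resctrl/grp1/schemata
 */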

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}

static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
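
/*
 * Reading the schemata file prints one line per schema. Illustrative output
 * for a system with two L3 cache domains:
 *   # cat /sys/fs/resctrl/grp1/schemata
 *       L3:0=0ff;1=fff
 *       MB:0=50;1=100
 */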

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	/*
	 * Set up the parameters to send to the IPI to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
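
/*
 * Example of reading a monitoring event from user space (the path is
 * illustrative; the directory name encodes the resource and domain id):
 *   # cat /sys/fs/resctrl/grp1/mon_data/mon_L3_00/llc_occupancy
 *   1234567
 */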