Lines Matching +full:0 +full:- +full:mon
1 // SPDX-License-Identifier: GPL-2.0-only
4 * - Monitoring code
45 * 1. The threshold 0 is changed to rmid count - 1, so no correction is done
48 * equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27.
101 * When Sub-NUMA Cluster (SNC) mode is not enabled (as indicated by
106 * via MSR 0xCA0 (see the "RMID Sharing Mode" section in the "Intel
133 return lrmid + (cpu_to_node(cpu) % snc_nodes_per_l3_cache) * r->mon.num_rmid; in logical_rmid_to_physical_rmid()
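When SNC is enabled, the RMIDs of one L3 cache are split evenly between its SNC nodes, and line 133 folds a node-local (logical) RMID back into the physical RMID the hardware expects. A hedged worked example, with illustrative numbers only:

	/*
	 * Illustrative only: with snc_nodes_per_l3_cache = 2 and
	 * r->mon.num_rmid = 128, a CPU on the second SNC node of its L3
	 * (cpu_to_node(cpu) % 2 == 1) maps logical RMID 7 to physical
	 * RMID 7 + 1 * 128 = 135, while a CPU on the first node keeps 7.
	 */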
141 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured in __rmid_read_phys()
144 * IA32_QM_CTR.data (bits 61:0) reports the monitored data. in __rmid_read_phys()
152 return -EIO; in __rmid_read_phys()
154 return -EINVAL; in __rmid_read_phys()
157 return 0; in __rmid_read_phys()
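Lines 141-157 are from __rmid_read_phys(), which implements the read protocol those SDM sentences describe: program IA32_QM_EVTSEL with the event ID and the physical RMID, read IA32_QM_CTR, and map the Error/Unavailable bits to errno values. A minimal sketch of that sequence, consistent with the return statements shown above (the helper and bit names follow the usual resctrl definitions and are assumptions as far as this listing goes):

	static int qm_ctr_read_sketch(u32 prmid, enum resctrl_event_id eventid, u64 *val)
	{
		u64 msr_val;

		/* Low word selects the event ID; high word carries the physical RMID. */
		wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid);
		rdmsrl(MSR_IA32_QM_CTR, msr_val);

		if (msr_val & RMID_VAL_ERROR)		/* bit 63: invalid RMID or event ID */
			return -EIO;
		if (msr_val & RMID_VAL_UNAVAIL)		/* bit 62: no data yet; caller may retry */
			return -EINVAL;

		*val = msr_val;				/* bits 61:0 carry the count */
		return 0;
	}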
169 state = hw_dom->arch_mbm_states[MBM_STATE_IDX(eventid)]; in get_arch_mbm_state()
179 int cpu = cpumask_any(&d->hdr.cpu_mask); in resctrl_arch_reset_rmid()
185 memset(am, 0, sizeof(*am)); in resctrl_arch_reset_rmid()
188 /* Record any initial, non-zero count value. */ in resctrl_arch_reset_rmid()
189 __rmid_read_phys(prmid, eventid, &am->prev_msr); in resctrl_arch_reset_rmid()
195 * no need to record initial non-zero counts.
207 memset(hw_dom->arch_mbm_states[idx], 0, in resctrl_arch_reset_rmid_all()
208 sizeof(*hw_dom->arch_mbm_states[0]) * r->mon.num_rmid); in resctrl_arch_reset_rmid_all()
214 u64 shift = 64 - width, chunks; in mbm_overflow_count()
216 chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
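mbm_overflow_count() (lines 214-216) left-shifts both samples so that the subtraction wraps at the hardware counter width rather than at 64 bits; shifting the result back down (the function's final statement, not matched by this search) yields the chunks accumulated since the previous read, even across one counter wrap. A worked example assuming the base width of 24 bits (MBM_CNTR_WIDTH_BASE, see line 432):

	/*
	 * Illustrative only, width = 24 so shift = 40:
	 * prev_msr = 0xFFFFFE, cur_msr = 0x000004 after the counter wrapped.
	 * (cur_msr << 40) - (prev_msr << 40) keeps the difference modulo 2^24,
	 * and shifting back down gives 6, i.e. (0x1000000 + 4) - 0xFFFFFE.
	 */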
230 am->chunks += mbm_overflow_count(am->prev_msr, msr_val, in get_corrected_val()
231 hw_res->mbm_width); in get_corrected_val()
232 chunks = get_corrected_mbm_count(rmid, am->chunks); in get_corrected_val()
233 am->prev_msr = msr_val; in get_corrected_val()
238 return chunks * hw_res->mon_scale; in get_corrected_val()
246 int cpu = cpumask_any(&d->hdr.cpu_mask); in resctrl_arch_rmid_read()
259 } else if (ret == -EINVAL) { in resctrl_arch_rmid_read()
262 am->prev_msr = 0; in resctrl_arch_rmid_read()
277 * 63:44 -- Reserved in __cntr_id_read()
281 * 30:8 -- Reserved in __cntr_id_read()
282 * 7:0 EvtID Event Identifier in __cntr_id_read()
297 return -EIO; in __cntr_id_read()
299 return -EINVAL; in __cntr_id_read()
302 return 0; in __cntr_id_read()
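Lines 277-302 come from __cntr_id_read(), which reuses the QM_EVTSEL/QM_CTR pair to read an assigned bandwidth counter: per the layout excerpted above, the event ID goes in bits 7:0, an extended-event flag selects counter-ID addressing, and the counter ID is written where a physical RMID would normally go; the Error/Unavailable checks mirror __rmid_read_phys(). A hedged sketch using raw bit positions rather than the file's exact macros (the flag bit and the ABMC event code are assumptions, not names taken from this file):

	static int cntr_read_sketch(u32 cntr_id, u64 *val)
	{
		u64 msr_val;

		/*
		 * EvtID in bits 7:0, assumed extended-event flag in bit 31,
		 * counter ID written where the RMID normally goes.
		 * ABMC_EVT_ID_ASSUMED is a placeholder for the real event code.
		 */
		wrmsr(MSR_IA32_QM_EVTSEL, BIT(31) | ABMC_EVT_ID_ASSUMED, cntr_id);
		rdmsrl(MSR_IA32_QM_CTR, msr_val);

		if (msr_val & RMID_VAL_ERROR)
			return -EIO;
		if (msr_val & RMID_VAL_UNAVAIL)
			return -EINVAL;

		*val = msr_val;
		return 0;
	}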
314 memset(am, 0, sizeof(*am)); in resctrl_arch_reset_cntr()
316 /* Record any initial, non-zero count value. */ in resctrl_arch_reset_cntr()
317 __cntr_id_read(cntr_id, &am->prev_msr); in resctrl_arch_reset_cntr()
334 return 0; in resctrl_arch_cntr_read()
338 * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
343 * Clearing bit 0 reconfigures the RMID counters for use
354 msr_clear_bit(MSR_RMID_SNC_CONFIG, 0); in arch_mon_domain_online()
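msr_clear_bit() on line 354 performs a read-modify-write of MSR_RMID_SNC_CONFIG on the CPU it runs on, writing back only when bit 0 is actually set. Roughly the following, sketched with the plain MSR accessors (an illustration, not the helper's exact implementation):

	u64 val;

	rdmsrl(MSR_RMID_SNC_CONFIG, val);
	if (val & BIT_ULL(0)) {
		val &= ~BIT_ULL(0);	/* reconfigure RMID counters, per the comment at line 343 */
		wrmsrl(MSR_RMID_SNC_CONFIG, val);
	}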
359 X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
360 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
361 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
362 X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, 0),
363 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 0),
369 * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the
379 struct cacheinfo *ci = get_cpu_cacheinfo_level(0, RESCTRL_L3_CACHE); in snc_get_config()
392 node0_cpumask = cpumask_of_node(cpu_to_node(0)); in snc_get_config()
395 cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map); in snc_get_config()
408 pr_info("Sub-NUMA Cluster mode detected with %d nodes per L3 cache\n", ret); in snc_get_config()
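snc_get_config() (lines 369-408) infers the SNC node count from topology alone: it compares the number of CPUs sharing an L3 (the shared_cpu_map weight on line 395) with the number of CPUs in NUMA node 0, and the ratio is presumably what feeds the pr_info() above. An illustrative example with hypothetical CPU counts:

	/*
	 * Illustrative only: 56 CPUs share each L3 (cpus_per_l3 = 56) and
	 * NUMA node 0 holds 28 of them, so the ratio 56 / 28 = 2 leads to
	 * "Sub-NUMA Cluster mode detected with 2 nodes per L3 cache".
	 */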
430 hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale / snc_nodes_per_l3_cache; in rdt_get_mon_l3_config()
431 r->mon.num_rmid = (boot_cpu_data.x86_cache_max_rmid + 1) / snc_nodes_per_l3_cache; in rdt_get_mon_l3_config()
432 hw_res->mbm_width = MBM_CNTR_WIDTH_BASE; in rdt_get_mon_l3_config()
434 if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX) in rdt_get_mon_l3_config()
435 hw_res->mbm_width += mbm_offset; in rdt_get_mon_l3_config()
446 threshold = resctrl_rmid_realloc_limit / r->mon.num_rmid; in rdt_get_mon_l3_config()
450 * to the nearest multiple of hw_res->mon_scale so it matches a in rdt_get_mon_l3_config()
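Lines 446-450 set the default occupancy (limbo) threshold: the reallocation limit is split evenly across the RMIDs of one domain and then rounded to a multiple of the counter scale, so the value users later read back matches something the hardware can actually report. A worked example with hypothetical numbers:

	/*
	 * Illustrative only: resctrl_rmid_realloc_limit = 104857600 bytes
	 * (100 MiB), r->mon.num_rmid = 128, hw_res->mon_scale = 64:
	 * threshold = 104857600 / 128 = 819200, already a multiple of 64;
	 * an odd value such as 819230 would instead be rounded to the
	 * nearest multiple of the scale.
	 */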
457 cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); in rdt_get_mon_l3_config()
458 r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; in rdt_get_mon_l3_config()
471 r->mon.mbm_cntr_assignable = true; in rdt_get_mon_l3_config()
472 cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx); in rdt_get_mon_l3_config()
473 r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1; in rdt_get_mon_l3_config()
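The second cpuid_count() (line 472, leaf 0x80000020 sub-leaf 5) enumerates the assignable counters: line 473 adds one to EBX[15:0], which suggests that field holds the highest assignable counter ID rather than the count itself. Illustrative:

	/*
	 * Illustrative only: EBX[15:0] = 31 at this sub-leaf would yield
	 * r->mon.num_mbm_cntrs = 31 + 1 = 32 assignable bandwidth counters.
	 */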
474 hw_res->mbm_cntr_assign_enabled = true; in rdt_get_mon_l3_config()
477 r->mon_capable = true; in rdt_get_mon_l3_config()
479 return 0; in rdt_get_mon_l3_config()
486 cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1; in intel_rdt_mbm_apply_quirk()
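cf_index on line 486 follows the formula quoted in the comment at line 48, which notes the index runs from 0 up to 27: the correction-factor table is indexed by the RMID count in steps of eight RMIDs. Illustrative:

	/*
	 * Illustrative only: x86_cache_max_rmid = 223 gives 224 RMIDs,
	 * so cf_index = 224 / 8 - 1 = 27, the last table entry; a part
	 * with 32 RMIDs would use cf_index = 32 / 8 - 1 = 3.
	 */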
516 list_for_each_entry(d, &r->mon_domains, hdr.list) { in _resctrl_abmc_enable()
517 on_each_cpu_mask(&d->hdr.cpu_mask, resctrl_abmc_set_one_amd, in _resctrl_abmc_enable()
527 if (r->mon.mbm_cntr_assignable && in resctrl_arch_mbm_cntr_assign_set()
528 hw_res->mbm_cntr_assign_enabled != enable) { in resctrl_arch_mbm_cntr_assign_set()
530 hw_res->mbm_cntr_assign_enabled = enable; in resctrl_arch_mbm_cntr_assign_set()
533 return 0; in resctrl_arch_mbm_cntr_assign_set()
538 return resctrl_to_arch_res(r)->mbm_cntr_assign_enabled; in resctrl_arch_mbm_cntr_assign_enabled()
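Lines 527-538 form the arch-side toggle for counter assignment: resctrl_arch_mbm_cntr_assign_set() updates the cached state and, presumably via _resctrl_abmc_enable() above, reprograms every CPU in every monitoring domain, while resctrl_arch_mbm_cntr_assign_enabled() just reports the cached value. A hedged usage sketch; the caller shown is hypothetical:

	/* Hypothetical fs-layer caller switching assignable-counter mode on: */
	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
		ret = resctrl_arch_mbm_cntr_assign_set(r, true);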
545 wrmsrl(MSR_IA32_L3_QOS_ABMC_CFG, abmc_cfg->full); in resctrl_abmc_config_one_amd()
556 union l3_qos_abmc_cfg abmc_cfg = { 0 }; in resctrl_arch_config_cntr()
560 abmc_cfg.split.cntr_en = assign ? 1 : 0; in resctrl_arch_config_cntr()
566 smp_call_function_any(&d->hdr.cpu_mask, resctrl_abmc_config_one_amd, &abmc_cfg, 1); in resctrl_arch_config_cntr()
570 * need to record initial non-zero counts. in resctrl_arch_config_cntr()
574 memset(am, 0, sizeof(*am)); in resctrl_arch_config_cntr()
581 resctrl_abmc_set_one_amd(&hw_res->mbm_cntr_assign_enabled); in resctrl_arch_mbm_cntr_assign_set_one()