Lines Matching +full:0 +full:- +full:mon
1 // SPDX-License-Identifier: GPL-2.0-only
4 * - Monitoring code
32 * struct rmid_entry - dirty tracking for all RMIDs.
36 * @list: Member of the rmid_free_lru list when busy == 0.
51 * @rmid_free_lru - A least recently used list of free RMIDs
65 * @rmid_limbo_count - count of currently unused but (potentially)
74 * @rmid_entry - The entry in the limbo and free lists.
80 * RMID available for re-allocation.
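/*
 * Editor's illustrative sketch, not part of the listed source: a minimal
 * model of the free-list/limbo scheme the fragments above describe. A freed
 * RMID whose cache occupancy is still above the threshold sits in limbo,
 * marked busy once per domain; a periodic scan decrements @busy as domains
 * drain, and the entry returns to the free LRU at zero. read_occupancy() is
 * a hypothetical stand-in for resctrl_arch_rmid_read().
 */
#include <linux/list.h>
#include <linux/types.h>

struct rmid_entry_sketch {
	u32 rmid;
	int busy;			/* domains whose occupancy is still dirty */
	struct list_head list;		/* on the free LRU when busy == 0 */
};

static LIST_HEAD(free_lru_sketch);
static u64 read_occupancy(u32 rmid);	/* hypothetical helper, defined elsewhere */

static void limbo_scan_one(struct rmid_entry_sketch *entry, u64 threshold)
{
	if (read_occupancy(entry->rmid) < threshold && !--entry->busy)
		list_add_tail(&entry->list, &free_lru_sketch);	/* reusable again */
}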
110 WARN_ON_ONCE(entry->closid != closid); in __rmid_entry()
111 WARN_ON_ONCE(entry->rmid != rmid); in __rmid_entry()
120 rmid_limbo_count--; in limbo_release_entry()
121 list_add_tail(&entry->list, &rmid_free_lru); in limbo_release_entry()
124 closid_num_dirty_rmid[entry->closid]--; in limbo_release_entry()
141 u64 val = 0; in __check_limbo()
151 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that in __check_limbo()
154 * RMID and move it to the free list when the counter reaches 0. in __check_limbo()
157 idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); in __check_limbo()
162 if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, in __check_limbo()
177 trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val); in __check_limbo()
181 clear_bit(idx, d->rmid_busy_llc); in __check_limbo()
182 if (!--entry->busy) in __check_limbo()
195 return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; in has_busy_rmid()
204 return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); in resctrl_find_free_rmid()
214 itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); in resctrl_find_free_rmid()
215 cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); in resctrl_find_free_rmid()
221 return ERR_PTR(-ENOSPC); in resctrl_find_free_rmid()
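/*
 * Editor's note: the encode-and-compare above works on both architectures
 * because resctrl_arch_rmid_idx_encode() ignores the CLOSID on x86, so any
 * free RMID compares equal, while on MPAM-like systems the index depends on
 * the CLOSID, so only entries belonging to the requested CLOSID match.
 */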
225 * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
229 * MPAM's equivalent of RMID is per-CLOSID, meaning a freshly allocated CLOSID
238 u32 cleanest_closid = ~0; in resctrl_find_cleanest_closid()
239 int i = 0; in resctrl_find_cleanest_closid()
244 return -EIO; in resctrl_find_cleanest_closid()
246 for (i = 0; i < closids_supported(); i++) { in resctrl_find_cleanest_closid()
253 if (num_dirty == 0) in resctrl_find_cleanest_closid()
256 if (cleanest_closid == ~0) in resctrl_find_cleanest_closid()
263 if (cleanest_closid == ~0) in resctrl_find_cleanest_closid()
264 return -ENOSPC; in resctrl_find_cleanest_closid()
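/*
 * Editor's illustrative sketch, not part of the listed source: the selection
 * loop above reduces to "pick the unallocated CLOSID with the fewest dirty
 * RMIDs, returning early on a perfectly clean one". closid_allocated() is a
 * hypothetical predicate mirroring the check the kernel performs under
 * rdtgroup_mutex.
 */
#include <linux/errno.h>
#include <linux/types.h>

static bool closid_allocated(int closid);	/* hypothetical predicate */

static int find_cleanest_sketch(const u32 *num_dirty, int nr_closids)
{
	u32 best_dirty = ~0;
	int i, best = -1;

	for (i = 0; i < nr_closids; i++) {
		if (closid_allocated(i))
			continue;		/* only consider free CLOSIDs */
		if (num_dirty[i] == 0)
			return i;		/* perfectly clean, done */
		if (num_dirty[i] < best_dirty) {
			best_dirty = num_dirty[i];
			best = i;
		}
	}
	return best < 0 ? -ENOSPC : best;
}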
285 list_del(&entry->list); in alloc_rmid()
286 return entry->rmid; in alloc_rmid()
297 /* Walking r->domains, ensure it can't race with cpuhp */ in add_rmid_to_limbo()
300 idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); in add_rmid_to_limbo()
302 entry->busy = 0; in add_rmid_to_limbo()
303 list_for_each_entry(d, &r->mon_domains, hdr.list) { in add_rmid_to_limbo()
311 set_bit(idx, d->rmid_busy_llc); in add_rmid_to_limbo()
312 entry->busy++; in add_rmid_to_limbo()
317 closid_num_dirty_rmid[entry->closid]++; in add_rmid_to_limbo()
342 list_add_tail(&entry->list, &rmid_free_lru); in free_rmid()
354 state = d->mbm_states[MBM_STATE_IDX(evtid)]; in get_mbm_state()
360 * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
363 * Valid counter ID on success, or -ENOENT on failure.
370 if (!r->mon.mbm_cntr_assignable) in mbm_cntr_get()
371 return -ENOENT; in mbm_cntr_get()
374 return -ENOENT; in mbm_cntr_get()
376 for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) { in mbm_cntr_get()
377 if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp && in mbm_cntr_get()
378 d->cntr_cfg[cntr_id].evtid == evtid) in mbm_cntr_get()
382 return -ENOENT; in mbm_cntr_get()
386 * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
390 * Valid counter ID on success, or -ENOSPC on failure.
397 for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) { in mbm_cntr_alloc()
398 if (!d->cntr_cfg[cntr_id].rdtgrp) { in mbm_cntr_alloc()
399 d->cntr_cfg[cntr_id].rdtgrp = rdtgrp; in mbm_cntr_alloc()
400 d->cntr_cfg[cntr_id].evtid = evtid; in mbm_cntr_alloc()
405 return -ENOSPC; in mbm_cntr_alloc()
409 * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
413 memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg)); in mbm_cntr_free()
419 u32 closid = rdtgrp->closid; in __mon_event_count()
420 u32 rmid = rdtgrp->mon.rmid; in __mon_event_count()
422 int cntr_id = -ENOENT; in __mon_event_count()
425 u64 tval = 0; in __mon_event_count()
427 if (rr->is_mbm_cntr) { in __mon_event_count()
428 cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid); in __mon_event_count()
429 if (cntr_id < 0) { in __mon_event_count()
430 rr->err = -ENOENT; in __mon_event_count()
431 return -EINVAL; in __mon_event_count()
435 if (rr->first) { in __mon_event_count()
436 if (rr->is_mbm_cntr) in __mon_event_count()
437 resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid); in __mon_event_count()
439 resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); in __mon_event_count()
440 m = get_mbm_state(rr->d, closid, rmid, rr->evtid); in __mon_event_count()
442 memset(m, 0, sizeof(struct mbm_state)); in __mon_event_count()
443 return 0; in __mon_event_count()
446 if (rr->d) { in __mon_event_count()
448 if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask)) in __mon_event_count()
449 return -EINVAL; in __mon_event_count()
450 if (rr->is_mbm_cntr) in __mon_event_count()
451 rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id, in __mon_event_count()
452 rr->evtid, &tval); in __mon_event_count()
454 rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, in __mon_event_count()
455 rr->evtid, &tval, rr->arch_mon_ctx); in __mon_event_count()
456 if (rr->err) in __mon_event_count()
457 return rr->err; in __mon_event_count()
459 rr->val += tval; in __mon_event_count()
461 return 0; in __mon_event_count()
465 if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map)) in __mon_event_count()
466 return -EINVAL; in __mon_event_count()
471 * Report success if a read from any domain succeeds, -EINVAL in __mon_event_count()
475 ret = -EINVAL; in __mon_event_count()
476 list_for_each_entry(d, &rr->r->mon_domains, hdr.list) { in __mon_event_count()
477 if (d->ci_id != rr->ci->id) in __mon_event_count()
479 if (rr->is_mbm_cntr) in __mon_event_count()
480 err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id, in __mon_event_count()
481 rr->evtid, &tval); in __mon_event_count()
483 err = resctrl_arch_rmid_read(rr->r, d, closid, rmid, in __mon_event_count()
484 rr->evtid, &tval, rr->arch_mon_ctx); in __mon_event_count()
486 rr->val += tval; in __mon_event_count()
487 ret = 0; in __mon_event_count()
492 rr->err = ret; in __mon_event_count()
498 * mbm_bw_count() - Update bw count from values previously read by
512 u32 closid = rdtgrp->closid; in mbm_bw_count()
513 u32 rmid = rdtgrp->mon.rmid; in mbm_bw_count()
516 m = get_mbm_state(rr->d, closid, rmid, rr->evtid); in mbm_bw_count()
520 cur_bytes = rr->val; in mbm_bw_count()
521 bytes = cur_bytes - m->prev_bw_bytes; in mbm_bw_count()
522 m->prev_bw_bytes = cur_bytes; in mbm_bw_count()
526 m->prev_bw = cur_bw; in mbm_bw_count()
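/*
 * Editor's illustrative sketch, not part of the listed source: the bandwidth
 * figure above is a plain delta between two counter reads, scaled to MB/s.
 * INTERVAL_MS is a hypothetical parameter; in the kernel the overflow
 * handler that feeds mbm_bw_count() runs roughly once per second.
 */
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define INTERVAL_MS	1000

static u32 bytes_to_mbps(u64 cur_bytes, u64 *prev_bytes)
{
	u64 delta = cur_bytes - *prev_bytes;	/* bytes since last sample */

	*prev_bytes = cur_bytes;
	/* (bytes / interval) scaled to megabytes per second */
	return div64_u64(delta * 1000ULL, (u64)INTERVAL_MS * SZ_1M);
}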
540 rdtgrp = rr->rgrp; in mon_event_count()
549 head = &rdtgrp->mon.crdtgrp_list; in mon_event_count()
551 if (rdtgrp->type == RDTCTRL_GROUP) { in mon_event_count()
552 list_for_each_entry(entry, head, mon.crdtgrp_list) { in mon_event_count()
553 if (__mon_event_count(entry, rr) == 0) in mon_event_count()
554 ret = 0; in mon_event_count()
560 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. in mon_event_count()
563 if (ret == 0) in mon_event_count()
564 rr->err = 0; in mon_event_count()
574 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in get_ctrl_domain_from_cpu()
576 if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask)) in get_ctrl_domain_from_cpu()
601 * the L2 <-> L3 traffic.
607 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
608 * phases -> mba_sc kicks in and reduces the bandwidth percentage values -> but
609 * after some time rdtgroup has mostly L2 <-> L3 traffic.
611 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
627 evt_id = rgrp->mba_mbps_event; in update_mba_bw()
629 closid = rgrp->closid; in update_mba_bw()
630 rmid = rgrp->mon.rmid; in update_mba_bw()
641 cur_bw = pmbm_data->prev_bw; in update_mba_bw()
642 user_bw = dom_mba->mbps_val[closid]; in update_mba_bw()
650 head = &rgrp->mon.crdtgrp_list; in update_mba_bw()
651 list_for_each_entry(entry, head, mon.crdtgrp_list) { in update_mba_bw()
652 cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id); in update_mba_bw()
655 cur_bw += cmbm_data->prev_bw; in update_mba_bw()
671 if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { in update_mba_bw()
672 new_msr_val = cur_msr_val - r_mba->membw.bw_gran; in update_mba_bw()
674 (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { in update_mba_bw()
675 new_msr_val = cur_msr_val + r_mba->membw.bw_gran; in update_mba_bw()
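/*
 * Editor's illustrative sketch, not part of the listed source: the MBA
 * software controller step in compact form. One granularity step per
 * interval: throttle harder when measured bandwidth exceeds the user target,
 * ease off only when the target would still not be overshot. MBA_BW_MAX is
 * an assumed name for the 100% cap; min_bw and bw_gran mirror r_mba->membw.
 */
#define MBA_BW_MAX	100u	/* assumed full-throttle percentage */

static u32 mba_sc_step(u32 cur_msr_val, u32 cur_bw, u32 user_bw,
		       u32 min_bw, u32 bw_gran)
{
	if (cur_msr_val > min_bw && user_bw < cur_bw)
		return cur_msr_val - bw_gran;	/* over target: step down */
	if (cur_msr_val < MBA_BW_MAX &&
	    user_bw > cur_bw * (cur_msr_val + min_bw) / cur_msr_val)
		return cur_msr_val + bw_gran;	/* headroom: step up */
	return cur_msr_val;			/* hold steady */
}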
686 struct rmid_read rr = {0}; in mbm_update_one_event()
746 d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, in cqm_handle_limbo()
748 schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, in cqm_handle_limbo()
757 * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
770 cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu); in cqm_setup_limbo_handler()
771 dom->cqm_work_cpu = cpu; in cqm_setup_limbo_handler()
774 schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); in cqm_setup_limbo_handler()
801 head = &prgrp->mon.crdtgrp_list; in mbm_handle_overflow()
802 list_for_each_entry(crgrp, head, mon.crdtgrp_list) in mbm_handle_overflow()
810 * Re-check for housekeeping CPUs. This allows the overflow handler to in mbm_handle_overflow()
813 d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, in mbm_handle_overflow()
815 schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); in mbm_handle_overflow()
823 * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
842 cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu); in mbm_setup_overflow_handler()
843 dom->mbm_work_cpu = cpu; in mbm_setup_overflow_handler()
846 schedule_delayed_work_on(cpu, &dom->mbm_over, delay); in mbm_setup_overflow_handler()
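/*
 * Editor's note with a minimal sketch, not part of the listed source: the
 * limbo and overflow handlers above share one pattern: pick a housekeeping
 * CPU in the domain, record it, and (re)arm a delayed work there. delay_ms
 * is the caller-supplied poll interval.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void arm_domain_work(struct delayed_work *work, int cpu,
			    unsigned long delay_ms)
{
	schedule_delayed_work_on(cpu, work, msecs_to_jiffies(delay_ms));
}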
854 int err = 0, i; in dom_data_init()
869 err = -ENOMEM; in dom_data_init()
882 err = -ENOMEM; in dom_data_init()
886 for (i = 0; i < idx_limit; i++) { in dom_data_init()
888 INIT_LIST_HEAD(&entry->list); in dom_data_init()
890 resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); in dom_data_init()
891 list_add_tail(&entry->list, &rmid_free_lru); in dom_data_init()
902 list_del(&entry->list); in dom_data_init()
914 if (!r->mon_capable) in dom_data_exit()
976 * struct mbm_transaction - Memory transaction an MBM event can be configured with.
999 struct mon_evt *mevt = rdt_kn_parent_priv(of->kn); in event_filter_show()
1002 int ret = 0, i; in event_filter_show()
1007 r = resctrl_arch_get_resource(mevt->rid); in event_filter_show()
1010 ret = -EINVAL; in event_filter_show()
1014 for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) { in event_filter_show()
1015 if (mevt->evt_cfg & mbm_transactions[i].val) { in event_filter_show()
1033 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_mbm_assign_on_mkdir_show()
1034 int ret = 0; in resctrl_mbm_assign_on_mkdir_show()
1041 ret = -EINVAL; in resctrl_mbm_assign_on_mkdir_show()
1045 seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir); in resctrl_mbm_assign_on_mkdir_show()
1056 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_mbm_assign_on_mkdir_write()
1069 ret = -EINVAL; in resctrl_mbm_assign_on_mkdir_write()
1073 r->mon.mbm_assign_on_mkdir = value; in resctrl_mbm_assign_on_mkdir_write()
1082 * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
1087 memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs); in mbm_cntr_free_all()
1091 * resctrl_reset_rmid_all() - Reset all non-architecture states for all the
1104 memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit); in resctrl_reset_rmid_all()
1109 * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
1113 * associated non-architectural state.
1125 memset(m, 0, sizeof(*m)); in rdtgroup_assign_cntr()
1129 * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
1133 * 0 on success, < 0 on failure.
1141 cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid); in rdtgroup_alloc_assign_cntr()
1142 if (cntr_id >= 0) in rdtgroup_alloc_assign_cntr()
1143 return 0; in rdtgroup_alloc_assign_cntr()
1145 cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid); in rdtgroup_alloc_assign_cntr()
1146 if (cntr_id < 0) { in rdtgroup_alloc_assign_cntr()
1148 mevt->name, d->hdr.id); in rdtgroup_alloc_assign_cntr()
1152 rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true); in rdtgroup_alloc_assign_cntr()
1154 return 0; in rdtgroup_alloc_assign_cntr()
1158 * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
1168 * 0 on success, < 0 on failure.
1173 struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid); in rdtgroup_assign_cntr_event()
1174 int ret = 0; in rdtgroup_assign_cntr_event()
1177 list_for_each_entry(d, &r->mon_domains, hdr.list) { in rdtgroup_assign_cntr_event()
1190 * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
1203 if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) || in rdtgroup_assign_cntrs()
1204 !r->mon.mbm_assign_on_mkdir) in rdtgroup_assign_cntrs()
1217 * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
1225 cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid); in rdtgroup_free_unassign_cntr()
1228 if (cntr_id < 0) in rdtgroup_free_unassign_cntr()
1231 rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false); in rdtgroup_free_unassign_cntr()
1237 * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
1244 struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid); in rdtgroup_unassign_cntr_event()
1247 list_for_each_entry(d, &r->mon_domains, hdr.list) in rdtgroup_unassign_cntr_event()
1255 * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
1262 if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r)) in rdtgroup_unassign_cntrs()
1276 u32 temp_val = 0; in resctrl_parse_mem_transactions()
1282 if (!tok || tok[0] == '\0') { in resctrl_parse_mem_transactions()
1284 return 0; in resctrl_parse_mem_transactions()
1290 for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) { in resctrl_parse_mem_transactions()
1300 return -EINVAL; in resctrl_parse_mem_transactions()
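/*
 * Editor's illustrative sketch, not part of the listed source: the general
 * shape of turning a comma-separated list such as "local_reads,remote_reads"
 * into an event configuration bitmask against the mbm_transactions[] table
 * named above. strsep() consumes the buffer in place; an unknown token fails
 * the whole write with -EINVAL.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int parse_transactions_sketch(char *buf, u32 *out)
{
	u32 val = 0;
	char *tok;
	int i;

	while ((tok = strsep(&buf, ",")) != NULL) {
		if (*tok == '\0')
			continue;		/* skip empty fields */
		for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
			if (!strcmp(tok, mbm_transactions[i].name))
				break;
		}
		if (i == NUM_MBM_TRANSACTIONS)
			return -EINVAL;		/* unknown transaction name */
		val |= mbm_transactions[i].val;
	}
	*out = val;
	return 0;
}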
1307 * rdtgroup_update_cntr_event() - Update the counter assignments for the event
1319 list_for_each_entry(d, &r->mon_domains, hdr.list) { in rdtgroup_update_cntr_event()
1321 if (cntr_id >= 0) in rdtgroup_update_cntr_event()
1322 rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid, in rdtgroup_update_cntr_event()
1323 rdtgrp->closid, cntr_id, true); in rdtgroup_update_cntr_event()
1328 * resctrl_update_cntr_allrdtgrp() - Update the counter assignments for the event
1334 struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid); in resctrl_update_cntr_allrdtgrp()
1342 rdtgroup_update_cntr_event(r, prgrp, mevt->evtid); in resctrl_update_cntr_allrdtgrp()
1344 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) in resctrl_update_cntr_allrdtgrp()
1345 rdtgroup_update_cntr_event(r, crgrp, mevt->evtid); in resctrl_update_cntr_allrdtgrp()
1352 struct mon_evt *mevt = rdt_kn_parent_priv(of->kn); in event_filter_write()
1354 u32 evt_cfg = 0; in event_filter_write()
1355 int ret = 0; in event_filter_write()
1358 if (nbytes == 0 || buf[nbytes - 1] != '\n') in event_filter_write()
1359 return -EINVAL; in event_filter_write()
1361 buf[nbytes - 1] = '\0'; in event_filter_write()
1368 r = resctrl_arch_get_resource(mevt->rid); in event_filter_write()
1371 ret = -EINVAL; in event_filter_write()
1376 if (!ret && mevt->evt_cfg != evt_cfg) { in event_filter_write()
1377 mevt->evt_cfg = evt_cfg; in event_filter_write()
1391 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_mbm_assign_mode_show()
1397 if (r->mon.mbm_cntr_assignable) { in resctrl_mbm_assign_mode_show()
1415 return 0; in resctrl_mbm_assign_mode_show()
1421 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_mbm_assign_mode_write()
1423 int ret = 0; in resctrl_mbm_assign_mode_write()
1427 if (nbytes == 0 || buf[nbytes - 1] != '\n') in resctrl_mbm_assign_mode_write()
1428 return -EINVAL; in resctrl_mbm_assign_mode_write()
1430 buf[nbytes - 1] = '\0'; in resctrl_mbm_assign_mode_write()
1438 enable = 0; in resctrl_mbm_assign_mode_write()
1440 if (r->mon.mbm_cntr_assignable) { in resctrl_mbm_assign_mode_write()
1443 ret = -EINVAL; in resctrl_mbm_assign_mode_write()
1448 ret = -EINVAL; in resctrl_mbm_assign_mode_write()
1466 mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; in resctrl_mbm_assign_mode_write()
1468 mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & in resctrl_mbm_assign_mode_write()
1474 r->mon.mbm_assign_on_mkdir = true; in resctrl_mbm_assign_mode_write()
1476 * Reset all the non-architectural RMID state and assignable counters. in resctrl_mbm_assign_mode_write()
1478 list_for_each_entry(d, &r->mon_domains, hdr.list) { in resctrl_mbm_assign_mode_write()
1494 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_num_mbm_cntrs_show()
1501 list_for_each_entry(dom, &r->mon_domains, hdr.list) { in resctrl_num_mbm_cntrs_show()
1505 seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs); in resctrl_num_mbm_cntrs_show()
1512 return 0; in resctrl_num_mbm_cntrs_show()
1518 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in resctrl_available_mbm_cntrs_show()
1522 int ret = 0; in resctrl_available_mbm_cntrs_show()
1531 ret = -EINVAL; in resctrl_available_mbm_cntrs_show()
1535 list_for_each_entry(dom, &r->mon_domains, hdr.list) { in resctrl_available_mbm_cntrs_show()
1539 cntrs = 0; in resctrl_available_mbm_cntrs_show()
1540 for (i = 0; i < r->mon.num_mbm_cntrs; i++) { in resctrl_available_mbm_cntrs_show()
1541 if (!dom->cntr_cfg[i].rdtgrp) in resctrl_available_mbm_cntrs_show()
1545 seq_printf(s, "%d=%u", dom->hdr.id, cntrs); in resctrl_available_mbm_cntrs_show()
1563 int ret = 0; in mbm_L3_assignments_show()
1566 rdtgrp = rdtgroup_kn_lock_live(of->kn); in mbm_L3_assignments_show()
1568 ret = -ENOENT; in mbm_L3_assignments_show()
1575 ret = -EINVAL; in mbm_L3_assignments_show()
1580 if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid)) in mbm_L3_assignments_show()
1584 seq_printf(s, "%s:", mevt->name); in mbm_L3_assignments_show()
1585 list_for_each_entry(d, &r->mon_domains, hdr.list) { in mbm_L3_assignments_show()
1589 if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0) in mbm_L3_assignments_show()
1590 seq_printf(s, "%d=_", d->hdr.id); in mbm_L3_assignments_show()
1592 seq_printf(s, "%d=e", d->hdr.id); in mbm_L3_assignments_show()
1600 rdtgroup_kn_unlock(of->kn); in mbm_L3_assignments_show()
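/*
 * Editor's note: the show path above prints one line per enabled MBM event
 * with a per-domain assignment state, e.g. (illustrative):
 *
 *	mbm_total_bytes:0=e;1=_
 *
 * where "e" marks a domain that has a counter assigned to this group and
 * "_" marks one that does not, matching the "%d=e" / "%d=_" formats in the
 * fragments above.
 */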
1606 * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
1614 if (mevt->rid == r->rid && mevt->enabled && in mbm_get_mon_event_by_name()
1615 resctrl_is_mbm_event(mevt->evtid) && in mbm_get_mon_event_by_name()
1616 !strcmp(mevt->name, name)) in mbm_get_mon_event_by_name()
1626 int ret = 0; in rdtgroup_modify_assign_state()
1629 return -EINVAL; in rdtgroup_modify_assign_state()
1639 ret = -EINVAL; in rdtgroup_modify_assign_state()
1650 unsigned long dom_id = 0; in resctrl_parse_mbm_assignment()
1658 return -ENOENT; in resctrl_parse_mbm_assignment()
1662 if (!tok || tok[0] == '\0') in resctrl_parse_mbm_assignment()
1663 return 0; in resctrl_parse_mbm_assignment()
1679 return -EINVAL; in resctrl_parse_mbm_assignment()
1683 list_for_each_entry(d, &r->mon_domains, hdr.list) { in resctrl_parse_mbm_assignment()
1684 if (d->hdr.id == dom_id) { in resctrl_parse_mbm_assignment()
1696 return -EINVAL; in resctrl_parse_mbm_assignment()
1705 int ret = 0; in mbm_L3_assignments_write()
1708 if (nbytes == 0 || buf[nbytes - 1] != '\n') in mbm_L3_assignments_write()
1709 return -EINVAL; in mbm_L3_assignments_write()
1711 buf[nbytes - 1] = '\0'; in mbm_L3_assignments_write()
1713 rdtgrp = rdtgroup_kn_lock_live(of->kn); in mbm_L3_assignments_write()
1715 rdtgroup_kn_unlock(of->kn); in mbm_L3_assignments_write()
1716 return -ENOENT; in mbm_L3_assignments_write()
1722 rdtgroup_kn_unlock(of->kn); in mbm_L3_assignments_write()
1723 return -EINVAL; in mbm_L3_assignments_write()
1739 rdtgroup_kn_unlock(of->kn); in mbm_L3_assignments_write()
1745 * resctrl_mon_resource_init() - Initialise global monitoring structures.
1754 * Returns 0 for success, or -ENOMEM.
1761 if (!r->mon_capable) in resctrl_mon_resource_init()
1762 return 0; in resctrl_mon_resource_init()
1784 if (r->mon.mbm_cntr_assignable) { in resctrl_mon_resource_init()
1786 mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; in resctrl_mon_resource_init()
1788 mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & in resctrl_mon_resource_init()
1792 r->mon.mbm_assign_on_mkdir = true; in resctrl_mon_resource_init()
1803 return 0; in resctrl_mon_resource_init()