1#!/usr/bin/env python3 2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 3"""Convert directories of JSON events to C code.""" 4import argparse 5import csv 6from functools import lru_cache 7import json 8import metric 9import os 10import sys 11from typing import (Callable, Dict, Optional, Sequence, Set, Tuple) 12import collections 13 14# Global command line arguments. 15_args = None 16# List of regular event tables. 17_event_tables = [] 18# List of event tables generated from "/sys" directories. 19_sys_event_tables = [] 20# List of regular metric tables. 21_metric_tables = [] 22# List of metric tables generated from "/sys" directories. 23_sys_metric_tables = [] 24# Mapping between sys event table names and sys metric table names. 25_sys_event_table_to_metric_table_mapping = {} 26# Map from an event name to an architecture standard 27# JsonEvent. Architecture standard events are in json files in the top 28# f'{_args.starting_dir}/{_args.arch}' directory. 29_arch_std_events = {} 30# Events to write out when the table is closed 31_pending_events = [] 32# Name of events table to be written out 33_pending_events_tblname = None 34# Metrics to write out when the table is closed 35_pending_metrics = [] 36# Name of metrics table to be written out 37_pending_metrics_tblname = None 38# Global BigCString shared by all structures. 39_bcs = None 40# Map from the name of a metric group to a description of the group. 41_metricgroups = {} 42# Order specific JsonEvent attributes will be visited. 43_json_event_attributes = [ 44 # cmp_sevent related attributes. 45 'name', 'topic', 'desc', 46 # Seems useful, put it early. 47 'event', 48 # Short things in alphabetical order. 49 'compat', 'deprecated', 'perpkg', 'unit', 50 # Retirement latency specific to Intel granite rapids currently. 51 'retirement_latency_mean', 'retirement_latency_min', 52 'retirement_latency_max', 53 # Longer things (the last won't be iterated over during decompress). 54 'long_desc' 55] 56 57# Attributes that are in pmu_metric rather than pmu_event. 58_json_metric_attributes = [ 59 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold', 60 'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group', 61 'default_metricgroup_name', 'aggr_mode', 'event_grouping', 62 'default_show_events' 63] 64# Attributes that are bools or enum int values, encoded as '0', '1',... 65_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg', 66 'default_show_events'] 67 68def removesuffix(s: str, suffix: str) -> str: 69 """Remove the suffix from a string 70 71 The removesuffix function is added to str in Python 3.9. We aim for 3.6 72 compatibility and so provide our own function here. 73 """ 74 return s[0:-len(suffix)] if s.endswith(suffix) else s 75 76 77def file_name_to_table_name(prefix: str, parents: Sequence[str], 78 dirname: str) -> str: 79 """Generate a C table name from directory names.""" 80 tblname = prefix 81 for p in parents: 82 tblname += '_' + p 83 tblname += '_' + dirname 84 return tblname.replace('-', '_') 85 86 87def c_len(s: str) -> int: 88 """Return the length of s a C string 89 90 This doesn't handle all escape characters properly. It first assumes 91 all \\ are for escaping, it then adjusts as it will have over counted 92 \\. The code uses \000 rather than \0 as a terminator as an adjacent 93 number would be folded into a string of \0 (ie. "\0" + "5" doesn't 94 equal a terminator followed by the number 5 but the escape of 95 \05). The code adjusts for \000 but not properly for all octal, hex 96 or unicode values. 97 """ 98 try: 99 utf = s.encode(encoding='utf-8',errors='strict') 100 except: 101 print(f'broken string {s}') 102 raise 103 return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2) 104 105class BigCString: 106 """A class to hold many strings concatenated together. 107 108 Generating a large number of stand-alone C strings creates a large 109 number of relocations in position independent code. The BigCString 110 is a helper for this case. It builds a single string which within it 111 are all the other C strings (to avoid memory issues the string 112 itself is held as a list of strings). The offsets within the big 113 string are recorded and when stored to disk these don't need 114 relocation. To reduce the size of the string further, identical 115 strings are merged. If a longer string ends-with the same value as a 116 shorter string, these entries are also merged. 117 """ 118 strings: Set[str] 119 big_string: Sequence[str] 120 offsets: Dict[str, int] 121 insert_number: int 122 insert_point: Dict[str, int] 123 metrics: Set[str] 124 125 def __init__(self): 126 self.strings = set() 127 self.insert_number = 0; 128 self.insert_point = {} 129 self.metrics = set() 130 131 def add(self, s: str, metric: bool) -> None: 132 """Called to add to the big string.""" 133 if s not in self.strings: 134 self.strings.add(s) 135 self.insert_point[s] = self.insert_number 136 self.insert_number += 1 137 if metric: 138 self.metrics.add(s) 139 140 def compute(self) -> None: 141 """Called once all strings are added to compute the string and offsets.""" 142 143 folded_strings = {} 144 # Determine if two strings can be folded, ie. let 1 string use the 145 # end of another. First reverse all strings and sort them. 146 sorted_reversed_strings = sorted([x[::-1] for x in self.strings]) 147 148 # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward 149 # for each string to see if there is a better candidate to fold it 150 # into, in the example rather than using 'yz' we can use'xyz' at 151 # an offset of 1. We record which string can be folded into which 152 # in folded_strings, we don't need to record the offset as it is 153 # trivially computed from the string lengths. 154 for pos,s in enumerate(sorted_reversed_strings): 155 best_pos = pos 156 for check_pos in range(pos + 1, len(sorted_reversed_strings)): 157 if sorted_reversed_strings[check_pos].startswith(s): 158 best_pos = check_pos 159 else: 160 break 161 if pos != best_pos: 162 folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1] 163 164 # Compute reverse mappings for debugging. 165 fold_into_strings = collections.defaultdict(set) 166 for key, val in folded_strings.items(): 167 if key != val: 168 fold_into_strings[val].add(key) 169 170 # big_string_offset is the current location within the C string 171 # being appended to - comments, etc. don't count. big_string is 172 # the string contents represented as a list. Strings are immutable 173 # in Python and so appending to one causes memory issues, while 174 # lists are mutable. 175 big_string_offset = 0 176 self.big_string = [] 177 self.offsets = {} 178 179 def string_cmp_key(s: str) -> Tuple[bool, int, str]: 180 return (s in self.metrics, self.insert_point[s], s) 181 182 # Emit all strings that aren't folded in a sorted manner. 183 for s in sorted(self.strings, key=string_cmp_key): 184 if s not in folded_strings: 185 self.offsets[s] = big_string_offset 186 self.big_string.append(f'/* offset={big_string_offset} */ "') 187 self.big_string.append(s) 188 self.big_string.append('"') 189 if s in fold_into_strings: 190 self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */') 191 self.big_string.append('\n') 192 big_string_offset += c_len(s) 193 continue 194 195 # Compute the offsets of the folded strings. 196 for s in folded_strings.keys(): 197 assert s not in self.offsets 198 folded_s = folded_strings[s] 199 self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s) 200 201_bcs = BigCString() 202 203class JsonEvent: 204 """Representation of an event loaded from a json file dictionary.""" 205 206 def __init__(self, jd: dict): 207 """Constructor passed the dictionary of parsed json values.""" 208 209 def llx(x: int) -> str: 210 """Convert an int to a string similar to a printf modifier of %#llx.""" 211 return str(x) if x >= 0 and x < 10 else hex(x) 212 213 def fixdesc(s: str) -> str: 214 """Fix formatting issue for the desc string.""" 215 if s is None: 216 return None 217 return removesuffix(removesuffix(removesuffix(s, '. '), 218 '. '), '.').replace('\n', '\\n').replace( 219 '\"', '\\"').replace('\r', '\\r') 220 221 def convert_aggr_mode(aggr_mode: str) -> Optional[str]: 222 """Returns the aggr_mode_class enum value associated with the JSON string.""" 223 if not aggr_mode: 224 return None 225 aggr_mode_to_enum = { 226 'PerChip': '1', 227 'PerCore': '2', 228 } 229 return aggr_mode_to_enum[aggr_mode] 230 231 def convert_metric_constraint(metric_constraint: str) -> Optional[str]: 232 """Returns the metric_event_groups enum value associated with the JSON string.""" 233 if not metric_constraint: 234 return None 235 metric_constraint_to_enum = { 236 'NO_GROUP_EVENTS': '1', 237 'NO_GROUP_EVENTS_NMI': '2', 238 'NO_NMI_WATCHDOG': '2', 239 'NO_GROUP_EVENTS_SMT': '3', 240 'NO_THRESHOLD_AND_NMI': '4', 241 } 242 return metric_constraint_to_enum[metric_constraint] 243 244 def lookup_msr(num: str) -> Optional[str]: 245 """Converts the msr number, or first in a list to the appropriate event field.""" 246 if not num: 247 return None 248 msrmap = { 249 0x3F6: 'ldlat=', 250 0x1A6: 'offcore_rsp=', 251 0x1A7: 'offcore_rsp=', 252 0x3F7: 'frontend=', 253 } 254 return msrmap[int(num.split(',', 1)[0], 0)] 255 256 def real_event(name: str, event: str) -> Optional[str]: 257 """Convert well known event names to an event string otherwise use the event argument.""" 258 fixed = { 259 'inst_retired.any': 'event=0xc0,period=2000003', 260 'inst_retired.any_p': 'event=0xc0,period=2000003', 261 'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003', 262 'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003', 263 'cpu_clk_unhalted.core': 'event=0x3c,period=2000003', 264 'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003', 265 } 266 if not name: 267 return None 268 if name.lower() in fixed: 269 return fixed[name.lower()] 270 return event 271 272 def unit_to_pmu(unit: str) -> Optional[str]: 273 """Convert a JSON Unit to Linux PMU name.""" 274 if not unit: 275 return 'default_core' 276 # Comment brought over from jevents.c: 277 # it's not realistic to keep adding these, we need something more scalable ... 278 table = { 279 'CBO': 'uncore_cbox', 280 'QPI LL': 'uncore_qpi', 281 'SBO': 'uncore_sbox', 282 'iMPH-U': 'uncore_arb', 283 'CPU-M-CF': 'cpum_cf', 284 'CPU-M-SF': 'cpum_sf', 285 'PAI-CRYPTO' : 'pai_crypto', 286 'PAI-EXT' : 'pai_ext', 287 'UPI LL': 'uncore_upi', 288 'hisi_sicl,cpa': 'hisi_sicl,cpa', 289 'hisi_sccl,ddrc': 'hisi_sccl,ddrc', 290 'hisi_sccl,hha': 'hisi_sccl,hha', 291 'hisi_sccl,l3c': 'hisi_sccl,l3c', 292 'imx8_ddr': 'imx8_ddr', 293 'imx9_ddr': 'imx9_ddr', 294 'L3PMC': 'amd_l3', 295 'DFPMC': 'amd_df', 296 'UMCPMC': 'amd_umc', 297 'cpu_core': 'cpu_core', 298 'cpu_atom': 'cpu_atom', 299 'ali_drw': 'ali_drw', 300 'arm_cmn': 'arm_cmn', 301 'software': 'software', 302 'tool': 'tool', 303 } 304 return table[unit] if unit in table else f'uncore_{unit.lower()}' 305 306 def is_zero(val: str) -> bool: 307 try: 308 if val.startswith('0x'): 309 return int(val, 16) == 0 310 else: 311 return int(val) == 0 312 except e: 313 return False 314 315 def canonicalize_value(val: str) -> str: 316 try: 317 if val.startswith('0x'): 318 return llx(int(val, 16)) 319 return str(int(val)) 320 except e: 321 return val 322 323 eventcode = 0 324 if 'EventCode' in jd: 325 eventcode = int(jd['EventCode'].split(',', 1)[0], 0) 326 if 'ExtSel' in jd: 327 eventcode |= int(jd['ExtSel']) << 8 328 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None 329 eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None 330 legacy_hw_config = int(jd['LegacyConfigCode'], 0) if 'LegacyConfigCode' in jd else None 331 legacy_cache_config = int(jd['LegacyCacheCode'], 0) if 'LegacyCacheCode' in jd else None 332 self.name = jd['EventName'].lower() if 'EventName' in jd else None 333 self.topic = '' 334 self.compat = jd.get('Compat') 335 self.desc = fixdesc(jd.get('BriefDescription')) 336 self.long_desc = fixdesc(jd.get('PublicDescription')) 337 precise = jd.get('PEBS') 338 msr = lookup_msr(jd.get('MSRIndex')) 339 msrval = jd.get('MSRValue') 340 extra_desc = '' 341 if 'Data_LA' in jd: 342 extra_desc += ' Supports address when precise' 343 if 'Errata' in jd: 344 extra_desc += '.' 345 if 'Errata' in jd: 346 extra_desc += ' Spec update: ' + jd['Errata'] 347 self.pmu = unit_to_pmu(jd.get('Unit')) 348 filter = jd.get('Filter') 349 self.unit = jd.get('ScaleUnit') 350 self.perpkg = jd.get('PerPkg') 351 self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode')) 352 self.deprecated = jd.get('Deprecated') 353 self.retirement_latency_mean = jd.get('RetirementLatencyMean') 354 self.retirement_latency_min = jd.get('RetirementLatencyMin') 355 self.retirement_latency_max = jd.get('RetirementLatencyMax') 356 self.metric_name = jd.get('MetricName') 357 self.metric_group = jd.get('MetricGroup') 358 self.metricgroup_no_group = jd.get('MetricgroupNoGroup') 359 self.default_metricgroup_name = jd.get('DefaultMetricgroupName') 360 self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint')) 361 self.default_show_events = jd.get('DefaultShowEvents') 362 self.metric_expr = None 363 if 'MetricExpr' in jd: 364 self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify() 365 # Note, the metric formula for the threshold isn't parsed as the & 366 # and > have incorrect precedence. 367 self.metric_threshold = jd.get('MetricThreshold') 368 369 arch_std = jd.get('ArchStdEvent') 370 if precise and self.desc and '(Precise Event)' not in self.desc: 371 extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise ' 372 'event)') 373 event = None 374 if configcode is not None: 375 event = f'config={llx(configcode)}' 376 elif eventidcode is not None: 377 event = f'eventid={llx(eventidcode)}' 378 elif legacy_hw_config is not None: 379 event = f'legacy-hardware-config={llx(legacy_hw_config)}' 380 elif legacy_cache_config is not None: 381 event = f'legacy-cache-config={llx(legacy_cache_config)}' 382 else: 383 event = f'event={llx(eventcode)}' 384 event_fields = [ 385 ('AnyThread', 'any='), 386 ('PortMask', 'ch_mask='), 387 ('CounterMask', 'cmask='), 388 ('EdgeDetect', 'edge='), 389 ('FCMask', 'fc_mask='), 390 ('Invert', 'inv='), 391 ('SampleAfterValue', 'period='), 392 ('UMask', 'umask='), 393 ('NodeType', 'type='), 394 ('RdWrMask', 'rdwrmask='), 395 ('EnAllCores', 'enallcores='), 396 ('EnAllSlices', 'enallslices='), 397 ('SliceId', 'sliceid='), 398 ('ThreadMask', 'threadmask='), 399 ] 400 for key, value in event_fields: 401 if key in jd and not is_zero(jd[key]): 402 event += f',{value}{canonicalize_value(jd[key])}' 403 if filter: 404 event += f',{filter}' 405 if msr: 406 event += f',{msr}{msrval}' 407 if self.desc and extra_desc: 408 self.desc += extra_desc 409 if self.long_desc and extra_desc: 410 self.long_desc += extra_desc 411 if self.desc and self.long_desc and self.desc == self.long_desc: 412 # Avoid duplicated descriptions. 413 self.long_desc = None 414 if arch_std: 415 if arch_std.lower() in _arch_std_events: 416 event = _arch_std_events[arch_std.lower()].event 417 # Copy from the architecture standard event to self for undefined fields. 418 for attr, value in _arch_std_events[arch_std.lower()].__dict__.items(): 419 if hasattr(self, attr) and not getattr(self, attr): 420 setattr(self, attr, value) 421 else: 422 raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std) 423 424 self.event = real_event(self.name, event) 425 426 def __repr__(self) -> str: 427 """String representation primarily for debugging.""" 428 s = '{\n' 429 for attr, value in self.__dict__.items(): 430 if value: 431 s += f'\t{attr} = {value},\n' 432 return s + '}' 433 434 def build_c_string(self, metric: bool) -> str: 435 s = '' 436 for attr in _json_metric_attributes if metric else _json_event_attributes: 437 x = getattr(self, attr) 438 if metric and x and attr == 'metric_expr': 439 # Convert parsed metric expressions into a string. Slashes 440 # must be doubled in the file. 441 x = x.ToPerfJson().replace('\\', '\\\\') 442 if metric and x and attr == 'metric_threshold': 443 x = x.replace('\\', '\\\\') 444 if attr in _json_enum_attributes: 445 s += x if x else '0' 446 else: 447 s += f'{x}\\000' if x else '\\000' 448 return s 449 450 def to_c_string(self, metric: bool) -> str: 451 """Representation of the event as a C struct initializer.""" 452 453 def fix_comment(s: str) -> str: 454 return s.replace('*/', r'\*\/') 455 456 s = self.build_c_string(metric) 457 return f'{{ { _bcs.offsets[s] } }}, /* {fix_comment(s)} */\n' 458 459 460@lru_cache(maxsize=None) 461def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]: 462 """Read json events from the specified file.""" 463 try: 464 events = json.load(open(path), object_hook=JsonEvent) 465 except BaseException as err: 466 print(f"Exception processing {path}") 467 raise 468 metrics: list[Tuple[str, str, metric.Expression]] = [] 469 for event in events: 470 event.topic = topic 471 if event.metric_name and '-' not in event.metric_name: 472 metrics.append((event.pmu, event.metric_name, event.metric_expr)) 473 updates = metric.RewriteMetricsInTermsOfOthers(metrics) 474 if updates: 475 for event in events: 476 if event.metric_name in updates: 477 # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n' 478 # f'to\n"{updates[event.metric_name]}"') 479 event.metric_expr = updates[event.metric_name] 480 481 return events 482 483def preprocess_arch_std_files(archpath: str) -> None: 484 """Read in all architecture standard events.""" 485 global _arch_std_events 486 for item in os.scandir(archpath): 487 if not item.is_file() or not item.name.endswith('.json'): 488 continue 489 try: 490 for event in read_json_events(item.path, topic=''): 491 if event.name: 492 _arch_std_events[event.name.lower()] = event 493 if event.metric_name: 494 _arch_std_events[event.metric_name.lower()] = event 495 except Exception as e: 496 raise RuntimeError(f'Failure processing \'{item.name}\' in \'{archpath}\'') from e 497 498 499def add_events_table_entries(item: os.DirEntry, topic: str) -> None: 500 """Add contents of file to _pending_events table.""" 501 for e in read_json_events(item.path, topic): 502 if e.name: 503 _pending_events.append(e) 504 if e.metric_name and not any(e.metric_name == x.metric_name and 505 e.pmu == x.pmu for x in _pending_metrics): 506 _pending_metrics.append(e) 507 508 509def print_pending_events() -> None: 510 """Optionally close events table.""" 511 512 def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]: 513 def fix_none(s: Optional[str]) -> str: 514 if s is None: 515 return '' 516 return s 517 518 return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic), 519 fix_none(j.metric_name)) 520 521 global _pending_events 522 if not _pending_events: 523 return 524 525 global _pending_events_tblname 526 if _pending_events_tblname.endswith('_sys'): 527 global _sys_event_tables 528 _sys_event_tables.append(_pending_events_tblname) 529 else: 530 global event_tables 531 _event_tables.append(_pending_events_tblname) 532 533 first = True 534 last_pmu = None 535 last_name = None 536 pmus = set() 537 for event in sorted(_pending_events, key=event_cmp_key): 538 if last_pmu and last_pmu == event.pmu: 539 assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}" 540 if event.pmu != last_pmu: 541 if not first: 542 _args.output_file.write('};\n') 543 pmu_name = event.pmu.replace(',', '_') 544 _args.output_file.write( 545 f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n') 546 first = False 547 last_pmu = event.pmu 548 pmus.add((event.pmu, pmu_name)) 549 550 _args.output_file.write(event.to_c_string(metric=False)) 551 last_name = event.name 552 _pending_events = [] 553 554 _args.output_file.write(f""" 555}}; 556 557static const struct pmu_table_entry {_pending_events_tblname}[] = {{ 558""") 559 for (pmu, tbl_pmu) in sorted(pmus): 560 pmu_name = f"{pmu}\\000" 561 _args.output_file.write(f"""{{ 562 .entries = {_pending_events_tblname}_{tbl_pmu}, 563 .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}), 564 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }}, 565}}, 566""") 567 _args.output_file.write('};\n\n') 568 569def print_pending_metrics() -> None: 570 """Optionally close metrics table.""" 571 572 def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]: 573 def fix_none(s: Optional[str]) -> str: 574 if s is None: 575 return '' 576 return s 577 578 return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name)) 579 580 global _pending_metrics 581 if not _pending_metrics: 582 return 583 584 global _pending_metrics_tblname 585 if _pending_metrics_tblname.endswith('_sys'): 586 global _sys_metric_tables 587 _sys_metric_tables.append(_pending_metrics_tblname) 588 else: 589 global metric_tables 590 _metric_tables.append(_pending_metrics_tblname) 591 592 first = True 593 last_pmu = None 594 pmus = set() 595 for metric in sorted(_pending_metrics, key=metric_cmp_key): 596 if metric.pmu != last_pmu: 597 if not first: 598 _args.output_file.write('};\n') 599 pmu_name = metric.pmu.replace(',', '_') 600 _args.output_file.write( 601 f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n') 602 first = False 603 last_pmu = metric.pmu 604 pmus.add((metric.pmu, pmu_name)) 605 606 _args.output_file.write(metric.to_c_string(metric=True)) 607 _pending_metrics = [] 608 609 _args.output_file.write(f""" 610}}; 611 612static const struct pmu_table_entry {_pending_metrics_tblname}[] = {{ 613""") 614 for (pmu, tbl_pmu) in sorted(pmus): 615 pmu_name = f"{pmu}\\000" 616 _args.output_file.write(f"""{{ 617 .entries = {_pending_metrics_tblname}_{tbl_pmu}, 618 .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}), 619 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }}, 620}}, 621""") 622 _args.output_file.write('};\n\n') 623 624def get_topic(topic: str) -> str: 625 if topic.endswith('metrics.json'): 626 return 'metrics' 627 return removesuffix(topic, '.json').replace('-', ' ') 628 629def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None: 630 631 if item.is_dir(): 632 return 633 634 # base dir or too deep 635 level = len(parents) 636 if level == 0 or level > 4: 637 return 638 639 # Ignore other directories. If the file name does not have a .json 640 # extension, ignore it. It could be a readme.txt for instance. 641 if not item.is_file() or not item.name.endswith('.json'): 642 return 643 644 if item.name.endswith('metricgroups.json'): 645 metricgroup_descriptions = json.load(open(item.path)) 646 for mgroup in metricgroup_descriptions: 647 assert len(mgroup) > 1, parents 648 description = f"{metricgroup_descriptions[mgroup]}\\000" 649 mgroup = f"{mgroup}\\000" 650 _bcs.add(mgroup, metric=True) 651 _bcs.add(description, metric=True) 652 _metricgroups[mgroup] = description 653 return 654 655 topic = get_topic(item.name) 656 for event in read_json_events(item.path, topic): 657 pmu_name = f"{event.pmu}\\000" 658 if event.name: 659 _bcs.add(pmu_name, metric=False) 660 _bcs.add(event.build_c_string(metric=False), metric=False) 661 if event.metric_name: 662 _bcs.add(pmu_name, metric=True) 663 _bcs.add(event.build_c_string(metric=True), metric=True) 664 665def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None: 666 """Process a JSON file during the main walk.""" 667 def is_leaf_dir_ignoring_sys(path: str) -> bool: 668 for item in os.scandir(path): 669 if item.is_dir() and item.name != 'sys': 670 return False 671 return True 672 673 # Model directories are leaves (ignoring possible sys 674 # directories). The FTW will walk into the directory next. Flush 675 # pending events and metrics and update the table names for the new 676 # model directory. 677 if item.is_dir() and is_leaf_dir_ignoring_sys(item.path): 678 print_pending_events() 679 print_pending_metrics() 680 681 global _pending_events_tblname 682 _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name) 683 global _pending_metrics_tblname 684 _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name) 685 686 if item.name == 'sys': 687 _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname 688 return 689 690 # base dir or too deep 691 level = len(parents) 692 if level == 0 or level > 4: 693 return 694 695 # Ignore other directories. If the file name does not have a .json 696 # extension, ignore it. It could be a readme.txt for instance. 697 if not item.is_file() or not item.name.endswith('.json') or item.name.endswith('metricgroups.json'): 698 return 699 700 add_events_table_entries(item, get_topic(item.name)) 701 702 703def print_mapping_table(archs: Sequence[str]) -> None: 704 """Read the mapfile and generate the struct from cpuid string to event table.""" 705 _args.output_file.write(""" 706/* Struct used to make the PMU event table implementation opaque to callers. */ 707struct pmu_events_table { 708 const struct pmu_table_entry *pmus; 709 uint32_t num_pmus; 710}; 711 712/* Struct used to make the PMU metric table implementation opaque to callers. */ 713struct pmu_metrics_table { 714 const struct pmu_table_entry *pmus; 715 uint32_t num_pmus; 716}; 717 718/* 719 * Map a CPU to its table of PMU events. The CPU is identified by the 720 * cpuid field, which is an arch-specific identifier for the CPU. 721 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile 722 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c) 723 * 724 * The cpuid can contain any character other than the comma. 725 */ 726struct pmu_events_map { 727 const char *arch; 728 const char *cpuid; 729 struct pmu_events_table event_table; 730 struct pmu_metrics_table metric_table; 731}; 732 733/* 734 * Global table mapping each known CPU for the architecture to its 735 * table of PMU events. 736 */ 737static const struct pmu_events_map pmu_events_map[] = { 738""") 739 for arch in archs: 740 if arch == 'test': 741 _args.output_file.write("""{ 742\t.arch = "testarch", 743\t.cpuid = "testcpu", 744\t.event_table = { 745\t\t.pmus = pmu_events__test_soc_cpu, 746\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu), 747\t}, 748\t.metric_table = { 749\t\t.pmus = pmu_metrics__test_soc_cpu, 750\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu), 751\t} 752}, 753""") 754 elif arch == 'common': 755 _args.output_file.write("""{ 756\t.arch = "common", 757\t.cpuid = "common", 758\t.event_table = { 759\t\t.pmus = pmu_events__common, 760\t\t.num_pmus = ARRAY_SIZE(pmu_events__common), 761\t}, 762\t.metric_table = { 763\t\t.pmus = pmu_metrics__common, 764\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__common), 765\t}, 766}, 767""") 768 else: 769 with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile: 770 table = csv.reader(csvfile) 771 first = True 772 for row in table: 773 # Skip the first row or any row beginning with #. 774 if not first and len(row) > 0 and not row[0].startswith('#'): 775 event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_')) 776 if event_tblname in _event_tables: 777 event_size = f'ARRAY_SIZE({event_tblname})' 778 else: 779 event_tblname = 'NULL' 780 event_size = '0' 781 metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_')) 782 if metric_tblname in _metric_tables: 783 metric_size = f'ARRAY_SIZE({metric_tblname})' 784 else: 785 metric_tblname = 'NULL' 786 metric_size = '0' 787 if event_size == '0' and metric_size == '0': 788 continue 789 cpuid = row[0].replace('\\', '\\\\') 790 _args.output_file.write(f"""{{ 791\t.arch = "{arch}", 792\t.cpuid = "{cpuid}", 793\t.event_table = {{ 794\t\t.pmus = {event_tblname}, 795\t\t.num_pmus = {event_size} 796\t}}, 797\t.metric_table = {{ 798\t\t.pmus = {metric_tblname}, 799\t\t.num_pmus = {metric_size} 800\t}} 801}}, 802""") 803 first = False 804 805 _args.output_file.write("""{ 806\t.arch = 0, 807\t.cpuid = 0, 808\t.event_table = { 0, 0 }, 809\t.metric_table = { 0, 0 }, 810} 811}; 812""") 813 814 815def print_system_mapping_table() -> None: 816 """C struct mapping table array for tables from /sys directories.""" 817 _args.output_file.write(""" 818struct pmu_sys_events { 819\tconst char *name; 820\tstruct pmu_events_table event_table; 821\tstruct pmu_metrics_table metric_table; 822}; 823 824static const struct pmu_sys_events pmu_sys_event_tables[] = { 825""") 826 printed_metric_tables = [] 827 for tblname in _sys_event_tables: 828 _args.output_file.write(f"""\t{{ 829\t\t.event_table = {{ 830\t\t\t.pmus = {tblname}, 831\t\t\t.num_pmus = ARRAY_SIZE({tblname}) 832\t\t}},""") 833 metric_tblname = _sys_event_table_to_metric_table_mapping[tblname] 834 if metric_tblname in _sys_metric_tables: 835 _args.output_file.write(f""" 836\t\t.metric_table = {{ 837\t\t\t.pmus = {metric_tblname}, 838\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname}) 839\t\t}},""") 840 printed_metric_tables.append(metric_tblname) 841 _args.output_file.write(f""" 842\t\t.name = \"{tblname}\", 843\t}}, 844""") 845 for tblname in _sys_metric_tables: 846 if tblname in printed_metric_tables: 847 continue 848 _args.output_file.write(f"""\t{{ 849\t\t.metric_table = {{ 850\t\t\t.pmus = {tblname}, 851\t\t\t.num_pmus = ARRAY_SIZE({tblname}) 852\t\t}}, 853\t\t.name = \"{tblname}\", 854\t}}, 855""") 856 _args.output_file.write("""\t{ 857\t\t.event_table = { 0, 0 }, 858\t\t.metric_table = { 0, 0 }, 859\t}, 860}; 861 862static void decompress_event(int offset, struct pmu_event *pe) 863{ 864\tconst char *p = &big_c_string[offset]; 865""") 866 for attr in _json_event_attributes: 867 _args.output_file.write(f'\n\tpe->{attr} = ') 868 if attr in _json_enum_attributes: 869 _args.output_file.write("*p - '0';\n") 870 else: 871 _args.output_file.write("(*p == '\\0' ? NULL : p);\n") 872 if attr == _json_event_attributes[-1]: 873 continue 874 if attr in _json_enum_attributes: 875 _args.output_file.write('\tp++;') 876 else: 877 _args.output_file.write('\twhile (*p++);') 878 _args.output_file.write("""} 879 880static void decompress_metric(int offset, struct pmu_metric *pm) 881{ 882\tconst char *p = &big_c_string[offset]; 883""") 884 for attr in _json_metric_attributes: 885 _args.output_file.write(f'\n\tpm->{attr} = ') 886 if attr in _json_enum_attributes: 887 _args.output_file.write("*p - '0';\n") 888 else: 889 _args.output_file.write("(*p == '\\0' ? NULL : p);\n") 890 if attr == _json_metric_attributes[-1]: 891 continue 892 if attr in _json_enum_attributes: 893 _args.output_file.write('\tp++;') 894 else: 895 _args.output_file.write('\twhile (*p++);') 896 _args.output_file.write("""} 897 898static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table, 899 const struct pmu_table_entry *pmu, 900 pmu_event_iter_fn fn, 901 void *data) 902{ 903 int ret; 904 struct pmu_event pe = { 905 .pmu = &big_c_string[pmu->pmu_name.offset], 906 }; 907 908 for (uint32_t i = 0; i < pmu->num_entries; i++) { 909 decompress_event(pmu->entries[i].offset, &pe); 910 if (!pe.name) 911 continue; 912 ret = fn(&pe, table, data); 913 if (ret) 914 return ret; 915 } 916 return 0; 917 } 918 919static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table, 920 const struct pmu_table_entry *pmu, 921 const char *name, 922 pmu_event_iter_fn fn, 923 void *data) 924{ 925 struct pmu_event pe = { 926 .pmu = &big_c_string[pmu->pmu_name.offset], 927 }; 928 int low = 0, high = pmu->num_entries - 1; 929 930 while (low <= high) { 931 int cmp, mid = (low + high) / 2; 932 933 decompress_event(pmu->entries[mid].offset, &pe); 934 935 if (!pe.name && !name) 936 goto do_call; 937 938 if (!pe.name && name) { 939 low = mid + 1; 940 continue; 941 } 942 if (pe.name && !name) { 943 high = mid - 1; 944 continue; 945 } 946 947 cmp = strcasecmp(pe.name, name); 948 if (cmp < 0) { 949 low = mid + 1; 950 continue; 951 } 952 if (cmp > 0) { 953 high = mid - 1; 954 continue; 955 } 956 do_call: 957 return fn ? fn(&pe, table, data) : 0; 958 } 959 return PMU_EVENTS__NOT_FOUND; 960} 961 962int pmu_events_table__for_each_event(const struct pmu_events_table *table, 963 struct perf_pmu *pmu, 964 pmu_event_iter_fn fn, 965 void *data) 966{ 967 if (!table) 968 return 0; 969 for (size_t i = 0; i < table->num_pmus; i++) { 970 const struct pmu_table_entry *table_pmu = &table->pmus[i]; 971 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; 972 int ret; 973 974 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name)) 975 continue; 976 977 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data); 978 if (ret) 979 return ret; 980 } 981 return 0; 982} 983 984int pmu_events_table__find_event(const struct pmu_events_table *table, 985 struct perf_pmu *pmu, 986 const char *name, 987 pmu_event_iter_fn fn, 988 void *data) 989{ 990 if (!table) 991 return PMU_EVENTS__NOT_FOUND; 992 for (size_t i = 0; i < table->num_pmus; i++) { 993 const struct pmu_table_entry *table_pmu = &table->pmus[i]; 994 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; 995 int ret; 996 997 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name)) 998 continue; 999 1000 ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data); 1001 if (ret != PMU_EVENTS__NOT_FOUND) 1002 return ret; 1003 } 1004 return PMU_EVENTS__NOT_FOUND; 1005} 1006 1007size_t pmu_events_table__num_events(const struct pmu_events_table *table, 1008 struct perf_pmu *pmu) 1009{ 1010 size_t count = 0; 1011 1012 if (!table) 1013 return 0; 1014 for (size_t i = 0; i < table->num_pmus; i++) { 1015 const struct pmu_table_entry *table_pmu = &table->pmus[i]; 1016 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; 1017 1018 if (perf_pmu__name_wildcard_match(pmu, pmu_name)) 1019 count += table_pmu->num_entries; 1020 } 1021 return count; 1022} 1023 1024static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table, 1025 const struct pmu_table_entry *pmu, 1026 pmu_metric_iter_fn fn, 1027 void *data) 1028{ 1029 int ret; 1030 struct pmu_metric pm = { 1031 .pmu = &big_c_string[pmu->pmu_name.offset], 1032 }; 1033 1034 for (uint32_t i = 0; i < pmu->num_entries; i++) { 1035 decompress_metric(pmu->entries[i].offset, &pm); 1036 if (!pm.metric_expr) 1037 continue; 1038 ret = fn(&pm, table, data); 1039 if (ret) 1040 return ret; 1041 } 1042 return 0; 1043} 1044 1045static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table, 1046 const struct pmu_table_entry *pmu, 1047 const char *metric, 1048 pmu_metric_iter_fn fn, 1049 void *data) 1050{ 1051 struct pmu_metric pm = { 1052 .pmu = &big_c_string[pmu->pmu_name.offset], 1053 }; 1054 int low = 0, high = pmu->num_entries - 1; 1055 1056 while (low <= high) { 1057 int cmp, mid = (low + high) / 2; 1058 1059 decompress_metric(pmu->entries[mid].offset, &pm); 1060 1061 if (!pm.metric_name && !metric) 1062 goto do_call; 1063 1064 if (!pm.metric_name && metric) { 1065 low = mid + 1; 1066 continue; 1067 } 1068 if (pm.metric_name && !metric) { 1069 high = mid - 1; 1070 continue; 1071 } 1072 1073 cmp = strcmp(pm.metric_name, metric); 1074 if (cmp < 0) { 1075 low = mid + 1; 1076 continue; 1077 } 1078 if (cmp > 0) { 1079 high = mid - 1; 1080 continue; 1081 } 1082 do_call: 1083 return fn ? fn(&pm, table, data) : 0; 1084 } 1085 return PMU_METRICS__NOT_FOUND; 1086} 1087 1088int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, 1089 pmu_metric_iter_fn fn, 1090 void *data) 1091{ 1092 if (!table) 1093 return 0; 1094 for (size_t i = 0; i < table->num_pmus; i++) { 1095 int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i], 1096 fn, data); 1097 1098 if (ret) 1099 return ret; 1100 } 1101 return 0; 1102} 1103 1104int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table, 1105 struct perf_pmu *pmu, 1106 const char *metric, 1107 pmu_metric_iter_fn fn, 1108 void *data) 1109{ 1110 if (!table) 1111 return 0; 1112 for (size_t i = 0; i < table->num_pmus; i++) { 1113 const struct pmu_table_entry *table_pmu = &table->pmus[i]; 1114 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; 1115 int ret; 1116 1117 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name)) 1118 continue; 1119 1120 ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data); 1121 if (ret != PMU_METRICS__NOT_FOUND) 1122 return ret; 1123 } 1124 return PMU_METRICS__NOT_FOUND; 1125} 1126 1127static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu) 1128{ 1129 static struct { 1130 const struct pmu_events_map *map; 1131 struct perf_cpu cpu; 1132 } last_result; 1133 static struct { 1134 const struct pmu_events_map *map; 1135 char *cpuid; 1136 } last_map_search; 1137 static bool has_last_result, has_last_map_search; 1138 const struct pmu_events_map *map = NULL; 1139 char *cpuid = NULL; 1140 size_t i; 1141 1142 if (has_last_result && last_result.cpu.cpu == cpu.cpu) 1143 return last_result.map; 1144 1145 cpuid = get_cpuid_allow_env_override(cpu); 1146 1147 /* 1148 * On some platforms which uses cpus map, cpuid can be NULL for 1149 * PMUs other than CORE PMUs. 1150 */ 1151 if (!cpuid) 1152 goto out_update_last_result; 1153 1154 if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) { 1155 map = last_map_search.map; 1156 free(cpuid); 1157 } else { 1158 i = 0; 1159 for (;;) { 1160 map = &pmu_events_map[i++]; 1161 1162 if (!map->arch) { 1163 map = NULL; 1164 break; 1165 } 1166 1167 if (!strcmp_cpuid_str(map->cpuid, cpuid)) 1168 break; 1169 } 1170 free(last_map_search.cpuid); 1171 last_map_search.cpuid = cpuid; 1172 last_map_search.map = map; 1173 has_last_map_search = true; 1174 } 1175out_update_last_result: 1176 last_result.cpu = cpu; 1177 last_result.map = map; 1178 has_last_result = true; 1179 return map; 1180} 1181 1182static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu) 1183{ 1184 struct perf_cpu cpu = {-1}; 1185 1186 if (pmu) { 1187 for (size_t i = 0; i < ARRAY_SIZE(pmu_events__common); i++) { 1188 const char *pmu_name = &big_c_string[pmu_events__common[i].pmu_name.offset]; 1189 1190 if (!strcmp(pmu_name, pmu->name)) { 1191 const struct pmu_events_map *map = &pmu_events_map[0]; 1192 1193 while (strcmp("common", map->arch)) 1194 map++; 1195 return map; 1196 } 1197 } 1198 cpu = perf_cpu_map__min(pmu->cpus); 1199 } 1200 return map_for_cpu(cpu); 1201} 1202 1203const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu) 1204{ 1205 const struct pmu_events_map *map = map_for_pmu(pmu); 1206 1207 if (!map) 1208 return NULL; 1209 1210 if (!pmu) 1211 return &map->event_table; 1212 1213 for (size_t i = 0; i < map->event_table.num_pmus; i++) { 1214 const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i]; 1215 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; 1216 1217 if (perf_pmu__name_wildcard_match(pmu, pmu_name)) 1218 return &map->event_table; 1219 } 1220 return NULL; 1221} 1222 1223const struct pmu_events_table *perf_pmu__default_core_events_table(void) 1224{ 1225 int i = 0; 1226 1227 for (;;) { 1228 const struct pmu_events_map *map = &pmu_events_map[i++]; 1229 1230 if (!map->arch) 1231 break; 1232 1233 if (!strcmp(map->cpuid, "common")) 1234 return &map->event_table; 1235 } 1236 return NULL; 1237} 1238 1239const struct pmu_metrics_table *pmu_metrics_table__find(void) 1240{ 1241 struct perf_cpu cpu = {-1}; 1242 const struct pmu_events_map *map = map_for_cpu(cpu); 1243 1244 return map ? &map->metric_table : NULL; 1245} 1246 1247const struct pmu_metrics_table *pmu_metrics_table__default(void) 1248{ 1249 int i = 0; 1250 1251 for (;;) { 1252 const struct pmu_events_map *map = &pmu_events_map[i++]; 1253 1254 if (!map->arch) 1255 break; 1256 1257 if (!strcmp(map->cpuid, "common")) 1258 return &map->metric_table; 1259 } 1260 return NULL; 1261} 1262 1263const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid) 1264{ 1265 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1266 tables->arch; 1267 tables++) { 1268 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) 1269 return &tables->event_table; 1270 } 1271 return NULL; 1272} 1273 1274const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid) 1275{ 1276 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1277 tables->arch; 1278 tables++) { 1279 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) 1280 return &tables->metric_table; 1281 } 1282 return NULL; 1283} 1284 1285int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data) 1286{ 1287 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1288 tables->arch; 1289 tables++) { 1290 int ret = pmu_events_table__for_each_event(&tables->event_table, 1291 /*pmu=*/ NULL, fn, data); 1292 1293 if (ret) 1294 return ret; 1295 } 1296 return 0; 1297} 1298 1299int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data) 1300{ 1301 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1302 tables->arch; 1303 tables++) { 1304 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data); 1305 1306 if (ret) 1307 return ret; 1308 } 1309 return 0; 1310} 1311 1312const struct pmu_events_table *find_sys_events_table(const char *name) 1313{ 1314 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1315 tables->name; 1316 tables++) { 1317 if (!strcmp(tables->name, name)) 1318 return &tables->event_table; 1319 } 1320 return NULL; 1321} 1322 1323int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data) 1324{ 1325 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1326 tables->name; 1327 tables++) { 1328 int ret = pmu_events_table__for_each_event(&tables->event_table, 1329 /*pmu=*/ NULL, fn, data); 1330 1331 if (ret) 1332 return ret; 1333 } 1334 return 0; 1335} 1336 1337int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data) 1338{ 1339 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1340 tables->name; 1341 tables++) { 1342 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data); 1343 1344 if (ret) 1345 return ret; 1346 } 1347 return 0; 1348} 1349""") 1350 1351def print_metricgroups() -> None: 1352 _args.output_file.write(""" 1353static const int metricgroups[][2] = { 1354""") 1355 for mgroup in sorted(_metricgroups): 1356 description = _metricgroups[mgroup] 1357 _args.output_file.write( 1358 f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n' 1359 ) 1360 _args.output_file.write(""" 1361}; 1362 1363const char *describe_metricgroup(const char *group) 1364{ 1365 int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1; 1366 1367 while (low <= high) { 1368 int mid = (low + high) / 2; 1369 const char *mgroup = &big_c_string[metricgroups[mid][0]]; 1370 int cmp = strcmp(mgroup, group); 1371 1372 if (cmp == 0) { 1373 return &big_c_string[metricgroups[mid][1]]; 1374 } else if (cmp < 0) { 1375 low = mid + 1; 1376 } else { 1377 high = mid - 1; 1378 } 1379 } 1380 return NULL; 1381} 1382""") 1383 1384def main() -> None: 1385 global _args 1386 1387 def dir_path(path: str) -> str: 1388 """Validate path is a directory for argparse.""" 1389 if os.path.isdir(path): 1390 return path 1391 raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory') 1392 1393 def ftw(path: str, parents: Sequence[str], 1394 action: Callable[[Sequence[str], os.DirEntry], None]) -> None: 1395 """Replicate the directory/file walking behavior of C's file tree walk.""" 1396 for item in sorted(os.scandir(path), key=lambda e: e.name): 1397 if _args.model != 'all' and item.is_dir(): 1398 # Check if the model matches one in _args.model. 1399 if len(parents) == _args.model.split(',')[0].count('/'): 1400 # We're testing the correct directory. 1401 item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name 1402 if 'test' not in item_path and 'common' not in item_path and item_path not in _args.model.split(','): 1403 continue 1404 try: 1405 action(parents, item) 1406 except Exception as e: 1407 raise RuntimeError(f'Action failure for \'{item.name}\' in {parents}') from e 1408 if item.is_dir(): 1409 ftw(item.path, parents + [item.name], action) 1410 1411 ap = argparse.ArgumentParser() 1412 ap.add_argument('arch', help='Architecture name like x86') 1413 ap.add_argument('model', help='''Select a model such as skylake to 1414reduce the code size. Normally set to "all". For architectures like 1415ARM64 with an implementor/model, the model must include the implementor 1416such as "arm/cortex-a34".''', 1417 default='all') 1418 ap.add_argument( 1419 'starting_dir', 1420 type=dir_path, 1421 help='Root of tree containing architecture directories containing json files' 1422 ) 1423 ap.add_argument( 1424 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout) 1425 _args = ap.parse_args() 1426 1427 _args.output_file.write(f""" 1428/* SPDX-License-Identifier: GPL-2.0 */ 1429/* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */ 1430""") 1431 _args.output_file.write(""" 1432#include <pmu-events/pmu-events.h> 1433#include "util/header.h" 1434#include "util/pmu.h" 1435#include <string.h> 1436#include <stddef.h> 1437 1438struct compact_pmu_event { 1439 int offset; 1440}; 1441 1442struct pmu_table_entry { 1443 const struct compact_pmu_event *entries; 1444 uint32_t num_entries; 1445 struct compact_pmu_event pmu_name; 1446}; 1447 1448""") 1449 archs = [] 1450 for item in os.scandir(_args.starting_dir): 1451 if not item.is_dir(): 1452 continue 1453 if item.name == _args.arch or _args.arch == 'all' or item.name == 'test' or item.name == 'common': 1454 archs.append(item.name) 1455 1456 if len(archs) < 2 and _args.arch != 'none': 1457 raise IOError(f'Missing architecture directory \'{_args.arch}\'') 1458 1459 archs.sort() 1460 for arch in archs: 1461 arch_path = f'{_args.starting_dir}/{arch}' 1462 preprocess_arch_std_files(arch_path) 1463 ftw(arch_path, [], preprocess_one_file) 1464 1465 _bcs.compute() 1466 _args.output_file.write('static const char *const big_c_string =\n') 1467 for s in _bcs.big_string: 1468 _args.output_file.write(s) 1469 _args.output_file.write(';\n\n') 1470 for arch in archs: 1471 arch_path = f'{_args.starting_dir}/{arch}' 1472 ftw(arch_path, [], process_one_file) 1473 print_pending_events() 1474 print_pending_metrics() 1475 1476 print_mapping_table(archs) 1477 print_system_mapping_table() 1478 print_metricgroups() 1479 1480if __name__ == '__main__': 1481 main() 1482