xref: /linux/tools/perf/pmu-events/jevents.py (revision fb7399cf2d0b33825b8039f95c45395c7deba25c)
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
4import argparse
5import csv
6from functools import lru_cache
7import json
8import metric
9import os
10import sys
11from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
12import collections
13
14# Global command line arguments.
15_args = None
16# List of regular event tables.
17_event_tables = []
18# List of event tables generated from "/sys" directories.
19_sys_event_tables = []
20# List of regular metric tables.
21_metric_tables = []
22# List of metric tables generated from "/sys" directories.
23_sys_metric_tables = []
24# Mapping between sys event table names and sys metric table names.
25_sys_event_table_to_metric_table_mapping = {}
26# Map from an event name to an architecture standard
27# JsonEvent. Architecture standard events are in json files in the top
28# f'{_args.starting_dir}/{_args.arch}' directory.
29_arch_std_events = {}
30# Events to write out when the table is closed
31_pending_events = []
32# Name of events table to be written out
33_pending_events_tblname = None
34# Metrics to write out when the table is closed
35_pending_metrics = []
36# Name of metrics table to be written out
37_pending_metrics_tblname = None
38# Global BigCString shared by all structures.
39_bcs = None
40# Map from the name of a metric group to a description of the group.
41_metricgroups = {}
42# Order specific JsonEvent attributes will be visited.
43_json_event_attributes = [
44    # cmp_sevent related attributes.
45    'name', 'topic', 'desc',
46    # Seems useful, put it early.
47    'event',
48    # Short things in alphabetical order.
49    'compat', 'deprecated', 'perpkg', 'unit',
50    # Retirement latency specific to Intel Granite Rapids currently.
51    'retirement_latency_mean', 'retirement_latency_min',
52    'retirement_latency_max',
53    # Longer things (the last won't be iterated over during decompress).
54    'long_desc'
55]
56
57# Attributes that are in pmu_metric rather than pmu_event.
58_json_metric_attributes = [
59    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
60    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
61    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
62]
63# Attributes that are bools or enum int values, encoded as '0', '1',...
64_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
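# As an illustrative sketch (hypothetical event, abbreviated): an event with
# name "cycles", topic "pipeline" and event string "event=0x3c" is encoded by
# JsonEvent.build_c_string() below as a run of '\000'-terminated fields in the
# attribute order above, e.g. 'cycles\000pipeline\000\000event=0x3c\000...',
# while the enum attributes in _json_enum_attributes are emitted as single
# '0'/'1'/... characters with no terminator.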
65
66def removesuffix(s: str, suffix: str) -> str:
67  """Remove the suffix from a string
68
69  The removesuffix function is added to str in Python 3.9. We aim for 3.6
70  compatibility and so provide our own function here.
71  """
72  return s[0:-len(suffix)] if s.endswith(suffix) else s
73
74
75def file_name_to_table_name(prefix: str, parents: Sequence[str],
76                            dirname: str) -> str:
77  """Generate a C table name from directory names."""
78  tblname = prefix
79  for p in parents:
80    tblname += '_' + p
81  tblname += '_' + dirname
82  return tblname.replace('-', '_')
83
84
85def c_len(s: str) -> int:
86  """Return the length of s a C string
87
88  This doesn't handle all escape characters properly. It first assumes
89  all \\ are for escaping, it then adjusts as it will have over counted
90  \\. The code uses \000 rather than \0 as a terminator as an adjacent
91  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
92  equal a terminator followed by the number 5 but the escape of
93  \05). The code adjusts for \000 but not properly for all octal, hex
94  or unicode values.
95  """
96  try:
97    utf = s.encode(encoding='utf-8',errors='strict')
98  except:
99    print(f'broken string {s}')
100    raise
101  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
102
103class BigCString:
104  """A class to hold many strings concatenated together.
105
106  Generating a large number of stand-alone C strings creates a large
107  number of relocations in position independent code. The BigCString
108  is a helper for this case. It builds a single string that contains
109  all the other C strings within it (to avoid memory issues the string
110  itself is held as a list of strings). The offsets within the big
111  string are recorded and, when stored to disk, these don't need
112  relocation. To reduce the size of the string further, identical
113  strings are merged. If a longer string ends with the same value as a
114  shorter string, these entries are also merged.
115  """
116  strings: Set[str]
117  big_string: Sequence[str]
118  offsets: Dict[str, int]
119  insert_number: int
120  insert_point: Dict[str, int]
121  metrics: Set[str]
122
123  def __init__(self):
124    self.strings = set()
125    self.insert_number = 0
126    self.insert_point = {}
127    self.metrics = set()
128
129  def add(self, s: str, metric: bool) -> None:
130    """Called to add to the big string."""
131    if s not in self.strings:
132      self.strings.add(s)
133      self.insert_point[s] = self.insert_number
134      self.insert_number += 1
135      if metric:
136        self.metrics.add(s)
137
138  def compute(self) -> None:
139    """Called once all strings are added to compute the string and offsets."""
140
141    folded_strings = {}
142    # Determine if two strings can be folded, i.e. let one string use the
143    # end of another. First reverse all strings and sort them.
144    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
145
146    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
147    # for each string to see if there is a better candidate to fold it
148    # into; in the example, rather than using 'yz' we can use 'xyz' at
149    # an offset of 1. We record which string can be folded into which
150    # in folded_strings; we don't need to record the offset as it is
151    # trivially computed from the string lengths.
152    for pos,s in enumerate(sorted_reversed_strings):
153      best_pos = pos
154      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
155        if sorted_reversed_strings[check_pos].startswith(s):
156          best_pos = check_pos
157        else:
158          break
159      if pos != best_pos:
160        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
161
162    # Compute reverse mappings for debugging.
163    fold_into_strings = collections.defaultdict(set)
164    for key, val in folded_strings.items():
165      if key != val:
166        fold_into_strings[val].add(key)
167
168    # big_string_offset is the current location within the C string
169    # being appended to - comments, etc. don't count. big_string is
170    # the string contents represented as a list. Strings are immutable
171    # in Python and so appending to one causes memory issues, while
172    # lists are mutable.
173    big_string_offset = 0
174    self.big_string = []
175    self.offsets = {}
176
177    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
178      return (s in self.metrics, self.insert_point[s], s)
179
180    # Emit all strings that aren't folded in a sorted manner.
181    for s in sorted(self.strings, key=string_cmp_key):
182      if s not in folded_strings:
183        self.offsets[s] = big_string_offset
184        self.big_string.append(f'/* offset={big_string_offset} */ "')
185        self.big_string.append(s)
186        self.big_string.append('"')
187        if s in fold_into_strings:
188          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
189        self.big_string.append('\n')
190        big_string_offset += c_len(s)
191        continue
192
193    # Compute the offsets of the folded strings.
194    for s in folded_strings.keys():
195      assert s not in self.offsets
196      folded_s = folded_strings[s]
197      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
198
199_bcs = BigCString()
200
201class JsonEvent:
202  """Representation of an event loaded from a json file dictionary."""
203
204  def __init__(self, jd: dict):
205    """Constructor passed the dictionary of parsed json values."""
206
207    def llx(x: int) -> str:
208      """Convert an int to a string similar to a printf modifier of %#llx."""
209      return str(x) if x >= 0 and x < 10 else hex(x)
210
211    def fixdesc(s: str) -> str:
212      """Fix formatting issue for the desc string."""
213      if s is None:
214        return None
215      return removesuffix(removesuffix(removesuffix(s, '.  '),
216                                       '. '), '.').replace('\n', '\\n').replace(
217                                           '\"', '\\"').replace('\r', '\\r')
218
219    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
220      """Returns the aggr_mode_class enum value associated with the JSON string."""
221      if not aggr_mode:
222        return None
223      aggr_mode_to_enum = {
224          'PerChip': '1',
225          'PerCore': '2',
226      }
227      return aggr_mode_to_enum[aggr_mode]
228
229    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
230      """Returns the metric_event_groups enum value associated with the JSON string."""
231      if not metric_constraint:
232        return None
233      metric_constraint_to_enum = {
234          'NO_GROUP_EVENTS': '1',
235          'NO_GROUP_EVENTS_NMI': '2',
236          'NO_NMI_WATCHDOG': '2',
237          'NO_GROUP_EVENTS_SMT': '3',
238      }
239      return metric_constraint_to_enum[metric_constraint]
240
241    def lookup_msr(num: str) -> Optional[str]:
242      """Converts the msr number, or first in a list to the appropriate event field."""
243      if not num:
244        return None
245      msrmap = {
246          0x3F6: 'ldlat=',
247          0x1A6: 'offcore_rsp=',
248          0x1A7: 'offcore_rsp=',
249          0x3F7: 'frontend=',
250      }
251      return msrmap[int(num.split(',', 1)[0], 0)]
252
253    def real_event(name: str, event: str) -> Optional[str]:
254      """Convert well known event names to an event string otherwise use the event argument."""
255      fixed = {
256          'inst_retired.any': 'event=0xc0,period=2000003',
257          'inst_retired.any_p': 'event=0xc0,period=2000003',
258          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
259          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
260          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
261          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
262      }
263      if not name:
264        return None
265      if name.lower() in fixed:
266        return fixed[name.lower()]
267      return event
268
269    def unit_to_pmu(unit: str) -> Optional[str]:
270      """Convert a JSON Unit to Linux PMU name."""
271      if not unit:
272        return 'default_core'
273      # Comment brought over from jevents.c:
274      # it's not realistic to keep adding these, we need something more scalable ...
275      table = {
276          'CBO': 'uncore_cbox',
277          'QPI LL': 'uncore_qpi',
278          'SBO': 'uncore_sbox',
279          'iMPH-U': 'uncore_arb',
280          'CPU-M-CF': 'cpum_cf',
281          'CPU-M-SF': 'cpum_sf',
282          'PAI-CRYPTO' : 'pai_crypto',
283          'PAI-EXT' : 'pai_ext',
284          'UPI LL': 'uncore_upi',
285          'hisi_sicl,cpa': 'hisi_sicl,cpa',
286          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
287          'hisi_sccl,hha': 'hisi_sccl,hha',
288          'hisi_sccl,l3c': 'hisi_sccl,l3c',
289          'imx8_ddr': 'imx8_ddr',
290          'imx9_ddr': 'imx9_ddr',
291          'L3PMC': 'amd_l3',
292          'DFPMC': 'amd_df',
293          'UMCPMC': 'amd_umc',
294          'cpu_core': 'cpu_core',
295          'cpu_atom': 'cpu_atom',
296          'ali_drw': 'ali_drw',
297          'arm_cmn': 'arm_cmn',
298          'tool': 'tool',
299      }
300      return table[unit] if unit in table else f'uncore_{unit.lower()}'
301
302    def is_zero(val: str) -> bool:
303      try:
304        if val.startswith('0x'):
305          return int(val, 16) == 0
306        else:
307          return int(val) == 0
308      except ValueError:
309        return False
310
311    def canonicalize_value(val: str) -> str:
312      try:
313        if val.startswith('0x'):
314          return llx(int(val, 16))
315        return str(int(val))
316      except ValueError:
317        return val
318
319    eventcode = 0
320    if 'EventCode' in jd:
321      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
322    if 'ExtSel' in jd:
323      eventcode |= int(jd['ExtSel']) << 8
324    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
325    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
326    self.name = jd['EventName'].lower() if 'EventName' in jd else None
327    self.topic = ''
328    self.compat = jd.get('Compat')
329    self.desc = fixdesc(jd.get('BriefDescription'))
330    self.long_desc = fixdesc(jd.get('PublicDescription'))
331    precise = jd.get('PEBS')
332    msr = lookup_msr(jd.get('MSRIndex'))
333    msrval = jd.get('MSRValue')
334    extra_desc = ''
335    if 'Data_LA' in jd:
336      extra_desc += '  Supports address when precise'
337      if 'Errata' in jd:
338        extra_desc += '.'
339    if 'Errata' in jd:
340      extra_desc += '  Spec update: ' + jd['Errata']
341    self.pmu = unit_to_pmu(jd.get('Unit'))
342    filter = jd.get('Filter')
343    self.unit = jd.get('ScaleUnit')
344    self.perpkg = jd.get('PerPkg')
345    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
346    self.deprecated = jd.get('Deprecated')
347    self.retirement_latency_mean = jd.get('RetirementLatencyMean')
348    self.retirement_latency_min = jd.get('RetirementLatencyMin')
349    self.retirement_latency_max = jd.get('RetirementLatencyMax')
350    self.metric_name = jd.get('MetricName')
351    self.metric_group = jd.get('MetricGroup')
352    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
353    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
354    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
355    self.metric_expr = None
356    if 'MetricExpr' in jd:
357      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
358    # Note, the metric formula for the threshold isn't parsed as the &
359    # and > have incorrect precedence.
360    self.metric_threshold = jd.get('MetricThreshold')
361
362    arch_std = jd.get('ArchStdEvent')
363    if precise and self.desc and '(Precise Event)' not in self.desc:
364      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
365                                                                 'event)')
366    event = None
367    if configcode is not None:
368      event = f'config={llx(configcode)}'
369    elif eventidcode is not None:
370      event = f'eventid={llx(eventidcode)}'
371    else:
372      event = f'event={llx(eventcode)}'
373    event_fields = [
374        ('AnyThread', 'any='),
375        ('PortMask', 'ch_mask='),
376        ('CounterMask', 'cmask='),
377        ('EdgeDetect', 'edge='),
378        ('FCMask', 'fc_mask='),
379        ('Invert', 'inv='),
380        ('SampleAfterValue', 'period='),
381        ('UMask', 'umask='),
382        ('NodeType', 'type='),
383        ('RdWrMask', 'rdwrmask='),
384        ('EnAllCores', 'enallcores='),
385        ('EnAllSlices', 'enallslices='),
386        ('SliceId', 'sliceid='),
387        ('ThreadMask', 'threadmask='),
388    ]
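    # As a sketch (hypothetical JSON values): EventCode "0x3C", UMask "0x01"
    # and SampleAfterValue "2000003" produce
    # 'event=0x3c,period=2000003,umask=1' after the loop below, with
    # zero-valued fields skipped entirely.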
389    for key, value in event_fields:
390      if key in jd and not is_zero(jd[key]):
391        event += f',{value}{canonicalize_value(jd[key])}'
392    if filter:
393      event += f',{filter}'
394    if msr:
395      event += f',{msr}{msrval}'
396    if self.desc and extra_desc:
397      self.desc += extra_desc
398    if self.long_desc and extra_desc:
399      self.long_desc += extra_desc
400    if arch_std:
401      if arch_std.lower() in _arch_std_events:
402        event = _arch_std_events[arch_std.lower()].event
403        # Copy from the architecture standard event to self for undefined fields.
404        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
405          if hasattr(self, attr) and not getattr(self, attr):
406            setattr(self, attr, value)
407      else:
408        raise argparse.ArgumentTypeError(f'Cannot find arch std event: {arch_std}')
409
410    self.event = real_event(self.name, event)
411
412  def __repr__(self) -> str:
413    """String representation primarily for debugging."""
414    s = '{\n'
415    for attr, value in self.__dict__.items():
416      if value:
417        s += f'\t{attr} = {value},\n'
418    return s + '}'
419
420  def build_c_string(self, metric: bool) -> str:
421    s = ''
422    for attr in _json_metric_attributes if metric else _json_event_attributes:
423      x = getattr(self, attr)
424      if metric and x and attr == 'metric_expr':
425        # Convert parsed metric expressions into a string. Slashes
426        # must be doubled in the file.
427        x = x.ToPerfJson().replace('\\', '\\\\')
428      if metric and x and attr == 'metric_threshold':
429        x = x.replace('\\', '\\\\')
430      if attr in _json_enum_attributes:
431        s += x if x else '0'
432      else:
433        s += f'{x}\\000' if x else '\\000'
434    return s
435
436  def to_c_string(self, metric: bool) -> str:
437    """Representation of the event as a C struct initializer."""
438
439    def fix_comment(s: str) -> str:
440        return s.replace('*/', r'\*\/')
441
442    s = self.build_c_string(metric)
443    return f'{{ { _bcs.offsets[s] } }}, /* {fix_comment(s)} */\n'
444
445
446@lru_cache(maxsize=None)
447def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
448  """Read json events from the specified file."""
449  try:
450    events = json.load(open(path), object_hook=JsonEvent)
451  except BaseException as err:
452    print(f"Exception processing {path}")
453    raise
454  metrics: list[Tuple[str, str, metric.Expression]] = []
455  for event in events:
456    event.topic = topic
457    if event.metric_name and '-' not in event.metric_name:
458      metrics.append((event.pmu, event.metric_name, event.metric_expr))
459  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
460  if updates:
461    for event in events:
462      if event.metric_name in updates:
463        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
464        #       f'to\n"{updates[event.metric_name]}"')
465        event.metric_expr = updates[event.metric_name]
466
467  return events
468
469def preprocess_arch_std_files(archpath: str) -> None:
470  """Read in all architecture standard events."""
471  global _arch_std_events
472  for item in os.scandir(archpath):
473    if not item.is_file() or not item.name.endswith('.json'):
474      continue
475    try:
476      for event in read_json_events(item.path, topic=''):
477        if event.name:
478          _arch_std_events[event.name.lower()] = event
479        if event.metric_name:
480          _arch_std_events[event.metric_name.lower()] = event
481    except Exception as e:
482        raise RuntimeError(f'Failure processing \'{item.name}\' in \'{archpath}\'') from e
483
484
485def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
486  """Add contents of file to _pending_events table."""
487  for e in read_json_events(item.path, topic):
488    if e.name:
489      _pending_events.append(e)
490    if e.metric_name:
491      _pending_metrics.append(e)
492
493
494def print_pending_events() -> None:
495  """Optionally close events table."""
496
497  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
498    def fix_none(s: Optional[str]) -> str:
499      if s is None:
500        return ''
501      return s
502
503    return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
504            fix_none(j.metric_name))
505
506  global _pending_events
507  if not _pending_events:
508    return
509
510  global _pending_events_tblname
511  if _pending_events_tblname.endswith('_sys'):
512    global _sys_event_tables
513    _sys_event_tables.append(_pending_events_tblname)
514  else:
515    global _event_tables
516    _event_tables.append(_pending_events_tblname)
517
518  first = True
519  last_pmu = None
520  last_name = None
521  pmus = set()
522  for event in sorted(_pending_events, key=event_cmp_key):
523    if last_pmu and last_pmu == event.pmu:
524      assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}"
525    if event.pmu != last_pmu:
526      if not first:
527        _args.output_file.write('};\n')
528      pmu_name = event.pmu.replace(',', '_')
529      _args.output_file.write(
530          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
531      first = False
532      last_pmu = event.pmu
533      pmus.add((event.pmu, pmu_name))
534
535    _args.output_file.write(event.to_c_string(metric=False))
536    last_name = event.name
537  _pending_events = []
538
539  _args.output_file.write(f"""
540}};
541
542const struct pmu_table_entry {_pending_events_tblname}[] = {{
543""")
544  for (pmu, tbl_pmu) in sorted(pmus):
545    pmu_name = f"{pmu}\\000"
546    _args.output_file.write(f"""{{
547     .entries = {_pending_events_tblname}_{tbl_pmu},
548     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
549     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
550}},
551""")
552  _args.output_file.write('};\n\n')
553
554def print_pending_metrics() -> None:
555  """Optionally close metrics table."""
556
557  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
558    def fix_none(s: Optional[str]) -> str:
559      if s is None:
560        return ''
561      return s
562
563    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
564
565  global _pending_metrics
566  if not _pending_metrics:
567    return
568
569  global _pending_metrics_tblname
570  if _pending_metrics_tblname.endswith('_sys'):
571    global _sys_metric_tables
572    _sys_metric_tables.append(_pending_metrics_tblname)
573  else:
574    global _metric_tables
575    _metric_tables.append(_pending_metrics_tblname)
576
577  first = True
578  last_pmu = None
579  pmus = set()
580  for metric in sorted(_pending_metrics, key=metric_cmp_key):
581    if metric.pmu != last_pmu:
582      if not first:
583        _args.output_file.write('};\n')
584      pmu_name = metric.pmu.replace(',', '_')
585      _args.output_file.write(
586          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
587      first = False
588      last_pmu = metric.pmu
589      pmus.add((metric.pmu, pmu_name))
590
591    _args.output_file.write(metric.to_c_string(metric=True))
592  _pending_metrics = []
593
594  _args.output_file.write(f"""
595}};
596
597const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
598""")
599  for (pmu, tbl_pmu) in sorted(pmus):
600    pmu_name = f"{pmu}\\000"
601    _args.output_file.write(f"""{{
602     .entries = {_pending_metrics_tblname}_{tbl_pmu},
603     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
604     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
605}},
606""")
607  _args.output_file.write('};\n\n')
608
609def get_topic(topic: str) -> str:
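  # e.g. 'cache.json' -> 'cache', 'floating-point.json' -> 'floating point',
  # and any file name ending in 'metrics.json' -> 'metrics'.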
610  if topic.endswith('metrics.json'):
611    return 'metrics'
612  return removesuffix(topic, '.json').replace('-', ' ')
613
614def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
615
616  if item.is_dir():
617    return
618
619  # base dir or too deep
620  level = len(parents)
621  if level == 0 or level > 4:
622    return
623
624  # Ignore other directories. If the file name does not have a .json
625  # extension, ignore it. It could be a readme.txt for instance.
626  if not item.is_file() or not item.name.endswith('.json'):
627    return
628
629  if item.name == 'metricgroups.json':
630    metricgroup_descriptions = json.load(open(item.path))
631    for mgroup in metricgroup_descriptions:
632      assert len(mgroup) > 1, parents
633      description = f"{metricgroup_descriptions[mgroup]}\\000"
634      mgroup = f"{mgroup}\\000"
635      _bcs.add(mgroup, metric=True)
636      _bcs.add(description, metric=True)
637      _metricgroups[mgroup] = description
638    return
639
640  topic = get_topic(item.name)
641  for event in read_json_events(item.path, topic):
642    pmu_name = f"{event.pmu}\\000"
643    if event.name:
644      _bcs.add(pmu_name, metric=False)
645      _bcs.add(event.build_c_string(metric=False), metric=False)
646    if event.metric_name:
647      _bcs.add(pmu_name, metric=True)
648      _bcs.add(event.build_c_string(metric=True), metric=True)
649
650def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
651  """Process a JSON file during the main walk."""
652  def is_leaf_dir_ignoring_sys(path: str) -> bool:
653    for item in os.scandir(path):
654      if item.is_dir() and item.name != 'sys':
655        return False
656    return True
657
658  # Model directories are leaves (ignoring possible sys
659  # directories). The FTW will walk into the directory next. Flush
660  # pending events and metrics and update the table names for the new
661  # model directory.
662  if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
663    print_pending_events()
664    print_pending_metrics()
665
666    global _pending_events_tblname
667    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
668    global _pending_metrics_tblname
669    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
670
671    if item.name == 'sys':
672      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
673    return
674
675  # base dir or too deep
676  level = len(parents)
677  if level == 0 or level > 4:
678    return
679
680  # Ignore other directories. If the file name does not have a .json
681  # extension, ignore it. It could be a readme.txt for instance.
682  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
683    return
684
685  add_events_table_entries(item, get_topic(item.name))
686
687
688def print_mapping_table(archs: Sequence[str]) -> None:
689  """Read the mapfile and generate the struct from cpuid string to event table."""
690  _args.output_file.write("""
691/* Struct used to make the PMU event table implementation opaque to callers. */
692struct pmu_events_table {
693        const struct pmu_table_entry *pmus;
694        uint32_t num_pmus;
695};
696
697/* Struct used to make the PMU metric table implementation opaque to callers. */
698struct pmu_metrics_table {
699        const struct pmu_table_entry *pmus;
700        uint32_t num_pmus;
701};
702
703/*
704 * Map a CPU to its table of PMU events. The CPU is identified by the
705 * cpuid field, which is an arch-specific identifier for the CPU.
706 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
707 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
708 *
709 * The cpuid can contain any character other than the comma.
710 */
711struct pmu_events_map {
712        const char *arch;
713        const char *cpuid;
714        struct pmu_events_table event_table;
715        struct pmu_metrics_table metric_table;
716};
717
718/*
719 * Global table mapping each known CPU for the architecture to its
720 * table of PMU events.
721 */
722const struct pmu_events_map pmu_events_map[] = {
723""")
724  for arch in archs:
725    if arch == 'test':
726      _args.output_file.write("""{
727\t.arch = "testarch",
728\t.cpuid = "testcpu",
729\t.event_table = {
730\t\t.pmus = pmu_events__test_soc_cpu,
731\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
732\t},
733\t.metric_table = {
734\t\t.pmus = pmu_metrics__test_soc_cpu,
735\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
736\t}
737},
738""")
739    elif arch == 'common':
740      _args.output_file.write("""{
741\t.arch = "common",
742\t.cpuid = "common",
743\t.event_table = {
744\t\t.pmus = pmu_events__common,
745\t\t.num_pmus = ARRAY_SIZE(pmu_events__common),
746\t},
747\t.metric_table = {},
748},
749""")
750    else:
751      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
752        table = csv.reader(csvfile)
753        first = True
754        for row in table:
755          # Skip the first row or any row beginning with #.
756          if not first and len(row) > 0 and not row[0].startswith('#'):
757            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
758            if event_tblname in _event_tables:
759              event_size = f'ARRAY_SIZE({event_tblname})'
760            else:
761              event_tblname = 'NULL'
762              event_size = '0'
763            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
764            if metric_tblname in _metric_tables:
765              metric_size = f'ARRAY_SIZE({metric_tblname})'
766            else:
767              metric_tblname = 'NULL'
768              metric_size = '0'
769            if event_size == '0' and metric_size == '0':
770              continue
771            cpuid = row[0].replace('\\', '\\\\')
772            _args.output_file.write(f"""{{
773\t.arch = "{arch}",
774\t.cpuid = "{cpuid}",
775\t.event_table = {{
776\t\t.pmus = {event_tblname},
777\t\t.num_pmus = {event_size}
778\t}},
779\t.metric_table = {{
780\t\t.pmus = {metric_tblname},
781\t\t.num_pmus = {metric_size}
782\t}}
783}},
784""")
785          first = False
786
787  _args.output_file.write("""{
788\t.arch = 0,
789\t.cpuid = 0,
790\t.event_table = { 0, 0 },
791\t.metric_table = { 0, 0 },
792}
793};
794""")
795
796
797def print_system_mapping_table() -> None:
798  """C struct mapping table array for tables from /sys directories."""
799  _args.output_file.write("""
800struct pmu_sys_events {
801\tconst char *name;
802\tstruct pmu_events_table event_table;
803\tstruct pmu_metrics_table metric_table;
804};
805
806static const struct pmu_sys_events pmu_sys_event_tables[] = {
807""")
808  printed_metric_tables = []
809  for tblname in _sys_event_tables:
810    _args.output_file.write(f"""\t{{
811\t\t.event_table = {{
812\t\t\t.pmus = {tblname},
813\t\t\t.num_pmus = ARRAY_SIZE({tblname})
814\t\t}},""")
815    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
816    if metric_tblname in _sys_metric_tables:
817      _args.output_file.write(f"""
818\t\t.metric_table = {{
819\t\t\t.pmus = {metric_tblname},
820\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
821\t\t}},""")
822      printed_metric_tables.append(metric_tblname)
823    _args.output_file.write(f"""
824\t\t.name = \"{tblname}\",
825\t}},
826""")
827  for tblname in _sys_metric_tables:
828    if tblname in printed_metric_tables:
829      continue
830    _args.output_file.write(f"""\t{{
831\t\t.metric_table = {{
832\t\t\t.pmus = {tblname},
833\t\t\t.num_pmus = ARRAY_SIZE({tblname})
834\t\t}},
835\t\t.name = \"{tblname}\",
836\t}},
837""")
838  _args.output_file.write("""\t{
839\t\t.event_table = { 0, 0 },
840\t\t.metric_table = { 0, 0 },
841\t},
842};
843
844static void decompress_event(int offset, struct pmu_event *pe)
845{
846\tconst char *p = &big_c_string[offset];
847""")
848  for attr in _json_event_attributes:
849    _args.output_file.write(f'\n\tpe->{attr} = ')
850    if attr in _json_enum_attributes:
851      _args.output_file.write("*p - '0';\n")
852    else:
853      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
854    if attr == _json_event_attributes[-1]:
855      continue
856    if attr in _json_enum_attributes:
857      _args.output_file.write('\tp++;')
858    else:
859      _args.output_file.write('\twhile (*p++);')
860  _args.output_file.write("""}
861
862static void decompress_metric(int offset, struct pmu_metric *pm)
863{
864\tconst char *p = &big_c_string[offset];
865""")
866  for attr in _json_metric_attributes:
867    _args.output_file.write(f'\n\tpm->{attr} = ')
868    if attr in _json_enum_attributes:
869      _args.output_file.write("*p - '0';\n")
870    else:
871      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
872    if attr == _json_metric_attributes[-1]:
873      continue
874    if attr in _json_enum_attributes:
875      _args.output_file.write('\tp++;')
876    else:
877      _args.output_file.write('\twhile (*p++);')
878  _args.output_file.write("""}
879
880static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
881                                                const struct pmu_table_entry *pmu,
882                                                pmu_event_iter_fn fn,
883                                                void *data)
884{
885        int ret;
886        struct pmu_event pe = {
887                .pmu = &big_c_string[pmu->pmu_name.offset],
888        };
889
890        for (uint32_t i = 0; i < pmu->num_entries; i++) {
891                decompress_event(pmu->entries[i].offset, &pe);
892                if (!pe.name)
893                        continue;
894                ret = fn(&pe, table, data);
895                if (ret)
896                        return ret;
897        }
898        return 0;
899 }
900
901static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
902                                            const struct pmu_table_entry *pmu,
903                                            const char *name,
904                                            pmu_event_iter_fn fn,
905                                            void *data)
906{
907        struct pmu_event pe = {
908                .pmu = &big_c_string[pmu->pmu_name.offset],
909        };
910        int low = 0, high = pmu->num_entries - 1;
911
912        while (low <= high) {
913                int cmp, mid = (low + high) / 2;
914
915                decompress_event(pmu->entries[mid].offset, &pe);
916
917                if (!pe.name && !name)
918                        goto do_call;
919
920                if (!pe.name && name) {
921                        low = mid + 1;
922                        continue;
923                }
924                if (pe.name && !name) {
925                        high = mid - 1;
926                        continue;
927                }
928
929                cmp = strcasecmp(pe.name, name);
930                if (cmp < 0) {
931                        low = mid + 1;
932                        continue;
933                }
934                if (cmp > 0) {
935                        high = mid - 1;
936                        continue;
937                }
938  do_call:
939                return fn ? fn(&pe, table, data) : 0;
940        }
941        return PMU_EVENTS__NOT_FOUND;
942}
943
944int pmu_events_table__for_each_event(const struct pmu_events_table *table,
945                                    struct perf_pmu *pmu,
946                                    pmu_event_iter_fn fn,
947                                    void *data)
948{
949        for (size_t i = 0; i < table->num_pmus; i++) {
950                const struct pmu_table_entry *table_pmu = &table->pmus[i];
951                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
952                int ret;
953
954                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
955                        continue;
956
957                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
958                if (ret)
959                        return ret;
960        }
961        return 0;
962}
963
964int pmu_events_table__find_event(const struct pmu_events_table *table,
965                                 struct perf_pmu *pmu,
966                                 const char *name,
967                                 pmu_event_iter_fn fn,
968                                 void *data)
969{
970        for (size_t i = 0; i < table->num_pmus; i++) {
971                const struct pmu_table_entry *table_pmu = &table->pmus[i];
972                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
973                int ret;
974
975                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
976                        continue;
977
978                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
979                if (ret != PMU_EVENTS__NOT_FOUND)
980                        return ret;
981        }
982        return PMU_EVENTS__NOT_FOUND;
983}
984
985size_t pmu_events_table__num_events(const struct pmu_events_table *table,
986                                    struct perf_pmu *pmu)
987{
988        size_t count = 0;
989
990        for (size_t i = 0; i < table->num_pmus; i++) {
991                const struct pmu_table_entry *table_pmu = &table->pmus[i];
992                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
993
994                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
995                        count += table_pmu->num_entries;
996        }
997        return count;
998}
999
1000static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
1001                                                const struct pmu_table_entry *pmu,
1002                                                pmu_metric_iter_fn fn,
1003                                                void *data)
1004{
1005        int ret;
1006        struct pmu_metric pm = {
1007                .pmu = &big_c_string[pmu->pmu_name.offset],
1008        };
1009
1010        for (uint32_t i = 0; i < pmu->num_entries; i++) {
1011                decompress_metric(pmu->entries[i].offset, &pm);
1012                if (!pm.metric_expr)
1013                        continue;
1014                ret = fn(&pm, table, data);
1015                if (ret)
1016                        return ret;
1017        }
1018        return 0;
1019}
1020
1021static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
1022                                            const struct pmu_table_entry *pmu,
1023                                            const char *metric,
1024                                            pmu_metric_iter_fn fn,
1025                                            void *data)
1026{
1027        struct pmu_metric pm = {
1028                .pmu = &big_c_string[pmu->pmu_name.offset],
1029        };
1030        int low = 0, high = pmu->num_entries - 1;
1031
1032        while (low <= high) {
1033                int cmp, mid = (low + high) / 2;
1034
1035                decompress_metric(pmu->entries[mid].offset, &pm);
1036
1037                if (!pm.metric_name && !metric)
1038                        goto do_call;
1039
1040                if (!pm.metric_name && metric) {
1041                        low = mid + 1;
1042                        continue;
1043                }
1044                if (pm.metric_name && !metric) {
1045                        high = mid - 1;
1046                        continue;
1047                }
1048
1049                cmp = strcmp(pm.metric_name, metric);
1050                if (cmp < 0) {
1051                        low = mid + 1;
1052                        continue;
1053                }
1054                if (cmp > 0) {
1055                        high = mid - 1;
1056                        continue;
1057                }
1058  do_call:
1059                return fn ? fn(&pm, table, data) : 0;
1060        }
1061        return PMU_METRICS__NOT_FOUND;
1062}
1063
1064int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
1065                                     pmu_metric_iter_fn fn,
1066                                     void *data)
1067{
1068        for (size_t i = 0; i < table->num_pmus; i++) {
1069                int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
1070                                                                 fn, data);
1071
1072                if (ret)
1073                        return ret;
1074        }
1075        return 0;
1076}
1077
1078int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
1079                                 struct perf_pmu *pmu,
1080                                 const char *metric,
1081                                 pmu_metric_iter_fn fn,
1082                                 void *data)
1083{
1084        for (size_t i = 0; i < table->num_pmus; i++) {
1085                const struct pmu_table_entry *table_pmu = &table->pmus[i];
1086                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1087                int ret;
1088
1089                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
1090                        continue;
1091
1092                ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
1093                if (ret != PMU_METRICS__NOT_FOUND)
1094                        return ret;
1095        }
1096        return PMU_METRICS__NOT_FOUND;
1097}
1098
1099static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
1100{
1101        static struct {
1102                const struct pmu_events_map *map;
1103                struct perf_cpu cpu;
1104        } last_result;
1105        static struct {
1106                const struct pmu_events_map *map;
1107                char *cpuid;
1108        } last_map_search;
1109        static bool has_last_result, has_last_map_search;
1110        const struct pmu_events_map *map = NULL;
1111        char *cpuid = NULL;
1112        size_t i;
1113
1114        if (has_last_result && last_result.cpu.cpu == cpu.cpu)
1115                return last_result.map;
1116
1117        cpuid = get_cpuid_allow_env_override(cpu);
1118
1119        /*
1120         * On some platforms which use a cpus map, cpuid can be NULL for
1121         * PMUs other than CORE PMUs.
1122         */
1123        if (!cpuid)
1124                goto out_update_last_result;
1125
1126        if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
1127                map = last_map_search.map;
1128                free(cpuid);
1129        } else {
1130                i = 0;
1131                for (;;) {
1132                        map = &pmu_events_map[i++];
1133
1134                        if (!map->arch) {
1135                                map = NULL;
1136                                break;
1137                        }
1138
1139                        if (!strcmp_cpuid_str(map->cpuid, cpuid))
1140                                break;
1141               }
1142               free(last_map_search.cpuid);
1143               last_map_search.cpuid = cpuid;
1144               last_map_search.map = map;
1145               has_last_map_search = true;
1146        }
1147out_update_last_result:
1148        last_result.cpu = cpu;
1149        last_result.map = map;
1150        has_last_result = true;
1151        return map;
1152}
1153
1154static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
1155{
1156        struct perf_cpu cpu = {-1};
1157
1158        if (pmu)
1159                cpu = perf_cpu_map__min(pmu->cpus);
1160        return map_for_cpu(cpu);
1161}
1162
1163const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
1164{
1165        const struct pmu_events_map *map = map_for_pmu(pmu);
1166
1167        if (!map)
1168                return NULL;
1169
1170        if (!pmu)
1171                return &map->event_table;
1172
1173        for (size_t i = 0; i < map->event_table.num_pmus; i++) {
1174                const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
1175                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1176
1177                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
1178                         return &map->event_table;
1179        }
1180        return NULL;
1181}
1182
1183const struct pmu_metrics_table *pmu_metrics_table__find(void)
1184{
1185        struct perf_cpu cpu = {-1};
1186        const struct pmu_events_map *map = map_for_cpu(cpu);
1187
1188        return map ? &map->metric_table : NULL;
1189}
1190
1191const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
1192{
1193        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1194             tables->arch;
1195             tables++) {
1196                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1197                        return &tables->event_table;
1198        }
1199        return NULL;
1200}
1201
1202const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
1203{
1204        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1205             tables->arch;
1206             tables++) {
1207                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1208                        return &tables->metric_table;
1209        }
1210        return NULL;
1211}
1212
1213int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
1214{
1215        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1216             tables->arch;
1217             tables++) {
1218                int ret = pmu_events_table__for_each_event(&tables->event_table,
1219                                                           /*pmu=*/ NULL, fn, data);
1220
1221                if (ret)
1222                        return ret;
1223        }
1224        return 0;
1225}
1226
1227int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
1228{
1229        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1230             tables->arch;
1231             tables++) {
1232                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1233
1234                if (ret)
1235                        return ret;
1236        }
1237        return 0;
1238}
1239
1240const struct pmu_events_table *find_sys_events_table(const char *name)
1241{
1242        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1243             tables->name;
1244             tables++) {
1245                if (!strcmp(tables->name, name))
1246                        return &tables->event_table;
1247        }
1248        return NULL;
1249}
1250
1251int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
1252{
1253        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1254             tables->name;
1255             tables++) {
1256                int ret = pmu_events_table__for_each_event(&tables->event_table,
1257                                                           /*pmu=*/ NULL, fn, data);
1258
1259                if (ret)
1260                        return ret;
1261        }
1262        return 0;
1263}
1264
1265int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
1266{
1267        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1268             tables->name;
1269             tables++) {
1270                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1271
1272                if (ret)
1273                        return ret;
1274        }
1275        return 0;
1276}
1277""")
1278
1279def print_metricgroups() -> None:
1280  _args.output_file.write("""
1281static const int metricgroups[][2] = {
1282""")
1283  for mgroup in sorted(_metricgroups):
1284    description = _metricgroups[mgroup]
1285    _args.output_file.write(
1286        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
1287    )
1288  _args.output_file.write("""
1289};
1290
1291const char *describe_metricgroup(const char *group)
1292{
1293        int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
1294
1295        while (low <= high) {
1296                int mid = (low + high) / 2;
1297                const char *mgroup = &big_c_string[metricgroups[mid][0]];
1298                int cmp = strcmp(mgroup, group);
1299
1300                if (cmp == 0) {
1301                        return &big_c_string[metricgroups[mid][1]];
1302                } else if (cmp < 0) {
1303                        low = mid + 1;
1304                } else {
1305                        high = mid - 1;
1306                }
1307        }
1308        return NULL;
1309}
1310""")
1311
1312def main() -> None:
1313  global _args
1314
1315  def dir_path(path: str) -> str:
1316    """Validate path is a directory for argparse."""
1317    if os.path.isdir(path):
1318      return path
1319    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
1320
1321  def ftw(path: str, parents: Sequence[str],
1322          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
1323    """Replicate the directory/file walking behavior of C's file tree walk."""
1324    for item in sorted(os.scandir(path), key=lambda e: e.name):
1325      if _args.model != 'all' and item.is_dir():
1326        # Check if the model matches one in _args.model.
1327        if len(parents) == _args.model.split(',')[0].count('/'):
1328          # We're testing the correct directory.
1329          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
1330          if 'test' not in item_path and 'common' not in item_path and item_path not in _args.model.split(','):
1331            continue
1332      try:
1333        action(parents, item)
1334      except Exception as e:
1335        raise RuntimeError(f'Action failure for \'{item.name}\' in {parents}') from e
1336      if item.is_dir():
1337        ftw(item.path, parents + [item.name], action)
1338
1339  ap = argparse.ArgumentParser()
1340  ap.add_argument('arch', help='Architecture name like x86')
1341  ap.add_argument('model', help='''Select a model such as skylake to
1342reduce the code size.  Normally set to "all". For architectures like
1343ARM64 with an implementor/model, the model must include the implementor
1344such as "arm/cortex-a34".''',
1345                  default='all')
1346  ap.add_argument(
1347      'starting_dir',
1348      type=dir_path,
1349      help='Root of tree containing architecture directories containing json files'
1350  )
1351  ap.add_argument(
1352      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
1353  _args = ap.parse_args()
1354
1355  _args.output_file.write(f"""
1356/* SPDX-License-Identifier: GPL-2.0 */
1357/* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */
1358""")
1359  _args.output_file.write("""
1360#include <pmu-events/pmu-events.h>
1361#include "util/header.h"
1362#include "util/pmu.h"
1363#include <string.h>
1364#include <stddef.h>
1365
1366struct compact_pmu_event {
1367        int offset;
1368};
1369
1370struct pmu_table_entry {
1371        const struct compact_pmu_event *entries;
1372        uint32_t num_entries;
1373        struct compact_pmu_event pmu_name;
1374};
1375
1376""")
1377  archs = []
1378  for item in os.scandir(_args.starting_dir):
1379    if not item.is_dir():
1380      continue
1381    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test' or item.name == 'common':
1382      archs.append(item.name)
1383
1384  if len(archs) < 2 and _args.arch != 'none':
1385    raise IOError(f'Missing architecture directory \'{_args.arch}\'')
1386
1387  archs.sort()
1388  for arch in archs:
1389    arch_path = f'{_args.starting_dir}/{arch}'
1390    preprocess_arch_std_files(arch_path)
1391    ftw(arch_path, [], preprocess_one_file)
1392
1393  _bcs.compute()
1394  _args.output_file.write('static const char *const big_c_string =\n')
1395  for s in _bcs.big_string:
1396    _args.output_file.write(s)
1397  _args.output_file.write(';\n\n')
1398  for arch in archs:
1399    arch_path = f'{_args.starting_dir}/{arch}'
1400    ftw(arch_path, [], process_one_file)
1401    print_pending_events()
1402    print_pending_metrics()
1403
1404  print_mapping_table(archs)
1405  print_system_mapping_table()
1406  print_metricgroups()
1407
1408if __name__ == '__main__':
1409  main()
1410