xref: /linux/tools/perf/pmu-events/jevents.py (revision 95d692f9aba7c13b5b3e8d842656c47bde7e551f)
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
4import argparse
5import csv
6from functools import lru_cache
7import json
8import metric
9import os
10import sys
11from typing import (Callable, Dict, List, Optional, Sequence, Set, Tuple)
12import collections
13
14# Global command line arguments.
15_args = None
16# List of regular event tables.
17_event_tables = []
18# List of event tables generated from "/sys" directories.
19_sys_event_tables = []
20# List of regular metric tables.
21_metric_tables = []
22# List of metric tables generated from "/sys" directories.
23_sys_metric_tables = []
24# Mapping between sys event table names and sys metric table names.
25_sys_event_table_to_metric_table_mapping = {}
26# Map from an event name to an architecture standard
27# JsonEvent. Architecture standard events are in json files in the top
28# f'{_args.starting_dir}/{_args.arch}' directory.
29_arch_std_events = {}
30# Events to write out when the table is closed
31_pending_events = []
32# Name of events table to be written out
33_pending_events_tblname = None
34# Metrics to write out when the table is closed
35_pending_metrics = []
36# Name of metrics table to be written out
37_pending_metrics_tblname = None
38# Global BigCString shared by all structures.
39_bcs = None
40# Map from the name of a metric group to a description of the group.
41_metricgroups = {}
42# Order in which specific JsonEvent attributes will be visited.
43_json_event_attributes = [
44    # cmp_sevent related attributes.
45    'name', 'topic', 'desc',
46    # Seems useful, put it early.
47    'event',
48    # Short things in alphabetical order.
49    'compat', 'deprecated', 'perpkg', 'unit',
50    # Retirement latency, currently specific to Intel Granite Rapids.
51    'retirement_latency_mean', 'retirement_latency_min',
52    'retirement_latency_max',
53    # Longer things (the last won't be iterated over during decompress).
54    'long_desc'
55]
56
57# Attributes that are in pmu_metric rather than pmu_event.
58_json_metric_attributes = [
59    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
60    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
61    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
62]
63# Attributes that are bools or enum int values, encoded as '0', '1',...
64_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
65
66def removesuffix(s: str, suffix: str) -> str:
67  """Remove the suffix from a string
68
69  The removesuffix function is added to str in Python 3.9. We aim for 3.6
70  compatibility and so provide our own function here.
71  """
72  return s[0:-len(suffix)] if s.endswith(suffix) else s
73
74
75def file_name_to_table_name(prefix: str, parents: Sequence[str],
76                            dirname: str) -> str:
77  """Generate a C table name from directory names."""
78  tblname = prefix
79  for p in parents:
80    tblname += '_' + p
81  tblname += '_' + dirname
82  return tblname.replace('-', '_')
83
84
85def c_len(s: str) -> int:
86  """Return the length of s a C string
87
88  This doesn't handle all escape characters properly. It first assumes
89  all \\ are for escaping, it then adjusts as it will have over counted
90  \\. The code uses \000 rather than \0 as a terminator as an adjacent
91  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
92  equal a terminator followed by the number 5 but the escape of
93  \05). The code adjusts for \000 but not properly for all octal, hex
94  or unicode values.
95  """
96  try:
97    utf = s.encode(encoding='utf-8', errors='strict')
98  except:
99    print(f'broken string {s}')
100    raise
101  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
102
103class BigCString:
104  """A class to hold many strings concatenated together.
105
106  Generating a large number of stand-alone C strings creates a large
107  number of relocations in position independent code. The BigCString
108  is a helper for this case. It builds a single string which within it
109  are all the other C strings (to avoid memory issues the string
110  itself is held as a list of strings). The offsets within the big
111  string are recorded and when stored to disk these don't need
112  relocation. To reduce the size of the string further, identical
113  strings are merged. If a longer string ends-with the same value as a
114  shorter string, these entries are also merged.
115  """
116  strings: Set[str]
117  big_string: Sequence[str]
118  offsets: Dict[str, int]
119  insert_number: int
120  insert_point: Dict[str, int]
121  metrics: Set[str]
122
123  def __init__(self):
124    self.strings = set()
125    self.insert_number = 0
126    self.insert_point = {}
127    self.metrics = set()
128
129  def add(self, s: str, metric: bool) -> None:
130    """Called to add to the big string."""
131    if s not in self.strings:
132      self.strings.add(s)
133      self.insert_point[s] = self.insert_number
134      self.insert_number += 1
135      if metric:
136        self.metrics.add(s)
137
138  def compute(self) -> None:
139    """Called once all strings are added to compute the string and offsets."""
140
141    folded_strings = {}
142    # Determine if two strings can be folded, i.e. let one string use
143    # the end of another. First reverse all strings and sort them.
144    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
145
146    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
147    # for each string to see if there is a better candidate to fold it
148    # into; in the example, rather than using 'yz' we can use 'xyz' at
149    # an offset of 1. We record which string can be folded into which
150    # in folded_strings; we don't need to record the offset as it is
151    # trivially computed from the string lengths.
152    for pos,s in enumerate(sorted_reversed_strings):
153      best_pos = pos
154      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
155        if sorted_reversed_strings[check_pos].startswith(s):
156          best_pos = check_pos
157        else:
158          break
159      if pos != best_pos:
160        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
161
162    # Compute reverse mappings for debugging.
163    fold_into_strings = collections.defaultdict(set)
164    for key, val in folded_strings.items():
165      if key != val:
166        fold_into_strings[val].add(key)
167
168    # big_string_offset is the current location within the C string
169    # being appended to - comments, etc. don't count. big_string is
170    # the string contents represented as a list. Strings are immutable
171    # in Python and so appending to one causes memory issues, while
172    # lists are mutable.
173    big_string_offset = 0
174    self.big_string = []
175    self.offsets = {}
176
177    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
178      return (s in self.metrics, self.insert_point[s], s)
179
180    # Emit all strings that aren't folded in a sorted manner.
181    for s in sorted(self.strings, key=string_cmp_key):
182      if s not in folded_strings:
183        self.offsets[s] = big_string_offset
184        self.big_string.append(f'/* offset={big_string_offset} */ "')
185        self.big_string.append(s)
186        self.big_string.append('"')
187        if s in fold_into_strings:
188          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
189        self.big_string.append('\n')
190        big_string_offset += c_len(s)
191        continue
192
193    # Compute the offsets of the folded strings.
194    for s in folded_strings.keys():
195      assert s not in self.offsets
196      folded_s = folded_strings[s]
197      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
198
199_bcs = BigCString()
200
201class JsonEvent:
202  """Representation of an event loaded from a json file dictionary."""
203
204  def __init__(self, jd: dict):
205    """Constructor passed the dictionary of parsed json values."""
206
207    def llx(x: int) -> str:
208      """Convert an int to a string similar to a printf modifier of %#llx."""
209      return str(x) if x >= 0 and x < 10 else hex(x)
210
211    def fixdesc(s: str) -> str:
212      """Fix formatting issue for the desc string."""
213      if s is None:
214        return None
215      return removesuffix(removesuffix(removesuffix(s, '.  '),
216                                       '. '), '.').replace('\n', '\\n').replace(
217                                           '\"', '\\"').replace('\r', '\\r')
218
219    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
220      """Returns the aggr_mode_class enum value associated with the JSON string."""
221      if not aggr_mode:
222        return None
223      aggr_mode_to_enum = {
224          'PerChip': '1',
225          'PerCore': '2',
226      }
227      return aggr_mode_to_enum[aggr_mode]
228
229    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
230      """Returns the metric_event_groups enum value associated with the JSON string."""
231      if not metric_constraint:
232        return None
233      metric_constraint_to_enum = {
234          'NO_GROUP_EVENTS': '1',
235          'NO_GROUP_EVENTS_NMI': '2',
236          'NO_NMI_WATCHDOG': '2',
237          'NO_GROUP_EVENTS_SMT': '3',
238      }
239      return metric_constraint_to_enum[metric_constraint]
240
241    def lookup_msr(num: str) -> Optional[str]:
242      """Converts the msr number, or first in a list to the appropriate event field."""
243      if not num:
244        return None
245      msrmap = {
246          0x3F6: 'ldlat=',
247          0x1A6: 'offcore_rsp=',
248          0x1A7: 'offcore_rsp=',
249          0x3F7: 'frontend=',
250      }
251      return msrmap[int(num.split(',', 1)[0], 0)]
252
253    def real_event(name: str, event: str) -> Optional[str]:
254      """Convert well known event names to an event string otherwise use the event argument."""
255      fixed = {
256          'inst_retired.any': 'event=0xc0,period=2000003',
257          'inst_retired.any_p': 'event=0xc0,period=2000003',
258          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
259          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
260          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
261          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
262      }
263      if not name:
264        return None
265      if name.lower() in fixed:
266        return fixed[name.lower()]
267      return event
268
269    def unit_to_pmu(unit: str) -> Optional[str]:
270      """Convert a JSON Unit to Linux PMU name."""
271      if not unit:
272        return 'default_core'
273      # Comment brought over from jevents.c:
274      # it's not realistic to keep adding these, we need something more scalable ...
275      table = {
276          'CBO': 'uncore_cbox',
277          'QPI LL': 'uncore_qpi',
278          'SBO': 'uncore_sbox',
279          'iMPH-U': 'uncore_arb',
280          'CPU-M-CF': 'cpum_cf',
281          'CPU-M-SF': 'cpum_sf',
282          'PAI-CRYPTO' : 'pai_crypto',
283          'PAI-EXT' : 'pai_ext',
284          'UPI LL': 'uncore_upi',
285          'hisi_sicl,cpa': 'hisi_sicl,cpa',
286          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
287          'hisi_sccl,hha': 'hisi_sccl,hha',
288          'hisi_sccl,l3c': 'hisi_sccl,l3c',
289          'imx8_ddr': 'imx8_ddr',
290          'imx9_ddr': 'imx9_ddr',
291          'L3PMC': 'amd_l3',
292          'DFPMC': 'amd_df',
293          'UMCPMC': 'amd_umc',
294          'cpu_core': 'cpu_core',
295          'cpu_atom': 'cpu_atom',
296          'ali_drw': 'ali_drw',
297          'arm_cmn': 'arm_cmn',
298          'tool': 'tool',
299      }
300      return table[unit] if unit in table else f'uncore_{unit.lower()}'
301
302    def is_zero(val: str) -> bool:
303      try:
304        if val.startswith('0x'):
305          return int(val, 16) == 0
306        else:
307          return int(val) == 0
308      except ValueError:
309        return False
310
311    def canonicalize_value(val: str) -> str:
312      try:
313        if val.startswith('0x'):
314          return llx(int(val, 16))
315        return str(int(val))
316      except ValueError:
317        return val
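    # Illustrative: canonicalize_value('0x00') == '0',
    # canonicalize_value('0xFF') == '0xff', and a non-numeric value is
    # returned unchanged.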
318
319    eventcode = 0
320    if 'EventCode' in jd:
321      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
322    if 'ExtSel' in jd:
323      eventcode |= int(jd['ExtSel']) << 8
324    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
325    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
326    self.name = jd['EventName'].lower() if 'EventName' in jd else None
327    self.topic = ''
328    self.compat = jd.get('Compat')
329    self.desc = fixdesc(jd.get('BriefDescription'))
330    self.long_desc = fixdesc(jd.get('PublicDescription'))
331    precise = jd.get('PEBS')
332    msr = lookup_msr(jd.get('MSRIndex'))
333    msrval = jd.get('MSRValue')
334    extra_desc = ''
335    if 'Data_LA' in jd:
336      extra_desc += '  Supports address when precise'
337      if 'Errata' in jd:
338        extra_desc += '.'
339    if 'Errata' in jd:
340      extra_desc += '  Spec update: ' + jd['Errata']
341    self.pmu = unit_to_pmu(jd.get('Unit'))
342    filter = jd.get('Filter')
343    self.unit = jd.get('ScaleUnit')
344    self.perpkg = jd.get('PerPkg')
345    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
346    self.deprecated = jd.get('Deprecated')
347    self.retirement_latency_mean = jd.get('RetirementLatencyMean')
348    self.retirement_latency_min = jd.get('RetirementLatencyMin')
349    self.retirement_latency_max = jd.get('RetirementLatencyMax')
350    self.metric_name = jd.get('MetricName')
351    self.metric_group = jd.get('MetricGroup')
352    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
353    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
354    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
355    self.metric_expr = None
356    if 'MetricExpr' in jd:
357      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
358    # Note, the metric formula for the threshold isn't parsed as the &
359    # and > have incorrect precedence.
360    self.metric_threshold = jd.get('MetricThreshold')
361
362    arch_std = jd.get('ArchStdEvent')
363    if precise and self.desc and '(Precise Event)' not in self.desc:
364      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
365                                                                 'event)')
366    event = None
367    if configcode is not None:
368      event = f'config={llx(configcode)}'
369    elif eventidcode is not None:
370      event = f'eventid={llx(eventidcode)}'
371    else:
372      event = f'event={llx(eventcode)}'
373    event_fields = [
374        ('AnyThread', 'any='),
375        ('PortMask', 'ch_mask='),
376        ('CounterMask', 'cmask='),
377        ('EdgeDetect', 'edge='),
378        ('FCMask', 'fc_mask='),
379        ('Invert', 'inv='),
380        ('SampleAfterValue', 'period='),
381        ('UMask', 'umask='),
382        ('NodeType', 'type='),
383        ('RdWrMask', 'rdwrmask='),
384        ('EnAllCores', 'enallcores='),
385        ('EnAllSlices', 'enallslices='),
386        ('SliceId', 'sliceid='),
387        ('ThreadMask', 'threadmask='),
388    ]
389    for key, value in event_fields:
390      if key in jd and not is_zero(jd[key]):
391        event += f',{value}{canonicalize_value(jd[key])}'
392    if filter:
393      event += f',{filter}'
394    if msr:
395      event += f',{msr}{msrval}'
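    # At this point event is a perf term list such as (illustrative)
    # 'event=0x3c,umask=0x1,cmask=0x1', with any filter and MSR terms appended.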
396    if self.desc and extra_desc:
397      self.desc += extra_desc
398    if self.long_desc and extra_desc:
399      self.long_desc += extra_desc
400    if self.desc and self.long_desc and self.desc == self.long_desc:
401        # Avoid duplicated descriptions.
402        self.long_desc = None
403    if arch_std:
404      if arch_std.lower() in _arch_std_events:
405        event = _arch_std_events[arch_std.lower()].event
406        # Copy from the architecture standard event to self for undefined fields.
407        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
408          if hasattr(self, attr) and not getattr(self, attr):
409            setattr(self, attr, value)
410      else:
411        raise argparse.ArgumentTypeError(f'Cannot find arch std event: {arch_std}')
412
413    self.event = real_event(self.name, event)
414
415  def __repr__(self) -> str:
416    """String representation primarily for debugging."""
417    s = '{\n'
418    for attr, value in self.__dict__.items():
419      if value:
420        s += f'\t{attr} = {value},\n'
421    return s + '}'
422
423  def build_c_string(self, metric: bool) -> str:
424    s = ''
425    for attr in _json_metric_attributes if metric else _json_event_attributes:
426      x = getattr(self, attr)
427      if metric and x and attr == 'metric_expr':
428        # Convert parsed metric expressions into a string. Slashes
429        # must be doubled in the file.
430        x = x.ToPerfJson().replace('\\', '\\\\')
431      if metric and x and attr == 'metric_threshold':
432        x = x.replace('\\', '\\\\')
433      if attr in _json_enum_attributes:
434        s += x if x else '0'
435      else:
436        s += f'{x}\\000' if x else '\\000'
437    return s
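  # A sketch of the encoding produced above (illustrative, assuming only name,
  # topic, desc and event are populated): the attributes are emitted in
  # _json_event_attributes order as
  #   'inst_retired.any\000pipeline\000Instructions retired\000'
  #   'event=0xc0,period=2000003\000' + '\000' + '00' + '\000\000\000\000\000'
  # where each empty string attribute contributes a lone '\000' and the
  # deprecated and perpkg enums contribute a single character with no
  # terminator.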
438
439  def to_c_string(self, metric: bool) -> str:
440    """Representation of the event as a C struct initializer."""
441
442    def fix_comment(s: str) -> str:
443      return s.replace('*/', r'\*\/')
444
445    s = self.build_c_string(metric)
446    return f'{{ { _bcs.offsets[s] } }}, /* {fix_comment(s)} */\n'
447
448
449@lru_cache(maxsize=None)
450def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
451  """Read json events from the specified file."""
452  try:
453    events = json.load(open(path), object_hook=JsonEvent)
454  except BaseException:
455    print(f"Exception processing {path}")
456    raise
457  metrics: List[Tuple[str, str, metric.Expression]] = []
458  for event in events:
459    event.topic = topic
460    if event.metric_name and '-' not in event.metric_name:
461      metrics.append((event.pmu, event.metric_name, event.metric_expr))
462  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
463  if updates:
464    for event in events:
465      if event.metric_name in updates:
466        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
467        #       f'to\n"{updates[event.metric_name]}"')
468        event.metric_expr = updates[event.metric_name]
469
470  return events
471
472def preprocess_arch_std_files(archpath: str) -> None:
473  """Read in all architecture standard events."""
474  global _arch_std_events
475  for item in os.scandir(archpath):
476    if not item.is_file() or not item.name.endswith('.json'):
477      continue
478    try:
479      for event in read_json_events(item.path, topic=''):
480        if event.name:
481          _arch_std_events[event.name.lower()] = event
482        if event.metric_name:
483          _arch_std_events[event.metric_name.lower()] = event
484    except Exception as e:
485        raise RuntimeError(f'Failure processing \'{item.name}\' in \'{archpath}\'') from e
486
487
488def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
489  """Add contents of file to _pending_events table."""
490  for e in read_json_events(item.path, topic):
491    if e.name:
492      _pending_events.append(e)
493    if e.metric_name:
494      _pending_metrics.append(e)
495
496
497def print_pending_events() -> None:
498  """Optionally close events table."""
499
500  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
501    def fix_none(s: Optional[str]) -> str:
502      if s is None:
503        return ''
504      return s
505
506    return (fix_none(j.pmu).replace(',', '_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
507            fix_none(j.metric_name))
508
509  global _pending_events
510  if not _pending_events:
511    return
512
513  global _pending_events_tblname
514  if _pending_events_tblname.endswith('_sys'):
515    global _sys_event_tables
516    _sys_event_tables.append(_pending_events_tblname)
517  else:
518    global _event_tables
519    _event_tables.append(_pending_events_tblname)
520
521  first = True
522  last_pmu = None
523  last_name = None
524  pmus = set()
525  for event in sorted(_pending_events, key=event_cmp_key):
526    if last_pmu and last_pmu == event.pmu:
527      assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}"
528    if event.pmu != last_pmu:
529      if not first:
530        _args.output_file.write('};\n')
531      pmu_name = event.pmu.replace(',', '_')
532      _args.output_file.write(
533          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
534      first = False
535      last_pmu = event.pmu
536      pmus.add((event.pmu, pmu_name))
537
538    _args.output_file.write(event.to_c_string(metric=False))
539    last_name = event.name
540  _pending_events = []
541
542  _args.output_file.write(f"""
543}};
544
545const struct pmu_table_entry {_pending_events_tblname}[] = {{
546""")
547  for (pmu, tbl_pmu) in sorted(pmus):
548    pmu_name = f"{pmu}\\000"
549    _args.output_file.write(f"""{{
550     .entries = {_pending_events_tblname}_{tbl_pmu},
551     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
552     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
553}},
554""")
555  _args.output_file.write('};\n\n')
556
557def print_pending_metrics() -> None:
558  """Optionally close metrics table."""
559
560  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
561    def fix_none(s: Optional[str]) -> str:
562      if s is None:
563        return ''
564      return s
565
566    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
567
568  global _pending_metrics
569  if not _pending_metrics:
570    return
571
572  global _pending_metrics_tblname
573  if _pending_metrics_tblname.endswith('_sys'):
574    global _sys_metric_tables
575    _sys_metric_tables.append(_pending_metrics_tblname)
576  else:
577    global _metric_tables
578    _metric_tables.append(_pending_metrics_tblname)
579
580  first = True
581  last_pmu = None
582  pmus = set()
583  for metric in sorted(_pending_metrics, key=metric_cmp_key):
584    if metric.pmu != last_pmu:
585      if not first:
586        _args.output_file.write('};\n')
587      pmu_name = metric.pmu.replace(',', '_')
588      _args.output_file.write(
589          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
590      first = False
591      last_pmu = metric.pmu
592      pmus.add((metric.pmu, pmu_name))
593
594    _args.output_file.write(metric.to_c_string(metric=True))
595  _pending_metrics = []
596
597  _args.output_file.write(f"""
598}};
599
600const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
601""")
602  for (pmu, tbl_pmu) in sorted(pmus):
603    pmu_name = f"{pmu}\\000"
604    _args.output_file.write(f"""{{
605     .entries = {_pending_metrics_tblname}_{tbl_pmu},
606     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
607     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
608}},
609""")
610  _args.output_file.write('};\n\n')
611
612def get_topic(topic: str) -> str:
613  if topic.endswith('metrics.json'):
614    return 'metrics'
615  return removesuffix(topic, '.json').replace('-', ' ')
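  # e.g. get_topic('floating-point.json') == 'floating point', while anything
  # ending in 'metrics.json' is grouped under the 'metrics' topic.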
616
617def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
618
619  if item.is_dir():
620    return
621
622  # base dir or too deep
623  level = len(parents)
624  if level == 0 or level > 4:
625    return
626
627  # Ignore other directories. If the file name does not have a .json
628  # extension, ignore it. It could be a readme.txt for instance.
629  if not item.is_file() or not item.name.endswith('.json'):
630    return
631
632  if item.name == 'metricgroups.json':
633    metricgroup_descriptions = json.load(open(item.path))
634    for mgroup in metricgroup_descriptions:
635      assert len(mgroup) > 1, parents
636      description = f"{metricgroup_descriptions[mgroup]}\\000"
637      mgroup = f"{mgroup}\\000"
638      _bcs.add(mgroup, metric=True)
639      _bcs.add(description, metric=True)
640      _metricgroups[mgroup] = description
641    return
642
643  topic = get_topic(item.name)
644  for event in read_json_events(item.path, topic):
645    pmu_name = f"{event.pmu}\\000"
646    if event.name:
647      _bcs.add(pmu_name, metric=False)
648      _bcs.add(event.build_c_string(metric=False), metric=False)
649    if event.metric_name:
650      _bcs.add(pmu_name, metric=True)
651      _bcs.add(event.build_c_string(metric=True), metric=True)
652
653def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
654  """Process a JSON file during the main walk."""
655  def is_leaf_dir_ignoring_sys(path: str) -> bool:
656    for item in os.scandir(path):
657      if item.is_dir() and item.name != 'sys':
658        return False
659    return True
660
661  # Model directories are leaves (ignoring possible sys
662  # directories). The FTW will walk into the directory next. Flush
663  # pending events and metrics and update the table names for the new
664  # model directory.
665  if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
666    print_pending_events()
667    print_pending_metrics()
668
669    global _pending_events_tblname
670    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
671    global _pending_metrics_tblname
672    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
673
674    if item.name == 'sys':
675      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
676    return
677
678  # base dir or too deep
679  level = len(parents)
680  if level == 0 or level > 4:
681    return
682
683  # Ignore other directories. If the file name does not have a .json
684  # extension, ignore it. It could be a readme.txt for instance.
685  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
686    return
687
688  add_events_table_entries(item, get_topic(item.name))
689
690
691def print_mapping_table(archs: Sequence[str]) -> None:
692  """Read the mapfile and generate the struct from cpuid string to event table."""
693  _args.output_file.write("""
694/* Struct used to make the PMU event table implementation opaque to callers. */
695struct pmu_events_table {
696        const struct pmu_table_entry *pmus;
697        uint32_t num_pmus;
698};
699
700/* Struct used to make the PMU metric table implementation opaque to callers. */
701struct pmu_metrics_table {
702        const struct pmu_table_entry *pmus;
703        uint32_t num_pmus;
704};
705
706/*
707 * Map a CPU to its table of PMU events. The CPU is identified by the
708 * cpuid field, which is an arch-specific identifier for the CPU.
709 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
710 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
711 *
712 * The cpuid can contain any character other than the comma.
713 */
714struct pmu_events_map {
715        const char *arch;
716        const char *cpuid;
717        struct pmu_events_table event_table;
718        struct pmu_metrics_table metric_table;
719};
720
721/*
722 * Global table mapping each known CPU for the architecture to its
723 * table of PMU events.
724 */
725const struct pmu_events_map pmu_events_map[] = {
726""")
727  for arch in archs:
728    if arch == 'test':
729      _args.output_file.write("""{
730\t.arch = "testarch",
731\t.cpuid = "testcpu",
732\t.event_table = {
733\t\t.pmus = pmu_events__test_soc_cpu,
734\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
735\t},
736\t.metric_table = {
737\t\t.pmus = pmu_metrics__test_soc_cpu,
738\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
739\t}
740},
741""")
742    elif arch == 'common':
743      _args.output_file.write("""{
744\t.arch = "common",
745\t.cpuid = "common",
746\t.event_table = {
747\t\t.pmus = pmu_events__common,
748\t\t.num_pmus = ARRAY_SIZE(pmu_events__common),
749\t},
750\t.metric_table = {},
751},
752""")
753    else:
754      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
755        table = csv.reader(csvfile)
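        # Each mapfile.csv row after the header is expected to look like
        # '<cpuid or regex>,<version>,<model directory>,<event type>'; only
        # row[0] (the cpuid) and row[2] (the directory) are used here.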
756        first = True
757        for row in table:
758          # Skip the first row or any row beginning with #.
759          if not first and len(row) > 0 and not row[0].startswith('#'):
760            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
761            if event_tblname in _event_tables:
762              event_size = f'ARRAY_SIZE({event_tblname})'
763            else:
764              event_tblname = 'NULL'
765              event_size = '0'
766            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
767            if metric_tblname in _metric_tables:
768              metric_size = f'ARRAY_SIZE({metric_tblname})'
769            else:
770              metric_tblname = 'NULL'
771              metric_size = '0'
772            if event_size == '0' and metric_size == '0':
773              continue
774            cpuid = row[0].replace('\\', '\\\\')
775            _args.output_file.write(f"""{{
776\t.arch = "{arch}",
777\t.cpuid = "{cpuid}",
778\t.event_table = {{
779\t\t.pmus = {event_tblname},
780\t\t.num_pmus = {event_size}
781\t}},
782\t.metric_table = {{
783\t\t.pmus = {metric_tblname},
784\t\t.num_pmus = {metric_size}
785\t}}
786}},
787""")
788          first = False
789
790  _args.output_file.write("""{
791\t.arch = 0,
792\t.cpuid = 0,
793\t.event_table = { 0, 0 },
794\t.metric_table = { 0, 0 },
795}
796};
797""")
798
799
800def print_system_mapping_table() -> None:
801  """C struct mapping table array for tables from /sys directories."""
802  _args.output_file.write("""
803struct pmu_sys_events {
804\tconst char *name;
805\tstruct pmu_events_table event_table;
806\tstruct pmu_metrics_table metric_table;
807};
808
809static const struct pmu_sys_events pmu_sys_event_tables[] = {
810""")
811  printed_metric_tables = []
812  for tblname in _sys_event_tables:
813    _args.output_file.write(f"""\t{{
814\t\t.event_table = {{
815\t\t\t.pmus = {tblname},
816\t\t\t.num_pmus = ARRAY_SIZE({tblname})
817\t\t}},""")
818    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
819    if metric_tblname in _sys_metric_tables:
820      _args.output_file.write(f"""
821\t\t.metric_table = {{
822\t\t\t.pmus = {metric_tblname},
823\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
824\t\t}},""")
825      printed_metric_tables.append(metric_tblname)
826    _args.output_file.write(f"""
827\t\t.name = \"{tblname}\",
828\t}},
829""")
830  for tblname in _sys_metric_tables:
831    if tblname in printed_metric_tables:
832      continue
833    _args.output_file.write(f"""\t{{
834\t\t.metric_table = {{
835\t\t\t.pmus = {tblname},
836\t\t\t.num_pmus = ARRAY_SIZE({tblname})
837\t\t}},
838\t\t.name = \"{tblname}\",
839\t}},
840""")
841  _args.output_file.write("""\t{
842\t\t.event_table = { 0, 0 },
843\t\t.metric_table = { 0, 0 },
844\t},
845};
846
847static void decompress_event(int offset, struct pmu_event *pe)
848{
849\tconst char *p = &big_c_string[offset];
850""")
851  for attr in _json_event_attributes:
852    _args.output_file.write(f'\n\tpe->{attr} = ')
853    if attr in _json_enum_attributes:
854      _args.output_file.write("*p - '0';\n")
855    else:
856      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
857    if attr == _json_event_attributes[-1]:
858      continue
859    if attr in _json_enum_attributes:
860      _args.output_file.write('\tp++;')
861    else:
862      _args.output_file.write('\twhile (*p++);')
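  # For _json_event_attributes the loop above emits C along the lines of
  # (abridged):
  #   pe->name = (*p == '\0' ? NULL : p);
  #   while (*p++);
  #   ...
  #   pe->deprecated = *p - '0';
  #   p++;
  # i.e. string attributes advance past their \000 terminator while enum
  # attributes occupy a single character.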
863  _args.output_file.write("""}
864
865static void decompress_metric(int offset, struct pmu_metric *pm)
866{
867\tconst char *p = &big_c_string[offset];
868""")
869  for attr in _json_metric_attributes:
870    _args.output_file.write(f'\n\tpm->{attr} = ')
871    if attr in _json_enum_attributes:
872      _args.output_file.write("*p - '0';\n")
873    else:
874      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
875    if attr == _json_metric_attributes[-1]:
876      continue
877    if attr in _json_enum_attributes:
878      _args.output_file.write('\tp++;')
879    else:
880      _args.output_file.write('\twhile (*p++);')
881  _args.output_file.write("""}
882
883static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
884                                                const struct pmu_table_entry *pmu,
885                                                pmu_event_iter_fn fn,
886                                                void *data)
887{
888        int ret;
889        struct pmu_event pe = {
890                .pmu = &big_c_string[pmu->pmu_name.offset],
891        };
892
893        for (uint32_t i = 0; i < pmu->num_entries; i++) {
894                decompress_event(pmu->entries[i].offset, &pe);
895                if (!pe.name)
896                        continue;
897                ret = fn(&pe, table, data);
898                if (ret)
899                        return ret;
900        }
901        return 0;
902}
903
904static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
905                                            const struct pmu_table_entry *pmu,
906                                            const char *name,
907                                            pmu_event_iter_fn fn,
908                                            void *data)
909{
910        struct pmu_event pe = {
911                .pmu = &big_c_string[pmu->pmu_name.offset],
912        };
913        int low = 0, high = pmu->num_entries - 1;
914
915        while (low <= high) {
916                int cmp, mid = (low + high) / 2;
917
918                decompress_event(pmu->entries[mid].offset, &pe);
919
920                if (!pe.name && !name)
921                        goto do_call;
922
923                if (!pe.name && name) {
924                        low = mid + 1;
925                        continue;
926                }
927                if (pe.name && !name) {
928                        high = mid - 1;
929                        continue;
930                }
931
932                cmp = strcasecmp(pe.name, name);
933                if (cmp < 0) {
934                        low = mid + 1;
935                        continue;
936                }
937                if (cmp > 0) {
938                        high = mid - 1;
939                        continue;
940                }
941  do_call:
942                return fn ? fn(&pe, table, data) : 0;
943        }
944        return PMU_EVENTS__NOT_FOUND;
945}
946
947int pmu_events_table__for_each_event(const struct pmu_events_table *table,
948                                    struct perf_pmu *pmu,
949                                    pmu_event_iter_fn fn,
950                                    void *data)
951{
952        for (size_t i = 0; i < table->num_pmus; i++) {
953                const struct pmu_table_entry *table_pmu = &table->pmus[i];
954                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
955                int ret;
956
957                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
958                        continue;
959
960                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
961                if (ret)
962                        return ret;
963        }
964        return 0;
965}
966
967int pmu_events_table__find_event(const struct pmu_events_table *table,
968                                 struct perf_pmu *pmu,
969                                 const char *name,
970                                 pmu_event_iter_fn fn,
971                                 void *data)
972{
973        for (size_t i = 0; i < table->num_pmus; i++) {
974                const struct pmu_table_entry *table_pmu = &table->pmus[i];
975                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
976                int ret;
977
978                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
979                        continue;
980
981                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
982                if (ret != PMU_EVENTS__NOT_FOUND)
983                        return ret;
984        }
985        return PMU_EVENTS__NOT_FOUND;
986}
987
988size_t pmu_events_table__num_events(const struct pmu_events_table *table,
989                                    struct perf_pmu *pmu)
990{
991        size_t count = 0;
992
993        for (size_t i = 0; i < table->num_pmus; i++) {
994                const struct pmu_table_entry *table_pmu = &table->pmus[i];
995                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
996
997                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
998                        count += table_pmu->num_entries;
999        }
1000        return count;
1001}
1002
1003static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
1004                                                const struct pmu_table_entry *pmu,
1005                                                pmu_metric_iter_fn fn,
1006                                                void *data)
1007{
1008        int ret;
1009        struct pmu_metric pm = {
1010                .pmu = &big_c_string[pmu->pmu_name.offset],
1011        };
1012
1013        for (uint32_t i = 0; i < pmu->num_entries; i++) {
1014                decompress_metric(pmu->entries[i].offset, &pm);
1015                if (!pm.metric_expr)
1016                        continue;
1017                ret = fn(&pm, table, data);
1018                if (ret)
1019                        return ret;
1020        }
1021        return 0;
1022}
1023
1024static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
1025                                            const struct pmu_table_entry *pmu,
1026                                            const char *metric,
1027                                            pmu_metric_iter_fn fn,
1028                                            void *data)
1029{
1030        struct pmu_metric pm = {
1031                .pmu = &big_c_string[pmu->pmu_name.offset],
1032        };
1033        int low = 0, high = pmu->num_entries - 1;
1034
1035        while (low <= high) {
1036                int cmp, mid = (low + high) / 2;
1037
1038                decompress_metric(pmu->entries[mid].offset, &pm);
1039
1040                if (!pm.metric_name && !metric)
1041                        goto do_call;
1042
1043                if (!pm.metric_name && metric) {
1044                        low = mid + 1;
1045                        continue;
1046                }
1047                if (pm.metric_name && !metric) {
1048                        high = mid - 1;
1049                        continue;
1050                }
1051
1052                cmp = strcmp(pm.metric_name, metric);
1053                if (cmp < 0) {
1054                        low = mid + 1;
1055                        continue;
1056                }
1057                if (cmp > 0) {
1058                        high = mid - 1;
1059                        continue;
1060                }
1061  do_call:
1062                return fn ? fn(&pm, table, data) : 0;
1063        }
1064        return PMU_METRICS__NOT_FOUND;
1065}
1066
1067int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
1068                                     pmu_metric_iter_fn fn,
1069                                     void *data)
1070{
1071        for (size_t i = 0; i < table->num_pmus; i++) {
1072                int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
1073                                                                 fn, data);
1074
1075                if (ret)
1076                        return ret;
1077        }
1078        return 0;
1079}
1080
1081int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
1082                                 struct perf_pmu *pmu,
1083                                 const char *metric,
1084                                 pmu_metric_iter_fn fn,
1085                                 void *data)
1086{
1087        for (size_t i = 0; i < table->num_pmus; i++) {
1088                const struct pmu_table_entry *table_pmu = &table->pmus[i];
1089                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1090                int ret;
1091
1092                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
1093                        continue;
1094
1095                ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
1096                if (ret != PMU_METRICS__NOT_FOUND)
1097                        return ret;
1098        }
1099        return PMU_METRICS__NOT_FOUND;
1100}
1101
1102static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
1103{
1104        static struct {
1105                const struct pmu_events_map *map;
1106                struct perf_cpu cpu;
1107        } last_result;
1108        static struct {
1109                const struct pmu_events_map *map;
1110                char *cpuid;
1111        } last_map_search;
1112        static bool has_last_result, has_last_map_search;
1113        const struct pmu_events_map *map = NULL;
1114        char *cpuid = NULL;
1115        size_t i;
1116
1117        if (has_last_result && last_result.cpu.cpu == cpu.cpu)
1118                return last_result.map;
1119
1120        cpuid = get_cpuid_allow_env_override(cpu);
1121
1122        /*
1123         * On some platforms which use a cpus map, cpuid can be NULL for
1124         * PMUs other than CORE PMUs.
1125         */
1126        if (!cpuid)
1127                goto out_update_last_result;
1128
1129        if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
1130                map = last_map_search.map;
1131                free(cpuid);
1132        } else {
1133                i = 0;
1134                for (;;) {
1135                        map = &pmu_events_map[i++];
1136
1137                        if (!map->arch) {
1138                                map = NULL;
1139                                break;
1140                        }
1141
1142                        if (!strcmp_cpuid_str(map->cpuid, cpuid))
1143                                break;
1144                }
1145                free(last_map_search.cpuid);
1146                last_map_search.cpuid = cpuid;
1147                last_map_search.map = map;
1148                has_last_map_search = true;
1149        }
1150out_update_last_result:
1151        last_result.cpu = cpu;
1152        last_result.map = map;
1153        has_last_result = true;
1154        return map;
1155}
1156
1157static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
1158{
1159        struct perf_cpu cpu = {-1};
1160
1161        if (pmu)
1162                cpu = perf_cpu_map__min(pmu->cpus);
1163        return map_for_cpu(cpu);
1164}
1165
1166const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
1167{
1168        const struct pmu_events_map *map = map_for_pmu(pmu);
1169
1170        if (!map)
1171                return NULL;
1172
1173        if (!pmu)
1174                return &map->event_table;
1175
1176        for (size_t i = 0; i < map->event_table.num_pmus; i++) {
1177                const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
1178                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1179
1180                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
1181                         return &map->event_table;
1182        }
1183        return NULL;
1184}
1185
1186const struct pmu_metrics_table *pmu_metrics_table__find(void)
1187{
1188        struct perf_cpu cpu = {-1};
1189        const struct pmu_events_map *map = map_for_cpu(cpu);
1190
1191        return map ? &map->metric_table : NULL;
1192}
1193
1194const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
1195{
1196        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1197             tables->arch;
1198             tables++) {
1199                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1200                        return &tables->event_table;
1201        }
1202        return NULL;
1203}
1204
1205const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
1206{
1207        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1208             tables->arch;
1209             tables++) {
1210                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1211                        return &tables->metric_table;
1212        }
1213        return NULL;
1214}
1215
1216int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
1217{
1218        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1219             tables->arch;
1220             tables++) {
1221                int ret = pmu_events_table__for_each_event(&tables->event_table,
1222                                                           /*pmu=*/ NULL, fn, data);
1223
1224                if (ret)
1225                        return ret;
1226        }
1227        return 0;
1228}
1229
1230int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
1231{
1232        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1233             tables->arch;
1234             tables++) {
1235                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1236
1237                if (ret)
1238                        return ret;
1239        }
1240        return 0;
1241}
1242
1243const struct pmu_events_table *find_sys_events_table(const char *name)
1244{
1245        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1246             tables->name;
1247             tables++) {
1248                if (!strcmp(tables->name, name))
1249                        return &tables->event_table;
1250        }
1251        return NULL;
1252}
1253
1254int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
1255{
1256        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1257             tables->name;
1258             tables++) {
1259                int ret = pmu_events_table__for_each_event(&tables->event_table,
1260                                                           /*pmu=*/ NULL, fn, data);
1261
1262                if (ret)
1263                        return ret;
1264        }
1265        return 0;
1266}
1267
1268int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
1269{
1270        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1271             tables->name;
1272             tables++) {
1273                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1274
1275                if (ret)
1276                        return ret;
1277        }
1278        return 0;
1279}
1280""")
1281
1282def print_metricgroups() -> None:
1283  _args.output_file.write("""
1284static const int metricgroups[][2] = {
1285""")
1286  for mgroup in sorted(_metricgroups):
1287    description = _metricgroups[mgroup]
1288    _args.output_file.write(
1289        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
1290    )
1291  _args.output_file.write("""
1292};
1293
1294const char *describe_metricgroup(const char *group)
1295{
1296        int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
1297
1298        while (low <= high) {
1299                int mid = (low + high) / 2;
1300                const char *mgroup = &big_c_string[metricgroups[mid][0]];
1301                int cmp = strcmp(mgroup, group);
1302
1303                if (cmp == 0) {
1304                        return &big_c_string[metricgroups[mid][1]];
1305                } else if (cmp < 0) {
1306                        low = mid + 1;
1307                } else {
1308                        high = mid - 1;
1309                }
1310        }
1311        return NULL;
1312}
1313""")
1314
1315def main() -> None:
1316  global _args
1317
1318  def dir_path(path: str) -> str:
1319    """Validate path is a directory for argparse."""
1320    if os.path.isdir(path):
1321      return path
1322    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
1323
1324  def ftw(path: str, parents: Sequence[str],
1325          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
1326    """Replicate the directory/file walking behavior of C's file tree walk."""
1327    for item in sorted(os.scandir(path), key=lambda e: e.name):
1328      if _args.model != 'all' and item.is_dir():
1329        # Check if the model matches one in _args.model.
1330        if len(parents) == _args.model.split(',')[0].count('/'):
1331          # We're testing the correct directory.
1332          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
1333          if 'test' not in item_path and 'common' not in item_path and item_path not in _args.model.split(','):
1334            continue
1335      try:
1336        action(parents, item)
1337      except Exception as e:
1338        raise RuntimeError(f'Action failure for \'{item.name}\' in {parents}') from e
1339      if item.is_dir():
1340        ftw(item.path, parents + [item.name], action)
1341
1342  ap = argparse.ArgumentParser()
1343  ap.add_argument('arch', help='Architecture name like x86')
1344  ap.add_argument('model', help='''Select a model such as skylake to
1345reduce the code size.  Normally set to "all". For architectures like
1346ARM64 with an implementor/model, the model must include the implementor
1347such as "arm/cortex-a34".''',
1348                  default='all')
1349  ap.add_argument(
1350      'starting_dir',
1351      type=dir_path,
1352      help='Root of tree containing architecture directories containing json files'
1353  )
1354  ap.add_argument(
1355      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
1356  _args = ap.parse_args()
1357
1358  _args.output_file.write(f"""
1359/* SPDX-License-Identifier: GPL-2.0 */
1360/* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */
1361""")
1362  _args.output_file.write("""
1363#include <pmu-events/pmu-events.h>
1364#include "util/header.h"
1365#include "util/pmu.h"
1366#include <string.h>
1367#include <stddef.h>
1368
1369struct compact_pmu_event {
1370        int offset;
1371};
1372
1373struct pmu_table_entry {
1374        const struct compact_pmu_event *entries;
1375        uint32_t num_entries;
1376        struct compact_pmu_event pmu_name;
1377};
1378
1379""")
1380  archs = []
1381  for item in os.scandir(_args.starting_dir):
1382    if not item.is_dir():
1383      continue
1384    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test' or item.name == 'common':
1385      archs.append(item.name)
1386
1387  if len(archs) < 2 and _args.arch != 'none':
1388    raise IOError(f'Missing architecture directory \'{_args.arch}\'')
1389
1390  archs.sort()
1391  for arch in archs:
1392    arch_path = f'{_args.starting_dir}/{arch}'
1393    preprocess_arch_std_files(arch_path)
1394    ftw(arch_path, [], preprocess_one_file)
1395
1396  _bcs.compute()
1397  _args.output_file.write('static const char *const big_c_string =\n')
1398  for s in _bcs.big_string:
1399    _args.output_file.write(s)
1400  _args.output_file.write(';\n\n')
1401  for arch in archs:
1402    arch_path = f'{_args.starting_dir}/{arch}'
1403    ftw(arch_path, [], process_one_file)
1404    print_pending_events()
1405    print_pending_metrics()
1406
1407  print_mapping_table(archs)
1408  print_system_mapping_table()
1409  print_metricgroups()
1410
1411if __name__ == '__main__':
1412  main()
1413