xref: /linux/tools/perf/pmu-events/jevents.py (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
import argparse
import collections
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
13
14# Global command line arguments.
15_args = None
16# List of regular event tables.
17_event_tables = []
18# List of event tables generated from "/sys" directories.
19_sys_event_tables = []
20# List of regular metric tables.
21_metric_tables = []
22# List of metric tables generated from "/sys" directories.
23_sys_metric_tables = []
24# Mapping between sys event table names and sys metric table names.
25_sys_event_table_to_metric_table_mapping = {}
26# Map from an event name to an architecture standard
27# JsonEvent. Architecture standard events are in json files in the top
28# f'{_args.starting_dir}/{_args.arch}' directory.
29_arch_std_events = {}
30# Events to write out when the table is closed
31_pending_events = []
32# Name of events table to be written out
33_pending_events_tblname = None
34# Metrics to write out when the table is closed
35_pending_metrics = []
36# Name of metrics table to be written out
37_pending_metrics_tblname = None
38# Global BigCString shared by all structures.
39_bcs = None
40# Map from the name of a metric group to a description of the group.
41_metricgroups = {}
# Order in which specific JsonEvent attributes will be visited.
43_json_event_attributes = [
44    # cmp_sevent related attributes.
45    'name', 'topic', 'desc',
46    # Seems useful, put it early.
47    'event',
48    # Short things in alphabetical order.
49    'compat', 'deprecated', 'perpkg', 'unit',
    # Retirement latency is currently specific to Intel Granite Rapids.
51    'retirement_latency_mean', 'retirement_latency_min',
52    'retirement_latency_max',
53    # Longer things (the last won't be iterated over during decompress).
54    'long_desc'
55]
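
# For reference: build_c_string() below encodes one event into a single
# string following exactly this attribute order. String-valued attributes
# are '\000' terminated; attributes listed in _json_enum_attributes are
# emitted as a single digit with no terminator. The generated
# decompress_event() walks the string in the same order.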
56
57# Attributes that are in pmu_metric rather than pmu_event.
58_json_metric_attributes = [
59    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
60    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
61    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
62]
63# Attributes that are bools or enum int values, encoded as '0', '1',...
64_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
65
66def removesuffix(s: str, suffix: str) -> str:
67  """Remove the suffix from a string
68
69  The removesuffix function is added to str in Python 3.9. We aim for 3.6
70  compatibility and so provide our own function here.
71  """
72  return s[0:-len(suffix)] if s.endswith(suffix) else s
73
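# A quick illustration of the helper above, e.g.:
#   removesuffix('cache.json', '.json')  ->  'cache'
#   removesuffix('cache', '.json')       ->  'cache'  (suffix absent, unchanged)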
74
75def file_name_to_table_name(prefix: str, parents: Sequence[str],
76                            dirname: str) -> str:
77  """Generate a C table name from directory names."""
78  tblname = prefix
79  for p in parents:
80    tblname += '_' + p
81  tblname += '_' + dirname
82  return tblname.replace('-', '_')
83
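# For illustration: walking arch/arm64/arm/cortex-a34 with the prefix
# 'pmu_events_' gives
#   file_name_to_table_name('pmu_events_', ['arm'], 'cortex-a34')
#   ->  'pmu_events__arm_cortex_a34'
# Note the double underscore coming from the trailing '_' in the prefix.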
84
85def c_len(s: str) -> int:
  """Return the length of s as a C string.

  This doesn't handle all escape characters properly. It first assumes
  all \\ are for escaping, then adjusts for having over-counted
  escaped backslashes. The code uses \000 rather than \0 as a
  terminator, as an adjacent number would otherwise be folded into the
  escape (i.e. "\0" + "5" isn't a terminator followed by the number 5
  but the escape \05). The code adjusts for \000 but not properly for
  all octal, hex or unicode values.
  """
96  try:
    utf = s.encode(encoding='utf-8', errors='strict')
98  except:
99    print(f'broken string {s}')
100    raise
101  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
102
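# Rough examples of c_len() (these are Python source escapes, so '\\000'
# is a literal backslash followed by three zeros):
#   c_len('cache')            ->  5   # plain text, one byte per character
#   c_len('event=0x3c\\000')  -> 11   # '\000' compiles to a single NUL byte
#   c_len('a\\\\b')           ->  3   # '\\' compiles to a single backslash
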
103class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string that contains
  all the other C strings (to avoid memory issues the string itself is
  held as a list of strings). The offsets within the big string are
  recorded and, when stored to disk, they don't need relocation. To
  reduce the size of the string further, identical strings are merged.
  If a longer string ends with the same value as a shorter string,
  these entries are also merged.
  """
116  strings: Set[str]
117  big_string: Sequence[str]
118  offsets: Dict[str, int]
119  insert_number: int
120  insert_point: Dict[str, int]
121  metrics: Set[str]
122
123  def __init__(self):
124    self.strings = set()
    self.insert_number = 0
126    self.insert_point = {}
127    self.metrics = set()
128
129  def add(self, s: str, metric: bool) -> None:
130    """Called to add to the big string."""
131    if s not in self.strings:
132      self.strings.add(s)
133      self.insert_point[s] = self.insert_number
134      self.insert_number += 1
135      if metric:
136        self.metrics.add(s)
137
138  def compute(self) -> None:
139    """Called once all strings are added to compute the string and offsets."""
140
141    folded_strings = {}
142    # Determine if two strings can be folded, ie. let 1 string use the
143    # end of another. First reverse all strings and sort them.
144    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
145
    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # from each string to see if there is a better candidate to fold it
    # into; in the example, rather than emitting 'yz' we can reuse 'xyz'
    # at an offset of 1. We record which string can be folded into which
    # in folded_strings; we don't need to record the offset as it is
    # trivially computed from the string lengths.
152    for pos,s in enumerate(sorted_reversed_strings):
153      best_pos = pos
154      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
155        if sorted_reversed_strings[check_pos].startswith(s):
156          best_pos = check_pos
157        else:
158          break
159      if pos != best_pos:
160        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
161
162    # Compute reverse mappings for debugging.
163    fold_into_strings = collections.defaultdict(set)
164    for key, val in folded_strings.items():
165      if key != val:
166        fold_into_strings[val].add(key)
167
168    # big_string_offset is the current location within the C string
169    # being appended to - comments, etc. don't count. big_string is
170    # the string contents represented as a list. Strings are immutable
171    # in Python and so appending to one causes memory issues, while
172    # lists are mutable.
173    big_string_offset = 0
174    self.big_string = []
175    self.offsets = {}
176
177    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
178      return (s in self.metrics, self.insert_point[s], s)
179
180    # Emit all strings that aren't folded in a sorted manner.
181    for s in sorted(self.strings, key=string_cmp_key):
182      if s not in folded_strings:
183        self.offsets[s] = big_string_offset
184        self.big_string.append(f'/* offset={big_string_offset} */ "')
185        self.big_string.append(s)
186        self.big_string.append('"')
187        if s in fold_into_strings:
188          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
189        self.big_string.append('\n')
190        big_string_offset += c_len(s)
191        continue
192
193    # Compute the offsets of the folded strings.
194    for s in folded_strings.keys():
195      assert s not in self.offsets
196      folded_s = folded_strings[s]
197      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
198
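# A minimal sketch of how BigCString folds strings (simplified; the real
# callers always append a '\000' terminator to what they add):
#   bcs = BigCString()
#   bcs.add('dcache', metric=False)
#   bcs.add('cache', metric=False)
#   bcs.compute()
#   # 'cache' is the tail of 'dcache', so only 'dcache' is emitted and
#   # bcs.offsets['cache'] == bcs.offsets['dcache'] + 1
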
199_bcs = BigCString()
200
201class JsonEvent:
202  """Representation of an event loaded from a json file dictionary."""
203
204  def __init__(self, jd: dict):
205    """Constructor passed the dictionary of parsed json values."""
206
207    def llx(x: int) -> str:
208      """Convert an int to a string similar to a printf modifier of %#llx."""
209      return str(x) if x >= 0 and x < 10 else hex(x)
210
211    def fixdesc(s: str) -> str:
212      """Fix formatting issue for the desc string."""
213      if s is None:
214        return None
215      return removesuffix(removesuffix(removesuffix(s, '.  '),
216                                       '. '), '.').replace('\n', '\\n').replace(
217                                           '\"', '\\"').replace('\r', '\\r')
218
219    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
220      """Returns the aggr_mode_class enum value associated with the JSON string."""
221      if not aggr_mode:
222        return None
223      aggr_mode_to_enum = {
224          'PerChip': '1',
225          'PerCore': '2',
226      }
227      return aggr_mode_to_enum[aggr_mode]
228
229    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
230      """Returns the metric_event_groups enum value associated with the JSON string."""
231      if not metric_constraint:
232        return None
233      metric_constraint_to_enum = {
234          'NO_GROUP_EVENTS': '1',
235          'NO_GROUP_EVENTS_NMI': '2',
236          'NO_NMI_WATCHDOG': '2',
237          'NO_GROUP_EVENTS_SMT': '3',
238          'NO_THRESHOLD_AND_NMI': '4',
239      }
240      return metric_constraint_to_enum[metric_constraint]
241
242    def lookup_msr(num: str) -> Optional[str]:
      """Convert the msr number, or the first in a list, to the appropriate event field."""
244      if not num:
245        return None
246      msrmap = {
247          0x3F6: 'ldlat=',
248          0x1A6: 'offcore_rsp=',
249          0x1A7: 'offcore_rsp=',
250          0x3F7: 'frontend=',
251      }
252      return msrmap[int(num.split(',', 1)[0], 0)]
253
254    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well-known event names to an event string, otherwise use the event argument."""
256      fixed = {
257          'inst_retired.any': 'event=0xc0,period=2000003',
258          'inst_retired.any_p': 'event=0xc0,period=2000003',
259          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
260          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
261          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
262          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
263      }
264      if not name:
265        return None
266      if name.lower() in fixed:
267        return fixed[name.lower()]
268      return event
269
270    def unit_to_pmu(unit: str) -> Optional[str]:
271      """Convert a JSON Unit to Linux PMU name."""
272      if not unit:
273        return 'default_core'
274      # Comment brought over from jevents.c:
275      # it's not realistic to keep adding these, we need something more scalable ...
276      table = {
277          'CBO': 'uncore_cbox',
278          'QPI LL': 'uncore_qpi',
279          'SBO': 'uncore_sbox',
280          'iMPH-U': 'uncore_arb',
281          'CPU-M-CF': 'cpum_cf',
282          'CPU-M-SF': 'cpum_sf',
283          'PAI-CRYPTO' : 'pai_crypto',
284          'PAI-EXT' : 'pai_ext',
285          'UPI LL': 'uncore_upi',
286          'hisi_sicl,cpa': 'hisi_sicl,cpa',
287          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
288          'hisi_sccl,hha': 'hisi_sccl,hha',
289          'hisi_sccl,l3c': 'hisi_sccl,l3c',
290          'imx8_ddr': 'imx8_ddr',
291          'imx9_ddr': 'imx9_ddr',
292          'L3PMC': 'amd_l3',
293          'DFPMC': 'amd_df',
294          'UMCPMC': 'amd_umc',
295          'cpu_core': 'cpu_core',
296          'cpu_atom': 'cpu_atom',
297          'ali_drw': 'ali_drw',
298          'arm_cmn': 'arm_cmn',
299          'software': 'software',
300          'tool': 'tool',
301      }
302      return table[unit] if unit in table else f'uncore_{unit.lower()}'
303
    def is_zero(val: str) -> bool:
      try:
        if val.startswith('0x'):
          return int(val, 16) == 0
        else:
          return int(val) == 0
      except ValueError:
        return False

    def canonicalize_value(val: str) -> str:
      try:
        if val.startswith('0x'):
          return llx(int(val, 16))
        return str(int(val))
      except ValueError:
        return val
320
321    eventcode = 0
322    if 'EventCode' in jd:
323      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
324    if 'ExtSel' in jd:
325      eventcode |= int(jd['ExtSel']) << 8
326    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
327    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
328    self.name = jd['EventName'].lower() if 'EventName' in jd else None
329    self.topic = ''
330    self.compat = jd.get('Compat')
331    self.desc = fixdesc(jd.get('BriefDescription'))
332    self.long_desc = fixdesc(jd.get('PublicDescription'))
333    precise = jd.get('PEBS')
334    msr = lookup_msr(jd.get('MSRIndex'))
335    msrval = jd.get('MSRValue')
336    extra_desc = ''
337    if 'Data_LA' in jd:
338      extra_desc += '  Supports address when precise'
339      if 'Errata' in jd:
340        extra_desc += '.'
341    if 'Errata' in jd:
342      extra_desc += '  Spec update: ' + jd['Errata']
343    self.pmu = unit_to_pmu(jd.get('Unit'))
344    filter = jd.get('Filter')
345    self.unit = jd.get('ScaleUnit')
346    self.perpkg = jd.get('PerPkg')
347    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
348    self.deprecated = jd.get('Deprecated')
349    self.retirement_latency_mean = jd.get('RetirementLatencyMean')
350    self.retirement_latency_min = jd.get('RetirementLatencyMin')
351    self.retirement_latency_max = jd.get('RetirementLatencyMax')
352    self.metric_name = jd.get('MetricName')
353    self.metric_group = jd.get('MetricGroup')
354    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
355    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
356    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
357    self.metric_expr = None
358    if 'MetricExpr' in jd:
359      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
360    # Note, the metric formula for the threshold isn't parsed as the &
361    # and > have incorrect precedence.
362    self.metric_threshold = jd.get('MetricThreshold')
363
364    arch_std = jd.get('ArchStdEvent')
365    if precise and self.desc and '(Precise Event)' not in self.desc:
366      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
367                                                                 'event)')
368    event = None
369    if configcode is not None:
370      event = f'config={llx(configcode)}'
371    elif eventidcode is not None:
372      event = f'eventid={llx(eventidcode)}'
373    else:
374      event = f'event={llx(eventcode)}'
375    event_fields = [
376        ('AnyThread', 'any='),
377        ('PortMask', 'ch_mask='),
378        ('CounterMask', 'cmask='),
379        ('EdgeDetect', 'edge='),
380        ('FCMask', 'fc_mask='),
381        ('Invert', 'inv='),
382        ('SampleAfterValue', 'period='),
383        ('UMask', 'umask='),
384        ('NodeType', 'type='),
385        ('RdWrMask', 'rdwrmask='),
386        ('EnAllCores', 'enallcores='),
387        ('EnAllSlices', 'enallslices='),
388        ('SliceId', 'sliceid='),
389        ('ThreadMask', 'threadmask='),
390    ]
391    for key, value in event_fields:
392      if key in jd and not is_zero(jd[key]):
393        event += f',{value}{canonicalize_value(jd[key])}'
394    if filter:
395      event += f',{filter}'
396    if msr:
397      event += f',{msr}{msrval}'
398    if self.desc and extra_desc:
399      self.desc += extra_desc
400    if self.long_desc and extra_desc:
401      self.long_desc += extra_desc
    if self.desc and self.long_desc and self.desc == self.long_desc:
      # Avoid duplicated descriptions.
      self.long_desc = None
405    if arch_std:
406      if arch_std.lower() in _arch_std_events:
407        event = _arch_std_events[arch_std.lower()].event
408        # Copy from the architecture standard event to self for undefined fields.
409        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
410          if hasattr(self, attr) and not getattr(self, attr):
411            setattr(self, attr, value)
412      else:
        raise argparse.ArgumentTypeError(f'Cannot find arch std event: {arch_std}')
414
415    self.event = real_event(self.name, event)
416
417  def __repr__(self) -> str:
418    """String representation primarily for debugging."""
419    s = '{\n'
420    for attr, value in self.__dict__.items():
421      if value:
422        s += f'\t{attr} = {value},\n'
423    return s + '}'
424
425  def build_c_string(self, metric: bool) -> str:
426    s = ''
427    for attr in _json_metric_attributes if metric else _json_event_attributes:
428      x = getattr(self, attr)
429      if metric and x and attr == 'metric_expr':
430        # Convert parsed metric expressions into a string. Slashes
431        # must be doubled in the file.
432        x = x.ToPerfJson().replace('\\', '\\\\')
433      if metric and x and attr == 'metric_threshold':
434        x = x.replace('\\', '\\\\')
435      if attr in _json_enum_attributes:
436        s += x if x else '0'
437      else:
438        s += f'{x}\\000' if x else '\\000'
439    return s
440
441  def to_c_string(self, metric: bool) -> str:
442    """Representation of the event as a C struct initializer."""
443
    def fix_comment(s: str) -> str:
      return s.replace('*/', r'\*\/')
446
447    s = self.build_c_string(metric)
448    return f'{{ { _bcs.offsets[s] } }}, /* {fix_comment(s)} */\n'
449
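# A rough end-to-end illustration (the JSON values are hypothetical):
#   JsonEvent({'EventName': 'EXAMPLE.EVENT', 'EventCode': '0x3c',
#              'UMask': '0x1', 'SampleAfterValue': '2000003'})
# yields name='example.event', pmu='default_core' (no Unit given) and
# event='event=0x3c,period=2000003,umask=1'; build_c_string() then packs
# those attributes into one '\000'-separated string for the big string.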
450
451@lru_cache(maxsize=None)
452def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
453  """Read json events from the specified file."""
454  try:
455    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException:
457    print(f"Exception processing {path}")
458    raise
459  metrics: list[Tuple[str, str, metric.Expression]] = []
460  for event in events:
461    event.topic = topic
462    if event.metric_name and '-' not in event.metric_name:
463      metrics.append((event.pmu, event.metric_name, event.metric_expr))
464  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
465  if updates:
466    for event in events:
467      if event.metric_name in updates:
468        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
469        #       f'to\n"{updates[event.metric_name]}"')
470        event.metric_expr = updates[event.metric_name]
471
472  return events
473
474def preprocess_arch_std_files(archpath: str) -> None:
475  """Read in all architecture standard events."""
476  global _arch_std_events
477  for item in os.scandir(archpath):
478    if not item.is_file() or not item.name.endswith('.json'):
479      continue
480    try:
481      for event in read_json_events(item.path, topic=''):
482        if event.name:
483          _arch_std_events[event.name.lower()] = event
484        if event.metric_name:
485          _arch_std_events[event.metric_name.lower()] = event
    except Exception as e:
      raise RuntimeError(f'Failure processing \'{item.name}\' in \'{archpath}\'') from e
488
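# Note: a model's JSON may supply just an "ArchStdEvent" reference, for
# example (hypothetical entry) {"ArchStdEvent": "BR_MIS_PRED"}; when such an
# event is constructed, JsonEvent.__init__ copies the encoding and any
# still-unset fields from the matching entry loaded here into
# _arch_std_events.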
489
490def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
491  """Add contents of file to _pending_events table."""
492  for e in read_json_events(item.path, topic):
493    if e.name:
494      _pending_events.append(e)
495    if e.metric_name:
496      _pending_metrics.append(e)
497
498
499def print_pending_events() -> None:
  """Print the pending events table, if any, and reset it."""
501
502  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
503    def fix_none(s: Optional[str]) -> str:
504      if s is None:
505        return ''
506      return s
507
508    return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
509            fix_none(j.metric_name))
510
511  global _pending_events
512  if not _pending_events:
513    return
514
515  global _pending_events_tblname
516  if _pending_events_tblname.endswith('_sys'):
517    global _sys_event_tables
518    _sys_event_tables.append(_pending_events_tblname)
519  else:
    global _event_tables
521    _event_tables.append(_pending_events_tblname)
522
523  first = True
524  last_pmu = None
525  last_name = None
526  pmus = set()
527  for event in sorted(_pending_events, key=event_cmp_key):
528    if last_pmu and last_pmu == event.pmu:
529      assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}"
530    if event.pmu != last_pmu:
531      if not first:
532        _args.output_file.write('};\n')
533      pmu_name = event.pmu.replace(',', '_')
534      _args.output_file.write(
535          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
536      first = False
537      last_pmu = event.pmu
538      pmus.add((event.pmu, pmu_name))
539
540    _args.output_file.write(event.to_c_string(metric=False))
541    last_name = event.name
542  _pending_events = []
543
544  _args.output_file.write(f"""
545}};
546
547const struct pmu_table_entry {_pending_events_tblname}[] = {{
548""")
549  for (pmu, tbl_pmu) in sorted(pmus):
550    pmu_name = f"{pmu}\\000"
551    _args.output_file.write(f"""{{
552     .entries = {_pending_events_tblname}_{tbl_pmu},
553     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
554     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
555}},
556""")
557  _args.output_file.write('};\n\n')
558
559def print_pending_metrics() -> None:
  """Print the pending metrics table, if any, and reset it."""
561
562  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
563    def fix_none(s: Optional[str]) -> str:
564      if s is None:
565        return ''
566      return s
567
568    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
569
570  global _pending_metrics
571  if not _pending_metrics:
572    return
573
574  global _pending_metrics_tblname
575  if _pending_metrics_tblname.endswith('_sys'):
576    global _sys_metric_tables
577    _sys_metric_tables.append(_pending_metrics_tblname)
578  else:
    global _metric_tables
580    _metric_tables.append(_pending_metrics_tblname)
581
582  first = True
583  last_pmu = None
584  pmus = set()
585  for metric in sorted(_pending_metrics, key=metric_cmp_key):
586    if metric.pmu != last_pmu:
587      if not first:
588        _args.output_file.write('};\n')
589      pmu_name = metric.pmu.replace(',', '_')
590      _args.output_file.write(
591          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
592      first = False
593      last_pmu = metric.pmu
594      pmus.add((metric.pmu, pmu_name))
595
596    _args.output_file.write(metric.to_c_string(metric=True))
597  _pending_metrics = []
598
599  _args.output_file.write(f"""
600}};
601
602const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
603""")
604  for (pmu, tbl_pmu) in sorted(pmus):
605    pmu_name = f"{pmu}\\000"
606    _args.output_file.write(f"""{{
607     .entries = {_pending_metrics_tblname}_{tbl_pmu},
608     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
609     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
610}},
611""")
612  _args.output_file.write('};\n\n')
613
614def get_topic(topic: str) -> str:
615  if topic.endswith('metrics.json'):
616    return 'metrics'
617  return removesuffix(topic, '.json').replace('-', ' ')
618
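# For example, get_topic('floating-point.json') returns 'floating point',
# while any file name ending in 'metrics.json' maps to the 'metrics' topic.
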
619def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
620
621  if item.is_dir():
622    return
623
624  # base dir or too deep
625  level = len(parents)
626  if level == 0 or level > 4:
627    return
628
629  # Ignore other directories. If the file name does not have a .json
630  # extension, ignore it. It could be a readme.txt for instance.
631  if not item.is_file() or not item.name.endswith('.json'):
632    return
633
634  if item.name == 'metricgroups.json':
635    metricgroup_descriptions = json.load(open(item.path))
636    for mgroup in metricgroup_descriptions:
637      assert len(mgroup) > 1, parents
638      description = f"{metricgroup_descriptions[mgroup]}\\000"
639      mgroup = f"{mgroup}\\000"
640      _bcs.add(mgroup, metric=True)
641      _bcs.add(description, metric=True)
642      _metricgroups[mgroup] = description
643    return
644
645  topic = get_topic(item.name)
646  for event in read_json_events(item.path, topic):
647    pmu_name = f"{event.pmu}\\000"
648    if event.name:
649      _bcs.add(pmu_name, metric=False)
650      _bcs.add(event.build_c_string(metric=False), metric=False)
651    if event.metric_name:
652      _bcs.add(pmu_name, metric=True)
653      _bcs.add(event.build_c_string(metric=True), metric=True)
654
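# Note: main() drives two passes over the same JSON tree. The first pass
# (preprocess_one_file above) only feeds strings into _bcs so that
# _bcs.compute() can assign final offsets; the second pass
# (process_one_file below) emits the tables, with to_c_string() looking up
# those now-fixed offsets.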
655def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
656  """Process a JSON file during the main walk."""
657  def is_leaf_dir_ignoring_sys(path: str) -> bool:
658    for item in os.scandir(path):
659      if item.is_dir() and item.name != 'sys':
660        return False
661    return True
662
663  # Model directories are leaves (ignoring possible sys
664  # directories). The FTW will walk into the directory next. Flush
665  # pending events and metrics and update the table names for the new
666  # model directory.
667  if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
668    print_pending_events()
669    print_pending_metrics()
670
671    global _pending_events_tblname
672    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
673    global _pending_metrics_tblname
674    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
675
676    if item.name == 'sys':
677      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
678    return
679
680  # base dir or too deep
681  level = len(parents)
682  if level == 0 or level > 4:
683    return
684
685  # Ignore other directories. If the file name does not have a .json
686  # extension, ignore it. It could be a readme.txt for instance.
687  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
688    return
689
690  add_events_table_entries(item, get_topic(item.name))
691
692
693def print_mapping_table(archs: Sequence[str]) -> None:
694  """Read the mapfile and generate the struct from cpuid string to event table."""
695  _args.output_file.write("""
696/* Struct used to make the PMU event table implementation opaque to callers. */
697struct pmu_events_table {
698        const struct pmu_table_entry *pmus;
699        uint32_t num_pmus;
700};
701
702/* Struct used to make the PMU metric table implementation opaque to callers. */
703struct pmu_metrics_table {
704        const struct pmu_table_entry *pmus;
705        uint32_t num_pmus;
706};
707
708/*
709 * Map a CPU to its table of PMU events. The CPU is identified by the
710 * cpuid field, which is an arch-specific identifier for the CPU.
711 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
715 */
716struct pmu_events_map {
717        const char *arch;
718        const char *cpuid;
719        struct pmu_events_table event_table;
720        struct pmu_metrics_table metric_table;
721};
722
723/*
724 * Global table mapping each known CPU for the architecture to its
725 * table of PMU events.
726 */
727const struct pmu_events_map pmu_events_map[] = {
728""")
729  for arch in archs:
730    if arch == 'test':
731      _args.output_file.write("""{
732\t.arch = "testarch",
733\t.cpuid = "testcpu",
734\t.event_table = {
735\t\t.pmus = pmu_events__test_soc_cpu,
736\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
737\t},
738\t.metric_table = {
739\t\t.pmus = pmu_metrics__test_soc_cpu,
740\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
741\t}
742},
743""")
744    elif arch == 'common':
745      _args.output_file.write("""{
746\t.arch = "common",
747\t.cpuid = "common",
748\t.event_table = {
749\t\t.pmus = pmu_events__common,
750\t\t.num_pmus = ARRAY_SIZE(pmu_events__common),
751\t},
752\t.metric_table = {},
753},
754""")
755    else:
756      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
757        table = csv.reader(csvfile)
758        first = True
759        for row in table:
760          # Skip the first row or any row beginning with #.
761          if not first and len(row) > 0 and not row[0].startswith('#'):
762            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
763            if event_tblname in _event_tables:
764              event_size = f'ARRAY_SIZE({event_tblname})'
765            else:
766              event_tblname = 'NULL'
767              event_size = '0'
768            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
769            if metric_tblname in _metric_tables:
770              metric_size = f'ARRAY_SIZE({metric_tblname})'
771            else:
772              metric_tblname = 'NULL'
773              metric_size = '0'
774            if event_size == '0' and metric_size == '0':
775              continue
776            cpuid = row[0].replace('\\', '\\\\')
777            _args.output_file.write(f"""{{
778\t.arch = "{arch}",
779\t.cpuid = "{cpuid}",
780\t.event_table = {{
781\t\t.pmus = {event_tblname},
782\t\t.num_pmus = {event_size}
783\t}},
784\t.metric_table = {{
785\t\t.pmus = {metric_tblname},
786\t\t.num_pmus = {metric_size}
787\t}}
788}},
789""")
790          first = False
791
792  _args.output_file.write("""{
793\t.arch = 0,
794\t.cpuid = 0,
795\t.event_table = { 0, 0 },
796\t.metric_table = { 0, 0 },
797}
798};
799""")
800
801
802def print_system_mapping_table() -> None:
803  """C struct mapping table array for tables from /sys directories."""
804  _args.output_file.write("""
805struct pmu_sys_events {
806\tconst char *name;
807\tstruct pmu_events_table event_table;
808\tstruct pmu_metrics_table metric_table;
809};
810
811static const struct pmu_sys_events pmu_sys_event_tables[] = {
812""")
813  printed_metric_tables = []
814  for tblname in _sys_event_tables:
815    _args.output_file.write(f"""\t{{
816\t\t.event_table = {{
817\t\t\t.pmus = {tblname},
818\t\t\t.num_pmus = ARRAY_SIZE({tblname})
819\t\t}},""")
820    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
821    if metric_tblname in _sys_metric_tables:
822      _args.output_file.write(f"""
823\t\t.metric_table = {{
824\t\t\t.pmus = {metric_tblname},
825\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
826\t\t}},""")
827      printed_metric_tables.append(metric_tblname)
828    _args.output_file.write(f"""
829\t\t.name = \"{tblname}\",
830\t}},
831""")
832  for tblname in _sys_metric_tables:
833    if tblname in printed_metric_tables:
834      continue
835    _args.output_file.write(f"""\t{{
836\t\t.metric_table = {{
837\t\t\t.pmus = {tblname},
838\t\t\t.num_pmus = ARRAY_SIZE({tblname})
839\t\t}},
840\t\t.name = \"{tblname}\",
841\t}},
842""")
843  _args.output_file.write("""\t{
844\t\t.event_table = { 0, 0 },
845\t\t.metric_table = { 0, 0 },
846\t},
847};
848
849static void decompress_event(int offset, struct pmu_event *pe)
850{
851\tconst char *p = &big_c_string[offset];
852""")
853  for attr in _json_event_attributes:
854    _args.output_file.write(f'\n\tpe->{attr} = ')
855    if attr in _json_enum_attributes:
856      _args.output_file.write("*p - '0';\n")
857    else:
858      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
859    if attr == _json_event_attributes[-1]:
860      continue
861    if attr in _json_enum_attributes:
862      _args.output_file.write('\tp++;')
863    else:
864      _args.output_file.write('\twhile (*p++);')
865  _args.output_file.write("""}
866
867static void decompress_metric(int offset, struct pmu_metric *pm)
868{
869\tconst char *p = &big_c_string[offset];
870""")
871  for attr in _json_metric_attributes:
872    _args.output_file.write(f'\n\tpm->{attr} = ')
873    if attr in _json_enum_attributes:
874      _args.output_file.write("*p - '0';\n")
875    else:
876      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
877    if attr == _json_metric_attributes[-1]:
878      continue
879    if attr in _json_enum_attributes:
880      _args.output_file.write('\tp++;')
881    else:
882      _args.output_file.write('\twhile (*p++);')
883  _args.output_file.write("""}
884
885static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
886                                                const struct pmu_table_entry *pmu,
887                                                pmu_event_iter_fn fn,
888                                                void *data)
889{
890        int ret;
891        struct pmu_event pe = {
892                .pmu = &big_c_string[pmu->pmu_name.offset],
893        };
894
895        for (uint32_t i = 0; i < pmu->num_entries; i++) {
896                decompress_event(pmu->entries[i].offset, &pe);
897                if (!pe.name)
898                        continue;
899                ret = fn(&pe, table, data);
900                if (ret)
901                        return ret;
902        }
903        return 0;
}
905
906static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
907                                            const struct pmu_table_entry *pmu,
908                                            const char *name,
909                                            pmu_event_iter_fn fn,
910                                            void *data)
911{
912        struct pmu_event pe = {
913                .pmu = &big_c_string[pmu->pmu_name.offset],
914        };
915        int low = 0, high = pmu->num_entries - 1;
916
917        while (low <= high) {
918                int cmp, mid = (low + high) / 2;
919
920                decompress_event(pmu->entries[mid].offset, &pe);
921
922                if (!pe.name && !name)
923                        goto do_call;
924
925                if (!pe.name && name) {
926                        low = mid + 1;
927                        continue;
928                }
929                if (pe.name && !name) {
930                        high = mid - 1;
931                        continue;
932                }
933
934                cmp = strcasecmp(pe.name, name);
935                if (cmp < 0) {
936                        low = mid + 1;
937                        continue;
938                }
939                if (cmp > 0) {
940                        high = mid - 1;
941                        continue;
942                }
943  do_call:
944                return fn ? fn(&pe, table, data) : 0;
945        }
946        return PMU_EVENTS__NOT_FOUND;
947}
948
949int pmu_events_table__for_each_event(const struct pmu_events_table *table,
950                                    struct perf_pmu *pmu,
951                                    pmu_event_iter_fn fn,
952                                    void *data)
953{
954        for (size_t i = 0; i < table->num_pmus; i++) {
955                const struct pmu_table_entry *table_pmu = &table->pmus[i];
956                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
957                int ret;
958
959                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
960                        continue;
961
962                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
963                if (ret)
964                        return ret;
965        }
966        return 0;
967}
968
969int pmu_events_table__find_event(const struct pmu_events_table *table,
970                                 struct perf_pmu *pmu,
971                                 const char *name,
972                                 pmu_event_iter_fn fn,
973                                 void *data)
974{
975        for (size_t i = 0; i < table->num_pmus; i++) {
976                const struct pmu_table_entry *table_pmu = &table->pmus[i];
977                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
978                int ret;
979
980                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
981                        continue;
982
983                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
984                if (ret != PMU_EVENTS__NOT_FOUND)
985                        return ret;
986        }
987        return PMU_EVENTS__NOT_FOUND;
988}
989
990size_t pmu_events_table__num_events(const struct pmu_events_table *table,
991                                    struct perf_pmu *pmu)
992{
993        size_t count = 0;
994
995        for (size_t i = 0; i < table->num_pmus; i++) {
996                const struct pmu_table_entry *table_pmu = &table->pmus[i];
997                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
998
999                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
1000                        count += table_pmu->num_entries;
1001        }
1002        return count;
1003}
1004
1005static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
1006                                                const struct pmu_table_entry *pmu,
1007                                                pmu_metric_iter_fn fn,
1008                                                void *data)
1009{
1010        int ret;
1011        struct pmu_metric pm = {
1012                .pmu = &big_c_string[pmu->pmu_name.offset],
1013        };
1014
1015        for (uint32_t i = 0; i < pmu->num_entries; i++) {
1016                decompress_metric(pmu->entries[i].offset, &pm);
1017                if (!pm.metric_expr)
1018                        continue;
1019                ret = fn(&pm, table, data);
1020                if (ret)
1021                        return ret;
1022        }
1023        return 0;
1024}
1025
1026static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
1027                                            const struct pmu_table_entry *pmu,
1028                                            const char *metric,
1029                                            pmu_metric_iter_fn fn,
1030                                            void *data)
1031{
1032        struct pmu_metric pm = {
1033                .pmu = &big_c_string[pmu->pmu_name.offset],
1034        };
1035        int low = 0, high = pmu->num_entries - 1;
1036
1037        while (low <= high) {
1038                int cmp, mid = (low + high) / 2;
1039
1040                decompress_metric(pmu->entries[mid].offset, &pm);
1041
1042                if (!pm.metric_name && !metric)
1043                        goto do_call;
1044
1045                if (!pm.metric_name && metric) {
1046                        low = mid + 1;
1047                        continue;
1048                }
1049                if (pm.metric_name && !metric) {
1050                        high = mid - 1;
1051                        continue;
1052                }
1053
1054                cmp = strcmp(pm.metric_name, metric);
1055                if (cmp < 0) {
1056                        low = mid + 1;
1057                        continue;
1058                }
1059                if (cmp > 0) {
1060                        high = mid - 1;
1061                        continue;
1062                }
1063  do_call:
1064                return fn ? fn(&pm, table, data) : 0;
1065        }
1066        return PMU_METRICS__NOT_FOUND;
1067}
1068
1069int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
1070                                     pmu_metric_iter_fn fn,
1071                                     void *data)
1072{
1073        for (size_t i = 0; i < table->num_pmus; i++) {
1074                int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
1075                                                                 fn, data);
1076
1077                if (ret)
1078                        return ret;
1079        }
1080        return 0;
1081}
1082
1083int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
1084                                 struct perf_pmu *pmu,
1085                                 const char *metric,
1086                                 pmu_metric_iter_fn fn,
1087                                 void *data)
1088{
1089        for (size_t i = 0; i < table->num_pmus; i++) {
1090                const struct pmu_table_entry *table_pmu = &table->pmus[i];
1091                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1092                int ret;
1093
1094                if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
1095                        continue;
1096
1097                ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
1098                if (ret != PMU_METRICS__NOT_FOUND)
1099                        return ret;
1100        }
1101        return PMU_METRICS__NOT_FOUND;
1102}
1103
1104static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
1105{
1106        static struct {
1107                const struct pmu_events_map *map;
1108                struct perf_cpu cpu;
1109        } last_result;
1110        static struct {
1111                const struct pmu_events_map *map;
1112                char *cpuid;
1113        } last_map_search;
1114        static bool has_last_result, has_last_map_search;
1115        const struct pmu_events_map *map = NULL;
1116        char *cpuid = NULL;
1117        size_t i;
1118
1119        if (has_last_result && last_result.cpu.cpu == cpu.cpu)
1120                return last_result.map;
1121
1122        cpuid = get_cpuid_allow_env_override(cpu);
1123
1124        /*
         * On some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than core PMUs.
1127         */
1128        if (!cpuid)
1129                goto out_update_last_result;
1130
1131        if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
1132                map = last_map_search.map;
1133                free(cpuid);
1134        } else {
1135                i = 0;
1136                for (;;) {
1137                        map = &pmu_events_map[i++];
1138
1139                        if (!map->arch) {
1140                                map = NULL;
1141                                break;
1142                        }
1143
1144                        if (!strcmp_cpuid_str(map->cpuid, cpuid))
1145                                break;
                }
                free(last_map_search.cpuid);
                last_map_search.cpuid = cpuid;
                last_map_search.map = map;
                has_last_map_search = true;
1151        }
1152out_update_last_result:
1153        last_result.cpu = cpu;
1154        last_result.map = map;
1155        has_last_result = true;
1156        return map;
1157}
1158
1159static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
1160{
1161        struct perf_cpu cpu = {-1};
1162
1163        if (pmu) {
1164                for (size_t i = 0; i < ARRAY_SIZE(pmu_events__common); i++) {
1165                        const char *pmu_name = &big_c_string[pmu_events__common[i].pmu_name.offset];
1166
1167                        if (!strcmp(pmu_name, pmu->name)) {
1168                                const struct pmu_events_map *map = &pmu_events_map[0];
1169
1170                                while (strcmp("common", map->arch))
1171                                        map++;
1172                                return map;
1173                        }
1174                }
1175                cpu = perf_cpu_map__min(pmu->cpus);
1176        }
1177        return map_for_cpu(cpu);
1178}
1179
1180const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
1181{
1182        const struct pmu_events_map *map = map_for_pmu(pmu);
1183
1184        if (!map)
1185                return NULL;
1186
1187        if (!pmu)
1188                return &map->event_table;
1189
1190        for (size_t i = 0; i < map->event_table.num_pmus; i++) {
1191                const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
1192                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1193
1194                if (perf_pmu__name_wildcard_match(pmu, pmu_name))
1195                         return &map->event_table;
1196        }
1197        return NULL;
1198}
1199
1200const struct pmu_metrics_table *pmu_metrics_table__find(void)
1201{
1202        struct perf_cpu cpu = {-1};
1203        const struct pmu_events_map *map = map_for_cpu(cpu);
1204
1205        return map ? &map->metric_table : NULL;
1206}
1207
1208const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
1209{
1210        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1211             tables->arch;
1212             tables++) {
1213                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1214                        return &tables->event_table;
1215        }
1216        return NULL;
1217}
1218
1219const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
1220{
1221        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1222             tables->arch;
1223             tables++) {
1224                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1225                        return &tables->metric_table;
1226        }
1227        return NULL;
1228}
1229
1230int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
1231{
1232        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1233             tables->arch;
1234             tables++) {
1235                int ret = pmu_events_table__for_each_event(&tables->event_table,
1236                                                           /*pmu=*/ NULL, fn, data);
1237
1238                if (ret)
1239                        return ret;
1240        }
1241        return 0;
1242}
1243
1244int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
1245{
1246        for (const struct pmu_events_map *tables = &pmu_events_map[0];
1247             tables->arch;
1248             tables++) {
1249                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1250
1251                if (ret)
1252                        return ret;
1253        }
1254        return 0;
1255}
1256
1257const struct pmu_events_table *find_sys_events_table(const char *name)
1258{
1259        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1260             tables->name;
1261             tables++) {
1262                if (!strcmp(tables->name, name))
1263                        return &tables->event_table;
1264        }
1265        return NULL;
1266}
1267
1268int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
1269{
1270        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1271             tables->name;
1272             tables++) {
1273                int ret = pmu_events_table__for_each_event(&tables->event_table,
1274                                                           /*pmu=*/ NULL, fn, data);
1275
1276                if (ret)
1277                        return ret;
1278        }
1279        return 0;
1280}
1281
1282int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
1283{
1284        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1285             tables->name;
1286             tables++) {
1287                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1288
1289                if (ret)
1290                        return ret;
1291        }
1292        return 0;
1293}
1294""")
1295
1296def print_metricgroups() -> None:
1297  _args.output_file.write("""
1298static const int metricgroups[][2] = {
1299""")
1300  for mgroup in sorted(_metricgroups):
1301    description = _metricgroups[mgroup]
1302    _args.output_file.write(
1303        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
1304    )
1305  _args.output_file.write("""
1306};
1307
1308const char *describe_metricgroup(const char *group)
1309{
1310        int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
1311
1312        while (low <= high) {
1313                int mid = (low + high) / 2;
1314                const char *mgroup = &big_c_string[metricgroups[mid][0]];
1315                int cmp = strcmp(mgroup, group);
1316
1317                if (cmp == 0) {
1318                        return &big_c_string[metricgroups[mid][1]];
1319                } else if (cmp < 0) {
1320                        low = mid + 1;
1321                } else {
1322                        high = mid - 1;
1323                }
1324        }
1325        return NULL;
1326}
1327""")
1328
1329def main() -> None:
1330  global _args
1331
1332  def dir_path(path: str) -> str:
1333    """Validate path is a directory for argparse."""
1334    if os.path.isdir(path):
1335      return path
1336    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
1337
1338  def ftw(path: str, parents: Sequence[str],
1339          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
1340    """Replicate the directory/file walking behavior of C's file tree walk."""
1341    for item in sorted(os.scandir(path), key=lambda e: e.name):
1342      if _args.model != 'all' and item.is_dir():
1343        # Check if the model matches one in _args.model.
1344        if len(parents) == _args.model.split(',')[0].count('/'):
1345          # We're testing the correct directory.
1346          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
1347          if 'test' not in item_path and 'common' not in item_path and item_path not in _args.model.split(','):
1348            continue
1349      try:
1350        action(parents, item)
1351      except Exception as e:
1352        raise RuntimeError(f'Action failure for \'{item.name}\' in {parents}') from e
1353      if item.is_dir():
1354        ftw(item.path, parents + [item.name], action)
1355
1356  ap = argparse.ArgumentParser()
1357  ap.add_argument('arch', help='Architecture name like x86')
1358  ap.add_argument('model', help='''Select a model such as skylake to
1359reduce the code size.  Normally set to "all". For architectures like
1360ARM64 with an implementor/model, the model must include the implementor
1361such as "arm/cortex-a34".''',
1362                  default='all')
1363  ap.add_argument(
1364      'starting_dir',
1365      type=dir_path,
1366      help='Root of tree containing architecture directories containing json files'
1367  )
1368  ap.add_argument(
1369      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
1370  _args = ap.parse_args()
1371
1372  _args.output_file.write(f"""
1373/* SPDX-License-Identifier: GPL-2.0 */
1374/* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */
1375""")
1376  _args.output_file.write("""
1377#include <pmu-events/pmu-events.h>
1378#include "util/header.h"
1379#include "util/pmu.h"
1380#include <string.h>
1381#include <stddef.h>
1382
1383struct compact_pmu_event {
1384        int offset;
1385};
1386
1387struct pmu_table_entry {
1388        const struct compact_pmu_event *entries;
1389        uint32_t num_entries;
1390        struct compact_pmu_event pmu_name;
1391};
1392
1393""")
1394  archs = []
1395  for item in os.scandir(_args.starting_dir):
1396    if not item.is_dir():
1397      continue
1398    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test' or item.name == 'common':
1399      archs.append(item.name)
1400
1401  if len(archs) < 2 and _args.arch != 'none':
1402    raise IOError(f'Missing architecture directory \'{_args.arch}\'')
1403
1404  archs.sort()
1405  for arch in archs:
1406    arch_path = f'{_args.starting_dir}/{arch}'
1407    preprocess_arch_std_files(arch_path)
1408    ftw(arch_path, [], preprocess_one_file)
1409
1410  _bcs.compute()
1411  _args.output_file.write('static const char *const big_c_string =\n')
1412  for s in _bcs.big_string:
1413    _args.output_file.write(s)
1414  _args.output_file.write(';\n\n')
1415  for arch in archs:
1416    arch_path = f'{_args.starting_dir}/{arch}'
1417    ftw(arch_path, [], process_one_file)
1418    print_pending_events()
1419    print_pending_metrics()
1420
1421  print_mapping_table(archs)
1422  print_system_mapping_table()
1423  print_metricgroups()
1424
1425if __name__ == '__main__':
1426  main()
1427