#!/usr/bin/env drgn
#
# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.

desc = """
This is a drgn script to show the current workqueue configuration. For more
info on drgn, visit https://github.com/osandov/drgn.

Affinity Scopes
===============

Shows the CPUs that can be used for unbound workqueues and how they will be
grouped by each available affinity type. For each type:

  nr_pods   number of CPU pods in the affinity type
  pod_cpus  CPUs in each pod
  pod_node  NUMA node for memory allocation for each pod
  cpu_pod   pod that each CPU is associated with

Worker Pools
============

Lists all worker pools indexed by their ID. For each pool:

  ref       number of pool_workqueues associated with this pool
  nice      nice value of the worker threads in the pool
  idle      number of idle workers
  workers   number of all workers
  cpu       CPU the pool is associated with (per-cpu pool)
  cpus      CPUs the workers in the pool can run on (unbound pool)

Workqueue CPU -> pool
=====================

Lists all workqueues along with their type and worker pool association. For
each workqueue:

  NAME TYPE[,FLAGS] POOL_ID...

  NAME      name of the workqueue
  TYPE      percpu, unbound, ordered or bh
  FLAGS     S: strict affinity scope
  POOL_ID   worker pool ID associated with each possible CPU
"""
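# Typically run as "./wq_dump.py" (via the drgn shebang above) or as
# "drgn wq_dump.py", against the running kernel or a crash dump; drgn needs
# access to the kernel's debug info to resolve the symbols referenced below.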

import sys

import drgn
from drgn.helpers.linux.list import list_for_each_entry,list_empty
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn.helpers.linux.cpumask import for_each_cpu,for_each_possible_cpu
from drgn.helpers.linux.nodemask import for_each_node
from drgn.helpers.linux.idr import idr_for_each

import argparse
parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=argparse.RawTextHelpFormatter)
args = parser.parse_args()

def err(s):
    print(s, file=sys.stderr, flush=True)
    sys.exit(1)

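# Render a cpumask as space-separated 32-bit hex chunks, least significant
# chunk first, e.g. a mask covering CPUs 0, 1 and 33 comes out as
# "00000003 00000002".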
def cpumask_str(cpumask):
    output = ""
    base = 0
    v = 0
    for cpu in for_each_cpu(cpumask[0]):
        while cpu - base >= 32:
            output += f'{v:08x} '
            base += 32
            v = 0
        v |= 1 << (cpu - base)
    if v > 0:
        output += f'{v:08x}'
    return output.strip()

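# Width of the type column below; wq_type_str() renders a workqueue's type
# as "bh", "percpu", "ordered" or "unbound" (",S" marking a strict affinity
# scope), padded to this width.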
wq_type_len = 9

def wq_type_str(wq):
    if wq.flags & WQ_BH:
        return f'{"bh":{wq_type_len}}'
    elif wq.flags & WQ_UNBOUND:
        if wq.flags & WQ_ORDERED:
            return f'{"ordered":{wq_type_len}}'
        else:
            if wq.unbound_attrs.affn_strict:
                return f'{"unbound,S":{wq_type_len}}'
            else:
                return f'{"unbound":{wq_type_len}}'
    else:
        return f'{"percpu":{wq_type_len}}'

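# Look up the workqueue globals and flag/enum constants in the debugged
# kernel.  These are internal to the kernel's workqueue implementation, so
# the exact set of symbols available can vary with the kernel version.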
worker_pool_idr         = prog['worker_pool_idr']
workqueues              = prog['workqueues']
wq_unbound_cpumask      = prog['wq_unbound_cpumask']
wq_pod_types            = prog['wq_pod_types']
wq_affn_dfl             = prog['wq_affn_dfl']
wq_affn_names           = prog['wq_affn_names']

WQ_BH                   = prog['WQ_BH']
WQ_UNBOUND              = prog['WQ_UNBOUND']
WQ_ORDERED              = prog['__WQ_ORDERED']
WQ_MEM_RECLAIM          = prog['WQ_MEM_RECLAIM']

WQ_AFFN_CPU             = prog['WQ_AFFN_CPU']
WQ_AFFN_SMT             = prog['WQ_AFFN_SMT']
WQ_AFFN_CACHE           = prog['WQ_AFFN_CACHE']
WQ_AFFN_NUMA            = prog['WQ_AFFN_NUMA']
WQ_AFFN_SYSTEM          = prog['WQ_AFFN_SYSTEM']

POOL_BH                 = prog['POOL_BH']

WQ_NAME_LEN             = prog['WQ_NAME_LEN'].value_()
cpumask_str_len         = len(cpumask_str(wq_unbound_cpumask))

print('Affinity Scopes')
print('===============')

print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}')

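# Dump one wq_pod_type: the number of pods, each pod's CPUs and NUMA node,
# and the pod each possible CPU maps to (see "Affinity Scopes" above).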
def print_pod_type(pt):
    print(f'  nr_pods  {pt.nr_pods.value_()}')

    print('  pod_cpus', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={cpumask_str(pt.pod_cpus[pod])}', end='')
    print('')

    print('  pod_node', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={pt.pod_node[pod].value_()}', end='')
    print('')

    print(f'  cpu_pod ', end='')
    for cpu in for_each_possible_cpu(prog):
        print(f' [{cpu}]={pt.cpu_pod[cpu].value_()}', end='')
    print('')

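# Walk every affinity scope type and dump its pod configuration; the
# kernel's current default scope (wq_affn_dfl) is marked "(default)".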
for affn in [WQ_AFFN_CPU, WQ_AFFN_SMT, WQ_AFFN_CACHE, WQ_AFFN_NUMA, WQ_AFFN_SYSTEM]:
    print('')
    print(f'{wq_affn_names[affn].string_().decode().upper()}{" (default)" if affn == wq_affn_dfl else ""}')
    print_pod_type(wq_pod_types[affn])

print('')
print('Worker Pools')
print('============')

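# Two passes over the worker pool IDR: the first sizes the ID and refcount
# columns, the second prints one line per pool.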
max_pool_id_len = 0
max_ref_len = 0
for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    max_pool_id_len = max(max_pool_id_len, len(f'{pi}'))
    max_ref_len = max(max_ref_len, len(f'{pool.refcnt.value_()}'))

for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    print(f'pool[{pi:0{max_pool_id_len}}] flags=0x{pool.flags.value_():02x} ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='')
    print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
    if pool.cpu >= 0:
        print(f'cpu={pool.cpu.value_():3}', end='')
        if pool.flags & POOL_BH:
            print(' bh', end='')
    else:
        print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='')
        print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='')
        if pool.attrs.affn_strict:
            print(' strict', end='')
    print('')

print('')
print('Workqueue CPU -> pool')
print('=====================')

print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"type   CPU":{wq_type_len}}', end='')
for cpu in for_each_possible_cpu(prog):
    print(f' {cpu:{max_pool_id_len}}', end='')
print(' dfl]')

for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} {wq_type_str(wq):10}', end='')

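    # wq->cpu_pwq points to the per-CPU pool_workqueue; print the ID of the
    # worker pool backing each possible CPU.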
    for cpu in for_each_possible_cpu(prog):
        pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
        field_len = max(len(str(cpu)), max_pool_id_len)
        print(f' {pool_id:{field_len}}', end='')

    if wq.flags & WQ_UNBOUND:
        print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='')
    print('')

print('')
print('Workqueue -> rescuer')
print('====================')

ucpus_len = max(cpumask_str_len, len("unbound_cpus"))
rcpus_len = max(cpumask_str_len, len("rescuer_cpus"))

print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"unbound_cpus":{ucpus_len}}    pid {"rescuer_cpus":{rcpus_len}} ]')

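# Only workqueues created with WQ_MEM_RECLAIM have a rescuer kthread, so
# everything else is skipped below.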
for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    if not (wq.flags & WQ_MEM_RECLAIM):
        continue

    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}}', end='')
    if wq.unbound_attrs.value_() != 0:
        print(f' {cpumask_str(wq.unbound_attrs.cpumask):{ucpus_len}}', end='')
    else:
        print(f' {"":{ucpus_len}}', end='')

    print(f' {wq.rescuer.task.pid.value_():6}', end='')
    print(f' {cpumask_str(wq.rescuer.task.cpus_ptr):{rcpus_len}}', end='')
    print('')

print('')
print('Unbound workqueue -> node_nr/max_active')
print('=======================================')

if 'node_to_cpumask_map' in prog:
    __cpu_online_mask = prog['__cpu_online_mask']
    node_to_cpumask_map = prog['node_to_cpumask_map']
    nr_node_ids = prog['nr_node_ids'].value_()

    print(f'online_cpus={cpumask_str(__cpu_online_mask.address_of_())}')
    for node in for_each_node():
        print(f'NODE[{node:02}]={cpumask_str(node_to_cpumask_map[node])}')
    print('')

    print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ min max', end='')
    first = True
    for node in for_each_node():
        if first:
            print(f'  NODE {node}', end='')
            first = False
        else:
            print(f' {node:7}', end='')
    print(f' {"dfl":>7} ]')
    print('')

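    # wq->node_nr_active[] has one entry per NUMA node plus a trailing
    # entry (index nr_node_ids) that is used as the default, printed in
    # the "dfl" column.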
    for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
        if not (wq.flags & WQ_UNBOUND):
            continue

        print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} ', end='')
        print(f'{wq.min_active.value_():3} {wq.max_active.value_():3}', end='')
        for node in for_each_node():
            nna = wq.node_nr_active[node]
            print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}', end='')
        nna = wq.node_nr_active[nr_node_ids]
        print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}')
else:
    print('node_to_cpumask_map not present, is NUMA enabled?')
254