#!/usr/bin/env drgn
#
# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.

desc = """
This is a drgn script to show the current workqueue configuration. For more
info on drgn, visit https://github.com/osandov/drgn.

Affinity Scopes
===============

Shows the CPUs that can be used for unbound workqueues and how they will be
grouped by each available affinity type. For each type:

  nr_pods   number of CPU pods in the affinity type
  pod_cpus  CPUs in each pod
  pod_node  NUMA node for memory allocation for each pod
  cpu_pod   pod that each CPU is associated with

Worker Pools
============

Lists all worker pools indexed by their ID. For each pool:

  ref       number of pool_workqueues associated with this pool
  nice      nice value of the worker threads in the pool
  idle      number of idle workers
  workers   total number of workers
  cpu       CPU the pool is associated with (per-cpu pool)
  cpus      CPUs the workers in the pool can run on (unbound pool)

Workqueue CPU -> pool
=====================

Lists all workqueues along with their type and worker pool association. For
each workqueue:

  NAME TYPE[,FLAGS] POOL_ID...

  NAME      name of the workqueue
  TYPE      percpu, unbound or ordered
  FLAGS     S: strict affinity scope
  POOL_ID   worker pool ID associated with each possible CPU

Workqueue -> rescuer
====================

Lists all workqueues that have a rescuer (WQ_MEM_RECLAIM). For each workqueue:

  unbound_cpus  CPUs allowed by the workqueue's unbound attributes, if any
  pid           PID of the rescuer task
  rescuer_cpus  CPUs the rescuer task can run on

Unbound workqueue -> node_nr/max_active
=======================================

Lists, for each unbound workqueue, its min_active and max_active settings and
the current nr/max active counts per NUMA node, followed by the node-agnostic
default (dfl).
"""

import sys

import drgn
from drgn.helpers.linux.list import list_for_each_entry, list_empty
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn.helpers.linux.cpumask import for_each_cpu, for_each_possible_cpu
from drgn.helpers.linux.nodemask import for_each_node
from drgn.helpers.linux.idr import idr_for_each

import argparse
parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=argparse.RawTextHelpFormatter)
args = parser.parse_args()

def err(s):
    print(s, file=sys.stderr, flush=True)
    sys.exit(1)

# Render a cpumask as space-separated 32-bit hex chunks, lowest CPUs first.
def cpumask_str(cpumask):
    output = ""
    base = 0
    v = 0
    for cpu in for_each_cpu(cpumask[0]):
        while cpu - base >= 32:
            output += f'{hex(v)} '
            base += 32
            v = 0
        v |= 1 << (cpu - base)
    if v > 0:
        output += f'{v:08x}'
    return output.strip()

wq_type_len = 9

def wq_type_str(wq):
    if wq.flags & WQ_UNBOUND:
        if wq.flags & WQ_ORDERED:
            return f'{"ordered":{wq_type_len}}'
        else:
            if wq.unbound_attrs.affn_strict:
                return f'{"unbound,S":{wq_type_len}}'
            else:
                return f'{"unbound":{wq_type_len}}'
    else:
        return f'{"percpu":{wq_type_len}}'

worker_pool_idr = prog['worker_pool_idr']
workqueues = prog['workqueues']
wq_unbound_cpumask = prog['wq_unbound_cpumask']
wq_pod_types = prog['wq_pod_types']
wq_affn_dfl = prog['wq_affn_dfl']
wq_affn_names = prog['wq_affn_names']

WQ_UNBOUND = prog['WQ_UNBOUND']
WQ_ORDERED = prog['__WQ_ORDERED']
WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM']

WQ_AFFN_CPU = prog['WQ_AFFN_CPU']
WQ_AFFN_SMT = prog['WQ_AFFN_SMT']
WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE']
WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA']
WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM']

WQ_NAME_LEN = prog['WQ_NAME_LEN'].value_()
cpumask_str_len = len(cpumask_str(wq_unbound_cpumask))

print('Affinity Scopes')
print('===============')

print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}')

def print_pod_type(pt):
    print(f'  nr_pods  {pt.nr_pods.value_()}')

    print('  pod_cpus', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={cpumask_str(pt.pod_cpus[pod])}', end='')
    print('')

    print('  pod_node', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={pt.pod_node[pod].value_()}', end='')
    print('')

    print(f'  cpu_pod ', end='')
    for cpu in for_each_possible_cpu(prog):
        print(f' [{cpu}]={pt.cpu_pod[cpu].value_()}', end='')
    print('')

for affn in [WQ_AFFN_CPU, WQ_AFFN_SMT, WQ_AFFN_CACHE, WQ_AFFN_NUMA, WQ_AFFN_SYSTEM]:
    print('')
    print(f'{wq_affn_names[affn].string_().decode().upper()}{" (default)" if affn == wq_affn_dfl else ""}')
    print_pod_type(wq_pod_types[affn])

print('')
print('Worker Pools')
print('============')

max_pool_id_len = 0
max_ref_len = 0
for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    max_pool_id_len = max(max_pool_id_len, len(f'{pi}'))
    max_ref_len = max(max_ref_len, len(f'{pool.refcnt.value_()}'))

for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    print(f'pool[{pi:0{max_pool_id_len}}] ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='')
    print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
    if pool.cpu >= 0:
        print(f'cpu={pool.cpu.value_():3}', end='')
    else:
        print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='')
        print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='')
        if pool.attrs.affn_strict:
            print(' strict', end='')
    print('')

print('')
print('Workqueue CPU -> pool')
print('=====================')

print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"type CPU":{wq_type_len}}', end='')
for cpu in for_each_possible_cpu(prog):
    print(f' {cpu:{max_pool_id_len}}', end='')
print(' dfl]')

for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} {wq_type_str(wq):10}', end='')

    for cpu in for_each_possible_cpu(prog):
        pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
        field_len = max(len(str(cpu)), max_pool_id_len)
        print(f' {pool_id:{field_len}}', end='')

    if wq.flags & WQ_UNBOUND:
        print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='')
    print('')

print('')
print('Workqueue -> rescuer')
print('====================')

ucpus_len = max(cpumask_str_len, len("unbound_cpus"))
rcpus_len = max(cpumask_str_len, len("rescuer_cpus"))

print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"unbound_cpus":{ucpus_len}} pid {"rescuer_cpus":{rcpus_len}} ]')

for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    if not (wq.flags & WQ_MEM_RECLAIM):
        continue

    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}}', end='')
    if wq.unbound_attrs.value_() != 0:
        print(f' {cpumask_str(wq.unbound_attrs.cpumask):{ucpus_len}}', end='')
    else:
        print(f' {"":{ucpus_len}}', end='')

    print(f' {wq.rescuer.task.pid.value_():6}', end='')
    print(f' {cpumask_str(wq.rescuer.task.cpus_ptr):{rcpus_len}}', end='')
    print('')

print('')
print('Unbound workqueue -> node_nr/max_active')
print('=======================================')

if 'node_to_cpumask_map' in prog:
    __cpu_online_mask = prog['__cpu_online_mask']
    node_to_cpumask_map = prog['node_to_cpumask_map']
    nr_node_ids = prog['nr_node_ids'].value_()

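    # Commentary on the block below (inferred from the code, not from kernel
    # documentation, and assuming a kernel where unbound workqueues track
    # per-node activity in wq->node_nr_active[]): after dumping the online
    # and per-node cpumasks, each unbound workqueue row shows min_active and
    # max_active, then the current nr/max active counts for every NUMA node,
    # with the trailing "dfl" column taken from node_nr_active[nr_node_ids],
    # the node-agnostic default.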
    print(f'online_cpus={cpumask_str(__cpu_online_mask.address_of_())}')
    for node in for_each_node():
        print(f'NODE[{node:02}]={cpumask_str(node_to_cpumask_map[node])}')
    print('')

    print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ min max', end='')
    first = True
    for node in for_each_node():
        if first:
            print(f' NODE {node}', end='')
            first = False
        else:
            print(f' {node:7}', end='')
    print(f' {"dfl":>7} ]')
    print('')

    for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
        if not (wq.flags & WQ_UNBOUND):
            continue

        print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} ', end='')
        print(f'{wq.min_active.value_():3} {wq.max_active.value_():3}', end='')
        for node in for_each_node():
            nna = wq.node_nr_active[node]
            print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}', end='')
        nna = wq.node_nr_active[nr_node_ids]
        print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}')
else:
    print('node_to_cpumask_map not present, is NUMA enabled?')
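
# Example invocation (a sketch; the file name wq_dump.py is an assumption, and
# attaching drgn to the running kernel typically requires root plus kernel
# debug info):
#
#   $ sudo drgn wq_dump.py
#
# or, relying on the "#!/usr/bin/env drgn" shebang if the file is executable:
#
#   $ sudo ./wq_dump.py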