/* SPDX-License-Identifier: GPL-2.0-only */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM timer_migration

#if !defined(_TRACE_TIMER_MIGRATION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TIMER_MIGRATION_H

#include <linux/tracepoint.h>

/* Group events */
TRACE_EVENT(tmigr_group_set,

	TP_PROTO(struct tmigr_group *group),

	TP_ARGS(group),

	TP_STRUCT__entry(
		__field( void *,	group		)
		__field( unsigned int,	lvl		)
		__field( unsigned int,	numa_node	)
	),

	TP_fast_assign(
		__entry->group		= group;
		__entry->lvl		= group->level;
		__entry->numa_node	= group->numa_node;
	),

	TP_printk("group=%p lvl=%d numa=%d",
		  __entry->group, __entry->lvl, __entry->numa_node)
);

TRACE_EVENT(tmigr_connect_child_parent,

	TP_PROTO(struct tmigr_group *child),

	TP_ARGS(child),

	TP_STRUCT__entry(
		__field( void *,	child		)
		__field( void *,	parent		)
		__field( unsigned int,	lvl		)
		__field( unsigned int,	numa_node	)
		__field( unsigned int,	num_children	)
		__field( u32,		groupmask	)
	),

	TP_fast_assign(
		__entry->child		= child;
		__entry->parent		= child->parent;
		__entry->lvl		= child->parent->level;
		__entry->numa_node	= child->parent->numa_node;
		__entry->num_children	= child->parent->num_children;
		__entry->groupmask	= child->groupmask;
	),

	TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
		  __entry->child, __entry->groupmask, __entry->parent,
		  __entry->lvl, __entry->numa_node, __entry->num_children)
);

TRACE_EVENT(tmigr_connect_cpu_parent,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc),

	TP_STRUCT__entry(
		__field( void *,	parent		)
		__field( unsigned int,	cpu		)
		__field( unsigned int,	lvl		)
		__field( unsigned int,	numa_node	)
		__field( unsigned int,	num_children	)
		__field( u32,		groupmask	)
	),

	TP_fast_assign(
		__entry->parent		= tmc->tmgroup;
		__entry->cpu		= tmc->cpuevt.cpu;
		__entry->lvl		= tmc->tmgroup->level;
		__entry->numa_node	= tmc->tmgroup->numa_node;
		__entry->num_children	= tmc->tmgroup->num_children;
		__entry->groupmask	= tmc->groupmask;
	),

	TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
		  __entry->cpu, __entry->groupmask, __entry->parent,
		  __entry->lvl, __entry->numa_node, __entry->num_children)
);

DECLARE_EVENT_CLASS(tmigr_group_and_cpu,

	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),

	TP_ARGS(group, state, childmask),

	TP_STRUCT__entry(
		__field( void *,	group		)
		__field( void *,	parent		)
		__field( unsigned int,	lvl		)
		__field( unsigned int,	numa_node	)
		__field( u32,		childmask	)
		__field( u8,		active		)
		__field( u8,		migrator	)
	),

	TP_fast_assign(
		__entry->group		= group;
		__entry->parent		= group->parent;
		__entry->lvl		= group->level;
		__entry->numa_node	= group->numa_node;
		__entry->childmask	= childmask;
		__entry->active		= state.active;
		__entry->migrator	= state.migrator;
	),

	TP_printk("group=%p lvl=%d numa=%d active=%0x migrator=%0x "
		  "parent=%p childmask=%0x",
		  __entry->group, __entry->lvl, __entry->numa_node,
		  __entry->active, __entry->migrator,
		  __entry->parent, __entry->childmask)
);

DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_inactive,

	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),

	TP_ARGS(group, state, childmask)
);
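
/*
 * Illustrative sketch, not part of the generated API: the two events of the
 * tmigr_group_and_cpu class (tmigr_group_set_cpu_inactive above and
 * tmigr_group_set_cpu_active below) are emitted by the hierarchy walks in
 * kernel/time/timer_migration.c roughly as follows, assuming the caller has
 * just computed the group's new state "newstate" while walking up on behalf
 * of the child identified by "childmask":
 *
 *	trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
 */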

DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_active,

	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),

	TP_ARGS(group, state, childmask)
);

/* CPU events */
DECLARE_EVENT_CLASS(tmigr_cpugroup,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc),

	TP_STRUCT__entry(
		__field( u64,		wakeup	)
		__field( void *,	parent	)
		__field( unsigned int,	cpu	)
	),

	TP_fast_assign(
		__entry->wakeup	= tmc->wakeup;
		__entry->parent	= tmc->tmgroup;
		__entry->cpu	= tmc->cpuevt.cpu;
	),

	TP_printk("cpu=%d parent=%p wakeup=%llu", __entry->cpu, __entry->parent, __entry->wakeup)
);

DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_new_timer,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc)
);

DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc)
);

DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc)
);

DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc)
);

DEFINE_EVENT(tmigr_cpugroup, tmigr_handle_remote_cpu,

	TP_PROTO(struct tmigr_cpu *tmc),

	TP_ARGS(tmc)
);

DECLARE_EVENT_CLASS(tmigr_idle,

	TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),

	TP_ARGS(tmc, nextevt),

	TP_STRUCT__entry(
		__field( u64,		nextevt	)
		__field( u64,		wakeup	)
		__field( void *,	parent	)
		__field( unsigned int,	cpu	)
	),

	TP_fast_assign(
		__entry->nextevt	= nextevt;
		__entry->wakeup		= tmc->wakeup;
		__entry->parent		= tmc->tmgroup;
		__entry->cpu		= tmc->cpuevt.cpu;
	),

	TP_printk("cpu=%d parent=%p nextevt=%llu wakeup=%llu",
		  __entry->cpu, __entry->parent, __entry->nextevt, __entry->wakeup)
);

DEFINE_EVENT(tmigr_idle, tmigr_cpu_idle,

	TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),

	TP_ARGS(tmc, nextevt)
);

DEFINE_EVENT(tmigr_idle, tmigr_cpu_new_timer_idle,

	TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),

	TP_ARGS(tmc, nextevt)
);

TRACE_EVENT(tmigr_update_events,

	TP_PROTO(struct tmigr_group *child, struct tmigr_group *group,
		 union tmigr_state childstate, union tmigr_state groupstate,
		 u64 nextevt),

	TP_ARGS(child, group, childstate, groupstate, nextevt),

	TP_STRUCT__entry(
		__field( void *,	child			)
		__field( void *,	group			)
		__field( u64,		nextevt			)
		__field( u64,		group_next_expiry	)
		__field( u64,		child_evt_expiry	)
		__field( unsigned int,	group_lvl		)
		__field( unsigned int,	child_evtcpu		)
		__field( u8,		child_active		)
		__field( u8,		group_active		)
	),

	TP_fast_assign(
		__entry->child			= child;
		__entry->group			= group;
		__entry->nextevt		= nextevt;
		__entry->group_next_expiry	= group->next_expiry;
		__entry->child_evt_expiry	= child ? child->groupevt.nextevt.expires : 0;
		__entry->group_lvl		= group->level;
		__entry->child_evtcpu		= child ? child->groupevt.cpu : 0;
		__entry->child_active		= childstate.active;
		__entry->group_active		= groupstate.active;
	),

	TP_printk("child=%p group=%p group_lvl=%d child_active=%0x group_active=%0x "
		  "nextevt=%llu next_expiry=%llu child_evt_expiry=%llu child_evtcpu=%d",
		  __entry->child, __entry->group, __entry->group_lvl, __entry->child_active,
		  __entry->group_active,
		  __entry->nextevt, __entry->group_next_expiry, __entry->child_evt_expiry,
		  __entry->child_evtcpu)
);

TRACE_EVENT(tmigr_handle_remote,

	TP_PROTO(struct tmigr_group *group),

	TP_ARGS(group),

	TP_STRUCT__entry(
		__field( void *,	group	)
		__field( unsigned int,	lvl	)
	),

	TP_fast_assign(
		__entry->group	= group;
		__entry->lvl	= group->level;
	),

	TP_printk("group=%p lvl=%d",
		  __entry->group, __entry->lvl)
);

#endif /* _TRACE_TIMER_MIGRATION_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
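
/*
 * Consumption sketch (assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	# enable all timer_migration events, or a single one
 *	echo 1 > /sys/kernel/tracing/events/timer_migration/enable
 *	echo 1 > /sys/kernel/tracing/events/timer_migration/tmigr_cpu_idle/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * Each record is rendered through the TP_printk() format of its event, e.g. a
 * tmigr_cpu_idle record prints "cpu=<n> parent=<ptr> nextevt=<t> wakeup=<t>".
 */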