--- trace.c (186a73dc9a81b087d0708f21a990615957ec9c1c)
+++ trace.c (bdffd893a0e9c431304142d12d9a0a21d365c502)
 /*
  * ring buffer based function tracer
  *
  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * Originally taken from the RT patch by:
  *    Arnaldo Carvalho de Melo <acme@redhat.com>
--- 261 unchanged lines hidden ---
         ring_buffer_discard_commit(buffer, event);
         return 1;
     }

     return 0;
 }
 EXPORT_SYMBOL_GPL(call_filter_check_discard);

-cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
     u64 ts;

     /* Early boot up does not have a buffer yet */
     if (!buf->buffer)
         return trace_clock_local();

     ts = ring_buffer_time_stamp(buf->buffer, cpu);
--- 307 unchanged lines hidden ---
             return ret;

         tr->allocated_snapshot = true;
     }

     return 0;
 }

-void free_snapshot(struct trace_array *tr)
+static void free_snapshot(struct trace_array *tr)
 {
     /*
      * We don't free the ring buffer. instead, resize it because
      * The max_tr ring buffer has some state (e.g. ring->clock) and
      * we want preserve it.
      */
     ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
     set_buffer_entries(&tr->max_buffer, 1);
--- 347 unchanged lines hidden ---
     if (cnt > len)
         cnt = len;
     memcpy(buf, s->buffer + s->readpos, cnt);

     s->readpos += cnt;
     return cnt;
 }

-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a arch_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- *
- * It is also used in other places outside the update_max_tr
- * so it needs to be defined outside of the
- * CONFIG_TRACER_MAX_TRACE.
- */
-static arch_spinlock_t ftrace_max_lock =
-    (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
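Note: the comment deleted above explains the locking design: the buffers themselves are protected by per-CPU locks, so only the act of swapping the live and max buffer pointers needs its own small lock. This commit moves that lock (and, below, the tracing_max_latency global) out of file scope and into struct trace_array, so every trace instance carries its own tr->max_lock. A minimal userspace sketch of the pattern using pthread spinlocks; all names are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>

/* Per-instance state in the spirit of struct trace_array. */
struct trace_instance {
    pthread_spinlock_t max_lock;  /* protects only the pointer swap */
    void *trace_buffer;           /* live buffer */
    void *max_buffer;             /* max-latency snapshot */
};

/* The swap is the only operation that needs the dedicated lock;
 * buffer contents are protected elsewhere (per-CPU locks in the
 * kernel). */
static void swap_buffers(struct trace_instance *tr)
{
    pthread_spin_lock(&tr->max_lock);
    void *tmp = tr->trace_buffer;
    tr->trace_buffer = tr->max_buffer;
    tr->max_buffer = tmp;
    pthread_spin_unlock(&tr->max_lock);
}

int main(void)
{
    int a, b;
    struct trace_instance tr = { .trace_buffer = &a, .max_buffer = &b };
    pthread_spin_init(&tr.max_lock, PTHREAD_PROCESS_PRIVATE);
    swap_buffers(&tr);
    printf("live is now %s\n", tr.trace_buffer == &b ? "b" : "a");
    return 0;
}

Putting the lock inside the instance means two trace instances can take max-latency snapshots concurrently without contending on one global.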
 unsigned long __read_mostly tracing_thresh;

 #ifdef CONFIG_TRACER_MAX_TRACE
-unsigned long __read_mostly tracing_max_latency;
-
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
     struct trace_buffer *trace_buf = &tr->trace_buffer;
     struct trace_buffer *max_buf = &tr->max_buffer;
     struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
     struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

     max_buf->cpu = cpu;
     max_buf->time_start = data->preempt_timestamp;

-    max_data->saved_latency = tracing_max_latency;
+    max_data->saved_latency = tr->max_latency;
     max_data->critical_start = data->critical_start;
     max_data->critical_end = data->critical_end;

     memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
     max_data->pid = tsk->pid;
     /*
      * If tsk == current, then use current_uid(), as that does not use
      * RCU. The irq tracer can be called out of RCU scope.
--- 31 unchanged lines hidden ---
     WARN_ON_ONCE(!irqs_disabled());

     if (!tr->allocated_snapshot) {
         /* Only the nop tracer should hit this when disabling */
         WARN_ON_ONCE(tr->current_trace != &nop_trace);
         return;
     }

-    arch_spin_lock(&ftrace_max_lock);
+    arch_spin_lock(&tr->max_lock);

     buf = tr->trace_buffer.buffer;
     tr->trace_buffer.buffer = tr->max_buffer.buffer;
     tr->max_buffer.buffer = buf;

     __update_max_tr(tr, tsk, cpu);
-    arch_spin_unlock(&ftrace_max_lock);
+    arch_spin_unlock(&tr->max_lock);
 }

 /**
  * update_max_tr_single - only copy one trace over, and reset the rest
  * @tr - tracer
  * @tsk - task with the latency
  * @cpu - the cpu of the buffer to copy.
  *
--- 9 unchanged lines hidden ---
     WARN_ON_ONCE(!irqs_disabled());
     if (!tr->allocated_snapshot) {
         /* Only the nop tracer should hit this when disabling */
         WARN_ON_ONCE(tr->current_trace != &nop_trace);
         return;
     }

-    arch_spin_lock(&ftrace_max_lock);
+    arch_spin_lock(&tr->max_lock);

     ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

     if (ret == -EBUSY) {
         /*
          * We failed to swap the buffer due to a commit taking
          * place on this CPU. We fail to record, but we reset
          * the max trace buffer (no one writes directly to it)
          * and flag that it failed.
          */
         trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
     }

     WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

     __update_max_tr(tr, tsk, cpu);
-    arch_spin_unlock(&ftrace_max_lock);
+    arch_spin_unlock(&tr->max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */

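Note: update_max_tr_single() above tolerates a failed swap: ring_buffer_swap_cpu() returns -EBUSY when a commit is in flight on that CPU, and the code records the failure instead of spinning. A hedged userspace sketch of the same give-up-on-busy shape, with pthread_mutex_trylock standing in for the ring buffer's busy check; every name here is illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t commit_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns -EBUSY if a "commit" holds the lock, mimicking the
 * ring_buffer_swap_cpu() contract in the hunk above. */
static int swap_cpu_buffer(void)
{
    if (pthread_mutex_trylock(&commit_lock) != 0)
        return -EBUSY;              /* commit in progress */
    /* ... swap this CPU's live and snapshot buffers ... */
    pthread_mutex_unlock(&commit_lock);
    return 0;
}

static void update_max_single(void)
{
    int ret = swap_cpu_buffer();

    /* Fail to record this maximum rather than block the writer. */
    if (ret == -EBUSY)
        fprintf(stderr,
            "Failed to swap buffers due to commit in progress\n");
}

int main(void)
{
    update_max_single();            /* uncontended: succeeds */
    return 0;
}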
-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
     /* Iterators are static, they should be filled or empty */
     if (trace_buffer_iter(iter, iter->cpu_file))
         return;

     ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }

--- 100 unchanged lines hidden ---
     if (!type->set_flag)
         type->set_flag = &dummy_set_flag;
     if (!type->flags)
         type->flags = &dummy_tracer_flags;
     else
         if (!type->flags->opts)
             type->flags->opts = dummy_tracer_opt;
-    if (!type->wait_pipe)
-        type->wait_pipe = default_wait_pipe;

     ret = run_tracer_selftest(type);
     if (ret < 0)
         goto out;

     type->next = trace_types;
     trace_types = type;

--- 115 unchanged lines hidden ---
             /* Someone screwed up their debugging */
             WARN_ON_ONCE(1);
             global_trace.stop_count = 0;
         }
         goto out;
     }

     /* Prevent the buffers from switching */
-    arch_spin_lock(&ftrace_max_lock);
+    arch_spin_lock(&global_trace.max_lock);

     buffer = global_trace.trace_buffer.buffer;
     if (buffer)
         ring_buffer_record_enable(buffer);

 #ifdef CONFIG_TRACER_MAX_TRACE
     buffer = global_trace.max_buffer.buffer;
     if (buffer)
         ring_buffer_record_enable(buffer);
 #endif

-    arch_spin_unlock(&ftrace_max_lock);
+    arch_spin_unlock(&global_trace.max_lock);

     ftrace_start();
  out:
     raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 }

 static void tracing_start_tr(struct trace_array *tr)
 {
--- 38 unchanged lines hidden ---
     unsigned long flags;

     ftrace_stop();
     raw_spin_lock_irqsave(&global_trace.start_lock, flags);
     if (global_trace.stop_count++)
         goto out;

     /* Prevent the buffers from switching */
-    arch_spin_lock(&ftrace_max_lock);
+    arch_spin_lock(&global_trace.max_lock);

     buffer = global_trace.trace_buffer.buffer;
     if (buffer)
         ring_buffer_record_disable(buffer);

 #ifdef CONFIG_TRACER_MAX_TRACE
     buffer = global_trace.max_buffer.buffer;
     if (buffer)
         ring_buffer_record_disable(buffer);
 #endif

-    arch_spin_unlock(&ftrace_max_lock);
+    arch_spin_unlock(&global_trace.max_lock);

  out:
     raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 }

 static void tracing_stop_tr(struct trace_array *tr)
 {
     struct ring_buffer *buffer;
--- 297 unchanged lines hidden ---
      * We don't need any atomic variables, just a barrier.
      * If an interrupt comes in, we don't care, because it would
      * have exited and put the counter back to what we want.
      * We just need a barrier to keep gcc from moving things
      * around.
      */
     barrier();
     if (use_stack == 1) {
-        trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
+        trace.entries = this_cpu_ptr(ftrace_stack.calls);
         trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

         if (regs)
             save_stack_trace_regs(regs, &trace);
         else
             save_stack_trace(&trace);

         if (trace.nr_entries > size)
--- 1570 unchanged lines hidden ---
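Note: the change to trace.entries above replaces the old __get_cpu_var() accessor with this_cpu_ptr(). Both resolve to the current CPU's copy of a per-CPU variable, and this_cpu_ptr(ftrace_stack.calls) already yields a pointer to the first element of the calls[] array, so the explicit &...calls[0] form goes away. A runnable userspace analogue; a real per-CPU access would also require preemption disabled, and the array-indexed-by-CPU scheme here is only an illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 64
#define STACK_DEPTH 8

/* Userspace stand-in for DEFINE_PER_CPU(struct ftrace_stack, ...):
 * one slot per CPU, selected by the CPU we are currently on. */
struct stack_slot {
    unsigned long calls[STACK_DEPTH];
};

static struct stack_slot per_cpu_stack[MAX_CPUS];

/* Analogue of this_cpu_ptr(ftrace_stack.calls): a pointer to the
 * current CPU's calls[] array, i.e. to its first element. */
static unsigned long *this_cpu_calls(void)
{
    int cpu = sched_getcpu();
    if (cpu < 0)
        cpu = 0;                    /* fallback for the demo */
    return per_cpu_stack[cpu % MAX_CPUS].calls;
}

int main(void)
{
    unsigned long *entries = this_cpu_calls();
    entries[0] = 0xdeadbeef;
    printf("first entry on this cpu: %#lx\n", entries[0]);
    return 0;
}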
     err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
     if (err)
         goto err_unlock;

     mutex_lock(&tracing_cpumask_update_lock);

     local_irq_disable();
-    arch_spin_lock(&ftrace_max_lock);
+    arch_spin_lock(&tr->max_lock);
     for_each_tracing_cpu(cpu) {
         /*
          * Increase/decrease the disabled counter if we are
          * about to flip a bit in the cpumask:
          */
         if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
             atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
             ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
         }
         if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
             atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
             ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
         }
     }
-    arch_spin_unlock(&ftrace_max_lock);
+    arch_spin_unlock(&tr->max_lock);
     local_irq_enable();

     cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

     mutex_unlock(&tracing_cpumask_update_lock);
     free_cpumask_var(tracing_cpumask_new);

     return count;
--- 858 unchanged lines hidden ---
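Note: the loop above touches only CPUs whose cpumask bit actually flips, bumping the disabled counter and stopping ring-buffer recording before a bit is cleared, and undoing both when a bit is set. A small standalone sketch of that delta logic with a plain 64-bit mask; the handlers are placeholders, not kernel functions:

#include <stdint.h>
#include <stdio.h>

static void disable_cpu(int cpu) { printf("disable cpu %d\n", cpu); }
static void enable_cpu(int cpu)  { printf("enable cpu %d\n", cpu); }

/* Act only on CPUs whose bit differs between the old and new mask,
 * as tracing_cpumask_write() does with real cpumasks. */
static void apply_cpumask(uint64_t old_mask, uint64_t new_mask)
{
    for (int cpu = 0; cpu < 64; cpu++) {
        uint64_t bit = 1ULL << cpu;
        if ((old_mask & bit) && !(new_mask & bit))
            disable_cpu(cpu);       /* bit cleared: stop recording */
        if (!(old_mask & bit) && (new_mask & bit))
            enable_cpu(cpu);        /* bit set: resume recording */
    }
}

int main(void)
{
    apply_cpumask(0x3 /* cpus 0,1 */, 0x6 /* cpus 1,2 */);
    return 0;
}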
 static unsigned int
 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 {
     struct trace_iterator *iter = filp->private_data;

     return trace_poll(iter, filp, poll_table);
 }

-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- *  1) the current tracer might hold the runqueue lock when it wakes up
- *     a reader, hence a deadlock (sched, function, and function graph tracers)
- *  2) the function tracers, trace all functions, we don't want
- *     the overhead of calling wake_up and friends
- *     (and tracing them too)
- *
- * Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-    set_current_state(TASK_INTERRUPTIBLE);
-    /* sleep for 100 msecs, and try again. */
-    schedule_timeout(HZ / 10);
-}
-
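Note: the deleted comment explains why some tracers used this primitive poll-and-sleep wait: a proper wake_up() issued from inside the scheduler or the function tracer could deadlock or end up being traced itself. The commit removes the per-tracer ->wait_pipe() callback altogether (see the register_tracer() hunk earlier) and routes every reader through wait_on_pipe(), which blocks in ring_buffer_wait(). A userspace analogue contrasting the two styles; condition variables stand in for the kernel's waitqueue and all names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool data_ready;

/* Removed poll_wait_pipe() style: sleep 100 ms and re-check; no
 * wakeup bookkeeping, but up to 100 ms of added latency. */
static void poll_style_wait(void)
{
    struct timespec ts = { 0, 100 * 1000 * 1000 };
    while (!data_ready)             /* racy: fine for a sketch only */
        nanosleep(&ts, NULL);
}

/* wait_on_pipe() style: block until a writer signals readiness. */
static void blocking_wait(void)
{
    pthread_mutex_lock(&lock);
    while (!data_ready)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

static void *producer(void *arg)
{
    pthread_mutex_lock(&lock);
    data_ready = true;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    blocking_wait();                /* wakes as soon as producer signals */
    poll_style_wait();              /* returns at once: data is ready */
    pthread_join(t, NULL);
    puts("woken");
    return 0;
}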
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
     struct trace_iterator *iter = filp->private_data;

     while (trace_empty(iter)) {

         if ((filp->f_flags & O_NONBLOCK)) {
             return -EAGAIN;
         }

-        mutex_unlock(&iter->mutex);
-
-        iter->trace->wait_pipe(iter);
-
-        mutex_lock(&iter->mutex);
-
-        if (signal_pending(current))
-            return -EINTR;
-
         /*
          * We block until we read something and tracing is disabled.
          * We still block if tracing is disabled, but we have never
          * read anything. This allows a user to cat this file, and
          * then enable tracing. But after we have read something,
          * we give an EOF when tracing is again disabled.
          *
          * iter->pos will be 0 if we haven't read anything.
          */
         if (!tracing_is_on() && iter->pos)
             break;
+
+        mutex_unlock(&iter->mutex);
+
+        wait_on_pipe(iter);
+
+        mutex_lock(&iter->mutex);
+
+        if (signal_pending(current))
+            return -EINTR;
     }

     return 1;
 }

 /*
  * Consumer reader.
  */
--- 906 unchanged lines hidden ---
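Note: in tracing_wait_pipe() the unlock/wait/relock sequence moves below the tracing_is_on() check, so a reader that has already consumed data (iter->pos != 0) now sees EOF as soon as tracing is switched off, instead of sleeping one more round before noticing. A runnable sketch of the reordered loop shape; the names are illustrative and the kernel calls are stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool tracing_on = false;     /* tracing already switched off */
static int pos = 1;                 /* we have read something */
static bool buffer_empty = true;

static int wait_pipe(void)
{
    while (buffer_empty) {
        /* Exit check comes first now: EOF without an extra sleep. */
        if (!tracing_on && pos)
            return 0;
        /* ... unlock, block until woken, relock, check signals ... */
    }
    return 1;
}

int main(void)
{
    printf("wait_pipe() -> %d (EOF)\n", wait_pipe());
    return 0;
}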
     if (ret < 0) {
         if (trace_empty(iter)) {
             if ((filp->f_flags & O_NONBLOCK)) {
                 size = -EAGAIN;
                 goto out_unlock;
             }
             mutex_unlock(&trace_types_lock);
-            iter->trace->wait_pipe(iter);
+            wait_on_pipe(iter);
             mutex_lock(&trace_types_lock);
             if (signal_pending(current)) {
                 size = -EINTR;
                 goto out_unlock;
             }
             goto again;
         }
         size = 0;
--- 194 unchanged lines hidden ---

     /* did we read anything? */
     if (!spd.nr_pages) {
         if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
             ret = -EAGAIN;
             goto out;
         }
         mutex_unlock(&trace_types_lock);
-        iter->trace->wait_pipe(iter);
+        wait_on_pipe(iter);
         mutex_lock(&trace_types_lock);
         if (signal_pending(current)) {
             ret = -EINTR;
             goto out;
         }
         goto again;
     }

--- 706 unchanged lines hidden ---

     if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
         goto out_free_tr;

     cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

     raw_spin_lock_init(&tr->start_lock);

+    tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
     tr->current_trace = &nop_trace;

     INIT_LIST_HEAD(&tr->systems);
     INIT_LIST_HEAD(&tr->events);

     if (allocate_trace_buffers(tr, trace_buf_size) < 0)
         goto out_free_tr;

--- 181 unchanged lines hidden ---
               tr, &tracing_mark_fops);

     trace_create_file("trace_clock", 0644, d_tracer, tr,
               &trace_clock_fops);

     trace_create_file("tracing_on", 0644, d_tracer,
               tr, &rb_simple_fops);

+#ifdef CONFIG_TRACER_MAX_TRACE
+    trace_create_file("tracing_max_latency", 0644, d_tracer,
+            &tr->max_latency, &tracing_max_lat_fops);
+#endif
+
     if (ftrace_create_function_files(tr, d_tracer))
         WARN(1, "Could not allocate function filter files");

 #ifdef CONFIG_TRACER_SNAPSHOT
     trace_create_file("snapshot", 0644, d_tracer,
               tr, &snapshot_fops);
 #endif

--- 9 unchanged lines hidden ---
     trace_access_lock_init();

     d_tracer = tracing_init_dentry();
     if (!d_tracer)
         return 0;

     init_tracer_debugfs(&global_trace, d_tracer);

-#ifdef CONFIG_TRACER_MAX_TRACE
-    trace_create_file("tracing_max_latency", 0644, d_tracer,
-            &tracing_max_latency, &tracing_max_lat_fops);
-#endif
-
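Note: the tracing_max_latency file disappears from the global debugfs directory here because init_tracer_debugfs() now creates it per instance (see the hunk added above), passing &tr->max_latency as the file's private data so each instance reports its own maximum. A hedged sketch of the per-instance debugfs pattern as a tiny module; the instance structure and names are made up, and this only builds against a kernel source tree:

#include <linux/debugfs.h>
#include <linux/module.h>

/* Hypothetical instance owning its own max_latency, like trace_array. */
struct my_instance {
    struct dentry *dir;
    u64 max_latency;
};

static struct my_instance inst;

static int __init example_init(void)
{
    inst.dir = debugfs_create_dir("example_inst", NULL);
    /* Wire the file straight to this instance's field, the way
     * init_tracer_debugfs() hands &tr->max_latency to the fops. */
    debugfs_create_u64("max_latency", 0644, inst.dir,
               &inst.max_latency);
    return 0;
}

static void __exit example_exit(void)
{
    debugfs_remove_recursive(inst.dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");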
     trace_create_file("tracing_thresh", 0644, d_tracer,
             &tracing_thresh, &tracing_max_lat_fops);

     trace_create_file("README", 0444, d_tracer,
             NULL, &tracing_readme_fops);

     trace_create_file("saved_cmdlines", 0444, d_tracer,
             NULL, &tracing_saved_cmdlines_fops);
--- 255 unchanged lines hidden ---
     /*
      * register_tracer() might reference current_trace, so it
      * needs to be set before we register anything. This is
      * just a bootstrap of current_trace anyway.
      */
     global_trace.current_trace = &nop_trace;

+    global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+    ftrace_init_global_array_ops(&global_trace);
+
     register_tracer(&nop_trace);

     /* All seems OK, enable tracing */
     tracing_disabled = 0;

     atomic_notifier_chain_register(&panic_notifier_list,
                        &trace_panic_notifier);

--- 55 unchanged lines hidden ---