/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must keep it up to date manually.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page       |               |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
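
/*
 * Illustrative sketch (not part of this file): a reader that drains every
 * per-cpu buffer via the reader-page swap described above. It assumes the
 * consuming-read API declared in <linux/ring_buffer.h> (ring_buffer_consume()
 * is defined later in this file); process() is a placeholder for whatever the
 * caller does with the payload.
 *
 *	static void drain_buffer(struct ring_buffer *buffer)
 *	{
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *		int cpu;
 *
 *		for_each_online_cpu(cpu) {
 *			while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *				process(ring_buffer_event_data(event), ts);
 *		}
 *	}
 *
 * Each successful consume may trigger the reader page swap shown in the
 * diagrams above, so the writer never touches the page being read.
 */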

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT = 0,
	RB_BUFFERS_DISABLED_BIT = 1,
};

enum {
	RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
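
/*
 * Illustrative sketch (not in the kernel tree): the three layers above mean a
 * write only succeeds when the global flag, the buffer, and the per cpu
 * buffer are all enabled. Something like the following suppresses recording
 * around a noisy region and then re-enables it; do_noisy_work() is a
 * placeholder.
 *
 *	tracing_off();			global gate: writes now fail fast
 *	do_noisy_work();		nothing is recorded here
 *	tracing_on();			writes may succeed again, provided
 *					ring_buffer_record_disable() has not
 *					been called on the buffer or its cpu
 */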

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING
			&& event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[];		/* data of buffer page */
};

struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		write;		/* index for next write */
	unsigned	read;		/* index for next read */
	local_t		entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}
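
/*
 * Illustrative sketch (not in the kernel tree): how the data-event length
 * encoding used by rb_event_data_length() and rb_event_data() above works.
 * Payloads of up to RB_MAX_SMALL_DATA bytes store their (RB_ALIGNMENT
 * aligned) length divided by RB_ALIGNMENT in type_len; larger payloads set
 * type_len to 0, put the byte count in array[0], and start the payload at
 * array[1].
 *
 *	static size_t payload_bytes(struct ring_buffer_event *event)
 *	{
 *		if (event->type_len)
 *			return event->type_len * RB_ALIGNMENT;
 *		return event->array[0];
 *	}
 *
 * For example, a 12-byte payload is stored with type_len == 3, while a
 * payload larger than RB_MAX_SMALL_DATA keeps type_len == 0 and its byte
 * count in array[0].
 */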

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int			cpu;
	struct ring_buffer	*buffer;
	spinlock_t		reader_lock;	/* serialize readers */
	raw_spinlock_t		lock;
	struct lock_class_key	lock_key;
	struct list_head	pages;
	struct buffer_page	*head_page;	/* read from head */
	struct buffer_page	*tail_page;	/* write to tail */
	struct buffer_page	*commit_page;	/* committed pages */
	struct buffer_page	*reader_page;
	unsigned long		nmi_dropped;
	unsigned long		commit_overrun;
	unsigned long		overrun;
	unsigned long		read;
	local_t			entries;
	u64			write_stamp;
	u64			read_stamp;
	atomic_t		record_disabled;
};

struct ring_buffer {
	unsigned		pages;
	unsigned		flags;
	int			cpus;
	atomic_t		record_disabled;
	cpumask_var_t		cpumask;

	struct lock_class_key	*reader_lock_key;

	struct mutex		mutex;

	struct ring_buffer_per_cpu **buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block	cpu_notify;
#endif
	u64			(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long		head;
	struct buffer_page	*head_page;
	u64			read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer, cpu);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();


	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}
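
/*
 * Illustrative sketch (not in the kernel tree): typical buffer lifetime for a
 * tracer. ring_buffer_alloc() is the wrapper around __ring_buffer_alloc()
 * provided by <linux/ring_buffer.h>; my_clock is a placeholder for a caller
 * supplied u64 (*)(void) timestamp source.
 *
 *	struct ring_buffer *buf;
 *
 *	buf = ring_buffer_alloc(128 * 1024, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	ring_buffer_set_clock(buf, my_clock);	optional, defaults to
 *						trace_clock_local
 *	...
 *	ring_buffer_free(buf);
 *
 * With RB_FL_OVERWRITE the buffer overwrites its oldest data when it wraps;
 * without it, new writes are dropped once the tail catches the head.
 */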

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
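
/*
 * Illustrative sketch (not in the kernel tree): growing a buffer to hold one
 * megabyte per cpu. The requested byte count is rounded up to whole pages,
 * never below two pages, and the rounded size is returned on success:
 *
 *	int ret;
 *
 *	ret = ring_buffer_resize(buffer, 1024 * 1024);
 *	if (ret == -1 || ret == -ENOMEM)
 *		pr_warning("ring buffer resize failed\n");
 *
 * The caller must guarantee the buffer is idle for the duration of the call,
 * as the kernel-doc above states.
 */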

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts
	 * and NMIs on this CPU. If we own the commit event, then we
	 * can commit all others that interrupted us, since the
	 * interruptions are in stack format (they finish before they
	 * come back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}


static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *commit_page,
	     struct buffer_page *tail_page, u64 *ts)
{
	struct buffer_page *next_page, *head_page, *reader_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	bool lock_taken = false;
	unsigned long flags;

	next_page = tail_page;

	local_irq_save(flags);
	/*
	 * Since the write to the buffer is still not
	 * fully lockless, we must be careful with NMIs.
	 * The locks in the writers are taken when a write
	 * crosses to a new page. The locks protect against
	 * races with the readers (this will soon be fixed
	 * with a lockless solution).
	 *
	 * Because we can not protect against NMIs, and we
	 * want to keep traces reentrant, we need to manage
	 * what happens when we are in an NMI.
	 *
	 * NMIs can happen after we take the lock.
	 * If we are in an NMI, only take the lock
	 * if it is not already taken. Otherwise
	 * simply fail.
	 */
	if (unlikely(in_nmi())) {
		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
			cpu_buffer->nmi_dropped++;
			goto out_reset;
		}
	} else
		__raw_spin_lock(&cpu_buffer->lock);

	lock_taken = true;

	rb_inc_page(cpu_buffer, &next_page);

	head_page = cpu_buffer->head_page;
	reader_page = cpu_buffer->reader_page;

	/* we grabbed the lock before incrementing */
	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
		goto out_reset;

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		cpu_buffer->commit_overrun++;
		goto out_reset;
	}

	if (next_page == head_page) {
		if (!(buffer->flags & RB_FL_OVERWRITE))
			goto out_reset;

		/* tail_page has not moved yet? */
		if (tail_page == cpu_buffer->tail_page) {
			/* count overflows */
			cpu_buffer->overrun +=
				local_read(&head_page->entries);

			rb_inc_page(cpu_buffer, &head_page);
			cpu_buffer->head_page = head_page;
			cpu_buffer->head_page->read = 0;
		}
	}

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		local_set(&next_page->write, 0);
		local_set(&next_page->entries, 0);
		local_set(&next_page->page->commit, 0);
		cpu_buffer->tail_page = next_page;

		/* reread the time stamp */
		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
		cpu_buffer->tail_page->page->time_stamp = *ts;
	}

	/*
	 * The actual tail page has moved forward.
	 */
	if (tail < BUF_PAGE_SIZE) {
		/* Mark the rest of the page with padding */
		event = __rb_page_index(tail_page, tail);
		kmemcheck_annotate_bitfield(event, bitfield);
		rb_event_set_padding(event);
	}

	/* Set the write back to the previous setting */
	local_sub(length, &tail_page->write);

	/*
	 * If this was a commit entry that failed,
	 * increment that too
	 */
	if (tail_page == cpu_buffer->commit_page &&
	    tail == rb_commit_index(cpu_buffer)) {
		rb_set_commit_to_write(cpu_buffer);
	}

	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	local_sub(length, &tail_page->write);

	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *commit_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail,
				    commit_page, tail_page, ts);

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */
	if (likely(!type))
		local_inc(&tail_page->entries);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			/* try to discard, since we do not need this */
			if (!rb_try_to_discard(cpu_buffer, event)) {
				/* nope, just zero it */
				event->time_delta = 0;
				event->array[0] = 0;
			}
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Try to discard the event */
		if (!rb_try_to_discard(cpu_buffer, event)) {
			/* Darn, this is just wasted space */
			event->time_delta = 0;
			event->array[0] = 0;
		}
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta = 0;
	int commit = 0;
	int nr_loops = 0;

	length = rb_calculate_event_length(length);
 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
		   rb_page_write(cpu_buffer->tail_page) ==
		   rb_commit_index(cpu_buffer))) {
		u64 diff;

		diff = ts - cpu_buffer->write_stamp;

		/* make sure this diff is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto get_event;

		delta = diff;
		if (unlikely(test_time_stamp(delta))) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	}

 get_event:
	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (unlikely(commit))
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
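
/*
 * Illustrative sketch (not in the kernel tree): the reserve/commit pair as a
 * writer would use it from any context where the buffer is enabled. struct
 * my_entry is a placeholder payload type.
 *
 *	struct my_entry *entry;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;			buffers off, cpu masked, or no space
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;		fill in the reserved payload
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * ring_buffer_write(), further below, performs the same two steps for callers
 * that already have the payload in hand.
 */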

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/**
 * ring_buffer_event_discard - discard any event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	rb_event_discard(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_discard);

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * This is similar to ring_buffer_event_discard but must only be
 * performed on an event that has not been committed yet. The difference
 * is that this will also try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, preemptible());

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	if (!rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must increment entries.
	 */
	local_inc(&cpu_buffer->entries);
 out:
	/*
	 * If a write came in and pushed the tail page
	 * we still need to update the commit pointer
	 * if we were the commit.
	 */
	if (rb_is_commit(cpu_buffer, event))
		rb_set_commit_to_write(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
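
/*
 * Illustrative sketch (not in the kernel tree): abandoning a reservation when
 * the writer decides, after reserving, that the entry is not needed (for
 * example because a filter rejected it). filter_ok() and compute_value() are
 * placeholders.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = compute_value();
 *	if (!filter_ok(entry)) {
 *		ring_buffer_discard_commit(buffer, event);
 *		return;
 *	}
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * As documented above, an event handed to ring_buffer_discard_commit() must
 * not also be passed to ring_buffer_unlock_commit().
 */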

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);

static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
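
/*
 * Illustrative sketch (not in the kernel tree): quiescing a buffer before
 * inspecting it, then letting writers back in. Disables nest, so every
 * disable must be balanced by an enable; inspect_buffer() is a placeholder
 * for the reader side work.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();		wait for in-flight writers to finish
 *	inspect_buffer(buffer);
 *	ring_buffer_record_enable(buffer);
 *
 * The same pattern works per cpu with ring_buffer_record_disable_cpu() and
 * ring_buffer_record_enable_cpu() when only one cpu's buffer is of interest.
 */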

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped NMI writes from
 */
unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->nmi_dropped;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->commit_overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    cpu_buffer->overrun) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);
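
/*
 * Illustrative sketch (not in the kernel tree): a snapshot of buffer health,
 * combining the per-cpu and whole-buffer counters above.
 * ring_buffer_overruns() is defined immediately below.
 *
 *	unsigned long entries = ring_buffer_entries(buffer);
 *	unsigned long lost = ring_buffer_overruns(buffer);
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu%d: %lu entries, %lu overruns\n", cpu,
 *			ring_buffer_entries_cpu(buffer, cpu),
 *			ring_buffer_overrun_cpu(buffer, cpu));
 *	pr_info("total: %lu entries, %lu overruns\n", entries, lost);
 */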

/**
 * ring_buffer_overruns - get the total number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);

static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
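
/*
 * Worked example (added for clarity, not part of the original file): a data
 * event only carries the 27 bit time_delta described in the entry header,
 * so a delta that does not fit is emitted as a TIME_EXTEND event and
 * reassembled by the two helpers above:
 *
 *	delta = event->array[0];	(upper bits of the delta)
 *	delta <<= TS_SHIFT;		(TS_SHIFT matches the 27 bit field)
 *	delta += event->time_delta;	(low 27 bits)
 *	read_stamp += delta;
 */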
2168 return; 2169 } 2170 2171 static struct buffer_page * 2172 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 2173 { 2174 struct buffer_page *reader = NULL; 2175 unsigned long flags; 2176 int nr_loops = 0; 2177 2178 local_irq_save(flags); 2179 __raw_spin_lock(&cpu_buffer->lock); 2180 2181 again: 2182 /* 2183 * This should normally only loop twice. But because the 2184 * start of the reader inserts an empty page, it causes 2185 * a case where we will loop three times. There should be no 2186 * reason to loop four times (that I know of). 2187 */ 2188 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 2189 reader = NULL; 2190 goto out; 2191 } 2192 2193 reader = cpu_buffer->reader_page; 2194 2195 /* If there's more to read, return this page */ 2196 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 2197 goto out; 2198 2199 /* Never should we have an index greater than the size */ 2200 if (RB_WARN_ON(cpu_buffer, 2201 cpu_buffer->reader_page->read > rb_page_size(reader))) 2202 goto out; 2203 2204 /* check if we caught up to the tail */ 2205 reader = NULL; 2206 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 2207 goto out; 2208 2209 /* 2210 * Splice the empty reader page into the list around the head. 2211 * Reset the reader page to size zero. 2212 */ 2213 2214 reader = cpu_buffer->head_page; 2215 cpu_buffer->reader_page->list.next = reader->list.next; 2216 cpu_buffer->reader_page->list.prev = reader->list.prev; 2217 2218 local_set(&cpu_buffer->reader_page->write, 0); 2219 local_set(&cpu_buffer->reader_page->entries, 0); 2220 local_set(&cpu_buffer->reader_page->page->commit, 0); 2221 2222 /* Make the reader page now replace the head */ 2223 reader->list.prev->next = &cpu_buffer->reader_page->list; 2224 reader->list.next->prev = &cpu_buffer->reader_page->list; 2225 2226 /* 2227 * If the tail is on the reader, then we must set the head 2228 * to the inserted page, otherwise we set it one before. 2229 */ 2230 cpu_buffer->head_page = cpu_buffer->reader_page; 2231 2232 if (cpu_buffer->commit_page != reader) 2233 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 2234 2235 /* Finally update the reader page to the new head */ 2236 cpu_buffer->reader_page = reader; 2237 rb_reset_reader_page(cpu_buffer); 2238 2239 goto again; 2240 2241 out: 2242 __raw_spin_unlock(&cpu_buffer->lock); 2243 local_irq_restore(flags); 2244 2245 return reader; 2246 } 2247 2248 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 2249 { 2250 struct ring_buffer_event *event; 2251 struct buffer_page *reader; 2252 unsigned length; 2253 2254 reader = rb_get_reader_page(cpu_buffer); 2255 2256 /* This function should not be called when buffer is empty */ 2257 if (RB_WARN_ON(cpu_buffer, !reader)) 2258 return; 2259 2260 event = rb_reader_event(cpu_buffer); 2261 2262 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX 2263 || rb_discarded_event(event)) 2264 cpu_buffer->read++; 2265 2266 rb_update_read_stamp(cpu_buffer, event); 2267 2268 length = rb_event_length(event); 2269 cpu_buffer->reader_page->read += length; 2270 } 2271 2272 static void rb_advance_iter(struct ring_buffer_iter *iter) 2273 { 2274 struct ring_buffer *buffer; 2275 struct ring_buffer_per_cpu *cpu_buffer; 2276 struct ring_buffer_event *event; 2277 unsigned length; 2278 2279 cpu_buffer = iter->cpu_buffer; 2280 buffer = cpu_buffer->buffer; 2281 2282 /* 2283 * Check if we are at the end of the buffer. 
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written, or from discarded
	 * commits. The most that we can have is the number on a single page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		rb_advance_reader(cpu_buffer);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered.
	 * We can get multiple timestamps by nested interrupts or also
	 * if filtering is on (discarding commits). Since discarding
	 * commits can be frequent we can get a lot of timestamps.
	 * But we limit them by not adding timestamps if they begin
	 * at the start of a page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_buffer_peek(buffer, cpu, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
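
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * peeking at the next event on a CPU without consuming it. "buffer" and
 * "cpu" are assumed to come from the caller.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event)
 *		pr_debug("next event: %u bytes at %llu\n",
 *			 ring_buffer_event_length(event), ts);
 */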

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to read from
 * @ts: The timestamp of the event that is read
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (!event)
		goto out_unlock;

	rb_advance_reader(cpu_buffer);

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
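
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * draining one CPU buffer with the consuming read interface defined above.
 * "handle_event" is a hypothetical callback used only for the example.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		handle_event(ring_buffer_event_data(event),
 *			     ring_buffer_event_length(event), ts);
 *
 * The loop ends once the CPU buffer is empty; a producer that keeps
 * writing will simply make later calls return new events.
 */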

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->nmi_dropped = 0;
	cpu_buffer->commit_overrun = 0;
	cpu_buffer->overrun = 0;
	cpu_buffer->read = 0;
	local_set(&cpu_buffer->entries, 0);

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
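
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a non consuming walk of one CPU buffer using the iterator API defined
 * above. Recording on that CPU stays disabled for the duration of the
 * iteration by ring_buffer_read_start(). "handle_event" is a hypothetical
 * callback.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		handle_event(ring_buffer_event_data(event),
 *			     ring_buffer_event_length(event), ts);
 *
 *	ring_buffer_read_finish(iter);
 *
 * Calling ring_buffer_reset(buffer) afterwards would discard everything
 * that was just walked, which can be useful between test runs.
 */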

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	ret = rb_per_cpu_empty(cpu_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	ret = 0;
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
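
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * taking a per CPU "snapshot" by swapping the live buffer with a spare
 * one, in the style of the latency tracers. "max_buffer" is a hypothetical
 * second ring buffer of the same size, and "read_snapshot" a hypothetical
 * helper.
 *
 *	ret = ring_buffer_swap_cpu(max_buffer, buffer, cpu);
 *	if (!ret)
 *		read_snapshot(max_buffer, cpu);
 *
 * A non zero return (-EINVAL or -EAGAIN) means the buffers differed in
 * size or recording was disabled, and nothing was swapped.
 */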

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return the data unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += local_read(&reader->entries);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}
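
/*
 * Illustrative note (added for clarity, not part of the original file):
 * rb_simple_read() and rb_simple_write() back the "tracing_on" file created
 * below, so with debugfs mounted in the usual place the global switch can
 * be flipped from user space, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *	cat /sys/kernel/debug/tracing/tracing_on
 *
 * Once the DISABLED bit has been set by tracing_off_permanent(), reads
 * report "permanently disabled" and writes can no longer re-enable
 * recording.
 */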

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};


static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpu_isset(cpu, *buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpu_set(cpu, *buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif