1 /* 2 * Generic ring buffer 3 * 4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 5 */ 6 #include <linux/ring_buffer.h> 7 #include <linux/trace_clock.h> 8 #include <linux/spinlock.h> 9 #include <linux/debugfs.h> 10 #include <linux/uaccess.h> 11 #include <linux/hardirq.h> 12 #include <linux/kmemcheck.h> 13 #include <linux/module.h> 14 #include <linux/percpu.h> 15 #include <linux/mutex.h> 16 #include <linux/slab.h> 17 #include <linux/init.h> 18 #include <linux/hash.h> 19 #include <linux/list.h> 20 #include <linux/cpu.h> 21 #include <linux/fs.h> 22 23 #include <asm/local.h> 24 #include "trace.h" 25 26 static void update_pages_handler(struct work_struct *work); 27 28 /* 29 * The ring buffer header is special. We must manually up keep it. 30 */ 31 int ring_buffer_print_entry_header(struct trace_seq *s) 32 { 33 int ret; 34 35 ret = trace_seq_printf(s, "# compressed entry header\n"); 36 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); 37 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); 38 ret = trace_seq_printf(s, "\tarray : 32 bits\n"); 39 ret = trace_seq_printf(s, "\n"); 40 ret = trace_seq_printf(s, "\tpadding : type == %d\n", 41 RINGBUF_TYPE_PADDING); 42 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", 43 RINGBUF_TYPE_TIME_EXTEND); 44 ret = trace_seq_printf(s, "\tdata max type_len == %d\n", 45 RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 46 47 return ret; 48 } 49 50 /* 51 * The ring buffer is made up of a list of pages. A separate list of pages is 52 * allocated for each CPU. A writer may only write to a buffer that is 53 * associated with the CPU it is currently executing on. A reader may read 54 * from any per cpu buffer. 55 * 56 * The reader is special. For each per cpu buffer, the reader has its own 57 * reader page. When a reader has read the entire reader page, this reader 58 * page is swapped with another page in the ring buffer. 59 * 60 * Now, as long as the writer is off the reader page, the reader can do what 61 * ever it wants with that page. The writer will never write to that page 62 * again (as long as it is out of the ring buffer). 63 * 64 * Here's some silly ASCII art. 65 * 66 * +------+ 67 * |reader| RING BUFFER 68 * |page | 69 * +------+ +---+ +---+ +---+ 70 * | |-->| |-->| | 71 * +---+ +---+ +---+ 72 * ^ | 73 * | | 74 * +---------------+ 75 * 76 * 77 * +------+ 78 * |reader| RING BUFFER 79 * |page |------------------v 80 * +------+ +---+ +---+ +---+ 81 * | |-->| |-->| | 82 * +---+ +---+ +---+ 83 * ^ | 84 * | | 85 * +---------------+ 86 * 87 * 88 * +------+ 89 * |reader| RING BUFFER 90 * |page |------------------v 91 * +------+ +---+ +---+ +---+ 92 * ^ | |-->| |-->| | 93 * | +---+ +---+ +---+ 94 * | | 95 * | | 96 * +------------------------------+ 97 * 98 * 99 * +------+ 100 * |buffer| RING BUFFER 101 * |page |------------------v 102 * +------+ +---+ +---+ +---+ 103 * ^ | | | |-->| | 104 * | New +---+ +---+ +---+ 105 * | Reader------^ | 106 * | page | 107 * +------------------------------+ 108 * 109 * 110 * After we make this swap, the reader can hand this page off to the splice 111 * code and be done with it. It can even allocate a new page if it needs to 112 * and swap that into the ring buffer. 113 * 114 * We will be using cmpxchg soon to make all this lockless. 115 * 116 */ 117 118 /* 119 * A fast way to enable or disable all ring buffers is to 120 * call tracing_on or tracing_off. Turning off the ring buffers 121 * prevents all ring buffers from being recorded to. 
122 * Turning this switch on, makes it OK to write to the 123 * ring buffer, if the ring buffer is enabled itself. 124 * 125 * There's three layers that must be on in order to write 126 * to the ring buffer. 127 * 128 * 1) This global flag must be set. 129 * 2) The ring buffer must be enabled for recording. 130 * 3) The per cpu buffer must be enabled for recording. 131 * 132 * In case of an anomaly, this global flag has a bit set that 133 * will permantly disable all ring buffers. 134 */ 135 136 /* 137 * Global flag to disable all recording to ring buffers 138 * This has two bits: ON, DISABLED 139 * 140 * ON DISABLED 141 * ---- ---------- 142 * 0 0 : ring buffers are off 143 * 1 0 : ring buffers are on 144 * X 1 : ring buffers are permanently disabled 145 */ 146 147 enum { 148 RB_BUFFERS_ON_BIT = 0, 149 RB_BUFFERS_DISABLED_BIT = 1, 150 }; 151 152 enum { 153 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, 154 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, 155 }; 156 157 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 158 159 /* Used for individual buffers (after the counter) */ 160 #define RB_BUFFER_OFF (1 << 20) 161 162 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 163 164 /** 165 * tracing_off_permanent - permanently disable ring buffers 166 * 167 * This function, once called, will disable all ring buffers 168 * permanently. 169 */ 170 void tracing_off_permanent(void) 171 { 172 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 173 } 174 175 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 176 #define RB_ALIGNMENT 4U 177 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 178 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 179 180 #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 181 # define RB_FORCE_8BYTE_ALIGNMENT 0 182 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT 183 #else 184 # define RB_FORCE_8BYTE_ALIGNMENT 1 185 # define RB_ARCH_ALIGNMENT 8U 186 #endif 187 188 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 189 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 190 191 enum { 192 RB_LEN_TIME_EXTEND = 8, 193 RB_LEN_TIME_STAMP = 16, 194 }; 195 196 #define skip_time_extend(event) \ 197 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) 198 199 static inline int rb_null_event(struct ring_buffer_event *event) 200 { 201 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; 202 } 203 204 static void rb_event_set_padding(struct ring_buffer_event *event) 205 { 206 /* padding has a NULL time_delta */ 207 event->type_len = RINGBUF_TYPE_PADDING; 208 event->time_delta = 0; 209 } 210 211 static unsigned 212 rb_event_data_length(struct ring_buffer_event *event) 213 { 214 unsigned length; 215 216 if (event->type_len) 217 length = event->type_len * RB_ALIGNMENT; 218 else 219 length = event->array[0]; 220 return length + RB_EVNT_HDR_SIZE; 221 } 222 223 /* 224 * Return the length of the given event. Will return 225 * the length of the time extend if the event is a 226 * time extend. 
227 */ 228 static inline unsigned 229 rb_event_length(struct ring_buffer_event *event) 230 { 231 switch (event->type_len) { 232 case RINGBUF_TYPE_PADDING: 233 if (rb_null_event(event)) 234 /* undefined */ 235 return -1; 236 return event->array[0] + RB_EVNT_HDR_SIZE; 237 238 case RINGBUF_TYPE_TIME_EXTEND: 239 return RB_LEN_TIME_EXTEND; 240 241 case RINGBUF_TYPE_TIME_STAMP: 242 return RB_LEN_TIME_STAMP; 243 244 case RINGBUF_TYPE_DATA: 245 return rb_event_data_length(event); 246 default: 247 BUG(); 248 } 249 /* not hit */ 250 return 0; 251 } 252 253 /* 254 * Return total length of time extend and data, 255 * or just the event length for all other events. 256 */ 257 static inline unsigned 258 rb_event_ts_length(struct ring_buffer_event *event) 259 { 260 unsigned len = 0; 261 262 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 263 /* time extends include the data event after it */ 264 len = RB_LEN_TIME_EXTEND; 265 event = skip_time_extend(event); 266 } 267 return len + rb_event_length(event); 268 } 269 270 /** 271 * ring_buffer_event_length - return the length of the event 272 * @event: the event to get the length of 273 * 274 * Returns the size of the data load of a data event. 275 * If the event is something other than a data event, it 276 * returns the size of the event itself. With the exception 277 * of a TIME EXTEND, where it still returns the size of the 278 * data load of the data event after it. 279 */ 280 unsigned ring_buffer_event_length(struct ring_buffer_event *event) 281 { 282 unsigned length; 283 284 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 285 event = skip_time_extend(event); 286 287 length = rb_event_length(event); 288 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 289 return length; 290 length -= RB_EVNT_HDR_SIZE; 291 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 292 length -= sizeof(event->array[0]); 293 return length; 294 } 295 EXPORT_SYMBOL_GPL(ring_buffer_event_length); 296 297 /* inline for ring buffer fast paths */ 298 static void * 299 rb_event_data(struct ring_buffer_event *event) 300 { 301 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 302 event = skip_time_extend(event); 303 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 304 /* If length is in len field, then array[0] has the data */ 305 if (event->type_len) 306 return (void *)&event->array[0]; 307 /* Otherwise length is in array[0] and array[1] has the data */ 308 return (void *)&event->array[1]; 309 } 310 311 /** 312 * ring_buffer_event_data - return the data of the event 313 * @event: the event to get the data from 314 */ 315 void *ring_buffer_event_data(struct ring_buffer_event *event) 316 { 317 return rb_event_data(event); 318 } 319 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 320 321 #define for_each_buffer_cpu(buffer, cpu) \ 322 for_each_cpu(cpu, buffer->cpumask) 323 324 #define TS_SHIFT 27 325 #define TS_MASK ((1ULL << TS_SHIFT) - 1) 326 #define TS_DELTA_TEST (~TS_MASK) 327 328 /* Flag when events were overwritten */ 329 #define RB_MISSED_EVENTS (1 << 31) 330 /* Missed count stored at end */ 331 #define RB_MISSED_STORED (1 << 30) 332 333 struct buffer_data_page { 334 u64 time_stamp; /* page time stamp */ 335 local_t commit; /* write committed index */ 336 unsigned char data[]; /* data of buffer page */ 337 }; 338 339 /* 340 * Note, the buffer_page list must be first. The buffer pages 341 * are allocated in cache lines, which means that each buffer 342 * page will be at the beginning of a cache line, and thus 343 * the least significant bits will be zero. 
We use this to 344 * add flags in the list struct pointers, to make the ring buffer 345 * lockless. 346 */ 347 struct buffer_page { 348 struct list_head list; /* list of buffer pages */ 349 local_t write; /* index for next write */ 350 unsigned read; /* index for next read */ 351 local_t entries; /* entries on this page */ 352 unsigned long real_end; /* real end of data */ 353 struct buffer_data_page *page; /* Actual data page */ 354 }; 355 356 /* 357 * The buffer page counters, write and entries, must be reset 358 * atomically when crossing page boundaries. To synchronize this 359 * update, two counters are inserted into the number. One is 360 * the actual counter for the write position or count on the page. 361 * 362 * The other is a counter of updaters. Before an update happens 363 * the update partition of the counter is incremented. This will 364 * allow the updater to update the counter atomically. 365 * 366 * The counter is 20 bits, and the state data is 12. 367 */ 368 #define RB_WRITE_MASK 0xfffff 369 #define RB_WRITE_INTCNT (1 << 20) 370 371 static void rb_init_page(struct buffer_data_page *bpage) 372 { 373 local_set(&bpage->commit, 0); 374 } 375 376 /** 377 * ring_buffer_page_len - the size of data on the page. 378 * @page: The page to read 379 * 380 * Returns the amount of data on the page, including buffer page header. 381 */ 382 size_t ring_buffer_page_len(void *page) 383 { 384 return local_read(&((struct buffer_data_page *)page)->commit) 385 + BUF_PAGE_HDR_SIZE; 386 } 387 388 /* 389 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 390 * this issue out. 391 */ 392 static void free_buffer_page(struct buffer_page *bpage) 393 { 394 free_page((unsigned long)bpage->page); 395 kfree(bpage); 396 } 397 398 /* 399 * We need to fit the time_stamp delta into 27 bits. 400 */ 401 static inline int test_time_stamp(u64 delta) 402 { 403 if (delta & TS_DELTA_TEST) 404 return 1; 405 return 0; 406 } 407 408 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) 409 410 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ 411 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 412 413 int ring_buffer_print_page_header(struct trace_seq *s) 414 { 415 struct buffer_data_page field; 416 int ret; 417 418 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" 419 "offset:0;\tsize:%u;\tsigned:%u;\n", 420 (unsigned int)sizeof(field.time_stamp), 421 (unsigned int)is_signed_type(u64)); 422 423 ret = trace_seq_printf(s, "\tfield: local_t commit;\t" 424 "offset:%u;\tsize:%u;\tsigned:%u;\n", 425 (unsigned int)offsetof(typeof(field), commit), 426 (unsigned int)sizeof(field.commit), 427 (unsigned int)is_signed_type(long)); 428 429 ret = trace_seq_printf(s, "\tfield: int overwrite;\t" 430 "offset:%u;\tsize:%u;\tsigned:%u;\n", 431 (unsigned int)offsetof(typeof(field), commit), 432 1, 433 (unsigned int)is_signed_type(long)); 434 435 ret = trace_seq_printf(s, "\tfield: char data;\t" 436 "offset:%u;\tsize:%u;\tsigned:%u;\n", 437 (unsigned int)offsetof(typeof(field), data), 438 (unsigned int)BUF_PAGE_SIZE, 439 (unsigned int)is_signed_type(char)); 440 441 return ret; 442 } 443 444 /* 445 * head_page == tail_page && head == tail then buffer is empty. 
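 *
 * (In other words: the reader has caught up with the writer; head_page
 * and tail_page are the same buffer page and the read index equals the
 * committed write index on it, so there is nothing left to consume.)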
446 */ 447 struct ring_buffer_per_cpu { 448 int cpu; 449 atomic_t record_disabled; 450 struct ring_buffer *buffer; 451 raw_spinlock_t reader_lock; /* serialize readers */ 452 arch_spinlock_t lock; 453 struct lock_class_key lock_key; 454 unsigned int nr_pages; 455 struct list_head *pages; 456 struct buffer_page *head_page; /* read from head */ 457 struct buffer_page *tail_page; /* write to tail */ 458 struct buffer_page *commit_page; /* committed pages */ 459 struct buffer_page *reader_page; 460 unsigned long lost_events; 461 unsigned long last_overrun; 462 local_t entries_bytes; 463 local_t commit_overrun; 464 local_t overrun; 465 local_t entries; 466 local_t committing; 467 local_t commits; 468 unsigned long read; 469 unsigned long read_bytes; 470 u64 write_stamp; 471 u64 read_stamp; 472 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 473 int nr_pages_to_update; 474 struct list_head new_pages; /* new pages to add */ 475 struct work_struct update_pages_work; 476 struct completion update_done; 477 }; 478 479 struct ring_buffer { 480 unsigned flags; 481 int cpus; 482 atomic_t record_disabled; 483 atomic_t resize_disabled; 484 cpumask_var_t cpumask; 485 486 struct lock_class_key *reader_lock_key; 487 488 struct mutex mutex; 489 490 struct ring_buffer_per_cpu **buffers; 491 492 #ifdef CONFIG_HOTPLUG_CPU 493 struct notifier_block cpu_notify; 494 #endif 495 u64 (*clock)(void); 496 }; 497 498 struct ring_buffer_iter { 499 struct ring_buffer_per_cpu *cpu_buffer; 500 unsigned long head; 501 struct buffer_page *head_page; 502 struct buffer_page *cache_reader_page; 503 unsigned long cache_read; 504 u64 read_stamp; 505 }; 506 507 /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 508 #define RB_WARN_ON(b, cond) \ 509 ({ \ 510 int _____ret = unlikely(cond); \ 511 if (_____ret) { \ 512 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ 513 struct ring_buffer_per_cpu *__b = \ 514 (void *)b; \ 515 atomic_inc(&__b->buffer->record_disabled); \ 516 } else \ 517 atomic_inc(&b->record_disabled); \ 518 WARN_ON(1); \ 519 } \ 520 _____ret; \ 521 }) 522 523 /* Up this if you want to test the TIME_EXTENTS and normalization */ 524 #define DEBUG_SHIFT 0 525 526 static inline u64 rb_time_stamp(struct ring_buffer *buffer) 527 { 528 /* shift to debug/test normalization and TIME_EXTENTS */ 529 return buffer->clock() << DEBUG_SHIFT; 530 } 531 532 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) 533 { 534 u64 time; 535 536 preempt_disable_notrace(); 537 time = rb_time_stamp(buffer); 538 preempt_enable_no_resched_notrace(); 539 540 return time; 541 } 542 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 543 544 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 545 int cpu, u64 *ts) 546 { 547 /* Just stupid testing the normalize function and deltas */ 548 *ts >>= DEBUG_SHIFT; 549 } 550 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 551 552 /* 553 * Making the ring buffer lockless makes things tricky. 554 * Although writes only happen on the CPU that they are on, 555 * and they only need to worry about interrupts. Reads can 556 * happen on any CPU. 557 * 558 * The reader page is always off the ring buffer, but when the 559 * reader finishes with a page, it needs to swap its page with 560 * a new one from the buffer. The reader needs to take from 561 * the head (writes go to the tail). But if a writer is in overwrite 562 * mode and wraps, it must push the head page forward. 563 * 564 * Here lies the problem. 
565 * 566 * The reader must be careful to replace only the head page, and 567 * not another one. As described at the top of the file in the 568 * ASCII art, the reader sets its old page to point to the next 569 * page after head. It then sets the page after head to point to 570 * the old reader page. But if the writer moves the head page 571 * during this operation, the reader could end up with the tail. 572 * 573 * We use cmpxchg to help prevent this race. We also do something 574 * special with the page before head. We set the LSB to 1. 575 * 576 * When the writer must push the page forward, it will clear the 577 * bit that points to the head page, move the head, and then set 578 * the bit that points to the new head page. 579 * 580 * We also don't want an interrupt coming in and moving the head 581 * page on another writer. Thus we use the second LSB to catch 582 * that too. Thus: 583 * 584 * head->list->prev->next bit 1 bit 0 585 * ------- ------- 586 * Normal page 0 0 587 * Points to head page 0 1 588 * New head page 1 0 589 * 590 * Note we can not trust the prev pointer of the head page, because: 591 * 592 * +----+ +-----+ +-----+ 593 * | |------>| T |---X--->| N | 594 * | |<------| | | | 595 * +----+ +-----+ +-----+ 596 * ^ ^ | 597 * | +-----+ | | 598 * +----------| R |----------+ | 599 * | |<-----------+ 600 * +-----+ 601 * 602 * Key: ---X--> HEAD flag set in pointer 603 * T Tail page 604 * R Reader page 605 * N Next page 606 * 607 * (see __rb_reserve_next() to see where this happens) 608 * 609 * What the above shows is that the reader just swapped out 610 * the reader page with a page in the buffer, but before it 611 * could make the new header point back to the new page added 612 * it was preempted by a writer. The writer moved forward onto 613 * the new page added by the reader and is about to move forward 614 * again. 615 * 616 * You can see, it is legitimate for the previous pointer of 617 * the head (or any page) not to point back to itself. But only 618 * temporarially. 619 */ 620 621 #define RB_PAGE_NORMAL 0UL 622 #define RB_PAGE_HEAD 1UL 623 #define RB_PAGE_UPDATE 2UL 624 625 626 #define RB_FLAG_MASK 3UL 627 628 /* PAGE_MOVED is not part of the mask */ 629 #define RB_PAGE_MOVED 4UL 630 631 /* 632 * rb_list_head - remove any bit 633 */ 634 static struct list_head *rb_list_head(struct list_head *list) 635 { 636 unsigned long val = (unsigned long)list; 637 638 return (struct list_head *)(val & ~RB_FLAG_MASK); 639 } 640 641 /* 642 * rb_is_head_page - test if the given page is the head page 643 * 644 * Because the reader may move the head_page pointer, we can 645 * not trust what the head page is (it may be pointing to 646 * the reader page). But if the next page is a header page, 647 * its flags will be non zero. 648 */ 649 static inline int 650 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, 651 struct buffer_page *page, struct list_head *list) 652 { 653 unsigned long val; 654 655 val = (unsigned long)list->next; 656 657 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) 658 return RB_PAGE_MOVED; 659 660 return val & RB_FLAG_MASK; 661 } 662 663 /* 664 * rb_is_reader_page 665 * 666 * The unique thing about the reader page, is that, if the 667 * writer is ever on it, the previous pointer never points 668 * back to the reader page. 
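 *
 * A rough sketch of how these flag bits ride on the list pointers
 * (the pointer value below is made up; only the low bits matter):
 *
 *      val = (unsigned long)page->list.next;   e.g. 0xffff880012345001
 *      val & ~RB_FLAG_MASK   ->  the real next page (0xffff880012345000)
 *      val &  RB_FLAG_MASK   ->  RB_PAGE_HEAD in this example
 *
 * which is what rb_list_head() and rb_is_head_page() above pick apart.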
669 */ 670 static int rb_is_reader_page(struct buffer_page *page) 671 { 672 struct list_head *list = page->list.prev; 673 674 return rb_list_head(list->next) != &page->list; 675 } 676 677 /* 678 * rb_set_list_to_head - set a list_head to be pointing to head. 679 */ 680 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, 681 struct list_head *list) 682 { 683 unsigned long *ptr; 684 685 ptr = (unsigned long *)&list->next; 686 *ptr |= RB_PAGE_HEAD; 687 *ptr &= ~RB_PAGE_UPDATE; 688 } 689 690 /* 691 * rb_head_page_activate - sets up head page 692 */ 693 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) 694 { 695 struct buffer_page *head; 696 697 head = cpu_buffer->head_page; 698 if (!head) 699 return; 700 701 /* 702 * Set the previous list pointer to have the HEAD flag. 703 */ 704 rb_set_list_to_head(cpu_buffer, head->list.prev); 705 } 706 707 static void rb_list_head_clear(struct list_head *list) 708 { 709 unsigned long *ptr = (unsigned long *)&list->next; 710 711 *ptr &= ~RB_FLAG_MASK; 712 } 713 714 /* 715 * rb_head_page_dactivate - clears head page ptr (for free list) 716 */ 717 static void 718 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) 719 { 720 struct list_head *hd; 721 722 /* Go through the whole list and clear any pointers found. */ 723 rb_list_head_clear(cpu_buffer->pages); 724 725 list_for_each(hd, cpu_buffer->pages) 726 rb_list_head_clear(hd); 727 } 728 729 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 730 struct buffer_page *head, 731 struct buffer_page *prev, 732 int old_flag, int new_flag) 733 { 734 struct list_head *list; 735 unsigned long val = (unsigned long)&head->list; 736 unsigned long ret; 737 738 list = &prev->list; 739 740 val &= ~RB_FLAG_MASK; 741 742 ret = cmpxchg((unsigned long *)&list->next, 743 val | old_flag, val | new_flag); 744 745 /* check if the reader took the page */ 746 if ((ret & ~RB_FLAG_MASK) != val) 747 return RB_PAGE_MOVED; 748 749 return ret & RB_FLAG_MASK; 750 } 751 752 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 753 struct buffer_page *head, 754 struct buffer_page *prev, 755 int old_flag) 756 { 757 return rb_head_page_set(cpu_buffer, head, prev, 758 old_flag, RB_PAGE_UPDATE); 759 } 760 761 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 762 struct buffer_page *head, 763 struct buffer_page *prev, 764 int old_flag) 765 { 766 return rb_head_page_set(cpu_buffer, head, prev, 767 old_flag, RB_PAGE_HEAD); 768 } 769 770 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 771 struct buffer_page *head, 772 struct buffer_page *prev, 773 int old_flag) 774 { 775 return rb_head_page_set(cpu_buffer, head, prev, 776 old_flag, RB_PAGE_NORMAL); 777 } 778 779 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 780 struct buffer_page **bpage) 781 { 782 struct list_head *p = rb_list_head((*bpage)->list.next); 783 784 *bpage = list_entry(p, struct buffer_page, list); 785 } 786 787 static struct buffer_page * 788 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 789 { 790 struct buffer_page *head; 791 struct buffer_page *page; 792 struct list_head *list; 793 int i; 794 795 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 796 return NULL; 797 798 /* sanity check */ 799 list = cpu_buffer->pages; 800 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 801 return NULL; 802 803 page = head = cpu_buffer->head_page; 804 /* 805 * It is possible that the writer moves the 
header behind 806 * where we started, and we miss in one loop. 807 * A second loop should grab the header, but we'll do 808 * three loops just because I'm paranoid. 809 */ 810 for (i = 0; i < 3; i++) { 811 do { 812 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { 813 cpu_buffer->head_page = page; 814 return page; 815 } 816 rb_inc_page(cpu_buffer, &page); 817 } while (page != head); 818 } 819 820 RB_WARN_ON(cpu_buffer, 1); 821 822 return NULL; 823 } 824 825 static int rb_head_page_replace(struct buffer_page *old, 826 struct buffer_page *new) 827 { 828 unsigned long *ptr = (unsigned long *)&old->list.prev->next; 829 unsigned long val; 830 unsigned long ret; 831 832 val = *ptr & ~RB_FLAG_MASK; 833 val |= RB_PAGE_HEAD; 834 835 ret = cmpxchg(ptr, val, (unsigned long)&new->list); 836 837 return ret == val; 838 } 839 840 /* 841 * rb_tail_page_update - move the tail page forward 842 * 843 * Returns 1 if moved tail page, 0 if someone else did. 844 */ 845 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 846 struct buffer_page *tail_page, 847 struct buffer_page *next_page) 848 { 849 struct buffer_page *old_tail; 850 unsigned long old_entries; 851 unsigned long old_write; 852 int ret = 0; 853 854 /* 855 * The tail page now needs to be moved forward. 856 * 857 * We need to reset the tail page, but without messing 858 * with possible erasing of data brought in by interrupts 859 * that have moved the tail page and are currently on it. 860 * 861 * We add a counter to the write field to denote this. 862 */ 863 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 864 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 865 866 /* 867 * Just make sure we have seen our old_write and synchronize 868 * with any interrupts that come in. 869 */ 870 barrier(); 871 872 /* 873 * If the tail page is still the same as what we think 874 * it is, then it is up to us to update the tail 875 * pointer. 876 */ 877 if (tail_page == cpu_buffer->tail_page) { 878 /* Zero the write counter */ 879 unsigned long val = old_write & ~RB_WRITE_MASK; 880 unsigned long eval = old_entries & ~RB_WRITE_MASK; 881 882 /* 883 * This will only succeed if an interrupt did 884 * not come in and change it. In which case, we 885 * do not want to modify it. 886 * 887 * We add (void) to let the compiler know that we do not care 888 * about the return value of these functions. We use the 889 * cmpxchg to only update if an interrupt did not already 890 * do it for us. If the cmpxchg fails, we don't care. 891 */ 892 (void)local_cmpxchg(&next_page->write, old_write, val); 893 (void)local_cmpxchg(&next_page->entries, old_entries, eval); 894 895 /* 896 * No need to worry about races with clearing out the commit. 897 * it only can increment when a commit takes place. But that 898 * only happens in the outer most nested commit. 
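 *
 * A worked example of the counter layout used above (numbers
 * invented): if old_write came back as 0x00300040, the low 20 bits
 * (RB_WRITE_MASK) say 0x40 bytes had been written to the page, and
 * the upper bits say the counter has been bumped by RB_WRITE_INTCNT
 * three times, ourselves included.  The local_cmpxchg() above then
 * only clears the 0x40 if nothing else has touched the field since.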
899 */ 900 local_set(&next_page->page->commit, 0); 901 902 old_tail = cmpxchg(&cpu_buffer->tail_page, 903 tail_page, next_page); 904 905 if (old_tail == tail_page) 906 ret = 1; 907 } 908 909 return ret; 910 } 911 912 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 913 struct buffer_page *bpage) 914 { 915 unsigned long val = (unsigned long)bpage; 916 917 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) 918 return 1; 919 920 return 0; 921 } 922 923 /** 924 * rb_check_list - make sure a pointer to a list has the last bits zero 925 */ 926 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, 927 struct list_head *list) 928 { 929 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) 930 return 1; 931 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) 932 return 1; 933 return 0; 934 } 935 936 /** 937 * check_pages - integrity check of buffer pages 938 * @cpu_buffer: CPU buffer with pages to test 939 * 940 * As a safety measure we check to make sure the data pages have not 941 * been corrupted. 942 */ 943 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 944 { 945 struct list_head *head = cpu_buffer->pages; 946 struct buffer_page *bpage, *tmp; 947 948 /* Reset the head page if it exists */ 949 if (cpu_buffer->head_page) 950 rb_set_head_page(cpu_buffer); 951 952 rb_head_page_deactivate(cpu_buffer); 953 954 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 955 return -1; 956 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 957 return -1; 958 959 if (rb_check_list(cpu_buffer, head)) 960 return -1; 961 962 list_for_each_entry_safe(bpage, tmp, head, list) { 963 if (RB_WARN_ON(cpu_buffer, 964 bpage->list.next->prev != &bpage->list)) 965 return -1; 966 if (RB_WARN_ON(cpu_buffer, 967 bpage->list.prev->next != &bpage->list)) 968 return -1; 969 if (rb_check_list(cpu_buffer, &bpage->list)) 970 return -1; 971 } 972 973 rb_head_page_activate(cpu_buffer); 974 975 return 0; 976 } 977 978 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 979 { 980 int i; 981 struct buffer_page *bpage, *tmp; 982 983 for (i = 0; i < nr_pages; i++) { 984 struct page *page; 985 /* 986 * __GFP_NORETRY flag makes sure that the allocation fails 987 * gracefully without invoking oom-killer and the system is 988 * not destabilized. 989 */ 990 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 991 GFP_KERNEL | __GFP_NORETRY, 992 cpu_to_node(cpu)); 993 if (!bpage) 994 goto free_pages; 995 996 list_add(&bpage->list, pages); 997 998 page = alloc_pages_node(cpu_to_node(cpu), 999 GFP_KERNEL | __GFP_NORETRY, 0); 1000 if (!page) 1001 goto free_pages; 1002 bpage->page = page_address(page); 1003 rb_init_page(bpage->page); 1004 } 1005 1006 return 0; 1007 1008 free_pages: 1009 list_for_each_entry_safe(bpage, tmp, pages, list) { 1010 list_del_init(&bpage->list); 1011 free_buffer_page(bpage); 1012 } 1013 1014 return -ENOMEM; 1015 } 1016 1017 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1018 unsigned nr_pages) 1019 { 1020 LIST_HEAD(pages); 1021 1022 WARN_ON(!nr_pages); 1023 1024 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) 1025 return -ENOMEM; 1026 1027 /* 1028 * The ring buffer page list is a circular list that does not 1029 * start and end with a list head. All page list items point to 1030 * other pages. 
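 *
 * So after the list_del() below, the on-stack "pages" head drops out
 * and the buffer pages only link to each other, roughly:
 *
 *      page0 -> page1 -> ... -> pageN-1 -> page0
 *
 * with cpu_buffer->pages left pointing at the first of them.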
1031 */ 1032 cpu_buffer->pages = pages.next; 1033 list_del(&pages); 1034 1035 cpu_buffer->nr_pages = nr_pages; 1036 1037 rb_check_pages(cpu_buffer); 1038 1039 return 0; 1040 } 1041 1042 static struct ring_buffer_per_cpu * 1043 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 1044 { 1045 struct ring_buffer_per_cpu *cpu_buffer; 1046 struct buffer_page *bpage; 1047 struct page *page; 1048 int ret; 1049 1050 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 1051 GFP_KERNEL, cpu_to_node(cpu)); 1052 if (!cpu_buffer) 1053 return NULL; 1054 1055 cpu_buffer->cpu = cpu; 1056 cpu_buffer->buffer = buffer; 1057 raw_spin_lock_init(&cpu_buffer->reader_lock); 1058 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1059 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1060 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 1061 init_completion(&cpu_buffer->update_done); 1062 1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1064 GFP_KERNEL, cpu_to_node(cpu)); 1065 if (!bpage) 1066 goto fail_free_buffer; 1067 1068 rb_check_bpage(cpu_buffer, bpage); 1069 1070 cpu_buffer->reader_page = bpage; 1071 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); 1072 if (!page) 1073 goto fail_free_reader; 1074 bpage->page = page_address(page); 1075 rb_init_page(bpage->page); 1076 1077 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 1078 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1079 1080 ret = rb_allocate_pages(cpu_buffer, nr_pages); 1081 if (ret < 0) 1082 goto fail_free_reader; 1083 1084 cpu_buffer->head_page 1085 = list_entry(cpu_buffer->pages, struct buffer_page, list); 1086 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 1087 1088 rb_head_page_activate(cpu_buffer); 1089 1090 return cpu_buffer; 1091 1092 fail_free_reader: 1093 free_buffer_page(cpu_buffer->reader_page); 1094 1095 fail_free_buffer: 1096 kfree(cpu_buffer); 1097 return NULL; 1098 } 1099 1100 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 1101 { 1102 struct list_head *head = cpu_buffer->pages; 1103 struct buffer_page *bpage, *tmp; 1104 1105 free_buffer_page(cpu_buffer->reader_page); 1106 1107 rb_head_page_deactivate(cpu_buffer); 1108 1109 if (head) { 1110 list_for_each_entry_safe(bpage, tmp, head, list) { 1111 list_del_init(&bpage->list); 1112 free_buffer_page(bpage); 1113 } 1114 bpage = list_entry(head, struct buffer_page, list); 1115 free_buffer_page(bpage); 1116 } 1117 1118 kfree(cpu_buffer); 1119 } 1120 1121 #ifdef CONFIG_HOTPLUG_CPU 1122 static int rb_cpu_notify(struct notifier_block *self, 1123 unsigned long action, void *hcpu); 1124 #endif 1125 1126 /** 1127 * ring_buffer_alloc - allocate a new ring_buffer 1128 * @size: the size in bytes per cpu that is needed. 1129 * @flags: attributes to set for the ring buffer. 1130 * 1131 * Currently the only flag that is available is the RB_FL_OVERWRITE 1132 * flag. This flag means that the buffer will overwrite old data 1133 * when the buffer wraps. If this flag is not set, the buffer will 1134 * drop data when the tail hits the head. 
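 *
 * A minimal usage sketch (error handling trimmed; the size and flags
 * here are just an example):
 *
 *      struct ring_buffer *rb;
 *
 *      rb = ring_buffer_alloc(4 * PAGE_SIZE, RB_FL_OVERWRITE);
 *      if (!rb)
 *              return -ENOMEM;
 *      ...
 *      ring_buffer_free(rb);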
1135 */ 1136 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 1137 struct lock_class_key *key) 1138 { 1139 struct ring_buffer *buffer; 1140 int bsize; 1141 int cpu, nr_pages; 1142 1143 /* keep it in its own cache line */ 1144 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1145 GFP_KERNEL); 1146 if (!buffer) 1147 return NULL; 1148 1149 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 1150 goto fail_free_buffer; 1151 1152 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1153 buffer->flags = flags; 1154 buffer->clock = trace_clock_local; 1155 buffer->reader_lock_key = key; 1156 1157 /* need at least two pages */ 1158 if (nr_pages < 2) 1159 nr_pages = 2; 1160 1161 /* 1162 * In case of non-hotplug cpu, if the ring-buffer is allocated 1163 * in early initcall, it will not be notified of secondary cpus. 1164 * In that off case, we need to allocate for all possible cpus. 1165 */ 1166 #ifdef CONFIG_HOTPLUG_CPU 1167 get_online_cpus(); 1168 cpumask_copy(buffer->cpumask, cpu_online_mask); 1169 #else 1170 cpumask_copy(buffer->cpumask, cpu_possible_mask); 1171 #endif 1172 buffer->cpus = nr_cpu_ids; 1173 1174 bsize = sizeof(void *) * nr_cpu_ids; 1175 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 1176 GFP_KERNEL); 1177 if (!buffer->buffers) 1178 goto fail_free_cpumask; 1179 1180 for_each_buffer_cpu(buffer, cpu) { 1181 buffer->buffers[cpu] = 1182 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 1183 if (!buffer->buffers[cpu]) 1184 goto fail_free_buffers; 1185 } 1186 1187 #ifdef CONFIG_HOTPLUG_CPU 1188 buffer->cpu_notify.notifier_call = rb_cpu_notify; 1189 buffer->cpu_notify.priority = 0; 1190 register_cpu_notifier(&buffer->cpu_notify); 1191 #endif 1192 1193 put_online_cpus(); 1194 mutex_init(&buffer->mutex); 1195 1196 return buffer; 1197 1198 fail_free_buffers: 1199 for_each_buffer_cpu(buffer, cpu) { 1200 if (buffer->buffers[cpu]) 1201 rb_free_cpu_buffer(buffer->buffers[cpu]); 1202 } 1203 kfree(buffer->buffers); 1204 1205 fail_free_cpumask: 1206 free_cpumask_var(buffer->cpumask); 1207 put_online_cpus(); 1208 1209 fail_free_buffer: 1210 kfree(buffer); 1211 return NULL; 1212 } 1213 EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 1214 1215 /** 1216 * ring_buffer_free - free a ring buffer. 1217 * @buffer: the buffer to free. 
1218 */ 1219 void 1220 ring_buffer_free(struct ring_buffer *buffer) 1221 { 1222 int cpu; 1223 1224 get_online_cpus(); 1225 1226 #ifdef CONFIG_HOTPLUG_CPU 1227 unregister_cpu_notifier(&buffer->cpu_notify); 1228 #endif 1229 1230 for_each_buffer_cpu(buffer, cpu) 1231 rb_free_cpu_buffer(buffer->buffers[cpu]); 1232 1233 put_online_cpus(); 1234 1235 kfree(buffer->buffers); 1236 free_cpumask_var(buffer->cpumask); 1237 1238 kfree(buffer); 1239 } 1240 EXPORT_SYMBOL_GPL(ring_buffer_free); 1241 1242 void ring_buffer_set_clock(struct ring_buffer *buffer, 1243 u64 (*clock)(void)) 1244 { 1245 buffer->clock = clock; 1246 } 1247 1248 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 1249 1250 static inline unsigned long rb_page_entries(struct buffer_page *bpage) 1251 { 1252 return local_read(&bpage->entries) & RB_WRITE_MASK; 1253 } 1254 1255 static inline unsigned long rb_page_write(struct buffer_page *bpage) 1256 { 1257 return local_read(&bpage->write) & RB_WRITE_MASK; 1258 } 1259 1260 static int 1261 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 1262 { 1263 struct list_head *tail_page, *to_remove, *next_page; 1264 struct buffer_page *to_remove_page, *tmp_iter_page; 1265 struct buffer_page *last_page, *first_page; 1266 unsigned int nr_removed; 1267 unsigned long head_bit; 1268 int page_entries; 1269 1270 head_bit = 0; 1271 1272 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1273 atomic_inc(&cpu_buffer->record_disabled); 1274 /* 1275 * We don't race with the readers since we have acquired the reader 1276 * lock. We also don't race with writers after disabling recording. 1277 * This makes it easy to figure out the first and the last page to be 1278 * removed from the list. We unlink all the pages in between including 1279 * the first and last pages. This is done in a busy loop so that we 1280 * lose the least number of traces. 1281 * The pages are freed after we restart recording and unlock readers. 1282 */ 1283 tail_page = &cpu_buffer->tail_page->list; 1284 1285 /* 1286 * tail page might be on reader page, we remove the next page 1287 * from the ring buffer 1288 */ 1289 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 1290 tail_page = rb_list_head(tail_page->next); 1291 to_remove = tail_page; 1292 1293 /* start of pages to remove */ 1294 first_page = list_entry(rb_list_head(to_remove->next), 1295 struct buffer_page, list); 1296 1297 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 1298 to_remove = rb_list_head(to_remove)->next; 1299 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 1300 } 1301 1302 next_page = rb_list_head(to_remove)->next; 1303 1304 /* 1305 * Now we remove all pages between tail_page and next_page. 
1306 * Make sure that we have head_bit value preserved for the 1307 * next page 1308 */ 1309 tail_page->next = (struct list_head *)((unsigned long)next_page | 1310 head_bit); 1311 next_page = rb_list_head(next_page); 1312 next_page->prev = tail_page; 1313 1314 /* make sure pages points to a valid page in the ring buffer */ 1315 cpu_buffer->pages = next_page; 1316 1317 /* update head page */ 1318 if (head_bit) 1319 cpu_buffer->head_page = list_entry(next_page, 1320 struct buffer_page, list); 1321 1322 /* 1323 * change read pointer to make sure any read iterators reset 1324 * themselves 1325 */ 1326 cpu_buffer->read = 0; 1327 1328 /* pages are removed, resume tracing and then free the pages */ 1329 atomic_dec(&cpu_buffer->record_disabled); 1330 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1331 1332 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1333 1334 /* last buffer page to remove */ 1335 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1336 list); 1337 tmp_iter_page = first_page; 1338 1339 do { 1340 to_remove_page = tmp_iter_page; 1341 rb_inc_page(cpu_buffer, &tmp_iter_page); 1342 1343 /* update the counters */ 1344 page_entries = rb_page_entries(to_remove_page); 1345 if (page_entries) { 1346 /* 1347 * If something was added to this page, it was full 1348 * since it is not the tail page. So we deduct the 1349 * bytes consumed in ring buffer from here. 1350 * Increment overrun to account for the lost events. 1351 */ 1352 local_add(page_entries, &cpu_buffer->overrun); 1353 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 1354 } 1355 1356 /* 1357 * We have already removed references to this list item, just 1358 * free up the buffer_page and its page 1359 */ 1360 free_buffer_page(to_remove_page); 1361 nr_removed--; 1362 1363 } while (to_remove_page != last_page); 1364 1365 RB_WARN_ON(cpu_buffer, nr_removed); 1366 1367 return nr_removed == 0; 1368 } 1369 1370 static int 1371 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 1372 { 1373 struct list_head *pages = &cpu_buffer->new_pages; 1374 int retries, success; 1375 1376 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1377 /* 1378 * We are holding the reader lock, so the reader page won't be swapped 1379 * in the ring buffer. Now we are racing with the writer trying to 1380 * move head page and the tail page. 1381 * We are going to adapt the reader page update process where: 1382 * 1. We first splice the start and end of list of new pages between 1383 * the head page and its previous page. 1384 * 2. We cmpxchg the prev_page->next to point from head page to the 1385 * start of new pages list. 1386 * 3. Finally, we update the head->prev to the end of new list. 1387 * 1388 * We will try this process 10 times, to make sure that we don't keep 1389 * spinning. 
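 *
 * Pictorially:
 *
 *      before:   P ---X--> H
 *      after:    P ------> N1 -> ... -> Nk ---X--> H
 *
 * Key: ---X--> pointer with the HEAD flag set (as in the diagram at
 *              the top of the file)
 *      P       page before head
 *      H       head page
 *      N1..Nk  the new pages being spliced in
 *
 * It is the cmpxchg in step 2 that can lose the race with the writer;
 * if it does, we simply try again.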
1390 */ 1391 retries = 10; 1392 success = 0; 1393 while (retries--) { 1394 struct list_head *head_page, *prev_page, *r; 1395 struct list_head *last_page, *first_page; 1396 struct list_head *head_page_with_bit; 1397 1398 head_page = &rb_set_head_page(cpu_buffer)->list; 1399 prev_page = head_page->prev; 1400 1401 first_page = pages->next; 1402 last_page = pages->prev; 1403 1404 head_page_with_bit = (struct list_head *) 1405 ((unsigned long)head_page | RB_PAGE_HEAD); 1406 1407 last_page->next = head_page_with_bit; 1408 first_page->prev = prev_page; 1409 1410 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 1411 1412 if (r == head_page_with_bit) { 1413 /* 1414 * yay, we replaced the page pointer to our new list, 1415 * now, we just have to update to head page's prev 1416 * pointer to point to end of list 1417 */ 1418 head_page->prev = last_page; 1419 success = 1; 1420 break; 1421 } 1422 } 1423 1424 if (success) 1425 INIT_LIST_HEAD(pages); 1426 /* 1427 * If we weren't successful in adding in new pages, warn and stop 1428 * tracing 1429 */ 1430 RB_WARN_ON(cpu_buffer, !success); 1431 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1432 1433 /* free pages if they weren't inserted */ 1434 if (!success) { 1435 struct buffer_page *bpage, *tmp; 1436 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1437 list) { 1438 list_del_init(&bpage->list); 1439 free_buffer_page(bpage); 1440 } 1441 } 1442 return success; 1443 } 1444 1445 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 1446 { 1447 int success; 1448 1449 if (cpu_buffer->nr_pages_to_update > 0) 1450 success = rb_insert_pages(cpu_buffer); 1451 else 1452 success = rb_remove_pages(cpu_buffer, 1453 -cpu_buffer->nr_pages_to_update); 1454 1455 if (success) 1456 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 1457 } 1458 1459 static void update_pages_handler(struct work_struct *work) 1460 { 1461 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 1462 struct ring_buffer_per_cpu, update_pages_work); 1463 rb_update_pages(cpu_buffer); 1464 complete(&cpu_buffer->update_done); 1465 } 1466 1467 /** 1468 * ring_buffer_resize - resize the ring buffer 1469 * @buffer: the buffer to resize. 1470 * @size: the new size. 1471 * 1472 * Minimum size is 2 * BUF_PAGE_SIZE. 1473 * 1474 * Returns 0 on success and < 0 on failure. 1475 */ 1476 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, 1477 int cpu_id) 1478 { 1479 struct ring_buffer_per_cpu *cpu_buffer; 1480 unsigned nr_pages; 1481 int cpu, err = 0; 1482 1483 /* 1484 * Always succeed at resizing a non-existent buffer: 1485 */ 1486 if (!buffer) 1487 return size; 1488 1489 /* Make sure the requested buffer exists */ 1490 if (cpu_id != RING_BUFFER_ALL_CPUS && 1491 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 1492 return size; 1493 1494 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1495 size *= BUF_PAGE_SIZE; 1496 1497 /* we need a minimum of two pages */ 1498 if (size < BUF_PAGE_SIZE * 2) 1499 size = BUF_PAGE_SIZE * 2; 1500 1501 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1502 1503 /* 1504 * Don't succeed if resizing is disabled, as a reader might be 1505 * manipulating the ring buffer and is expecting a sane state while 1506 * this is true. 
1507 */ 1508 if (atomic_read(&buffer->resize_disabled)) 1509 return -EBUSY; 1510 1511 /* prevent another thread from changing buffer sizes */ 1512 mutex_lock(&buffer->mutex); 1513 1514 if (cpu_id == RING_BUFFER_ALL_CPUS) { 1515 /* calculate the pages to update */ 1516 for_each_buffer_cpu(buffer, cpu) { 1517 cpu_buffer = buffer->buffers[cpu]; 1518 1519 cpu_buffer->nr_pages_to_update = nr_pages - 1520 cpu_buffer->nr_pages; 1521 /* 1522 * nothing more to do for removing pages or no update 1523 */ 1524 if (cpu_buffer->nr_pages_to_update <= 0) 1525 continue; 1526 /* 1527 * to add pages, make sure all new pages can be 1528 * allocated without receiving ENOMEM 1529 */ 1530 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1531 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, 1532 &cpu_buffer->new_pages, cpu)) { 1533 /* not enough memory for new pages */ 1534 err = -ENOMEM; 1535 goto out_err; 1536 } 1537 } 1538 1539 get_online_cpus(); 1540 /* 1541 * Fire off all the required work handlers 1542 * We can't schedule on offline CPUs, but it's not necessary 1543 * since we can change their buffer sizes without any race. 1544 */ 1545 for_each_buffer_cpu(buffer, cpu) { 1546 cpu_buffer = buffer->buffers[cpu]; 1547 if (!cpu_buffer->nr_pages_to_update) 1548 continue; 1549 1550 if (cpu_online(cpu)) 1551 schedule_work_on(cpu, 1552 &cpu_buffer->update_pages_work); 1553 else 1554 rb_update_pages(cpu_buffer); 1555 } 1556 1557 /* wait for all the updates to complete */ 1558 for_each_buffer_cpu(buffer, cpu) { 1559 cpu_buffer = buffer->buffers[cpu]; 1560 if (!cpu_buffer->nr_pages_to_update) 1561 continue; 1562 1563 if (cpu_online(cpu)) 1564 wait_for_completion(&cpu_buffer->update_done); 1565 cpu_buffer->nr_pages_to_update = 0; 1566 } 1567 1568 put_online_cpus(); 1569 } else { 1570 /* Make sure this CPU has been intitialized */ 1571 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) 1572 goto out; 1573 1574 cpu_buffer = buffer->buffers[cpu_id]; 1575 1576 if (nr_pages == cpu_buffer->nr_pages) 1577 goto out; 1578 1579 cpu_buffer->nr_pages_to_update = nr_pages - 1580 cpu_buffer->nr_pages; 1581 1582 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1583 if (cpu_buffer->nr_pages_to_update > 0 && 1584 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, 1585 &cpu_buffer->new_pages, cpu_id)) { 1586 err = -ENOMEM; 1587 goto out_err; 1588 } 1589 1590 get_online_cpus(); 1591 1592 if (cpu_online(cpu_id)) { 1593 schedule_work_on(cpu_id, 1594 &cpu_buffer->update_pages_work); 1595 wait_for_completion(&cpu_buffer->update_done); 1596 } else 1597 rb_update_pages(cpu_buffer); 1598 1599 cpu_buffer->nr_pages_to_update = 0; 1600 put_online_cpus(); 1601 } 1602 1603 out: 1604 /* 1605 * The ring buffer resize can happen with the ring buffer 1606 * enabled, so that the update disturbs the tracing as little 1607 * as possible. But if the buffer is disabled, we do not need 1608 * to worry about that, and we can take the time to verify 1609 * that the buffer is not corrupt. 1610 */ 1611 if (atomic_read(&buffer->record_disabled)) { 1612 atomic_inc(&buffer->record_disabled); 1613 /* 1614 * Even though the buffer was disabled, we must make sure 1615 * that it is truly disabled before calling rb_check_pages. 1616 * There could have been a race between checking 1617 * record_disable and incrementing it. 
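 *
 * (Writers check record_disabled while they run with preemption off,
 * so the synchronize_sched() below is what guarantees that any writer
 * which saw the old value has finished before we walk the pages.)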
1618 */ 1619 synchronize_sched(); 1620 for_each_buffer_cpu(buffer, cpu) { 1621 cpu_buffer = buffer->buffers[cpu]; 1622 rb_check_pages(cpu_buffer); 1623 } 1624 atomic_dec(&buffer->record_disabled); 1625 } 1626 1627 mutex_unlock(&buffer->mutex); 1628 return size; 1629 1630 out_err: 1631 for_each_buffer_cpu(buffer, cpu) { 1632 struct buffer_page *bpage, *tmp; 1633 1634 cpu_buffer = buffer->buffers[cpu]; 1635 cpu_buffer->nr_pages_to_update = 0; 1636 1637 if (list_empty(&cpu_buffer->new_pages)) 1638 continue; 1639 1640 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1641 list) { 1642 list_del_init(&bpage->list); 1643 free_buffer_page(bpage); 1644 } 1645 } 1646 mutex_unlock(&buffer->mutex); 1647 return err; 1648 } 1649 EXPORT_SYMBOL_GPL(ring_buffer_resize); 1650 1651 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) 1652 { 1653 mutex_lock(&buffer->mutex); 1654 if (val) 1655 buffer->flags |= RB_FL_OVERWRITE; 1656 else 1657 buffer->flags &= ~RB_FL_OVERWRITE; 1658 mutex_unlock(&buffer->mutex); 1659 } 1660 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1661 1662 static inline void * 1663 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 1664 { 1665 return bpage->data + index; 1666 } 1667 1668 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 1669 { 1670 return bpage->page->data + index; 1671 } 1672 1673 static inline struct ring_buffer_event * 1674 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 1675 { 1676 return __rb_page_index(cpu_buffer->reader_page, 1677 cpu_buffer->reader_page->read); 1678 } 1679 1680 static inline struct ring_buffer_event * 1681 rb_iter_head_event(struct ring_buffer_iter *iter) 1682 { 1683 return __rb_page_index(iter->head_page, iter->head); 1684 } 1685 1686 static inline unsigned rb_page_commit(struct buffer_page *bpage) 1687 { 1688 return local_read(&bpage->page->commit); 1689 } 1690 1691 /* Size is determined by what has been committed */ 1692 static inline unsigned rb_page_size(struct buffer_page *bpage) 1693 { 1694 return rb_page_commit(bpage); 1695 } 1696 1697 static inline unsigned 1698 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 1699 { 1700 return rb_page_commit(cpu_buffer->commit_page); 1701 } 1702 1703 static inline unsigned 1704 rb_event_index(struct ring_buffer_event *event) 1705 { 1706 unsigned long addr = (unsigned long)event; 1707 1708 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 1709 } 1710 1711 static inline int 1712 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 1713 struct ring_buffer_event *event) 1714 { 1715 unsigned long addr = (unsigned long)event; 1716 unsigned long index; 1717 1718 index = rb_event_index(event); 1719 addr &= PAGE_MASK; 1720 1721 return cpu_buffer->commit_page->page == (void *)addr && 1722 rb_commit_index(cpu_buffer) == index; 1723 } 1724 1725 static void 1726 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1727 { 1728 unsigned long max_count; 1729 1730 /* 1731 * We only race with interrupts and NMIs on this CPU. 1732 * If we own the commit event, then we can commit 1733 * all others that interrupted us, since the interruptions 1734 * are in stack format (they finish before they come 1735 * back to us). This allows us to do a simple loop to 1736 * assign the commit to the tail. 
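 *
 * A concrete (made up) example: a task reserves event A, an interrupt
 * on the same CPU then reserves and commits B, and an NMI on top of
 * that reserves and commits C.  B and C only advance the write
 * position; the commit position stays at A.  When the task finally
 * commits A, the loops below walk the commit page and commit index
 * forward until they catch up with everything written, B and C
 * included.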
1737 */ 1738 again: 1739 max_count = cpu_buffer->nr_pages * 100; 1740 1741 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 1742 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 1743 return; 1744 if (RB_WARN_ON(cpu_buffer, 1745 rb_is_reader_page(cpu_buffer->tail_page))) 1746 return; 1747 local_set(&cpu_buffer->commit_page->page->commit, 1748 rb_page_write(cpu_buffer->commit_page)); 1749 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1750 cpu_buffer->write_stamp = 1751 cpu_buffer->commit_page->page->time_stamp; 1752 /* add barrier to keep gcc from optimizing too much */ 1753 barrier(); 1754 } 1755 while (rb_commit_index(cpu_buffer) != 1756 rb_page_write(cpu_buffer->commit_page)) { 1757 1758 local_set(&cpu_buffer->commit_page->page->commit, 1759 rb_page_write(cpu_buffer->commit_page)); 1760 RB_WARN_ON(cpu_buffer, 1761 local_read(&cpu_buffer->commit_page->page->commit) & 1762 ~RB_WRITE_MASK); 1763 barrier(); 1764 } 1765 1766 /* again, keep gcc from optimizing */ 1767 barrier(); 1768 1769 /* 1770 * If an interrupt came in just after the first while loop 1771 * and pushed the tail page forward, we will be left with 1772 * a dangling commit that will never go forward. 1773 */ 1774 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) 1775 goto again; 1776 } 1777 1778 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 1779 { 1780 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; 1781 cpu_buffer->reader_page->read = 0; 1782 } 1783 1784 static void rb_inc_iter(struct ring_buffer_iter *iter) 1785 { 1786 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1787 1788 /* 1789 * The iterator could be on the reader page (it starts there). 1790 * But the head could have moved, since the reader was 1791 * found. Check for this case and assign the iterator 1792 * to the head page instead of next. 1793 */ 1794 if (iter->head_page == cpu_buffer->reader_page) 1795 iter->head_page = rb_set_head_page(cpu_buffer); 1796 else 1797 rb_inc_page(cpu_buffer, &iter->head_page); 1798 1799 iter->read_stamp = iter->head_page->page->time_stamp; 1800 iter->head = 0; 1801 } 1802 1803 /* Slow path, do not inline */ 1804 static noinline struct ring_buffer_event * 1805 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) 1806 { 1807 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 1808 1809 /* Not the first event on the page? */ 1810 if (rb_event_index(event)) { 1811 event->time_delta = delta & TS_MASK; 1812 event->array[0] = delta >> TS_SHIFT; 1813 } else { 1814 /* nope, just zero it */ 1815 event->time_delta = 0; 1816 event->array[0] = 0; 1817 } 1818 1819 return skip_time_extend(event); 1820 } 1821 1822 /** 1823 * ring_buffer_update_event - update event type and data 1824 * @event: the even to update 1825 * @type: the type of event 1826 * @length: the size of the event field in the ring buffer 1827 * 1828 * Update the type and data fields of the event. The length 1829 * is the actual size that is written to the ring buffer, 1830 * and with this, we can determine what to place into the 1831 * data field. 1832 */ 1833 static void 1834 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 1835 struct ring_buffer_event *event, unsigned length, 1836 int add_timestamp, u64 delta) 1837 { 1838 /* Only a commit updates the timestamp */ 1839 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) 1840 delta = 0; 1841 1842 /* 1843 * If we need to add a timestamp, then we 1844 * add it to the start of the resevered space. 
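 *
 * A worked example (delta value invented): with TS_SHIFT at 27, a
 * delta of 0x12345678 does not fit in the 27 bit time_delta field, so
 * rb_add_time_stamp() above stores the low bits (delta & TS_MASK) in
 * time_delta, the rest (delta >> TS_SHIFT) in array[0], and the data
 * event itself then starts RB_LEN_TIME_EXTEND bytes later.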
1845 */ 1846 if (unlikely(add_timestamp)) { 1847 event = rb_add_time_stamp(event, delta); 1848 length -= RB_LEN_TIME_EXTEND; 1849 delta = 0; 1850 } 1851 1852 event->time_delta = delta; 1853 length -= RB_EVNT_HDR_SIZE; 1854 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 1855 event->type_len = 0; 1856 event->array[0] = length; 1857 } else 1858 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 1859 } 1860 1861 /* 1862 * rb_handle_head_page - writer hit the head page 1863 * 1864 * Returns: +1 to retry page 1865 * 0 to continue 1866 * -1 on error 1867 */ 1868 static int 1869 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 1870 struct buffer_page *tail_page, 1871 struct buffer_page *next_page) 1872 { 1873 struct buffer_page *new_head; 1874 int entries; 1875 int type; 1876 int ret; 1877 1878 entries = rb_page_entries(next_page); 1879 1880 /* 1881 * The hard part is here. We need to move the head 1882 * forward, and protect against both readers on 1883 * other CPUs and writers coming in via interrupts. 1884 */ 1885 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 1886 RB_PAGE_HEAD); 1887 1888 /* 1889 * type can be one of four: 1890 * NORMAL - an interrupt already moved it for us 1891 * HEAD - we are the first to get here. 1892 * UPDATE - we are the interrupt interrupting 1893 * a current move. 1894 * MOVED - a reader on another CPU moved the next 1895 * pointer to its reader page. Give up 1896 * and try again. 1897 */ 1898 1899 switch (type) { 1900 case RB_PAGE_HEAD: 1901 /* 1902 * We changed the head to UPDATE, thus 1903 * it is our responsibility to update 1904 * the counters. 1905 */ 1906 local_add(entries, &cpu_buffer->overrun); 1907 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 1908 1909 /* 1910 * The entries will be zeroed out when we move the 1911 * tail page. 1912 */ 1913 1914 /* still more to do */ 1915 break; 1916 1917 case RB_PAGE_UPDATE: 1918 /* 1919 * This is an interrupt that interrupt the 1920 * previous update. Still more to do. 1921 */ 1922 break; 1923 case RB_PAGE_NORMAL: 1924 /* 1925 * An interrupt came in before the update 1926 * and processed this for us. 1927 * Nothing left to do. 1928 */ 1929 return 1; 1930 case RB_PAGE_MOVED: 1931 /* 1932 * The reader is on another CPU and just did 1933 * a swap with our next_page. 1934 * Try again. 1935 */ 1936 return 1; 1937 default: 1938 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 1939 return -1; 1940 } 1941 1942 /* 1943 * Now that we are here, the old head pointer is 1944 * set to UPDATE. This will keep the reader from 1945 * swapping the head page with the reader page. 1946 * The reader (on another CPU) will spin till 1947 * we are finished. 1948 * 1949 * We just need to protect against interrupts 1950 * doing the job. We will set the next pointer 1951 * to HEAD. After that, we set the old pointer 1952 * to NORMAL, but only if it was HEAD before. 1953 * otherwise we are an interrupt, and only 1954 * want the outer most commit to reset it. 1955 */ 1956 new_head = next_page; 1957 rb_inc_page(cpu_buffer, &new_head); 1958 1959 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 1960 RB_PAGE_NORMAL); 1961 1962 /* 1963 * Valid returns are: 1964 * HEAD - an interrupt came in and already set it. 1965 * NORMAL - One of two things: 1966 * 1) We really set it. 1967 * 2) A bunch of interrupts came in and moved 1968 * the page forward again. 
1969 */ 1970 switch (ret) { 1971 case RB_PAGE_HEAD: 1972 case RB_PAGE_NORMAL: 1973 /* OK */ 1974 break; 1975 default: 1976 RB_WARN_ON(cpu_buffer, 1); 1977 return -1; 1978 } 1979 1980 /* 1981 * It is possible that an interrupt came in, 1982 * set the head up, then more interrupts came in 1983 * and moved it again. When we get back here, 1984 * the page would have been set to NORMAL but we 1985 * just set it back to HEAD. 1986 * 1987 * How do you detect this? Well, if that happened 1988 * the tail page would have moved. 1989 */ 1990 if (ret == RB_PAGE_NORMAL) { 1991 /* 1992 * If the tail had moved passed next, then we need 1993 * to reset the pointer. 1994 */ 1995 if (cpu_buffer->tail_page != tail_page && 1996 cpu_buffer->tail_page != next_page) 1997 rb_head_page_set_normal(cpu_buffer, new_head, 1998 next_page, 1999 RB_PAGE_HEAD); 2000 } 2001 2002 /* 2003 * If this was the outer most commit (the one that 2004 * changed the original pointer from HEAD to UPDATE), 2005 * then it is up to us to reset it to NORMAL. 2006 */ 2007 if (type == RB_PAGE_HEAD) { 2008 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2009 tail_page, 2010 RB_PAGE_UPDATE); 2011 if (RB_WARN_ON(cpu_buffer, 2012 ret != RB_PAGE_UPDATE)) 2013 return -1; 2014 } 2015 2016 return 0; 2017 } 2018 2019 static unsigned rb_calculate_event_length(unsigned length) 2020 { 2021 struct ring_buffer_event event; /* Used only for sizeof array */ 2022 2023 /* zero length can cause confusions */ 2024 if (!length) 2025 length = 1; 2026 2027 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2028 length += sizeof(event.array[0]); 2029 2030 length += RB_EVNT_HDR_SIZE; 2031 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2032 2033 return length; 2034 } 2035 2036 static inline void 2037 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2038 struct buffer_page *tail_page, 2039 unsigned long tail, unsigned long length) 2040 { 2041 struct ring_buffer_event *event; 2042 2043 /* 2044 * Only the event that crossed the page boundary 2045 * must fill the old tail_page with padding. 2046 */ 2047 if (tail >= BUF_PAGE_SIZE) { 2048 /* 2049 * If the page was filled, then we still need 2050 * to update the real_end. Reset it to zero 2051 * and the reader will ignore it. 2052 */ 2053 if (tail == BUF_PAGE_SIZE) 2054 tail_page->real_end = 0; 2055 2056 local_sub(length, &tail_page->write); 2057 return; 2058 } 2059 2060 event = __rb_page_index(tail_page, tail); 2061 kmemcheck_annotate_bitfield(event, bitfield); 2062 2063 /* account for padding bytes */ 2064 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2065 2066 /* 2067 * Save the original length to the meta data. 2068 * This will be used by the reader to add lost event 2069 * counter. 2070 */ 2071 tail_page->real_end = tail; 2072 2073 /* 2074 * If this event is bigger than the minimum size, then 2075 * we need to be careful that we don't subtract the 2076 * write counter enough to allow another writer to slip 2077 * in on this page. 2078 * We put in a discarded commit instead, to make sure 2079 * that this space is not used again. 2080 * 2081 * If we are less than the minimum size, we don't need to 2082 * worry about it. 
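 *
 * For example (illustrative numbers, assuming a 4K page, so
 * BUF_PAGE_SIZE is a little under 4096 and RB_EVNT_MIN_SIZE is 8):
 * if the old tail sits 4 bytes before the end of the page, nothing
 * fits, so the remainder is simply marked as padding; if it sits 80
 * bytes before the end, a discarded padding event of that length is
 * written so the space cannot be handed out again.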
2083 */ 2084 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2085 /* No room for any events */ 2086 2087 /* Mark the rest of the page with padding */ 2088 rb_event_set_padding(event); 2089 2090 /* Set the write back to the previous setting */ 2091 local_sub(length, &tail_page->write); 2092 return; 2093 } 2094 2095 /* Put in a discarded event */ 2096 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2097 event->type_len = RINGBUF_TYPE_PADDING; 2098 /* time delta must be non zero */ 2099 event->time_delta = 1; 2100 2101 /* Set write to end of buffer */ 2102 length = (tail + length) - BUF_PAGE_SIZE; 2103 local_sub(length, &tail_page->write); 2104 } 2105 2106 /* 2107 * This is the slow path, force gcc not to inline it. 2108 */ 2109 static noinline struct ring_buffer_event * 2110 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2111 unsigned long length, unsigned long tail, 2112 struct buffer_page *tail_page, u64 ts) 2113 { 2114 struct buffer_page *commit_page = cpu_buffer->commit_page; 2115 struct ring_buffer *buffer = cpu_buffer->buffer; 2116 struct buffer_page *next_page; 2117 int ret; 2118 2119 next_page = tail_page; 2120 2121 rb_inc_page(cpu_buffer, &next_page); 2122 2123 /* 2124 * If for some reason, we had an interrupt storm that made 2125 * it all the way around the buffer, bail, and warn 2126 * about it. 2127 */ 2128 if (unlikely(next_page == commit_page)) { 2129 local_inc(&cpu_buffer->commit_overrun); 2130 goto out_reset; 2131 } 2132 2133 /* 2134 * This is where the fun begins! 2135 * 2136 * We are fighting against races between a reader that 2137 * could be on another CPU trying to swap its reader 2138 * page with the buffer head. 2139 * 2140 * We are also fighting against interrupts coming in and 2141 * moving the head or tail on us as well. 2142 * 2143 * If the next page is the head page then we have filled 2144 * the buffer, unless the commit page is still on the 2145 * reader page. 2146 */ 2147 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { 2148 2149 /* 2150 * If the commit is not on the reader page, then 2151 * move the header page. 2152 */ 2153 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2154 /* 2155 * If we are not in overwrite mode, 2156 * this is easy, just stop here. 2157 */ 2158 if (!(buffer->flags & RB_FL_OVERWRITE)) 2159 goto out_reset; 2160 2161 ret = rb_handle_head_page(cpu_buffer, 2162 tail_page, 2163 next_page); 2164 if (ret < 0) 2165 goto out_reset; 2166 if (ret) 2167 goto out_again; 2168 } else { 2169 /* 2170 * We need to be careful here too. The 2171 * commit page could still be on the reader 2172 * page. We could have a small buffer, and 2173 * have filled up the buffer with events 2174 * from interrupts and such, and wrapped. 2175 * 2176 * Note, if the tail page is also the on the 2177 * reader_page, we let it move out. 
2178 */ 2179 if (unlikely((cpu_buffer->commit_page != 2180 cpu_buffer->tail_page) && 2181 (cpu_buffer->commit_page == 2182 cpu_buffer->reader_page))) { 2183 local_inc(&cpu_buffer->commit_overrun); 2184 goto out_reset; 2185 } 2186 } 2187 } 2188 2189 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); 2190 if (ret) { 2191 /* 2192 * Nested commits always have zero deltas, so 2193 * just reread the time stamp 2194 */ 2195 ts = rb_time_stamp(buffer); 2196 next_page->page->time_stamp = ts; 2197 } 2198 2199 out_again: 2200 2201 rb_reset_tail(cpu_buffer, tail_page, tail, length); 2202 2203 /* fail and let the caller try again */ 2204 return ERR_PTR(-EAGAIN); 2205 2206 out_reset: 2207 /* reset write */ 2208 rb_reset_tail(cpu_buffer, tail_page, tail, length); 2209 2210 return NULL; 2211 } 2212 2213 static struct ring_buffer_event * 2214 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 2215 unsigned long length, u64 ts, 2216 u64 delta, int add_timestamp) 2217 { 2218 struct buffer_page *tail_page; 2219 struct ring_buffer_event *event; 2220 unsigned long tail, write; 2221 2222 /* 2223 * If the time delta since the last event is too big to 2224 * hold in the time field of the event, then we append a 2225 * TIME EXTEND event ahead of the data event. 2226 */ 2227 if (unlikely(add_timestamp)) 2228 length += RB_LEN_TIME_EXTEND; 2229 2230 tail_page = cpu_buffer->tail_page; 2231 write = local_add_return(length, &tail_page->write); 2232 2233 /* set write to only the index of the write */ 2234 write &= RB_WRITE_MASK; 2235 tail = write - length; 2236 2237 /* See if we shot pass the end of this buffer page */ 2238 if (unlikely(write > BUF_PAGE_SIZE)) 2239 return rb_move_tail(cpu_buffer, length, tail, 2240 tail_page, ts); 2241 2242 /* We reserved something on the buffer */ 2243 2244 event = __rb_page_index(tail_page, tail); 2245 kmemcheck_annotate_bitfield(event, bitfield); 2246 rb_update_event(cpu_buffer, event, length, add_timestamp, delta); 2247 2248 local_inc(&tail_page->entries); 2249 2250 /* 2251 * If this is the first commit on the page, then update 2252 * its timestamp. 2253 */ 2254 if (!tail) 2255 tail_page->page->time_stamp = ts; 2256 2257 /* account for these added bytes */ 2258 local_add(length, &cpu_buffer->entries_bytes); 2259 2260 return event; 2261 } 2262 2263 static inline int 2264 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2265 struct ring_buffer_event *event) 2266 { 2267 unsigned long new_index, old_index; 2268 struct buffer_page *bpage; 2269 unsigned long index; 2270 unsigned long addr; 2271 2272 new_index = rb_event_index(event); 2273 old_index = new_index + rb_event_ts_length(event); 2274 addr = (unsigned long)event; 2275 addr &= PAGE_MASK; 2276 2277 bpage = cpu_buffer->tail_page; 2278 2279 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 2280 unsigned long write_mask = 2281 local_read(&bpage->write) & ~RB_WRITE_MASK; 2282 unsigned long event_length = rb_event_length(event); 2283 /* 2284 * This is on the tail page. It is possible that 2285 * a write could come in and move the tail page 2286 * and write to the next page. That is fine 2287 * because we just shorten what is on this page. 
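 *
 * A sketch with illustrative indices: if the event to discard starts
 * at index 96 and ends at index 128, and bpage->write still reads
 * 128, the cmpxchg below moves the write index back to 96 and the
 * space is reclaimed. If another event was reserved in the meantime,
 * the write index is no longer 128, the cmpxchg fails, and the
 * caller falls back to leaving the event in place as discarded
 * padding.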
2288 */ 2289 old_index += write_mask; 2290 new_index += write_mask; 2291 index = local_cmpxchg(&bpage->write, old_index, new_index); 2292 if (index == old_index) { 2293 /* update counters */ 2294 local_sub(event_length, &cpu_buffer->entries_bytes); 2295 return 1; 2296 } 2297 } 2298 2299 /* could not discard */ 2300 return 0; 2301 } 2302 2303 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 2304 { 2305 local_inc(&cpu_buffer->committing); 2306 local_inc(&cpu_buffer->commits); 2307 } 2308 2309 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 2310 { 2311 unsigned long commits; 2312 2313 if (RB_WARN_ON(cpu_buffer, 2314 !local_read(&cpu_buffer->committing))) 2315 return; 2316 2317 again: 2318 commits = local_read(&cpu_buffer->commits); 2319 /* synchronize with interrupts */ 2320 barrier(); 2321 if (local_read(&cpu_buffer->committing) == 1) 2322 rb_set_commit_to_write(cpu_buffer); 2323 2324 local_dec(&cpu_buffer->committing); 2325 2326 /* synchronize with interrupts */ 2327 barrier(); 2328 2329 /* 2330 * Need to account for interrupts coming in between the 2331 * updating of the commit page and the clearing of the 2332 * committing counter. 2333 */ 2334 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 2335 !local_read(&cpu_buffer->committing)) { 2336 local_inc(&cpu_buffer->committing); 2337 goto again; 2338 } 2339 } 2340 2341 static struct ring_buffer_event * 2342 rb_reserve_next_event(struct ring_buffer *buffer, 2343 struct ring_buffer_per_cpu *cpu_buffer, 2344 unsigned long length) 2345 { 2346 struct ring_buffer_event *event; 2347 u64 ts, delta; 2348 int nr_loops = 0; 2349 int add_timestamp; 2350 u64 diff; 2351 2352 rb_start_commit(cpu_buffer); 2353 2354 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 2355 /* 2356 * Due to the ability to swap a cpu buffer from a buffer 2357 * it is possible it was swapped before we committed. 2358 * (committing stops a swap). We check for it here and 2359 * if it happened, we have to fail the write. 2360 */ 2361 barrier(); 2362 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { 2363 local_dec(&cpu_buffer->committing); 2364 local_dec(&cpu_buffer->commits); 2365 return NULL; 2366 } 2367 #endif 2368 2369 length = rb_calculate_event_length(length); 2370 again: 2371 add_timestamp = 0; 2372 delta = 0; 2373 2374 /* 2375 * We allow for interrupts to reenter here and do a trace. 2376 * If one does, it will cause this original code to loop 2377 * back here. Even with heavy interrupts happening, this 2378 * should only happen a few times in a row. If this happens 2379 * 1000 times in a row, there must be either an interrupt 2380 * storm or we have something buggy. 2381 * Bail! 2382 */ 2383 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2384 goto out_fail; 2385 2386 ts = rb_time_stamp(cpu_buffer->buffer); 2387 diff = ts - cpu_buffer->write_stamp; 2388 2389 /* make sure this diff is calculated here */ 2390 barrier(); 2391 2392 /* Did the write stamp get updated already? */ 2393 if (likely(ts >= cpu_buffer->write_stamp)) { 2394 delta = diff; 2395 if (unlikely(test_time_stamp(delta))) { 2396 int local_clock_stable = 1; 2397 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2398 local_clock_stable = sched_clock_stable; 2399 #endif 2400 WARN_ONCE(delta > (1ULL << 59), 2401 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", 2402 (unsigned long long)delta, 2403 (unsigned long long)ts, 2404 (unsigned long long)cpu_buffer->write_stamp, 2405 local_clock_stable ? 
"" : 2406 "If you just came from a suspend/resume,\n" 2407 "please switch to the trace global clock:\n" 2408 " echo global > /sys/kernel/debug/tracing/trace_clock\n"); 2409 add_timestamp = 1; 2410 } 2411 } 2412 2413 event = __rb_reserve_next(cpu_buffer, length, ts, 2414 delta, add_timestamp); 2415 if (unlikely(PTR_ERR(event) == -EAGAIN)) 2416 goto again; 2417 2418 if (!event) 2419 goto out_fail; 2420 2421 return event; 2422 2423 out_fail: 2424 rb_end_commit(cpu_buffer); 2425 return NULL; 2426 } 2427 2428 #ifdef CONFIG_TRACING 2429 2430 #define TRACE_RECURSIVE_DEPTH 16 2431 2432 /* Keep this code out of the fast path cache */ 2433 static noinline void trace_recursive_fail(void) 2434 { 2435 /* Disable all tracing before we do anything else */ 2436 tracing_off_permanent(); 2437 2438 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" 2439 "HC[%lu]:SC[%lu]:NMI[%lu]\n", 2440 trace_recursion_buffer(), 2441 hardirq_count() >> HARDIRQ_SHIFT, 2442 softirq_count() >> SOFTIRQ_SHIFT, 2443 in_nmi()); 2444 2445 WARN_ON_ONCE(1); 2446 } 2447 2448 static inline int trace_recursive_lock(void) 2449 { 2450 trace_recursion_inc(); 2451 2452 if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) 2453 return 0; 2454 2455 trace_recursive_fail(); 2456 2457 return -1; 2458 } 2459 2460 static inline void trace_recursive_unlock(void) 2461 { 2462 WARN_ON_ONCE(!trace_recursion_buffer()); 2463 2464 trace_recursion_dec(); 2465 } 2466 2467 #else 2468 2469 #define trace_recursive_lock() (0) 2470 #define trace_recursive_unlock() do { } while (0) 2471 2472 #endif 2473 2474 /** 2475 * ring_buffer_lock_reserve - reserve a part of the buffer 2476 * @buffer: the ring buffer to reserve from 2477 * @length: the length of the data to reserve (excluding event header) 2478 * 2479 * Returns a reseverd event on the ring buffer to copy directly to. 2480 * The user of this interface will need to get the body to write into 2481 * and can use the ring_buffer_event_data() interface. 2482 * 2483 * The length is the length of the data needed, not the event length 2484 * which also includes the event header. 2485 * 2486 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 2487 * If NULL is returned, then nothing has been allocated or locked. 
2488 */ 2489 struct ring_buffer_event * 2490 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) 2491 { 2492 struct ring_buffer_per_cpu *cpu_buffer; 2493 struct ring_buffer_event *event; 2494 int cpu; 2495 2496 if (ring_buffer_flags != RB_BUFFERS_ON) 2497 return NULL; 2498 2499 /* If we are tracing schedule, we don't want to recurse */ 2500 preempt_disable_notrace(); 2501 2502 if (atomic_read(&buffer->record_disabled)) 2503 goto out_nocheck; 2504 2505 if (trace_recursive_lock()) 2506 goto out_nocheck; 2507 2508 cpu = raw_smp_processor_id(); 2509 2510 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2511 goto out; 2512 2513 cpu_buffer = buffer->buffers[cpu]; 2514 2515 if (atomic_read(&cpu_buffer->record_disabled)) 2516 goto out; 2517 2518 if (length > BUF_MAX_DATA_SIZE) 2519 goto out; 2520 2521 event = rb_reserve_next_event(buffer, cpu_buffer, length); 2522 if (!event) 2523 goto out; 2524 2525 return event; 2526 2527 out: 2528 trace_recursive_unlock(); 2529 2530 out_nocheck: 2531 preempt_enable_notrace(); 2532 return NULL; 2533 } 2534 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 2535 2536 static void 2537 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, 2538 struct ring_buffer_event *event) 2539 { 2540 u64 delta; 2541 2542 /* 2543 * The event first in the commit queue updates the 2544 * time stamp. 2545 */ 2546 if (rb_event_is_commit(cpu_buffer, event)) { 2547 /* 2548 * A commit event that is first on a page 2549 * updates the write timestamp with the page stamp 2550 */ 2551 if (!rb_event_index(event)) 2552 cpu_buffer->write_stamp = 2553 cpu_buffer->commit_page->page->time_stamp; 2554 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 2555 delta = event->array[0]; 2556 delta <<= TS_SHIFT; 2557 delta += event->time_delta; 2558 cpu_buffer->write_stamp += delta; 2559 } else 2560 cpu_buffer->write_stamp += event->time_delta; 2561 } 2562 } 2563 2564 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 2565 struct ring_buffer_event *event) 2566 { 2567 local_inc(&cpu_buffer->entries); 2568 rb_update_write_stamp(cpu_buffer, event); 2569 rb_end_commit(cpu_buffer); 2570 } 2571 2572 /** 2573 * ring_buffer_unlock_commit - commit a reserved 2574 * @buffer: The buffer to commit to 2575 * @event: The event pointer to commit. 2576 * 2577 * This commits the data to the ring buffer, and releases any locks held. 2578 * 2579 * Must be paired with ring_buffer_lock_reserve. 2580 */ 2581 int ring_buffer_unlock_commit(struct ring_buffer *buffer, 2582 struct ring_buffer_event *event) 2583 { 2584 struct ring_buffer_per_cpu *cpu_buffer; 2585 int cpu = raw_smp_processor_id(); 2586 2587 cpu_buffer = buffer->buffers[cpu]; 2588 2589 rb_commit(cpu_buffer, event); 2590 2591 trace_recursive_unlock(); 2592 2593 preempt_enable_notrace(); 2594 2595 return 0; 2596 } 2597 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 2598 2599 static inline void rb_event_discard(struct ring_buffer_event *event) 2600 { 2601 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 2602 event = skip_time_extend(event); 2603 2604 /* array[0] holds the actual length for the discarded event */ 2605 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 2606 event->type_len = RINGBUF_TYPE_PADDING; 2607 /* time delta must be non zero */ 2608 if (!event->time_delta) 2609 event->time_delta = 1; 2610 } 2611 2612 /* 2613 * Decrement the entries to the page that an event is on. 2614 * The event does not even need to exist, only the pointer 2615 * to the page it is on. 
This may only be called before the commit 2616 * takes place. 2617 */ 2618 static inline void 2619 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 2620 struct ring_buffer_event *event) 2621 { 2622 unsigned long addr = (unsigned long)event; 2623 struct buffer_page *bpage = cpu_buffer->commit_page; 2624 struct buffer_page *start; 2625 2626 addr &= PAGE_MASK; 2627 2628 /* Do the likely case first */ 2629 if (likely(bpage->page == (void *)addr)) { 2630 local_dec(&bpage->entries); 2631 return; 2632 } 2633 2634 /* 2635 * Because the commit page may be on the reader page we 2636 * start with the next page and check the end loop there. 2637 */ 2638 rb_inc_page(cpu_buffer, &bpage); 2639 start = bpage; 2640 do { 2641 if (bpage->page == (void *)addr) { 2642 local_dec(&bpage->entries); 2643 return; 2644 } 2645 rb_inc_page(cpu_buffer, &bpage); 2646 } while (bpage != start); 2647 2648 /* commit not part of this buffer?? */ 2649 RB_WARN_ON(cpu_buffer, 1); 2650 } 2651 2652 /** 2653 * ring_buffer_commit_discard - discard an event that has not been committed 2654 * @buffer: the ring buffer 2655 * @event: non committed event to discard 2656 * 2657 * Sometimes an event that is in the ring buffer needs to be ignored. 2658 * This function lets the user discard an event in the ring buffer 2659 * and then that event will not be read later. 2660 * 2661 * This function only works if it is called before the the item has been 2662 * committed. It will try to free the event from the ring buffer 2663 * if another event has not been added behind it. 2664 * 2665 * If another event has been added behind it, it will set the event 2666 * up as discarded, and perform the commit. 2667 * 2668 * If this function is called, do not call ring_buffer_unlock_commit on 2669 * the event. 2670 */ 2671 void ring_buffer_discard_commit(struct ring_buffer *buffer, 2672 struct ring_buffer_event *event) 2673 { 2674 struct ring_buffer_per_cpu *cpu_buffer; 2675 int cpu; 2676 2677 /* The event is discarded regardless */ 2678 rb_event_discard(event); 2679 2680 cpu = smp_processor_id(); 2681 cpu_buffer = buffer->buffers[cpu]; 2682 2683 /* 2684 * This must only be called if the event has not been 2685 * committed yet. Thus we can assume that preemption 2686 * is still disabled. 2687 */ 2688 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2689 2690 rb_decrement_entry(cpu_buffer, event); 2691 if (rb_try_to_discard(cpu_buffer, event)) 2692 goto out; 2693 2694 /* 2695 * The commit is still visible by the reader, so we 2696 * must still update the timestamp. 2697 */ 2698 rb_update_write_stamp(cpu_buffer, event); 2699 out: 2700 rb_end_commit(cpu_buffer); 2701 2702 trace_recursive_unlock(); 2703 2704 preempt_enable_notrace(); 2705 2706 } 2707 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 2708 2709 /** 2710 * ring_buffer_write - write data to the buffer without reserving 2711 * @buffer: The ring buffer to write to. 2712 * @length: The length of the data being written (excluding the event header) 2713 * @data: The data to write to the buffer. 2714 * 2715 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 2716 * one function. If you already have the data to write to the buffer, it 2717 * may be easier to simply call this function. 2718 * 2719 * Note, like ring_buffer_lock_reserve, the length is the length of the data 2720 * and not the length of the event which would hold the header. 
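 *
 * For example (an illustrative sketch; "struct my_sample" is a
 * hypothetical caller-side type, not part of this API):
 *
 *	struct my_sample sample = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(sample), &sample))
 *		pr_debug("ring buffer full or disabled, sample dropped\n");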
2721 */ 2722 int ring_buffer_write(struct ring_buffer *buffer, 2723 unsigned long length, 2724 void *data) 2725 { 2726 struct ring_buffer_per_cpu *cpu_buffer; 2727 struct ring_buffer_event *event; 2728 void *body; 2729 int ret = -EBUSY; 2730 int cpu; 2731 2732 if (ring_buffer_flags != RB_BUFFERS_ON) 2733 return -EBUSY; 2734 2735 preempt_disable_notrace(); 2736 2737 if (atomic_read(&buffer->record_disabled)) 2738 goto out; 2739 2740 cpu = raw_smp_processor_id(); 2741 2742 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2743 goto out; 2744 2745 cpu_buffer = buffer->buffers[cpu]; 2746 2747 if (atomic_read(&cpu_buffer->record_disabled)) 2748 goto out; 2749 2750 if (length > BUF_MAX_DATA_SIZE) 2751 goto out; 2752 2753 event = rb_reserve_next_event(buffer, cpu_buffer, length); 2754 if (!event) 2755 goto out; 2756 2757 body = rb_event_data(event); 2758 2759 memcpy(body, data, length); 2760 2761 rb_commit(cpu_buffer, event); 2762 2763 ret = 0; 2764 out: 2765 preempt_enable_notrace(); 2766 2767 return ret; 2768 } 2769 EXPORT_SYMBOL_GPL(ring_buffer_write); 2770 2771 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 2772 { 2773 struct buffer_page *reader = cpu_buffer->reader_page; 2774 struct buffer_page *head = rb_set_head_page(cpu_buffer); 2775 struct buffer_page *commit = cpu_buffer->commit_page; 2776 2777 /* In case of error, head will be NULL */ 2778 if (unlikely(!head)) 2779 return 1; 2780 2781 return reader->read == rb_page_commit(reader) && 2782 (commit == reader || 2783 (commit == head && 2784 head->read == rb_page_commit(commit))); 2785 } 2786 2787 /** 2788 * ring_buffer_record_disable - stop all writes into the buffer 2789 * @buffer: The ring buffer to stop writes to. 2790 * 2791 * This prevents all writes to the buffer. Any attempt to write 2792 * to the buffer after this will fail and return NULL. 2793 * 2794 * The caller should call synchronize_sched() after this. 2795 */ 2796 void ring_buffer_record_disable(struct ring_buffer *buffer) 2797 { 2798 atomic_inc(&buffer->record_disabled); 2799 } 2800 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 2801 2802 /** 2803 * ring_buffer_record_enable - enable writes to the buffer 2804 * @buffer: The ring buffer to enable writes 2805 * 2806 * Note, multiple disables will need the same number of enables 2807 * to truly enable the writing (much like preempt_disable). 2808 */ 2809 void ring_buffer_record_enable(struct ring_buffer *buffer) 2810 { 2811 atomic_dec(&buffer->record_disabled); 2812 } 2813 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 2814 2815 /** 2816 * ring_buffer_record_off - stop all writes into the buffer 2817 * @buffer: The ring buffer to stop writes to. 2818 * 2819 * This prevents all writes to the buffer. Any attempt to write 2820 * to the buffer after this will fail and return NULL. 2821 * 2822 * This is different than ring_buffer_record_disable() as 2823 * it works like an on/off switch, where as the disable() version 2824 * must be paired with a enable(). 2825 */ 2826 void ring_buffer_record_off(struct ring_buffer *buffer) 2827 { 2828 unsigned int rd; 2829 unsigned int new_rd; 2830 2831 do { 2832 rd = atomic_read(&buffer->record_disabled); 2833 new_rd = rd | RB_BUFFER_OFF; 2834 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2835 } 2836 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 2837 2838 /** 2839 * ring_buffer_record_on - restart writes into the buffer 2840 * @buffer: The ring buffer to start writes to. 
2841 * 2842 * This enables all writes to the buffer that was disabled by 2843 * ring_buffer_record_off(). 2844 * 2845 * This is different than ring_buffer_record_enable() as 2846 * it works like an on/off switch, where as the enable() version 2847 * must be paired with a disable(). 2848 */ 2849 void ring_buffer_record_on(struct ring_buffer *buffer) 2850 { 2851 unsigned int rd; 2852 unsigned int new_rd; 2853 2854 do { 2855 rd = atomic_read(&buffer->record_disabled); 2856 new_rd = rd & ~RB_BUFFER_OFF; 2857 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2858 } 2859 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 2860 2861 /** 2862 * ring_buffer_record_is_on - return true if the ring buffer can write 2863 * @buffer: The ring buffer to see if write is enabled 2864 * 2865 * Returns true if the ring buffer is in a state that it accepts writes. 2866 */ 2867 int ring_buffer_record_is_on(struct ring_buffer *buffer) 2868 { 2869 return !atomic_read(&buffer->record_disabled); 2870 } 2871 2872 /** 2873 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 2874 * @buffer: The ring buffer to stop writes to. 2875 * @cpu: The CPU buffer to stop 2876 * 2877 * This prevents all writes to the buffer. Any attempt to write 2878 * to the buffer after this will fail and return NULL. 2879 * 2880 * The caller should call synchronize_sched() after this. 2881 */ 2882 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) 2883 { 2884 struct ring_buffer_per_cpu *cpu_buffer; 2885 2886 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2887 return; 2888 2889 cpu_buffer = buffer->buffers[cpu]; 2890 atomic_inc(&cpu_buffer->record_disabled); 2891 } 2892 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 2893 2894 /** 2895 * ring_buffer_record_enable_cpu - enable writes to the buffer 2896 * @buffer: The ring buffer to enable writes 2897 * @cpu: The CPU to enable. 2898 * 2899 * Note, multiple disables will need the same number of enables 2900 * to truly enable the writing (much like preempt_disable). 2901 */ 2902 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) 2903 { 2904 struct ring_buffer_per_cpu *cpu_buffer; 2905 2906 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2907 return; 2908 2909 cpu_buffer = buffer->buffers[cpu]; 2910 atomic_dec(&cpu_buffer->record_disabled); 2911 } 2912 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 2913 2914 /* 2915 * The total entries in the ring buffer is the running counter 2916 * of entries entered into the ring buffer, minus the sum of 2917 * the entries read from the ring buffer and the number of 2918 * entries that were overwritten. 2919 */ 2920 static inline unsigned long 2921 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 2922 { 2923 return local_read(&cpu_buffer->entries) - 2924 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 2925 } 2926 2927 /** 2928 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 2929 * @buffer: The ring buffer 2930 * @cpu: The per CPU buffer to read from. 
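 *
 * Returns the time stamp of the oldest page still holding data for
 * that CPU (the reader page if the tail is on it, otherwise the head
 * page), or 0 if @cpu is not part of this buffer's cpumask.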
2931 */ 2932 unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) 2933 { 2934 unsigned long flags; 2935 struct ring_buffer_per_cpu *cpu_buffer; 2936 struct buffer_page *bpage; 2937 unsigned long ret; 2938 2939 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2940 return 0; 2941 2942 cpu_buffer = buffer->buffers[cpu]; 2943 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2944 /* 2945 * if the tail is on reader_page, oldest time stamp is on the reader 2946 * page 2947 */ 2948 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 2949 bpage = cpu_buffer->reader_page; 2950 else 2951 bpage = rb_set_head_page(cpu_buffer); 2952 ret = bpage->page->time_stamp; 2953 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2954 2955 return ret; 2956 } 2957 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 2958 2959 /** 2960 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 2961 * @buffer: The ring buffer 2962 * @cpu: The per CPU buffer to read from. 2963 */ 2964 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) 2965 { 2966 struct ring_buffer_per_cpu *cpu_buffer; 2967 unsigned long ret; 2968 2969 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2970 return 0; 2971 2972 cpu_buffer = buffer->buffers[cpu]; 2973 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 2974 2975 return ret; 2976 } 2977 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 2978 2979 /** 2980 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 2981 * @buffer: The ring buffer 2982 * @cpu: The per CPU buffer to get the entries from. 2983 */ 2984 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 2985 { 2986 struct ring_buffer_per_cpu *cpu_buffer; 2987 2988 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2989 return 0; 2990 2991 cpu_buffer = buffer->buffers[cpu]; 2992 2993 return rb_num_of_entries(cpu_buffer); 2994 } 2995 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 2996 2997 /** 2998 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 2999 * @buffer: The ring buffer 3000 * @cpu: The per CPU buffer to get the number of overruns from 3001 */ 3002 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 3003 { 3004 struct ring_buffer_per_cpu *cpu_buffer; 3005 unsigned long ret; 3006 3007 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3008 return 0; 3009 3010 cpu_buffer = buffer->buffers[cpu]; 3011 ret = local_read(&cpu_buffer->overrun); 3012 3013 return ret; 3014 } 3015 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 3016 3017 /** 3018 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits 3019 * @buffer: The ring buffer 3020 * @cpu: The per CPU buffer to get the number of overruns from 3021 */ 3022 unsigned long 3023 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) 3024 { 3025 struct ring_buffer_per_cpu *cpu_buffer; 3026 unsigned long ret; 3027 3028 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3029 return 0; 3030 3031 cpu_buffer = buffer->buffers[cpu]; 3032 ret = local_read(&cpu_buffer->commit_overrun); 3033 3034 return ret; 3035 } 3036 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 3037 3038 /** 3039 * ring_buffer_entries - get the number of entries in a buffer 3040 * @buffer: The ring buffer 3041 * 3042 * Returns the total number of entries in the ring buffer 3043 * (all CPU entries) 3044 */ 3045 unsigned long ring_buffer_entries(struct ring_buffer *buffer) 3046 { 3047 struct ring_buffer_per_cpu *cpu_buffer; 3048 unsigned long entries = 
0; 3049 int cpu; 3050 3051 /* if you care about this being correct, lock the buffer */ 3052 for_each_buffer_cpu(buffer, cpu) { 3053 cpu_buffer = buffer->buffers[cpu]; 3054 entries += rb_num_of_entries(cpu_buffer); 3055 } 3056 3057 return entries; 3058 } 3059 EXPORT_SYMBOL_GPL(ring_buffer_entries); 3060 3061 /** 3062 * ring_buffer_overruns - get the number of overruns in buffer 3063 * @buffer: The ring buffer 3064 * 3065 * Returns the total number of overruns in the ring buffer 3066 * (all CPU entries) 3067 */ 3068 unsigned long ring_buffer_overruns(struct ring_buffer *buffer) 3069 { 3070 struct ring_buffer_per_cpu *cpu_buffer; 3071 unsigned long overruns = 0; 3072 int cpu; 3073 3074 /* if you care about this being correct, lock the buffer */ 3075 for_each_buffer_cpu(buffer, cpu) { 3076 cpu_buffer = buffer->buffers[cpu]; 3077 overruns += local_read(&cpu_buffer->overrun); 3078 } 3079 3080 return overruns; 3081 } 3082 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 3083 3084 static void rb_iter_reset(struct ring_buffer_iter *iter) 3085 { 3086 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3087 3088 /* Iterator usage is expected to have record disabled */ 3089 if (list_empty(&cpu_buffer->reader_page->list)) { 3090 iter->head_page = rb_set_head_page(cpu_buffer); 3091 if (unlikely(!iter->head_page)) 3092 return; 3093 iter->head = iter->head_page->read; 3094 } else { 3095 iter->head_page = cpu_buffer->reader_page; 3096 iter->head = cpu_buffer->reader_page->read; 3097 } 3098 if (iter->head) 3099 iter->read_stamp = cpu_buffer->read_stamp; 3100 else 3101 iter->read_stamp = iter->head_page->page->time_stamp; 3102 iter->cache_reader_page = cpu_buffer->reader_page; 3103 iter->cache_read = cpu_buffer->read; 3104 } 3105 3106 /** 3107 * ring_buffer_iter_reset - reset an iterator 3108 * @iter: The iterator to reset 3109 * 3110 * Resets the iterator, so that it will start from the beginning 3111 * again. 
3112 */ 3113 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 3114 { 3115 struct ring_buffer_per_cpu *cpu_buffer; 3116 unsigned long flags; 3117 3118 if (!iter) 3119 return; 3120 3121 cpu_buffer = iter->cpu_buffer; 3122 3123 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3124 rb_iter_reset(iter); 3125 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3126 } 3127 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 3128 3129 /** 3130 * ring_buffer_iter_empty - check if an iterator has no more to read 3131 * @iter: The iterator to check 3132 */ 3133 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 3134 { 3135 struct ring_buffer_per_cpu *cpu_buffer; 3136 3137 cpu_buffer = iter->cpu_buffer; 3138 3139 return iter->head_page == cpu_buffer->commit_page && 3140 iter->head == rb_commit_index(cpu_buffer); 3141 } 3142 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 3143 3144 static void 3145 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 3146 struct ring_buffer_event *event) 3147 { 3148 u64 delta; 3149 3150 switch (event->type_len) { 3151 case RINGBUF_TYPE_PADDING: 3152 return; 3153 3154 case RINGBUF_TYPE_TIME_EXTEND: 3155 delta = event->array[0]; 3156 delta <<= TS_SHIFT; 3157 delta += event->time_delta; 3158 cpu_buffer->read_stamp += delta; 3159 return; 3160 3161 case RINGBUF_TYPE_TIME_STAMP: 3162 /* FIXME: not implemented */ 3163 return; 3164 3165 case RINGBUF_TYPE_DATA: 3166 cpu_buffer->read_stamp += event->time_delta; 3167 return; 3168 3169 default: 3170 BUG(); 3171 } 3172 return; 3173 } 3174 3175 static void 3176 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 3177 struct ring_buffer_event *event) 3178 { 3179 u64 delta; 3180 3181 switch (event->type_len) { 3182 case RINGBUF_TYPE_PADDING: 3183 return; 3184 3185 case RINGBUF_TYPE_TIME_EXTEND: 3186 delta = event->array[0]; 3187 delta <<= TS_SHIFT; 3188 delta += event->time_delta; 3189 iter->read_stamp += delta; 3190 return; 3191 3192 case RINGBUF_TYPE_TIME_STAMP: 3193 /* FIXME: not implemented */ 3194 return; 3195 3196 case RINGBUF_TYPE_DATA: 3197 iter->read_stamp += event->time_delta; 3198 return; 3199 3200 default: 3201 BUG(); 3202 } 3203 return; 3204 } 3205 3206 static struct buffer_page * 3207 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 3208 { 3209 struct buffer_page *reader = NULL; 3210 unsigned long overwrite; 3211 unsigned long flags; 3212 int nr_loops = 0; 3213 int ret; 3214 3215 local_irq_save(flags); 3216 arch_spin_lock(&cpu_buffer->lock); 3217 3218 again: 3219 /* 3220 * This should normally only loop twice. But because the 3221 * start of the reader inserts an empty page, it causes 3222 * a case where we will loop three times. There should be no 3223 * reason to loop four times (that I know of). 3224 */ 3225 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 3226 reader = NULL; 3227 goto out; 3228 } 3229 3230 reader = cpu_buffer->reader_page; 3231 3232 /* If there's more to read, return this page */ 3233 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 3234 goto out; 3235 3236 /* Never should we have an index greater than the size */ 3237 if (RB_WARN_ON(cpu_buffer, 3238 cpu_buffer->reader_page->read > rb_page_size(reader))) 3239 goto out; 3240 3241 /* check if we caught up to the tail */ 3242 reader = NULL; 3243 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 3244 goto out; 3245 3246 /* Don't bother swapping if the ring buffer is empty */ 3247 if (rb_num_of_entries(cpu_buffer) == 0) 3248 goto out; 3249 3250 /* 3251 * Reset the reader page to size zero. 
3252 */ 3253 local_set(&cpu_buffer->reader_page->write, 0); 3254 local_set(&cpu_buffer->reader_page->entries, 0); 3255 local_set(&cpu_buffer->reader_page->page->commit, 0); 3256 cpu_buffer->reader_page->real_end = 0; 3257 3258 spin: 3259 /* 3260 * Splice the empty reader page into the list around the head. 3261 */ 3262 reader = rb_set_head_page(cpu_buffer); 3263 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 3264 cpu_buffer->reader_page->list.prev = reader->list.prev; 3265 3266 /* 3267 * cpu_buffer->pages just needs to point to the buffer, it 3268 * has no specific buffer page to point to. Lets move it out 3269 * of our way so we don't accidentally swap it. 3270 */ 3271 cpu_buffer->pages = reader->list.prev; 3272 3273 /* The reader page will be pointing to the new head */ 3274 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); 3275 3276 /* 3277 * We want to make sure we read the overruns after we set up our 3278 * pointers to the next object. The writer side does a 3279 * cmpxchg to cross pages which acts as the mb on the writer 3280 * side. Note, the reader will constantly fail the swap 3281 * while the writer is updating the pointers, so this 3282 * guarantees that the overwrite recorded here is the one we 3283 * want to compare with the last_overrun. 3284 */ 3285 smp_mb(); 3286 overwrite = local_read(&(cpu_buffer->overrun)); 3287 3288 /* 3289 * Here's the tricky part. 3290 * 3291 * We need to move the pointer past the header page. 3292 * But we can only do that if a writer is not currently 3293 * moving it. The page before the header page has the 3294 * flag bit '1' set if it is pointing to the page we want. 3295 * but if the writer is in the process of moving it 3296 * than it will be '2' or already moved '0'. 3297 */ 3298 3299 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 3300 3301 /* 3302 * If we did not convert it, then we must try again. 3303 */ 3304 if (!ret) 3305 goto spin; 3306 3307 /* 3308 * Yeah! We succeeded in replacing the page. 3309 * 3310 * Now make the new head point back to the reader page. 
3311 */ 3312 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 3313 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 3314 3315 /* Finally update the reader page to the new head */ 3316 cpu_buffer->reader_page = reader; 3317 rb_reset_reader_page(cpu_buffer); 3318 3319 if (overwrite != cpu_buffer->last_overrun) { 3320 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 3321 cpu_buffer->last_overrun = overwrite; 3322 } 3323 3324 goto again; 3325 3326 out: 3327 arch_spin_unlock(&cpu_buffer->lock); 3328 local_irq_restore(flags); 3329 3330 return reader; 3331 } 3332 3333 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 3334 { 3335 struct ring_buffer_event *event; 3336 struct buffer_page *reader; 3337 unsigned length; 3338 3339 reader = rb_get_reader_page(cpu_buffer); 3340 3341 /* This function should not be called when buffer is empty */ 3342 if (RB_WARN_ON(cpu_buffer, !reader)) 3343 return; 3344 3345 event = rb_reader_event(cpu_buffer); 3346 3347 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 3348 cpu_buffer->read++; 3349 3350 rb_update_read_stamp(cpu_buffer, event); 3351 3352 length = rb_event_length(event); 3353 cpu_buffer->reader_page->read += length; 3354 } 3355 3356 static void rb_advance_iter(struct ring_buffer_iter *iter) 3357 { 3358 struct ring_buffer_per_cpu *cpu_buffer; 3359 struct ring_buffer_event *event; 3360 unsigned length; 3361 3362 cpu_buffer = iter->cpu_buffer; 3363 3364 /* 3365 * Check if we are at the end of the buffer. 3366 */ 3367 if (iter->head >= rb_page_size(iter->head_page)) { 3368 /* discarded commits can make the page empty */ 3369 if (iter->head_page == cpu_buffer->commit_page) 3370 return; 3371 rb_inc_iter(iter); 3372 return; 3373 } 3374 3375 event = rb_iter_head_event(iter); 3376 3377 length = rb_event_length(event); 3378 3379 /* 3380 * This should not be called to advance the header if we are 3381 * at the tail of the buffer. 3382 */ 3383 if (RB_WARN_ON(cpu_buffer, 3384 (iter->head_page == cpu_buffer->commit_page) && 3385 (iter->head + length > rb_commit_index(cpu_buffer)))) 3386 return; 3387 3388 rb_update_iter_read_stamp(iter, event); 3389 3390 iter->head += length; 3391 3392 /* check for end of page padding */ 3393 if ((iter->head >= rb_page_size(iter->head_page)) && 3394 (iter->head_page != cpu_buffer->commit_page)) 3395 rb_advance_iter(iter); 3396 } 3397 3398 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 3399 { 3400 return cpu_buffer->lost_events; 3401 } 3402 3403 static struct ring_buffer_event * 3404 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 3405 unsigned long *lost_events) 3406 { 3407 struct ring_buffer_event *event; 3408 struct buffer_page *reader; 3409 int nr_loops = 0; 3410 3411 again: 3412 /* 3413 * We repeat when a time extend is encountered. 3414 * Since the time extend is always attached to a data event, 3415 * we should never loop more than once. 3416 * (We never hit the following condition more than twice). 
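 *
 * In other words: one pass may consume a time extend (or the not yet
 * implemented time stamp) and loop back, and the next pass returns
 * the data event it was attached to; needing a third pass would mean
 * the buffer is corrupted.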
3417 */ 3418 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3419 return NULL; 3420 3421 reader = rb_get_reader_page(cpu_buffer); 3422 if (!reader) 3423 return NULL; 3424 3425 event = rb_reader_event(cpu_buffer); 3426 3427 switch (event->type_len) { 3428 case RINGBUF_TYPE_PADDING: 3429 if (rb_null_event(event)) 3430 RB_WARN_ON(cpu_buffer, 1); 3431 /* 3432 * Because the writer could be discarding every 3433 * event it creates (which would probably be bad) 3434 * if we were to go back to "again" then we may never 3435 * catch up, and will trigger the warn on, or lock 3436 * the box. Return the padding, and we will release 3437 * the current locks, and try again. 3438 */ 3439 return event; 3440 3441 case RINGBUF_TYPE_TIME_EXTEND: 3442 /* Internal data, OK to advance */ 3443 rb_advance_reader(cpu_buffer); 3444 goto again; 3445 3446 case RINGBUF_TYPE_TIME_STAMP: 3447 /* FIXME: not implemented */ 3448 rb_advance_reader(cpu_buffer); 3449 goto again; 3450 3451 case RINGBUF_TYPE_DATA: 3452 if (ts) { 3453 *ts = cpu_buffer->read_stamp + event->time_delta; 3454 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 3455 cpu_buffer->cpu, ts); 3456 } 3457 if (lost_events) 3458 *lost_events = rb_lost_events(cpu_buffer); 3459 return event; 3460 3461 default: 3462 BUG(); 3463 } 3464 3465 return NULL; 3466 } 3467 EXPORT_SYMBOL_GPL(ring_buffer_peek); 3468 3469 static struct ring_buffer_event * 3470 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3471 { 3472 struct ring_buffer *buffer; 3473 struct ring_buffer_per_cpu *cpu_buffer; 3474 struct ring_buffer_event *event; 3475 int nr_loops = 0; 3476 3477 cpu_buffer = iter->cpu_buffer; 3478 buffer = cpu_buffer->buffer; 3479 3480 /* 3481 * Check if someone performed a consuming read to 3482 * the buffer. A consuming read invalidates the iterator 3483 * and we need to reset the iterator in this case. 3484 */ 3485 if (unlikely(iter->cache_read != cpu_buffer->read || 3486 iter->cache_reader_page != cpu_buffer->reader_page)) 3487 rb_iter_reset(iter); 3488 3489 again: 3490 if (ring_buffer_iter_empty(iter)) 3491 return NULL; 3492 3493 /* 3494 * We repeat when a time extend is encountered. 3495 * Since the time extend is always attached to a data event, 3496 * we should never loop more than once. 3497 * (We never hit the following condition more than twice). 3498 */ 3499 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3500 return NULL; 3501 3502 if (rb_per_cpu_empty(cpu_buffer)) 3503 return NULL; 3504 3505 if (iter->head >= local_read(&iter->head_page->page->commit)) { 3506 rb_inc_iter(iter); 3507 goto again; 3508 } 3509 3510 event = rb_iter_head_event(iter); 3511 3512 switch (event->type_len) { 3513 case RINGBUF_TYPE_PADDING: 3514 if (rb_null_event(event)) { 3515 rb_inc_iter(iter); 3516 goto again; 3517 } 3518 rb_advance_iter(iter); 3519 return event; 3520 3521 case RINGBUF_TYPE_TIME_EXTEND: 3522 /* Internal data, OK to advance */ 3523 rb_advance_iter(iter); 3524 goto again; 3525 3526 case RINGBUF_TYPE_TIME_STAMP: 3527 /* FIXME: not implemented */ 3528 rb_advance_iter(iter); 3529 goto again; 3530 3531 case RINGBUF_TYPE_DATA: 3532 if (ts) { 3533 *ts = iter->read_stamp + event->time_delta; 3534 ring_buffer_normalize_time_stamp(buffer, 3535 cpu_buffer->cpu, ts); 3536 } 3537 return event; 3538 3539 default: 3540 BUG(); 3541 } 3542 3543 return NULL; 3544 } 3545 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 3546 3547 static inline int rb_ok_to_lock(void) 3548 { 3549 /* 3550 * If an NMI die dumps out the content of the ring buffer 3551 * do not grab locks. 
We also permanently disable the ring 3552 * buffer too. A one time deal is all you get from reading 3553 * the ring buffer from an NMI. 3554 */ 3555 if (likely(!in_nmi())) 3556 return 1; 3557 3558 tracing_off_permanent(); 3559 return 0; 3560 } 3561 3562 /** 3563 * ring_buffer_peek - peek at the next event to be read 3564 * @buffer: The ring buffer to read 3565 * @cpu: The cpu to peek at 3566 * @ts: The timestamp counter of this event. 3567 * @lost_events: a variable to store if events were lost (may be NULL) 3568 * 3569 * This will return the event that will be read next, but does 3570 * not consume the data. 3571 */ 3572 struct ring_buffer_event * 3573 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, 3574 unsigned long *lost_events) 3575 { 3576 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 3577 struct ring_buffer_event *event; 3578 unsigned long flags; 3579 int dolock; 3580 3581 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3582 return NULL; 3583 3584 dolock = rb_ok_to_lock(); 3585 again: 3586 local_irq_save(flags); 3587 if (dolock) 3588 raw_spin_lock(&cpu_buffer->reader_lock); 3589 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3590 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3591 rb_advance_reader(cpu_buffer); 3592 if (dolock) 3593 raw_spin_unlock(&cpu_buffer->reader_lock); 3594 local_irq_restore(flags); 3595 3596 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3597 goto again; 3598 3599 return event; 3600 } 3601 3602 /** 3603 * ring_buffer_iter_peek - peek at the next event to be read 3604 * @iter: The ring buffer iterator 3605 * @ts: The timestamp counter of this event. 3606 * 3607 * This will return the event that will be read next, but does 3608 * not increment the iterator. 3609 */ 3610 struct ring_buffer_event * 3611 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3612 { 3613 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3614 struct ring_buffer_event *event; 3615 unsigned long flags; 3616 3617 again: 3618 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3619 event = rb_iter_peek(iter, ts); 3620 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3621 3622 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3623 goto again; 3624 3625 return event; 3626 } 3627 3628 /** 3629 * ring_buffer_consume - return an event and consume it 3630 * @buffer: The ring buffer to get the next event from 3631 * @cpu: the cpu to read the buffer from 3632 * @ts: a variable to store the timestamp (may be NULL) 3633 * @lost_events: a variable to store if events were lost (may be NULL) 3634 * 3635 * Returns the next event in the ring buffer, and that event is consumed. 3636 * Meaning that sequential reads will keep returning a different event, 3637 * and eventually empty the ring buffer if the producer is slower.
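 *
 * For example, a typical consumer loop (an illustrative sketch;
 * "process()" is a hypothetical callback, not part of this API):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event), ts, lost);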
3638 */ 3639 struct ring_buffer_event * 3640 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, 3641 unsigned long *lost_events) 3642 { 3643 struct ring_buffer_per_cpu *cpu_buffer; 3644 struct ring_buffer_event *event = NULL; 3645 unsigned long flags; 3646 int dolock; 3647 3648 dolock = rb_ok_to_lock(); 3649 3650 again: 3651 /* might be called in atomic */ 3652 preempt_disable(); 3653 3654 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3655 goto out; 3656 3657 cpu_buffer = buffer->buffers[cpu]; 3658 local_irq_save(flags); 3659 if (dolock) 3660 raw_spin_lock(&cpu_buffer->reader_lock); 3661 3662 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3663 if (event) { 3664 cpu_buffer->lost_events = 0; 3665 rb_advance_reader(cpu_buffer); 3666 } 3667 3668 if (dolock) 3669 raw_spin_unlock(&cpu_buffer->reader_lock); 3670 local_irq_restore(flags); 3671 3672 out: 3673 preempt_enable(); 3674 3675 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3676 goto again; 3677 3678 return event; 3679 } 3680 EXPORT_SYMBOL_GPL(ring_buffer_consume); 3681 3682 /** 3683 * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer 3684 * @buffer: The ring buffer to read from 3685 * @cpu: The cpu buffer to iterate over 3686 * 3687 * This performs the initial preparations necessary to iterate 3688 * through the buffer. Memory is allocated, buffer recording 3689 * is disabled, and the iterator pointer is returned to the caller. 3690 * 3691 * Disabling buffer recording prevents the reading from being 3692 * corrupted. This is not a consuming read, so a producer is not 3693 * expected. 3694 * 3695 * After a sequence of ring_buffer_read_prepare calls, the user is 3696 * expected to make at least one call to ring_buffer_read_prepare_sync. 3697 * Afterwards, ring_buffer_read_start is invoked to get things going 3698 * for real. 3699 * 3700 * This overall must be paired with ring_buffer_read_finish. 3701 */ 3702 struct ring_buffer_iter * 3703 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) 3704 { 3705 struct ring_buffer_per_cpu *cpu_buffer; 3706 struct ring_buffer_iter *iter; 3707 3708 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3709 return NULL; 3710 3711 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3712 if (!iter) 3713 return NULL; 3714 3715 cpu_buffer = buffer->buffers[cpu]; 3716 3717 iter->cpu_buffer = cpu_buffer; 3718 3719 atomic_inc(&buffer->resize_disabled); 3720 atomic_inc(&cpu_buffer->record_disabled); 3721 3722 return iter; 3723 } 3724 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 3725 3726 /** 3727 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 3728 * 3729 * All previously invoked ring_buffer_read_prepare calls to prepare 3730 * iterators will be synchronized. Afterwards, ring_buffer_read_start 3731 * calls on those iterators are allowed. 3732 */ 3733 void 3734 ring_buffer_read_prepare_sync(void) 3735 { 3736 synchronize_sched(); 3737 } 3738 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 3739 3740 /** 3741 * ring_buffer_read_start - start a non-consuming read of the buffer 3742 * @iter: The iterator returned by ring_buffer_read_prepare 3743 * 3744 * This finalizes the startup of an iteration through the buffer. 3745 * The iterator comes from a call to ring_buffer_read_prepare and 3746 * an intervening ring_buffer_read_prepare_sync must have been 3747 * performed. 3748 * 3749 * Must be paired with ring_buffer_read_finish.
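 *
 * A minimal sketch of the whole non-consuming read sequence (error
 * handling is elided; "handle()" is a hypothetical callback):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);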
3750 */ 3751 void 3752 ring_buffer_read_start(struct ring_buffer_iter *iter) 3753 { 3754 struct ring_buffer_per_cpu *cpu_buffer; 3755 unsigned long flags; 3756 3757 if (!iter) 3758 return; 3759 3760 cpu_buffer = iter->cpu_buffer; 3761 3762 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3763 arch_spin_lock(&cpu_buffer->lock); 3764 rb_iter_reset(iter); 3765 arch_spin_unlock(&cpu_buffer->lock); 3766 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3767 } 3768 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 3769 3770 /** 3771 * ring_buffer_finish - finish reading the iterator of the buffer 3772 * @iter: The iterator retrieved by ring_buffer_start 3773 * 3774 * This re-enables the recording to the buffer, and frees the 3775 * iterator. 3776 */ 3777 void 3778 ring_buffer_read_finish(struct ring_buffer_iter *iter) 3779 { 3780 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3781 3782 /* 3783 * Ring buffer is disabled from recording, here's a good place 3784 * to check the integrity of the ring buffer. 3785 */ 3786 rb_check_pages(cpu_buffer); 3787 3788 atomic_dec(&cpu_buffer->record_disabled); 3789 atomic_dec(&cpu_buffer->buffer->resize_disabled); 3790 kfree(iter); 3791 } 3792 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 3793 3794 /** 3795 * ring_buffer_read - read the next item in the ring buffer by the iterator 3796 * @iter: The ring buffer iterator 3797 * @ts: The time stamp of the event read. 3798 * 3799 * This reads the next event in the ring buffer and increments the iterator. 3800 */ 3801 struct ring_buffer_event * 3802 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 3803 { 3804 struct ring_buffer_event *event; 3805 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3806 unsigned long flags; 3807 3808 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3809 again: 3810 event = rb_iter_peek(iter, ts); 3811 if (!event) 3812 goto out; 3813 3814 if (event->type_len == RINGBUF_TYPE_PADDING) 3815 goto again; 3816 3817 rb_advance_iter(iter); 3818 out: 3819 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3820 3821 return event; 3822 } 3823 EXPORT_SYMBOL_GPL(ring_buffer_read); 3824 3825 /** 3826 * ring_buffer_size - return the size of the ring buffer (in bytes) 3827 * @buffer: The ring buffer. 3828 */ 3829 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) 3830 { 3831 /* 3832 * Earlier, this method returned 3833 * BUF_PAGE_SIZE * buffer->nr_pages 3834 * Since the nr_pages field is now removed, we have converted this to 3835 * return the per cpu buffer value. 
3836 */ 3837 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3838 return 0; 3839 3840 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 3841 } 3842 EXPORT_SYMBOL_GPL(ring_buffer_size); 3843 3844 static void 3845 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 3846 { 3847 rb_head_page_deactivate(cpu_buffer); 3848 3849 cpu_buffer->head_page 3850 = list_entry(cpu_buffer->pages, struct buffer_page, list); 3851 local_set(&cpu_buffer->head_page->write, 0); 3852 local_set(&cpu_buffer->head_page->entries, 0); 3853 local_set(&cpu_buffer->head_page->page->commit, 0); 3854 3855 cpu_buffer->head_page->read = 0; 3856 3857 cpu_buffer->tail_page = cpu_buffer->head_page; 3858 cpu_buffer->commit_page = cpu_buffer->head_page; 3859 3860 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 3861 INIT_LIST_HEAD(&cpu_buffer->new_pages); 3862 local_set(&cpu_buffer->reader_page->write, 0); 3863 local_set(&cpu_buffer->reader_page->entries, 0); 3864 local_set(&cpu_buffer->reader_page->page->commit, 0); 3865 cpu_buffer->reader_page->read = 0; 3866 3867 local_set(&cpu_buffer->commit_overrun, 0); 3868 local_set(&cpu_buffer->entries_bytes, 0); 3869 local_set(&cpu_buffer->overrun, 0); 3870 local_set(&cpu_buffer->entries, 0); 3871 local_set(&cpu_buffer->committing, 0); 3872 local_set(&cpu_buffer->commits, 0); 3873 cpu_buffer->read = 0; 3874 cpu_buffer->read_bytes = 0; 3875 3876 cpu_buffer->write_stamp = 0; 3877 cpu_buffer->read_stamp = 0; 3878 3879 cpu_buffer->lost_events = 0; 3880 cpu_buffer->last_overrun = 0; 3881 3882 rb_head_page_activate(cpu_buffer); 3883 } 3884 3885 /** 3886 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 3887 * @buffer: The ring buffer to reset a per cpu buffer of 3888 * @cpu: The CPU buffer to be reset 3889 */ 3890 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) 3891 { 3892 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 3893 unsigned long flags; 3894 3895 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3896 return; 3897 3898 atomic_inc(&buffer->resize_disabled); 3899 atomic_inc(&cpu_buffer->record_disabled); 3900 3901 /* Make sure all commits have finished */ 3902 synchronize_sched(); 3903 3904 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3905 3906 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3907 goto out; 3908 3909 arch_spin_lock(&cpu_buffer->lock); 3910 3911 rb_reset_cpu(cpu_buffer); 3912 3913 arch_spin_unlock(&cpu_buffer->lock); 3914 3915 out: 3916 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3917 3918 atomic_dec(&cpu_buffer->record_disabled); 3919 atomic_dec(&buffer->resize_disabled); 3920 } 3921 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 3922 3923 /** 3924 * ring_buffer_reset - reset a ring buffer 3925 * @buffer: The ring buffer to reset all cpu buffers 3926 */ 3927 void ring_buffer_reset(struct ring_buffer *buffer) 3928 { 3929 int cpu; 3930 3931 for_each_buffer_cpu(buffer, cpu) 3932 ring_buffer_reset_cpu(buffer, cpu); 3933 } 3934 EXPORT_SYMBOL_GPL(ring_buffer_reset); 3935 3936 /** 3937 * rind_buffer_empty - is the ring buffer empty? 
3938 * @buffer: The ring buffer to test 3939 */ 3940 int ring_buffer_empty(struct ring_buffer *buffer) 3941 { 3942 struct ring_buffer_per_cpu *cpu_buffer; 3943 unsigned long flags; 3944 int dolock; 3945 int cpu; 3946 int ret; 3947 3948 dolock = rb_ok_to_lock(); 3949 3950 /* yes this is racy, but if you don't like the race, lock the buffer */ 3951 for_each_buffer_cpu(buffer, cpu) { 3952 cpu_buffer = buffer->buffers[cpu]; 3953 local_irq_save(flags); 3954 if (dolock) 3955 raw_spin_lock(&cpu_buffer->reader_lock); 3956 ret = rb_per_cpu_empty(cpu_buffer); 3957 if (dolock) 3958 raw_spin_unlock(&cpu_buffer->reader_lock); 3959 local_irq_restore(flags); 3960 3961 if (!ret) 3962 return 0; 3963 } 3964 3965 return 1; 3966 } 3967 EXPORT_SYMBOL_GPL(ring_buffer_empty); 3968 3969 /** 3970 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 3971 * @buffer: The ring buffer 3972 * @cpu: The CPU buffer to test 3973 */ 3974 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 3975 { 3976 struct ring_buffer_per_cpu *cpu_buffer; 3977 unsigned long flags; 3978 int dolock; 3979 int ret; 3980 3981 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3982 return 1; 3983 3984 dolock = rb_ok_to_lock(); 3985 3986 cpu_buffer = buffer->buffers[cpu]; 3987 local_irq_save(flags); 3988 if (dolock) 3989 raw_spin_lock(&cpu_buffer->reader_lock); 3990 ret = rb_per_cpu_empty(cpu_buffer); 3991 if (dolock) 3992 raw_spin_unlock(&cpu_buffer->reader_lock); 3993 local_irq_restore(flags); 3994 3995 return ret; 3996 } 3997 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 3998 3999 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4000 /** 4001 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 4002 * @buffer_a: One buffer to swap with 4003 * @buffer_b: The other buffer to swap with * @cpu: The CPU buffer to swap 4004 * 4005 * This function is useful for tracers that want to take a "snapshot" 4006 * of a CPU buffer and have another backup buffer lying around. 4007 * It is expected that the tracer handles the cpu buffer not being 4008 * used at the moment. 4009 */ 4010 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 4011 struct ring_buffer *buffer_b, int cpu) 4012 { 4013 struct ring_buffer_per_cpu *cpu_buffer_a; 4014 struct ring_buffer_per_cpu *cpu_buffer_b; 4015 int ret = -EINVAL; 4016 4017 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 4018 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 4019 goto out; 4020 4021 cpu_buffer_a = buffer_a->buffers[cpu]; 4022 cpu_buffer_b = buffer_b->buffers[cpu]; 4023 4024 /* At least make sure the two buffers are somewhat the same */ 4025 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 4026 goto out; 4027 4028 ret = -EAGAIN; 4029 4030 if (ring_buffer_flags != RB_BUFFERS_ON) 4031 goto out; 4032 4033 if (atomic_read(&buffer_a->record_disabled)) 4034 goto out; 4035 4036 if (atomic_read(&buffer_b->record_disabled)) 4037 goto out; 4038 4039 if (atomic_read(&cpu_buffer_a->record_disabled)) 4040 goto out; 4041 4042 if (atomic_read(&cpu_buffer_b->record_disabled)) 4043 goto out; 4044 4045 /* 4046 * We can't do a synchronize_sched here because this 4047 * function can be called in atomic context. 4048 * Normally this will be called from the same CPU as cpu. 4049 * If not it's up to the caller to protect this.
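 *
 * (The record_disabled increments just below stop new writes from
 * starting on these per-CPU buffers; the committing checks then bail
 * out with -EBUSY if a write was already in flight, since swapping
 * the pages under a half-finished commit would corrupt the buffers.)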
4050 */ 4051 atomic_inc(&cpu_buffer_a->record_disabled); 4052 atomic_inc(&cpu_buffer_b->record_disabled); 4053 4054 ret = -EBUSY; 4055 if (local_read(&cpu_buffer_a->committing)) 4056 goto out_dec; 4057 if (local_read(&cpu_buffer_b->committing)) 4058 goto out_dec; 4059 4060 buffer_a->buffers[cpu] = cpu_buffer_b; 4061 buffer_b->buffers[cpu] = cpu_buffer_a; 4062 4063 cpu_buffer_b->buffer = buffer_a; 4064 cpu_buffer_a->buffer = buffer_b; 4065 4066 ret = 0; 4067 4068 out_dec: 4069 atomic_dec(&cpu_buffer_a->record_disabled); 4070 atomic_dec(&cpu_buffer_b->record_disabled); 4071 out: 4072 return ret; 4073 } 4074 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 4075 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 4076 4077 /** 4078 * ring_buffer_alloc_read_page - allocate a page to read from buffer 4079 * @buffer: the buffer to allocate for. * @cpu: the cpu buffer to allocate a read page for. 4080 * 4081 * This function is used in conjunction with ring_buffer_read_page. 4082 * When reading a full page from the ring buffer, these functions 4083 * can be used to speed up the process. The calling function should 4084 * allocate a few pages first with this function. Then when it 4085 * needs to get pages from the ring buffer, it passes the result 4086 * of this function into ring_buffer_read_page, which will swap 4087 * the page that was allocated, with the read page of the buffer. 4088 * 4089 * Returns: 4090 * The page allocated, or NULL on error. 4091 */ 4092 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) 4093 { 4094 struct buffer_data_page *bpage; 4095 struct page *page; 4096 4097 page = alloc_pages_node(cpu_to_node(cpu), 4098 GFP_KERNEL | __GFP_NORETRY, 0); 4099 if (!page) 4100 return NULL; 4101 4102 bpage = page_address(page); 4103 4104 rb_init_page(bpage); 4105 4106 return bpage; 4107 } 4108 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 4109 4110 /** 4111 * ring_buffer_free_read_page - free an allocated read page 4112 * @buffer: the buffer the page was allocated for 4113 * @data: the page to free 4114 * 4115 * Free a page allocated from ring_buffer_alloc_read_page. 4116 */ 4117 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) 4118 { 4119 free_page((unsigned long)data); 4120 } 4121 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 4122 4123 /** 4124 * ring_buffer_read_page - extract a page from the ring buffer 4125 * @buffer: buffer to extract from 4126 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 4127 * @len: amount to extract 4128 * @cpu: the cpu of the buffer to extract 4129 * @full: should the extraction only happen when the page is full. 4130 * 4131 * This function will pull out a page from the ring buffer and consume it. 4132 * @data_page must be the address of the variable that was returned 4133 * from ring_buffer_alloc_read_page. This is because the page might be used 4134 * to swap with a page in the ring buffer. 4135 * 4136 * for example: 4137 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 4138 * if (!rpage) 4139 * return error; 4140 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 4141 * if (ret >= 0) 4142 * process_page(rpage, ret); 4143 * 4144 * When @full is set, the function will not return the data unless 4145 * the writer is off the reader page. 4146 * 4147 * Note: it is up to the calling functions to handle sleeps and wakeups. 4148 * The ring buffer can be used anywhere in the kernel and can not 4149 * blindly call wake_up. The layer that uses the ring buffer must be 4150 * responsible for that.
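 *
 * When the caller is done with a page it should hand it back with
 * ring_buffer_free_read_page(), e.g. (continuing the sketch above,
 * where rpage came from ring_buffer_alloc_read_page):
 *
 *	ring_buffer_free_read_page(buffer, rpage);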
4151 * 4152 * Returns: 4153 * >=0 if data has been transferred, returns the offset of consumed data. 4154 * <0 if no data has been transferred. 4155 */ 4156 int ring_buffer_read_page(struct ring_buffer *buffer, 4157 void **data_page, size_t len, int cpu, int full) 4158 { 4159 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4160 struct ring_buffer_event *event; 4161 struct buffer_data_page *bpage; 4162 struct buffer_page *reader; 4163 unsigned long missed_events; 4164 unsigned long flags; 4165 unsigned int commit; 4166 unsigned int read; 4167 u64 save_timestamp; 4168 int ret = -1; 4169 4170 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4171 goto out; 4172 4173 /* 4174 * If len is not big enough to hold the page header, then 4175 * we can not copy anything. 4176 */ 4177 if (len <= BUF_PAGE_HDR_SIZE) 4178 goto out; 4179 4180 len -= BUF_PAGE_HDR_SIZE; 4181 4182 if (!data_page) 4183 goto out; 4184 4185 bpage = *data_page; 4186 if (!bpage) 4187 goto out; 4188 4189 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4190 4191 reader = rb_get_reader_page(cpu_buffer); 4192 if (!reader) 4193 goto out_unlock; 4194 4195 event = rb_reader_event(cpu_buffer); 4196 4197 read = reader->read; 4198 commit = rb_page_commit(reader); 4199 4200 /* Check if any events were dropped */ 4201 missed_events = cpu_buffer->lost_events; 4202 4203 /* 4204 * If this page has been partially read or 4205 * if len is not big enough to read the rest of the page or 4206 * a writer is still on the page, then 4207 * we must copy the data from the page to the buffer. 4208 * Otherwise, we can simply swap the page with the one passed in. 4209 */ 4210 if (read || (len < (commit - read)) || 4211 cpu_buffer->reader_page == cpu_buffer->commit_page) { 4212 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 4213 unsigned int rpos = read; 4214 unsigned int pos = 0; 4215 unsigned int size; 4216 4217 if (full) 4218 goto out_unlock; 4219 4220 if (len > (commit - read)) 4221 len = (commit - read); 4222 4223 /* Always keep the time extend and data together */ 4224 size = rb_event_ts_length(event); 4225 4226 if (len < size) 4227 goto out_unlock; 4228 4229 /* save the current timestamp, since the user will need it */ 4230 save_timestamp = cpu_buffer->read_stamp; 4231 4232 /* Need to copy one event at a time */ 4233 do { 4234 /* We need the size of one event, because 4235 * rb_advance_reader only advances by one event, 4236 * whereas rb_event_ts_length may include the size of 4237 * one or two events. 4238 * We have already ensured there's enough space if this 4239 * is a time extend. 
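			 * A time extend is an 8 byte event (RB_LEN_TIME_EXTEND)
			 * carrying only the extra time delta; the data event
			 * that follows it is copied on the next pass through
			 * this loop.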
*/ 4240 size = rb_event_length(event); 4241 memcpy(bpage->data + pos, rpage->data + rpos, size); 4242 4243 len -= size; 4244 4245 rb_advance_reader(cpu_buffer); 4246 rpos = reader->read; 4247 pos += size; 4248 4249 if (rpos >= commit) 4250 break; 4251 4252 event = rb_reader_event(cpu_buffer); 4253 /* Always keep the time extend and data together */ 4254 size = rb_event_ts_length(event); 4255 } while (len >= size); 4256 4257 /* update bpage */ 4258 local_set(&bpage->commit, pos); 4259 bpage->time_stamp = save_timestamp; 4260 4261 /* we copied everything to the beginning */ 4262 read = 0; 4263 } else { 4264 /* update the entry counter */ 4265 cpu_buffer->read += rb_page_entries(reader); 4266 cpu_buffer->read_bytes += BUF_PAGE_SIZE; 4267 4268 /* swap the pages */ 4269 rb_init_page(bpage); 4270 bpage = reader->page; 4271 reader->page = *data_page; 4272 local_set(&reader->write, 0); 4273 local_set(&reader->entries, 0); 4274 reader->read = 0; 4275 *data_page = bpage; 4276 4277 /* 4278 * Use the real_end for the data size, 4279 * This gives us a chance to store the lost events 4280 * on the page. 4281 */ 4282 if (reader->real_end) 4283 local_set(&bpage->commit, reader->real_end); 4284 } 4285 ret = read; 4286 4287 cpu_buffer->lost_events = 0; 4288 4289 commit = local_read(&bpage->commit); 4290 /* 4291 * Set a flag in the commit field if we lost events 4292 */ 4293 if (missed_events) { 4294 /* If there is room at the end of the page to save the 4295 * missed events, then record it there. 4296 */ 4297 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 4298 memcpy(&bpage->data[commit], &missed_events, 4299 sizeof(missed_events)); 4300 local_add(RB_MISSED_STORED, &bpage->commit); 4301 commit += sizeof(missed_events); 4302 } 4303 local_add(RB_MISSED_EVENTS, &bpage->commit); 4304 } 4305 4306 /* 4307 * This page may be off to user land. Zero it out here. 4308 */ 4309 if (commit < BUF_PAGE_SIZE) 4310 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 4311 4312 out_unlock: 4313 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4314 4315 out: 4316 return ret; 4317 } 4318 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 4319 4320 #ifdef CONFIG_HOTPLUG_CPU 4321 static int rb_cpu_notify(struct notifier_block *self, 4322 unsigned long action, void *hcpu) 4323 { 4324 struct ring_buffer *buffer = 4325 container_of(self, struct ring_buffer, cpu_notify); 4326 long cpu = (long)hcpu; 4327 int cpu_i, nr_pages_same; 4328 unsigned int nr_pages; 4329 4330 switch (action) { 4331 case CPU_UP_PREPARE: 4332 case CPU_UP_PREPARE_FROZEN: 4333 if (cpumask_test_cpu(cpu, buffer->cpumask)) 4334 return NOTIFY_OK; 4335 4336 nr_pages = 0; 4337 nr_pages_same = 1; 4338 /* check if all cpu sizes are same */ 4339 for_each_buffer_cpu(buffer, cpu_i) { 4340 /* fill in the size from first enabled cpu */ 4341 if (nr_pages == 0) 4342 nr_pages = buffer->buffers[cpu_i]->nr_pages; 4343 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 4344 nr_pages_same = 0; 4345 break; 4346 } 4347 } 4348 /* allocate minimum pages, user can later expand it */ 4349 if (!nr_pages_same) 4350 nr_pages = 2; 4351 buffer->buffers[cpu] = 4352 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 4353 if (!buffer->buffers[cpu]) { 4354 WARN(1, "failed to allocate ring buffer on CPU %ld\n", 4355 cpu); 4356 return NOTIFY_OK; 4357 } 4358 smp_wmb(); 4359 cpumask_set_cpu(cpu, buffer->cpumask); 4360 break; 4361 case CPU_DOWN_PREPARE: 4362 case CPU_DOWN_PREPARE_FROZEN: 4363 /* 4364 * Do nothing. 
4365 * If we were to free the buffer, then the user would 4366 * lose any trace that was in the buffer. 4367 */ 4368 break; 4369 default: 4370 break; 4371 } 4372 return NOTIFY_OK; 4373 } 4374 #endif 4375
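/*
 * Illustrative sketch (not part of the ring buffer code itself) of how a
 * reader typically drives the iterator interface above: the iterator is
 * obtained and synchronized with ring_buffer_read_prepare() and
 * ring_buffer_read_prepare_sync(), started with ring_buffer_read_start(),
 * walked with ring_buffer_read(), and released with
 * ring_buffer_read_finish().  process_event() is a hypothetical consumer:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(event, ts);
 *
 *	ring_buffer_read_finish(iter);
 */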