1 /* 2 * Generic ring buffer 3 * 4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 5 */ 6 #include <linux/ftrace_event.h> 7 #include <linux/ring_buffer.h> 8 #include <linux/trace_clock.h> 9 #include <linux/trace_seq.h> 10 #include <linux/spinlock.h> 11 #include <linux/irq_work.h> 12 #include <linux/debugfs.h> 13 #include <linux/uaccess.h> 14 #include <linux/hardirq.h> 15 #include <linux/kthread.h> /* for self test */ 16 #include <linux/kmemcheck.h> 17 #include <linux/module.h> 18 #include <linux/percpu.h> 19 #include <linux/mutex.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/init.h> 23 #include <linux/hash.h> 24 #include <linux/list.h> 25 #include <linux/cpu.h> 26 #include <linux/fs.h> 27 28 #include <asm/local.h> 29 30 static void update_pages_handler(struct work_struct *work); 31 32 /* 33 * The ring buffer header is special. We must manually up keep it. 34 */ 35 int ring_buffer_print_entry_header(struct trace_seq *s) 36 { 37 int ret; 38 39 ret = trace_seq_puts(s, "# compressed entry header\n"); 40 ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); 41 ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); 42 ret = trace_seq_puts(s, "\tarray : 32 bits\n"); 43 ret = trace_seq_putc(s, '\n'); 44 ret = trace_seq_printf(s, "\tpadding : type == %d\n", 45 RINGBUF_TYPE_PADDING); 46 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", 47 RINGBUF_TYPE_TIME_EXTEND); 48 ret = trace_seq_printf(s, "\tdata max type_len == %d\n", 49 RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 50 51 return ret; 52 } 53 54 /* 55 * The ring buffer is made up of a list of pages. A separate list of pages is 56 * allocated for each CPU. A writer may only write to a buffer that is 57 * associated with the CPU it is currently executing on. A reader may read 58 * from any per cpu buffer. 59 * 60 * The reader is special. For each per cpu buffer, the reader has its own 61 * reader page. When a reader has read the entire reader page, this reader 62 * page is swapped with another page in the ring buffer. 63 * 64 * Now, as long as the writer is off the reader page, the reader can do what 65 * ever it wants with that page. The writer will never write to that page 66 * again (as long as it is out of the ring buffer). 67 * 68 * Here's some silly ASCII art. 69 * 70 * +------+ 71 * |reader| RING BUFFER 72 * |page | 73 * +------+ +---+ +---+ +---+ 74 * | |-->| |-->| | 75 * +---+ +---+ +---+ 76 * ^ | 77 * | | 78 * +---------------+ 79 * 80 * 81 * +------+ 82 * |reader| RING BUFFER 83 * |page |------------------v 84 * +------+ +---+ +---+ +---+ 85 * | |-->| |-->| | 86 * +---+ +---+ +---+ 87 * ^ | 88 * | | 89 * +---------------+ 90 * 91 * 92 * +------+ 93 * |reader| RING BUFFER 94 * |page |------------------v 95 * +------+ +---+ +---+ +---+ 96 * ^ | |-->| |-->| | 97 * | +---+ +---+ +---+ 98 * | | 99 * | | 100 * +------------------------------+ 101 * 102 * 103 * +------+ 104 * |buffer| RING BUFFER 105 * |page |------------------v 106 * +------+ +---+ +---+ +---+ 107 * ^ | | | |-->| | 108 * | New +---+ +---+ +---+ 109 * | Reader------^ | 110 * | page | 111 * +------------------------------+ 112 * 113 * 114 * After we make this swap, the reader can hand this page off to the splice 115 * code and be done with it. It can even allocate a new page if it needs to 116 * and swap that into the ring buffer. 117 * 118 * We will be using cmpxchg soon to make all this lockless. 119 * 120 */ 121 122 /* 123 * A fast way to enable or disable all ring buffers is to 124 * call tracing_on or tracing_off. 
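 *
 * (Illustrative aside, added for clarity; not part of the original
 * source.)  The compressed entry header printed by
 * ring_buffer_print_entry_header() above corresponds roughly to the
 * following layout -- a sketch only, the real definition lives in
 * include/linux/ring_buffer.h:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *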
Turning off the ring buffers 125 * prevents all ring buffers from being recorded to. 126 * Turning this switch on, makes it OK to write to the 127 * ring buffer, if the ring buffer is enabled itself. 128 * 129 * There's three layers that must be on in order to write 130 * to the ring buffer. 131 * 132 * 1) This global flag must be set. 133 * 2) The ring buffer must be enabled for recording. 134 * 3) The per cpu buffer must be enabled for recording. 135 * 136 * In case of an anomaly, this global flag has a bit set that 137 * will permantly disable all ring buffers. 138 */ 139 140 /* 141 * Global flag to disable all recording to ring buffers 142 * This has two bits: ON, DISABLED 143 * 144 * ON DISABLED 145 * ---- ---------- 146 * 0 0 : ring buffers are off 147 * 1 0 : ring buffers are on 148 * X 1 : ring buffers are permanently disabled 149 */ 150 151 enum { 152 RB_BUFFERS_ON_BIT = 0, 153 RB_BUFFERS_DISABLED_BIT = 1, 154 }; 155 156 enum { 157 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, 158 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, 159 }; 160 161 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 162 163 /* Used for individual buffers (after the counter) */ 164 #define RB_BUFFER_OFF (1 << 20) 165 166 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 167 168 /** 169 * tracing_off_permanent - permanently disable ring buffers 170 * 171 * This function, once called, will disable all ring buffers 172 * permanently. 173 */ 174 void tracing_off_permanent(void) 175 { 176 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 177 } 178 179 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 180 #define RB_ALIGNMENT 4U 181 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 182 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 183 184 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS 185 # define RB_FORCE_8BYTE_ALIGNMENT 0 186 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT 187 #else 188 # define RB_FORCE_8BYTE_ALIGNMENT 1 189 # define RB_ARCH_ALIGNMENT 8U 190 #endif 191 192 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) 193 194 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 195 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 196 197 enum { 198 RB_LEN_TIME_EXTEND = 8, 199 RB_LEN_TIME_STAMP = 16, 200 }; 201 202 #define skip_time_extend(event) \ 203 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) 204 205 static inline int rb_null_event(struct ring_buffer_event *event) 206 { 207 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; 208 } 209 210 static void rb_event_set_padding(struct ring_buffer_event *event) 211 { 212 /* padding has a NULL time_delta */ 213 event->type_len = RINGBUF_TYPE_PADDING; 214 event->time_delta = 0; 215 } 216 217 static unsigned 218 rb_event_data_length(struct ring_buffer_event *event) 219 { 220 unsigned length; 221 222 if (event->type_len) 223 length = event->type_len * RB_ALIGNMENT; 224 else 225 length = event->array[0]; 226 return length + RB_EVNT_HDR_SIZE; 227 } 228 229 /* 230 * Return the length of the given event. Will return 231 * the length of the time extend if the event is a 232 * time extend. 
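 *
 * (Worked example, added; not part of the original source.)  For a
 * data event, rb_event_data_length() above gives the total size: a
 * payload small enough to be described by type_len is stored as
 * type_len * RB_ALIGNMENT, e.g.
 *
 *	type_len == 3  ->  3 * 4 = 12 bytes of payload + 4 byte header
 *
 * while larger payloads keep their byte count in array[0].  A time
 * extend, by contrast, is always RB_LEN_TIME_EXTEND (8) bytes.
 *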
233 */ 234 static inline unsigned 235 rb_event_length(struct ring_buffer_event *event) 236 { 237 switch (event->type_len) { 238 case RINGBUF_TYPE_PADDING: 239 if (rb_null_event(event)) 240 /* undefined */ 241 return -1; 242 return event->array[0] + RB_EVNT_HDR_SIZE; 243 244 case RINGBUF_TYPE_TIME_EXTEND: 245 return RB_LEN_TIME_EXTEND; 246 247 case RINGBUF_TYPE_TIME_STAMP: 248 return RB_LEN_TIME_STAMP; 249 250 case RINGBUF_TYPE_DATA: 251 return rb_event_data_length(event); 252 default: 253 BUG(); 254 } 255 /* not hit */ 256 return 0; 257 } 258 259 /* 260 * Return total length of time extend and data, 261 * or just the event length for all other events. 262 */ 263 static inline unsigned 264 rb_event_ts_length(struct ring_buffer_event *event) 265 { 266 unsigned len = 0; 267 268 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 269 /* time extends include the data event after it */ 270 len = RB_LEN_TIME_EXTEND; 271 event = skip_time_extend(event); 272 } 273 return len + rb_event_length(event); 274 } 275 276 /** 277 * ring_buffer_event_length - return the length of the event 278 * @event: the event to get the length of 279 * 280 * Returns the size of the data load of a data event. 281 * If the event is something other than a data event, it 282 * returns the size of the event itself. With the exception 283 * of a TIME EXTEND, where it still returns the size of the 284 * data load of the data event after it. 285 */ 286 unsigned ring_buffer_event_length(struct ring_buffer_event *event) 287 { 288 unsigned length; 289 290 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 291 event = skip_time_extend(event); 292 293 length = rb_event_length(event); 294 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 295 return length; 296 length -= RB_EVNT_HDR_SIZE; 297 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 298 length -= sizeof(event->array[0]); 299 return length; 300 } 301 EXPORT_SYMBOL_GPL(ring_buffer_event_length); 302 303 /* inline for ring buffer fast paths */ 304 static void * 305 rb_event_data(struct ring_buffer_event *event) 306 { 307 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 308 event = skip_time_extend(event); 309 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 310 /* If length is in len field, then array[0] has the data */ 311 if (event->type_len) 312 return (void *)&event->array[0]; 313 /* Otherwise length is in array[0] and array[1] has the data */ 314 return (void *)&event->array[1]; 315 } 316 317 /** 318 * ring_buffer_event_data - return the data of the event 319 * @event: the event to get the data from 320 */ 321 void *ring_buffer_event_data(struct ring_buffer_event *event) 322 { 323 return rb_event_data(event); 324 } 325 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 326 327 #define for_each_buffer_cpu(buffer, cpu) \ 328 for_each_cpu(cpu, buffer->cpumask) 329 330 #define TS_SHIFT 27 331 #define TS_MASK ((1ULL << TS_SHIFT) - 1) 332 #define TS_DELTA_TEST (~TS_MASK) 333 334 /* Flag when events were overwritten */ 335 #define RB_MISSED_EVENTS (1 << 31) 336 /* Missed count stored at end */ 337 #define RB_MISSED_STORED (1 << 30) 338 339 struct buffer_data_page { 340 u64 time_stamp; /* page time stamp */ 341 local_t commit; /* write committed index */ 342 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ 343 }; 344 345 /* 346 * Note, the buffer_page list must be first. The buffer pages 347 * are allocated in cache lines, which means that each buffer 348 * page will be at the beginning of a cache line, and thus 349 * the least significant bits will be zero. 
We use this to 350 * add flags in the list struct pointers, to make the ring buffer 351 * lockless. 352 */ 353 struct buffer_page { 354 struct list_head list; /* list of buffer pages */ 355 local_t write; /* index for next write */ 356 unsigned read; /* index for next read */ 357 local_t entries; /* entries on this page */ 358 unsigned long real_end; /* real end of data */ 359 struct buffer_data_page *page; /* Actual data page */ 360 }; 361 362 /* 363 * The buffer page counters, write and entries, must be reset 364 * atomically when crossing page boundaries. To synchronize this 365 * update, two counters are inserted into the number. One is 366 * the actual counter for the write position or count on the page. 367 * 368 * The other is a counter of updaters. Before an update happens 369 * the update partition of the counter is incremented. This will 370 * allow the updater to update the counter atomically. 371 * 372 * The counter is 20 bits, and the state data is 12. 373 */ 374 #define RB_WRITE_MASK 0xfffff 375 #define RB_WRITE_INTCNT (1 << 20) 376 377 static void rb_init_page(struct buffer_data_page *bpage) 378 { 379 local_set(&bpage->commit, 0); 380 } 381 382 /** 383 * ring_buffer_page_len - the size of data on the page. 384 * @page: The page to read 385 * 386 * Returns the amount of data on the page, including buffer page header. 387 */ 388 size_t ring_buffer_page_len(void *page) 389 { 390 return local_read(&((struct buffer_data_page *)page)->commit) 391 + BUF_PAGE_HDR_SIZE; 392 } 393 394 /* 395 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 396 * this issue out. 397 */ 398 static void free_buffer_page(struct buffer_page *bpage) 399 { 400 free_page((unsigned long)bpage->page); 401 kfree(bpage); 402 } 403 404 /* 405 * We need to fit the time_stamp delta into 27 bits. 406 */ 407 static inline int test_time_stamp(u64 delta) 408 { 409 if (delta & TS_DELTA_TEST) 410 return 1; 411 return 0; 412 } 413 414 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) 415 416 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ 417 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 418 419 int ring_buffer_print_page_header(struct trace_seq *s) 420 { 421 struct buffer_data_page field; 422 int ret; 423 424 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" 425 "offset:0;\tsize:%u;\tsigned:%u;\n", 426 (unsigned int)sizeof(field.time_stamp), 427 (unsigned int)is_signed_type(u64)); 428 429 ret = trace_seq_printf(s, "\tfield: local_t commit;\t" 430 "offset:%u;\tsize:%u;\tsigned:%u;\n", 431 (unsigned int)offsetof(typeof(field), commit), 432 (unsigned int)sizeof(field.commit), 433 (unsigned int)is_signed_type(long)); 434 435 ret = trace_seq_printf(s, "\tfield: int overwrite;\t" 436 "offset:%u;\tsize:%u;\tsigned:%u;\n", 437 (unsigned int)offsetof(typeof(field), commit), 438 1, 439 (unsigned int)is_signed_type(long)); 440 441 ret = trace_seq_printf(s, "\tfield: char data;\t" 442 "offset:%u;\tsize:%u;\tsigned:%u;\n", 443 (unsigned int)offsetof(typeof(field), data), 444 (unsigned int)BUF_PAGE_SIZE, 445 (unsigned int)is_signed_type(char)); 446 447 return ret; 448 } 449 450 struct rb_irq_work { 451 struct irq_work work; 452 wait_queue_head_t waiters; 453 bool waiters_pending; 454 }; 455 456 /* 457 * head_page == tail_page && head == tail then buffer is empty. 
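 *
 * (Illustrative sketch, added; not part of the original source.)  In
 * code form the check described above amounts to something like the
 * following, where rb_per_cpu_empty_sketch() is a made-up name:
 *
 *	static int rb_per_cpu_empty_sketch(struct ring_buffer_per_cpu *cpu_buffer)
 *	{
 *		struct buffer_page *head = cpu_buffer->head_page;
 *		struct buffer_page *tail = cpu_buffer->tail_page;
 *
 *		return head == tail &&
 *		       head->read == local_read(&head->page->commit);
 *	}
 *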
458 */ 459 struct ring_buffer_per_cpu { 460 int cpu; 461 atomic_t record_disabled; 462 struct ring_buffer *buffer; 463 raw_spinlock_t reader_lock; /* serialize readers */ 464 arch_spinlock_t lock; 465 struct lock_class_key lock_key; 466 unsigned int nr_pages; 467 struct list_head *pages; 468 struct buffer_page *head_page; /* read from head */ 469 struct buffer_page *tail_page; /* write to tail */ 470 struct buffer_page *commit_page; /* committed pages */ 471 struct buffer_page *reader_page; 472 unsigned long lost_events; 473 unsigned long last_overrun; 474 local_t entries_bytes; 475 local_t entries; 476 local_t overrun; 477 local_t commit_overrun; 478 local_t dropped_events; 479 local_t committing; 480 local_t commits; 481 unsigned long read; 482 unsigned long read_bytes; 483 u64 write_stamp; 484 u64 read_stamp; 485 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 486 int nr_pages_to_update; 487 struct list_head new_pages; /* new pages to add */ 488 struct work_struct update_pages_work; 489 struct completion update_done; 490 491 struct rb_irq_work irq_work; 492 }; 493 494 struct ring_buffer { 495 unsigned flags; 496 int cpus; 497 atomic_t record_disabled; 498 atomic_t resize_disabled; 499 cpumask_var_t cpumask; 500 501 struct lock_class_key *reader_lock_key; 502 503 struct mutex mutex; 504 505 struct ring_buffer_per_cpu **buffers; 506 507 #ifdef CONFIG_HOTPLUG_CPU 508 struct notifier_block cpu_notify; 509 #endif 510 u64 (*clock)(void); 511 512 struct rb_irq_work irq_work; 513 }; 514 515 struct ring_buffer_iter { 516 struct ring_buffer_per_cpu *cpu_buffer; 517 unsigned long head; 518 struct buffer_page *head_page; 519 struct buffer_page *cache_reader_page; 520 unsigned long cache_read; 521 u64 read_stamp; 522 }; 523 524 /* 525 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input 526 * 527 * Schedules a delayed work to wake up any task that is blocked on the 528 * ring buffer waiters queue. 529 */ 530 static void rb_wake_up_waiters(struct irq_work *work) 531 { 532 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); 533 534 wake_up_all(&rbwork->waiters); 535 } 536 537 /** 538 * ring_buffer_wait - wait for input to the ring buffer 539 * @buffer: buffer to wait on 540 * @cpu: the cpu buffer to wait on 541 * 542 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 543 * as data is added to any of the @buffer's cpu buffers. Otherwise 544 * it will wait for data to be added to a specific cpu buffer. 545 */ 546 void ring_buffer_wait(struct ring_buffer *buffer, int cpu) 547 { 548 struct ring_buffer_per_cpu *cpu_buffer; 549 DEFINE_WAIT(wait); 550 struct rb_irq_work *work; 551 552 /* 553 * Depending on what the caller is waiting for, either any 554 * data in any cpu buffer, or a specific buffer, put the 555 * caller on the appropriate wait queue. 556 */ 557 if (cpu == RING_BUFFER_ALL_CPUS) 558 work = &buffer->irq_work; 559 else { 560 cpu_buffer = buffer->buffers[cpu]; 561 work = &cpu_buffer->irq_work; 562 } 563 564 565 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); 566 567 /* 568 * The events can happen in critical sections where 569 * checking a work queue can cause deadlocks. 570 * After adding a task to the queue, this flag is set 571 * only to notify events to try to wake up the queue 572 * using irq_work. 573 * 574 * We don't clear it even if the buffer is no longer 575 * empty. The flag only causes the next event to run 576 * irq_work to do the work queue wake up. 
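 *
 * (Illustrative sketch, added; not part of the original source.)  The
 * writer side, which is not shown in this excerpt, consumes the flag
 * roughly like this when it commits an event:
 *
 *	if (buffer->irq_work.waiters_pending) {
 *		buffer->irq_work.waiters_pending = false;
 *		irq_work_queue(&buffer->irq_work.work);
 *	}
 *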
The worse 577 * that can happen if we race with !trace_empty() is that 578 * an event will cause an irq_work to try to wake up 579 * an empty queue. 580 * 581 * There's no reason to protect this flag either, as 582 * the work queue and irq_work logic will do the necessary 583 * synchronization for the wake ups. The only thing 584 * that is necessary is that the wake up happens after 585 * a task has been queued. It's OK for spurious wake ups. 586 */ 587 work->waiters_pending = true; 588 589 if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || 590 (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) 591 schedule(); 592 593 finish_wait(&work->waiters, &wait); 594 } 595 596 /** 597 * ring_buffer_poll_wait - poll on buffer input 598 * @buffer: buffer to wait on 599 * @cpu: the cpu buffer to wait on 600 * @filp: the file descriptor 601 * @poll_table: The poll descriptor 602 * 603 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 604 * as data is added to any of the @buffer's cpu buffers. Otherwise 605 * it will wait for data to be added to a specific cpu buffer. 606 * 607 * Returns POLLIN | POLLRDNORM if data exists in the buffers, 608 * zero otherwise. 609 */ 610 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 611 struct file *filp, poll_table *poll_table) 612 { 613 struct ring_buffer_per_cpu *cpu_buffer; 614 struct rb_irq_work *work; 615 616 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 617 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 618 return POLLIN | POLLRDNORM; 619 620 if (cpu == RING_BUFFER_ALL_CPUS) 621 work = &buffer->irq_work; 622 else { 623 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 624 return -EINVAL; 625 626 cpu_buffer = buffer->buffers[cpu]; 627 work = &cpu_buffer->irq_work; 628 } 629 630 work->waiters_pending = true; 631 poll_wait(filp, &work->waiters, poll_table); 632 633 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 634 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 635 return POLLIN | POLLRDNORM; 636 return 0; 637 } 638 639 /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 640 #define RB_WARN_ON(b, cond) \ 641 ({ \ 642 int _____ret = unlikely(cond); \ 643 if (_____ret) { \ 644 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ 645 struct ring_buffer_per_cpu *__b = \ 646 (void *)b; \ 647 atomic_inc(&__b->buffer->record_disabled); \ 648 } else \ 649 atomic_inc(&b->record_disabled); \ 650 WARN_ON(1); \ 651 } \ 652 _____ret; \ 653 }) 654 655 /* Up this if you want to test the TIME_EXTENTS and normalization */ 656 #define DEBUG_SHIFT 0 657 658 static inline u64 rb_time_stamp(struct ring_buffer *buffer) 659 { 660 /* shift to debug/test normalization and TIME_EXTENTS */ 661 return buffer->clock() << DEBUG_SHIFT; 662 } 663 664 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) 665 { 666 u64 time; 667 668 preempt_disable_notrace(); 669 time = rb_time_stamp(buffer); 670 preempt_enable_no_resched_notrace(); 671 672 return time; 673 } 674 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 675 676 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 677 int cpu, u64 *ts) 678 { 679 /* Just stupid testing the normalize function and deltas */ 680 *ts >>= DEBUG_SHIFT; 681 } 682 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 683 684 /* 685 * Making the ring buffer lockless makes things tricky. 
686 * Although writes only happen on the CPU that they are on, 687 * and they only need to worry about interrupts. Reads can 688 * happen on any CPU. 689 * 690 * The reader page is always off the ring buffer, but when the 691 * reader finishes with a page, it needs to swap its page with 692 * a new one from the buffer. The reader needs to take from 693 * the head (writes go to the tail). But if a writer is in overwrite 694 * mode and wraps, it must push the head page forward. 695 * 696 * Here lies the problem. 697 * 698 * The reader must be careful to replace only the head page, and 699 * not another one. As described at the top of the file in the 700 * ASCII art, the reader sets its old page to point to the next 701 * page after head. It then sets the page after head to point to 702 * the old reader page. But if the writer moves the head page 703 * during this operation, the reader could end up with the tail. 704 * 705 * We use cmpxchg to help prevent this race. We also do something 706 * special with the page before head. We set the LSB to 1. 707 * 708 * When the writer must push the page forward, it will clear the 709 * bit that points to the head page, move the head, and then set 710 * the bit that points to the new head page. 711 * 712 * We also don't want an interrupt coming in and moving the head 713 * page on another writer. Thus we use the second LSB to catch 714 * that too. Thus: 715 * 716 * head->list->prev->next bit 1 bit 0 717 * ------- ------- 718 * Normal page 0 0 719 * Points to head page 0 1 720 * New head page 1 0 721 * 722 * Note we can not trust the prev pointer of the head page, because: 723 * 724 * +----+ +-----+ +-----+ 725 * | |------>| T |---X--->| N | 726 * | |<------| | | | 727 * +----+ +-----+ +-----+ 728 * ^ ^ | 729 * | +-----+ | | 730 * +----------| R |----------+ | 731 * | |<-----------+ 732 * +-----+ 733 * 734 * Key: ---X--> HEAD flag set in pointer 735 * T Tail page 736 * R Reader page 737 * N Next page 738 * 739 * (see __rb_reserve_next() to see where this happens) 740 * 741 * What the above shows is that the reader just swapped out 742 * the reader page with a page in the buffer, but before it 743 * could make the new header point back to the new page added 744 * it was preempted by a writer. The writer moved forward onto 745 * the new page added by the reader and is about to move forward 746 * again. 747 * 748 * You can see, it is legitimate for the previous pointer of 749 * the head (or any page) not to point back to itself. But only 750 * temporarially. 751 */ 752 753 #define RB_PAGE_NORMAL 0UL 754 #define RB_PAGE_HEAD 1UL 755 #define RB_PAGE_UPDATE 2UL 756 757 758 #define RB_FLAG_MASK 3UL 759 760 /* PAGE_MOVED is not part of the mask */ 761 #define RB_PAGE_MOVED 4UL 762 763 /* 764 * rb_list_head - remove any bit 765 */ 766 static struct list_head *rb_list_head(struct list_head *list) 767 { 768 unsigned long val = (unsigned long)list; 769 770 return (struct list_head *)(val & ~RB_FLAG_MASK); 771 } 772 773 /* 774 * rb_is_head_page - test if the given page is the head page 775 * 776 * Because the reader may move the head_page pointer, we can 777 * not trust what the head page is (it may be pointing to 778 * the reader page). But if the next page is a header page, 779 * its flags will be non zero. 
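 *
 * (Illustrative aside, added; not part of the original source.)
 * Extracting the flag from a list pointer is just a matter of masking
 * the low two bits, as the table above describes:
 *
 *	unsigned long val  = (unsigned long)head->list.prev->next;
 *	unsigned long flag = val & RB_FLAG_MASK;
 *
 *	flag == RB_PAGE_NORMAL  ->  normal page
 *	flag == RB_PAGE_HEAD    ->  this pointer points to the head page
 *	flag == RB_PAGE_UPDATE  ->  a writer is in the middle of moving it
 *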
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to point to the head page
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found.
*/ 855 rb_list_head_clear(cpu_buffer->pages); 856 857 list_for_each(hd, cpu_buffer->pages) 858 rb_list_head_clear(hd); 859 } 860 861 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 862 struct buffer_page *head, 863 struct buffer_page *prev, 864 int old_flag, int new_flag) 865 { 866 struct list_head *list; 867 unsigned long val = (unsigned long)&head->list; 868 unsigned long ret; 869 870 list = &prev->list; 871 872 val &= ~RB_FLAG_MASK; 873 874 ret = cmpxchg((unsigned long *)&list->next, 875 val | old_flag, val | new_flag); 876 877 /* check if the reader took the page */ 878 if ((ret & ~RB_FLAG_MASK) != val) 879 return RB_PAGE_MOVED; 880 881 return ret & RB_FLAG_MASK; 882 } 883 884 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 885 struct buffer_page *head, 886 struct buffer_page *prev, 887 int old_flag) 888 { 889 return rb_head_page_set(cpu_buffer, head, prev, 890 old_flag, RB_PAGE_UPDATE); 891 } 892 893 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 894 struct buffer_page *head, 895 struct buffer_page *prev, 896 int old_flag) 897 { 898 return rb_head_page_set(cpu_buffer, head, prev, 899 old_flag, RB_PAGE_HEAD); 900 } 901 902 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 903 struct buffer_page *head, 904 struct buffer_page *prev, 905 int old_flag) 906 { 907 return rb_head_page_set(cpu_buffer, head, prev, 908 old_flag, RB_PAGE_NORMAL); 909 } 910 911 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 912 struct buffer_page **bpage) 913 { 914 struct list_head *p = rb_list_head((*bpage)->list.next); 915 916 *bpage = list_entry(p, struct buffer_page, list); 917 } 918 919 static struct buffer_page * 920 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 921 { 922 struct buffer_page *head; 923 struct buffer_page *page; 924 struct list_head *list; 925 int i; 926 927 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 928 return NULL; 929 930 /* sanity check */ 931 list = cpu_buffer->pages; 932 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 933 return NULL; 934 935 page = head = cpu_buffer->head_page; 936 /* 937 * It is possible that the writer moves the header behind 938 * where we started, and we miss in one loop. 939 * A second loop should grab the header, but we'll do 940 * three loops just because I'm paranoid. 941 */ 942 for (i = 0; i < 3; i++) { 943 do { 944 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { 945 cpu_buffer->head_page = page; 946 return page; 947 } 948 rb_inc_page(cpu_buffer, &page); 949 } while (page != head); 950 } 951 952 RB_WARN_ON(cpu_buffer, 1); 953 954 return NULL; 955 } 956 957 static int rb_head_page_replace(struct buffer_page *old, 958 struct buffer_page *new) 959 { 960 unsigned long *ptr = (unsigned long *)&old->list.prev->next; 961 unsigned long val; 962 unsigned long ret; 963 964 val = *ptr & ~RB_FLAG_MASK; 965 val |= RB_PAGE_HEAD; 966 967 ret = cmpxchg(ptr, val, (unsigned long)&new->list); 968 969 return ret == val; 970 } 971 972 /* 973 * rb_tail_page_update - move the tail page forward 974 * 975 * Returns 1 if moved tail page, 0 if someone else did. 976 */ 977 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 978 struct buffer_page *tail_page, 979 struct buffer_page *next_page) 980 { 981 struct buffer_page *old_tail; 982 unsigned long old_entries; 983 unsigned long old_write; 984 int ret = 0; 985 986 /* 987 * The tail page now needs to be moved forward. 
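 *
 * (Illustrative aside, added; not part of the original source.)
 * Recall the split write field described earlier: the low 20 bits are
 * the write index, the upper 12 bits count in-flight updaters, so
 *
 *	index    = local_read(&bpage->write) & RB_WRITE_MASK;
 *	updaters = local_read(&bpage->write) >> 20;
 *
 * and the local_add_return(RB_WRITE_INTCNT, ...) calls below bump the
 * updater count while returning the old value in a single atomic step.
 *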
988 * 989 * We need to reset the tail page, but without messing 990 * with possible erasing of data brought in by interrupts 991 * that have moved the tail page and are currently on it. 992 * 993 * We add a counter to the write field to denote this. 994 */ 995 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 996 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 997 998 /* 999 * Just make sure we have seen our old_write and synchronize 1000 * with any interrupts that come in. 1001 */ 1002 barrier(); 1003 1004 /* 1005 * If the tail page is still the same as what we think 1006 * it is, then it is up to us to update the tail 1007 * pointer. 1008 */ 1009 if (tail_page == cpu_buffer->tail_page) { 1010 /* Zero the write counter */ 1011 unsigned long val = old_write & ~RB_WRITE_MASK; 1012 unsigned long eval = old_entries & ~RB_WRITE_MASK; 1013 1014 /* 1015 * This will only succeed if an interrupt did 1016 * not come in and change it. In which case, we 1017 * do not want to modify it. 1018 * 1019 * We add (void) to let the compiler know that we do not care 1020 * about the return value of these functions. We use the 1021 * cmpxchg to only update if an interrupt did not already 1022 * do it for us. If the cmpxchg fails, we don't care. 1023 */ 1024 (void)local_cmpxchg(&next_page->write, old_write, val); 1025 (void)local_cmpxchg(&next_page->entries, old_entries, eval); 1026 1027 /* 1028 * No need to worry about races with clearing out the commit. 1029 * it only can increment when a commit takes place. But that 1030 * only happens in the outer most nested commit. 1031 */ 1032 local_set(&next_page->page->commit, 0); 1033 1034 old_tail = cmpxchg(&cpu_buffer->tail_page, 1035 tail_page, next_page); 1036 1037 if (old_tail == tail_page) 1038 ret = 1; 1039 } 1040 1041 return ret; 1042 } 1043 1044 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 1045 struct buffer_page *bpage) 1046 { 1047 unsigned long val = (unsigned long)bpage; 1048 1049 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) 1050 return 1; 1051 1052 return 0; 1053 } 1054 1055 /** 1056 * rb_check_list - make sure a pointer to a list has the last bits zero 1057 */ 1058 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, 1059 struct list_head *list) 1060 { 1061 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) 1062 return 1; 1063 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) 1064 return 1; 1065 return 0; 1066 } 1067 1068 /** 1069 * rb_check_pages - integrity check of buffer pages 1070 * @cpu_buffer: CPU buffer with pages to test 1071 * 1072 * As a safety measure we check to make sure the data pages have not 1073 * been corrupted. 
1074 */ 1075 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 1076 { 1077 struct list_head *head = cpu_buffer->pages; 1078 struct buffer_page *bpage, *tmp; 1079 1080 /* Reset the head page if it exists */ 1081 if (cpu_buffer->head_page) 1082 rb_set_head_page(cpu_buffer); 1083 1084 rb_head_page_deactivate(cpu_buffer); 1085 1086 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 1087 return -1; 1088 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 1089 return -1; 1090 1091 if (rb_check_list(cpu_buffer, head)) 1092 return -1; 1093 1094 list_for_each_entry_safe(bpage, tmp, head, list) { 1095 if (RB_WARN_ON(cpu_buffer, 1096 bpage->list.next->prev != &bpage->list)) 1097 return -1; 1098 if (RB_WARN_ON(cpu_buffer, 1099 bpage->list.prev->next != &bpage->list)) 1100 return -1; 1101 if (rb_check_list(cpu_buffer, &bpage->list)) 1102 return -1; 1103 } 1104 1105 rb_head_page_activate(cpu_buffer); 1106 1107 return 0; 1108 } 1109 1110 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 1111 { 1112 int i; 1113 struct buffer_page *bpage, *tmp; 1114 1115 for (i = 0; i < nr_pages; i++) { 1116 struct page *page; 1117 /* 1118 * __GFP_NORETRY flag makes sure that the allocation fails 1119 * gracefully without invoking oom-killer and the system is 1120 * not destabilized. 1121 */ 1122 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1123 GFP_KERNEL | __GFP_NORETRY, 1124 cpu_to_node(cpu)); 1125 if (!bpage) 1126 goto free_pages; 1127 1128 list_add(&bpage->list, pages); 1129 1130 page = alloc_pages_node(cpu_to_node(cpu), 1131 GFP_KERNEL | __GFP_NORETRY, 0); 1132 if (!page) 1133 goto free_pages; 1134 bpage->page = page_address(page); 1135 rb_init_page(bpage->page); 1136 } 1137 1138 return 0; 1139 1140 free_pages: 1141 list_for_each_entry_safe(bpage, tmp, pages, list) { 1142 list_del_init(&bpage->list); 1143 free_buffer_page(bpage); 1144 } 1145 1146 return -ENOMEM; 1147 } 1148 1149 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1150 unsigned nr_pages) 1151 { 1152 LIST_HEAD(pages); 1153 1154 WARN_ON(!nr_pages); 1155 1156 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) 1157 return -ENOMEM; 1158 1159 /* 1160 * The ring buffer page list is a circular list that does not 1161 * start and end with a list head. All page list items point to 1162 * other pages. 
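 *
 * (Illustrative aside, added; not part of the original source.)
 * Concretely, the temporary LIST_HEAD filled by __rb_allocate_pages()
 * is cut back out of the ring below, so that afterwards every ->next
 * in the list points at a real buffer page and nothing else:
 *
 *	cpu_buffer->pages = pages.next;    remember one real page
 *	list_del(&pages);                  unlink the temporary head
 *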
1163 */ 1164 cpu_buffer->pages = pages.next; 1165 list_del(&pages); 1166 1167 cpu_buffer->nr_pages = nr_pages; 1168 1169 rb_check_pages(cpu_buffer); 1170 1171 return 0; 1172 } 1173 1174 static struct ring_buffer_per_cpu * 1175 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 1176 { 1177 struct ring_buffer_per_cpu *cpu_buffer; 1178 struct buffer_page *bpage; 1179 struct page *page; 1180 int ret; 1181 1182 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 1183 GFP_KERNEL, cpu_to_node(cpu)); 1184 if (!cpu_buffer) 1185 return NULL; 1186 1187 cpu_buffer->cpu = cpu; 1188 cpu_buffer->buffer = buffer; 1189 raw_spin_lock_init(&cpu_buffer->reader_lock); 1190 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1191 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1192 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 1193 init_completion(&cpu_buffer->update_done); 1194 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); 1195 init_waitqueue_head(&cpu_buffer->irq_work.waiters); 1196 1197 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1198 GFP_KERNEL, cpu_to_node(cpu)); 1199 if (!bpage) 1200 goto fail_free_buffer; 1201 1202 rb_check_bpage(cpu_buffer, bpage); 1203 1204 cpu_buffer->reader_page = bpage; 1205 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); 1206 if (!page) 1207 goto fail_free_reader; 1208 bpage->page = page_address(page); 1209 rb_init_page(bpage->page); 1210 1211 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 1212 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1213 1214 ret = rb_allocate_pages(cpu_buffer, nr_pages); 1215 if (ret < 0) 1216 goto fail_free_reader; 1217 1218 cpu_buffer->head_page 1219 = list_entry(cpu_buffer->pages, struct buffer_page, list); 1220 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 1221 1222 rb_head_page_activate(cpu_buffer); 1223 1224 return cpu_buffer; 1225 1226 fail_free_reader: 1227 free_buffer_page(cpu_buffer->reader_page); 1228 1229 fail_free_buffer: 1230 kfree(cpu_buffer); 1231 return NULL; 1232 } 1233 1234 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 1235 { 1236 struct list_head *head = cpu_buffer->pages; 1237 struct buffer_page *bpage, *tmp; 1238 1239 free_buffer_page(cpu_buffer->reader_page); 1240 1241 rb_head_page_deactivate(cpu_buffer); 1242 1243 if (head) { 1244 list_for_each_entry_safe(bpage, tmp, head, list) { 1245 list_del_init(&bpage->list); 1246 free_buffer_page(bpage); 1247 } 1248 bpage = list_entry(head, struct buffer_page, list); 1249 free_buffer_page(bpage); 1250 } 1251 1252 kfree(cpu_buffer); 1253 } 1254 1255 #ifdef CONFIG_HOTPLUG_CPU 1256 static int rb_cpu_notify(struct notifier_block *self, 1257 unsigned long action, void *hcpu); 1258 #endif 1259 1260 /** 1261 * __ring_buffer_alloc - allocate a new ring_buffer 1262 * @size: the size in bytes per cpu that is needed. 1263 * @flags: attributes to set for the ring buffer. 1264 * 1265 * Currently the only flag that is available is the RB_FL_OVERWRITE 1266 * flag. This flag means that the buffer will overwrite old data 1267 * when the buffer wraps. If this flag is not set, the buffer will 1268 * drop data when the tail hits the head. 
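 *
 * (Illustrative usage sketch, added; not part of the original source.)
 * A typical caller goes through the ring_buffer_alloc() wrapper and
 * the reserve/commit API; error handling is elided here:
 *
 *	struct ring_buffer *rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	struct ring_buffer_event *ev;
 *
 *	ev = ring_buffer_lock_reserve(rb, sizeof(int));
 *	*(int *)ring_buffer_event_data(ev) = 42;
 *	ring_buffer_unlock_commit(rb, ev);
 *	...
 *	ring_buffer_free(rb);
 *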
1269 */ 1270 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 1271 struct lock_class_key *key) 1272 { 1273 struct ring_buffer *buffer; 1274 int bsize; 1275 int cpu, nr_pages; 1276 1277 /* keep it in its own cache line */ 1278 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1279 GFP_KERNEL); 1280 if (!buffer) 1281 return NULL; 1282 1283 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 1284 goto fail_free_buffer; 1285 1286 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1287 buffer->flags = flags; 1288 buffer->clock = trace_clock_local; 1289 buffer->reader_lock_key = key; 1290 1291 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); 1292 init_waitqueue_head(&buffer->irq_work.waiters); 1293 1294 /* need at least two pages */ 1295 if (nr_pages < 2) 1296 nr_pages = 2; 1297 1298 /* 1299 * In case of non-hotplug cpu, if the ring-buffer is allocated 1300 * in early initcall, it will not be notified of secondary cpus. 1301 * In that off case, we need to allocate for all possible cpus. 1302 */ 1303 #ifdef CONFIG_HOTPLUG_CPU 1304 get_online_cpus(); 1305 cpumask_copy(buffer->cpumask, cpu_online_mask); 1306 #else 1307 cpumask_copy(buffer->cpumask, cpu_possible_mask); 1308 #endif 1309 buffer->cpus = nr_cpu_ids; 1310 1311 bsize = sizeof(void *) * nr_cpu_ids; 1312 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 1313 GFP_KERNEL); 1314 if (!buffer->buffers) 1315 goto fail_free_cpumask; 1316 1317 for_each_buffer_cpu(buffer, cpu) { 1318 buffer->buffers[cpu] = 1319 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 1320 if (!buffer->buffers[cpu]) 1321 goto fail_free_buffers; 1322 } 1323 1324 #ifdef CONFIG_HOTPLUG_CPU 1325 buffer->cpu_notify.notifier_call = rb_cpu_notify; 1326 buffer->cpu_notify.priority = 0; 1327 register_cpu_notifier(&buffer->cpu_notify); 1328 #endif 1329 1330 put_online_cpus(); 1331 mutex_init(&buffer->mutex); 1332 1333 return buffer; 1334 1335 fail_free_buffers: 1336 for_each_buffer_cpu(buffer, cpu) { 1337 if (buffer->buffers[cpu]) 1338 rb_free_cpu_buffer(buffer->buffers[cpu]); 1339 } 1340 kfree(buffer->buffers); 1341 1342 fail_free_cpumask: 1343 free_cpumask_var(buffer->cpumask); 1344 put_online_cpus(); 1345 1346 fail_free_buffer: 1347 kfree(buffer); 1348 return NULL; 1349 } 1350 EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 1351 1352 /** 1353 * ring_buffer_free - free a ring buffer. 1354 * @buffer: the buffer to free. 
1355 */ 1356 void 1357 ring_buffer_free(struct ring_buffer *buffer) 1358 { 1359 int cpu; 1360 1361 get_online_cpus(); 1362 1363 #ifdef CONFIG_HOTPLUG_CPU 1364 unregister_cpu_notifier(&buffer->cpu_notify); 1365 #endif 1366 1367 for_each_buffer_cpu(buffer, cpu) 1368 rb_free_cpu_buffer(buffer->buffers[cpu]); 1369 1370 put_online_cpus(); 1371 1372 kfree(buffer->buffers); 1373 free_cpumask_var(buffer->cpumask); 1374 1375 kfree(buffer); 1376 } 1377 EXPORT_SYMBOL_GPL(ring_buffer_free); 1378 1379 void ring_buffer_set_clock(struct ring_buffer *buffer, 1380 u64 (*clock)(void)) 1381 { 1382 buffer->clock = clock; 1383 } 1384 1385 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 1386 1387 static inline unsigned long rb_page_entries(struct buffer_page *bpage) 1388 { 1389 return local_read(&bpage->entries) & RB_WRITE_MASK; 1390 } 1391 1392 static inline unsigned long rb_page_write(struct buffer_page *bpage) 1393 { 1394 return local_read(&bpage->write) & RB_WRITE_MASK; 1395 } 1396 1397 static int 1398 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 1399 { 1400 struct list_head *tail_page, *to_remove, *next_page; 1401 struct buffer_page *to_remove_page, *tmp_iter_page; 1402 struct buffer_page *last_page, *first_page; 1403 unsigned int nr_removed; 1404 unsigned long head_bit; 1405 int page_entries; 1406 1407 head_bit = 0; 1408 1409 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1410 atomic_inc(&cpu_buffer->record_disabled); 1411 /* 1412 * We don't race with the readers since we have acquired the reader 1413 * lock. We also don't race with writers after disabling recording. 1414 * This makes it easy to figure out the first and the last page to be 1415 * removed from the list. We unlink all the pages in between including 1416 * the first and last pages. This is done in a busy loop so that we 1417 * lose the least number of traces. 1418 * The pages are freed after we restart recording and unlock readers. 1419 */ 1420 tail_page = &cpu_buffer->tail_page->list; 1421 1422 /* 1423 * tail page might be on reader page, we remove the next page 1424 * from the ring buffer 1425 */ 1426 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 1427 tail_page = rb_list_head(tail_page->next); 1428 to_remove = tail_page; 1429 1430 /* start of pages to remove */ 1431 first_page = list_entry(rb_list_head(to_remove->next), 1432 struct buffer_page, list); 1433 1434 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 1435 to_remove = rb_list_head(to_remove)->next; 1436 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 1437 } 1438 1439 next_page = rb_list_head(to_remove)->next; 1440 1441 /* 1442 * Now we remove all pages between tail_page and next_page. 
1443 * Make sure that we have head_bit value preserved for the 1444 * next page 1445 */ 1446 tail_page->next = (struct list_head *)((unsigned long)next_page | 1447 head_bit); 1448 next_page = rb_list_head(next_page); 1449 next_page->prev = tail_page; 1450 1451 /* make sure pages points to a valid page in the ring buffer */ 1452 cpu_buffer->pages = next_page; 1453 1454 /* update head page */ 1455 if (head_bit) 1456 cpu_buffer->head_page = list_entry(next_page, 1457 struct buffer_page, list); 1458 1459 /* 1460 * change read pointer to make sure any read iterators reset 1461 * themselves 1462 */ 1463 cpu_buffer->read = 0; 1464 1465 /* pages are removed, resume tracing and then free the pages */ 1466 atomic_dec(&cpu_buffer->record_disabled); 1467 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1468 1469 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1470 1471 /* last buffer page to remove */ 1472 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1473 list); 1474 tmp_iter_page = first_page; 1475 1476 do { 1477 to_remove_page = tmp_iter_page; 1478 rb_inc_page(cpu_buffer, &tmp_iter_page); 1479 1480 /* update the counters */ 1481 page_entries = rb_page_entries(to_remove_page); 1482 if (page_entries) { 1483 /* 1484 * If something was added to this page, it was full 1485 * since it is not the tail page. So we deduct the 1486 * bytes consumed in ring buffer from here. 1487 * Increment overrun to account for the lost events. 1488 */ 1489 local_add(page_entries, &cpu_buffer->overrun); 1490 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 1491 } 1492 1493 /* 1494 * We have already removed references to this list item, just 1495 * free up the buffer_page and its page 1496 */ 1497 free_buffer_page(to_remove_page); 1498 nr_removed--; 1499 1500 } while (to_remove_page != last_page); 1501 1502 RB_WARN_ON(cpu_buffer, nr_removed); 1503 1504 return nr_removed == 0; 1505 } 1506 1507 static int 1508 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 1509 { 1510 struct list_head *pages = &cpu_buffer->new_pages; 1511 int retries, success; 1512 1513 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1514 /* 1515 * We are holding the reader lock, so the reader page won't be swapped 1516 * in the ring buffer. Now we are racing with the writer trying to 1517 * move head page and the tail page. 1518 * We are going to adapt the reader page update process where: 1519 * 1. We first splice the start and end of list of new pages between 1520 * the head page and its previous page. 1521 * 2. We cmpxchg the prev_page->next to point from head page to the 1522 * start of new pages list. 1523 * 3. Finally, we update the head->prev to the end of new list. 1524 * 1525 * We will try this process 10 times, to make sure that we don't keep 1526 * spinning. 
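 *
 * (Illustrative aside, added; not part of the original source.)  Note
 * that step 2 compares against the head pointer with the RB_PAGE_HEAD
 * bit set:
 *
 *	head_page_with_bit = (struct list_head *)
 *			((unsigned long)head_page | RB_PAGE_HEAD);
 *	r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
 *
 * so the splice can only succeed while that page is still the head; if
 * a writer has moved the head (and therefore cleared the bit) the
 * cmpxchg fails and we go around again.
 *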
1527 */ 1528 retries = 10; 1529 success = 0; 1530 while (retries--) { 1531 struct list_head *head_page, *prev_page, *r; 1532 struct list_head *last_page, *first_page; 1533 struct list_head *head_page_with_bit; 1534 1535 head_page = &rb_set_head_page(cpu_buffer)->list; 1536 if (!head_page) 1537 break; 1538 prev_page = head_page->prev; 1539 1540 first_page = pages->next; 1541 last_page = pages->prev; 1542 1543 head_page_with_bit = (struct list_head *) 1544 ((unsigned long)head_page | RB_PAGE_HEAD); 1545 1546 last_page->next = head_page_with_bit; 1547 first_page->prev = prev_page; 1548 1549 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 1550 1551 if (r == head_page_with_bit) { 1552 /* 1553 * yay, we replaced the page pointer to our new list, 1554 * now, we just have to update to head page's prev 1555 * pointer to point to end of list 1556 */ 1557 head_page->prev = last_page; 1558 success = 1; 1559 break; 1560 } 1561 } 1562 1563 if (success) 1564 INIT_LIST_HEAD(pages); 1565 /* 1566 * If we weren't successful in adding in new pages, warn and stop 1567 * tracing 1568 */ 1569 RB_WARN_ON(cpu_buffer, !success); 1570 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1571 1572 /* free pages if they weren't inserted */ 1573 if (!success) { 1574 struct buffer_page *bpage, *tmp; 1575 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1576 list) { 1577 list_del_init(&bpage->list); 1578 free_buffer_page(bpage); 1579 } 1580 } 1581 return success; 1582 } 1583 1584 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 1585 { 1586 int success; 1587 1588 if (cpu_buffer->nr_pages_to_update > 0) 1589 success = rb_insert_pages(cpu_buffer); 1590 else 1591 success = rb_remove_pages(cpu_buffer, 1592 -cpu_buffer->nr_pages_to_update); 1593 1594 if (success) 1595 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 1596 } 1597 1598 static void update_pages_handler(struct work_struct *work) 1599 { 1600 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 1601 struct ring_buffer_per_cpu, update_pages_work); 1602 rb_update_pages(cpu_buffer); 1603 complete(&cpu_buffer->update_done); 1604 } 1605 1606 /** 1607 * ring_buffer_resize - resize the ring buffer 1608 * @buffer: the buffer to resize. 1609 * @size: the new size. 1610 * @cpu_id: the cpu buffer to resize 1611 * 1612 * Minimum size is 2 * BUF_PAGE_SIZE. 1613 * 1614 * Returns 0 on success and < 0 on failure. 1615 */ 1616 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, 1617 int cpu_id) 1618 { 1619 struct ring_buffer_per_cpu *cpu_buffer; 1620 unsigned nr_pages; 1621 int cpu, err = 0; 1622 1623 /* 1624 * Always succeed at resizing a non-existent buffer: 1625 */ 1626 if (!buffer) 1627 return size; 1628 1629 /* Make sure the requested buffer exists */ 1630 if (cpu_id != RING_BUFFER_ALL_CPUS && 1631 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 1632 return size; 1633 1634 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1635 size *= BUF_PAGE_SIZE; 1636 1637 /* we need a minimum of two pages */ 1638 if (size < BUF_PAGE_SIZE * 2) 1639 size = BUF_PAGE_SIZE * 2; 1640 1641 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1642 1643 /* 1644 * Don't succeed if resizing is disabled, as a reader might be 1645 * manipulating the ring buffer and is expecting a sane state while 1646 * this is true. 
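 *
 * (Worked example, added; not part of the original source.)  With 4K
 * pages on a 64-bit machine, BUF_PAGE_SIZE is 4096 - 16 = 4080 bytes,
 * so the rounding above turns a request for 10000 bytes into
 *
 *	nr_pages = DIV_ROUND_UP(10000, 4080) = 3
 *
 * and anything smaller than two pages is bumped up to two pages.
 *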
1647 */ 1648 if (atomic_read(&buffer->resize_disabled)) 1649 return -EBUSY; 1650 1651 /* prevent another thread from changing buffer sizes */ 1652 mutex_lock(&buffer->mutex); 1653 1654 if (cpu_id == RING_BUFFER_ALL_CPUS) { 1655 /* calculate the pages to update */ 1656 for_each_buffer_cpu(buffer, cpu) { 1657 cpu_buffer = buffer->buffers[cpu]; 1658 1659 cpu_buffer->nr_pages_to_update = nr_pages - 1660 cpu_buffer->nr_pages; 1661 /* 1662 * nothing more to do for removing pages or no update 1663 */ 1664 if (cpu_buffer->nr_pages_to_update <= 0) 1665 continue; 1666 /* 1667 * to add pages, make sure all new pages can be 1668 * allocated without receiving ENOMEM 1669 */ 1670 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1671 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, 1672 &cpu_buffer->new_pages, cpu)) { 1673 /* not enough memory for new pages */ 1674 err = -ENOMEM; 1675 goto out_err; 1676 } 1677 } 1678 1679 get_online_cpus(); 1680 /* 1681 * Fire off all the required work handlers 1682 * We can't schedule on offline CPUs, but it's not necessary 1683 * since we can change their buffer sizes without any race. 1684 */ 1685 for_each_buffer_cpu(buffer, cpu) { 1686 cpu_buffer = buffer->buffers[cpu]; 1687 if (!cpu_buffer->nr_pages_to_update) 1688 continue; 1689 1690 /* The update must run on the CPU that is being updated. */ 1691 preempt_disable(); 1692 if (cpu == smp_processor_id() || !cpu_online(cpu)) { 1693 rb_update_pages(cpu_buffer); 1694 cpu_buffer->nr_pages_to_update = 0; 1695 } else { 1696 /* 1697 * Can not disable preemption for schedule_work_on() 1698 * on PREEMPT_RT. 1699 */ 1700 preempt_enable(); 1701 schedule_work_on(cpu, 1702 &cpu_buffer->update_pages_work); 1703 preempt_disable(); 1704 } 1705 preempt_enable(); 1706 } 1707 1708 /* wait for all the updates to complete */ 1709 for_each_buffer_cpu(buffer, cpu) { 1710 cpu_buffer = buffer->buffers[cpu]; 1711 if (!cpu_buffer->nr_pages_to_update) 1712 continue; 1713 1714 if (cpu_online(cpu)) 1715 wait_for_completion(&cpu_buffer->update_done); 1716 cpu_buffer->nr_pages_to_update = 0; 1717 } 1718 1719 put_online_cpus(); 1720 } else { 1721 /* Make sure this CPU has been intitialized */ 1722 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) 1723 goto out; 1724 1725 cpu_buffer = buffer->buffers[cpu_id]; 1726 1727 if (nr_pages == cpu_buffer->nr_pages) 1728 goto out; 1729 1730 cpu_buffer->nr_pages_to_update = nr_pages - 1731 cpu_buffer->nr_pages; 1732 1733 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1734 if (cpu_buffer->nr_pages_to_update > 0 && 1735 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, 1736 &cpu_buffer->new_pages, cpu_id)) { 1737 err = -ENOMEM; 1738 goto out_err; 1739 } 1740 1741 get_online_cpus(); 1742 1743 preempt_disable(); 1744 /* The update must run on the CPU that is being updated. */ 1745 if (cpu_id == smp_processor_id() || !cpu_online(cpu_id)) 1746 rb_update_pages(cpu_buffer); 1747 else { 1748 /* 1749 * Can not disable preemption for schedule_work_on() 1750 * on PREEMPT_RT. 1751 */ 1752 preempt_enable(); 1753 schedule_work_on(cpu_id, 1754 &cpu_buffer->update_pages_work); 1755 wait_for_completion(&cpu_buffer->update_done); 1756 preempt_disable(); 1757 } 1758 preempt_enable(); 1759 1760 cpu_buffer->nr_pages_to_update = 0; 1761 put_online_cpus(); 1762 } 1763 1764 out: 1765 /* 1766 * The ring buffer resize can happen with the ring buffer 1767 * enabled, so that the update disturbs the tracing as little 1768 * as possible. 
But if the buffer is disabled, we do not need 1769 * to worry about that, and we can take the time to verify 1770 * that the buffer is not corrupt. 1771 */ 1772 if (atomic_read(&buffer->record_disabled)) { 1773 atomic_inc(&buffer->record_disabled); 1774 /* 1775 * Even though the buffer was disabled, we must make sure 1776 * that it is truly disabled before calling rb_check_pages. 1777 * There could have been a race between checking 1778 * record_disable and incrementing it. 1779 */ 1780 synchronize_sched(); 1781 for_each_buffer_cpu(buffer, cpu) { 1782 cpu_buffer = buffer->buffers[cpu]; 1783 rb_check_pages(cpu_buffer); 1784 } 1785 atomic_dec(&buffer->record_disabled); 1786 } 1787 1788 mutex_unlock(&buffer->mutex); 1789 return size; 1790 1791 out_err: 1792 for_each_buffer_cpu(buffer, cpu) { 1793 struct buffer_page *bpage, *tmp; 1794 1795 cpu_buffer = buffer->buffers[cpu]; 1796 cpu_buffer->nr_pages_to_update = 0; 1797 1798 if (list_empty(&cpu_buffer->new_pages)) 1799 continue; 1800 1801 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1802 list) { 1803 list_del_init(&bpage->list); 1804 free_buffer_page(bpage); 1805 } 1806 } 1807 mutex_unlock(&buffer->mutex); 1808 return err; 1809 } 1810 EXPORT_SYMBOL_GPL(ring_buffer_resize); 1811 1812 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) 1813 { 1814 mutex_lock(&buffer->mutex); 1815 if (val) 1816 buffer->flags |= RB_FL_OVERWRITE; 1817 else 1818 buffer->flags &= ~RB_FL_OVERWRITE; 1819 mutex_unlock(&buffer->mutex); 1820 } 1821 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1822 1823 static inline void * 1824 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 1825 { 1826 return bpage->data + index; 1827 } 1828 1829 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 1830 { 1831 return bpage->page->data + index; 1832 } 1833 1834 static inline struct ring_buffer_event * 1835 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 1836 { 1837 return __rb_page_index(cpu_buffer->reader_page, 1838 cpu_buffer->reader_page->read); 1839 } 1840 1841 static inline struct ring_buffer_event * 1842 rb_iter_head_event(struct ring_buffer_iter *iter) 1843 { 1844 return __rb_page_index(iter->head_page, iter->head); 1845 } 1846 1847 static inline unsigned rb_page_commit(struct buffer_page *bpage) 1848 { 1849 return local_read(&bpage->page->commit); 1850 } 1851 1852 /* Size is determined by what has been committed */ 1853 static inline unsigned rb_page_size(struct buffer_page *bpage) 1854 { 1855 return rb_page_commit(bpage); 1856 } 1857 1858 static inline unsigned 1859 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 1860 { 1861 return rb_page_commit(cpu_buffer->commit_page); 1862 } 1863 1864 static inline unsigned 1865 rb_event_index(struct ring_buffer_event *event) 1866 { 1867 unsigned long addr = (unsigned long)event; 1868 1869 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 1870 } 1871 1872 static inline int 1873 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 1874 struct ring_buffer_event *event) 1875 { 1876 unsigned long addr = (unsigned long)event; 1877 unsigned long index; 1878 1879 index = rb_event_index(event); 1880 addr &= PAGE_MASK; 1881 1882 return cpu_buffer->commit_page->page == (void *)addr && 1883 rb_commit_index(cpu_buffer) == index; 1884 } 1885 1886 static void 1887 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1888 { 1889 unsigned long max_count; 1890 1891 /* 1892 * We only race with interrupts and NMIs on this CPU. 
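 *
 * (Illustrative aside, added; not part of the original source.)
 * "Owning the commit" is decided by rb_event_is_commit() above: an
 * event at address addr is the commit event if
 *
 *	(addr & PAGE_MASK) == commit_page->page, and
 *	(addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE == the commit index,
 *
 * i.e. it sits on the commit page at exactly the current commit offset.
 *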
1893 * If we own the commit event, then we can commit 1894 * all others that interrupted us, since the interruptions 1895 * are in stack format (they finish before they come 1896 * back to us). This allows us to do a simple loop to 1897 * assign the commit to the tail. 1898 */ 1899 again: 1900 max_count = cpu_buffer->nr_pages * 100; 1901 1902 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 1903 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 1904 return; 1905 if (RB_WARN_ON(cpu_buffer, 1906 rb_is_reader_page(cpu_buffer->tail_page))) 1907 return; 1908 local_set(&cpu_buffer->commit_page->page->commit, 1909 rb_page_write(cpu_buffer->commit_page)); 1910 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1911 cpu_buffer->write_stamp = 1912 cpu_buffer->commit_page->page->time_stamp; 1913 /* add barrier to keep gcc from optimizing too much */ 1914 barrier(); 1915 } 1916 while (rb_commit_index(cpu_buffer) != 1917 rb_page_write(cpu_buffer->commit_page)) { 1918 1919 local_set(&cpu_buffer->commit_page->page->commit, 1920 rb_page_write(cpu_buffer->commit_page)); 1921 RB_WARN_ON(cpu_buffer, 1922 local_read(&cpu_buffer->commit_page->page->commit) & 1923 ~RB_WRITE_MASK); 1924 barrier(); 1925 } 1926 1927 /* again, keep gcc from optimizing */ 1928 barrier(); 1929 1930 /* 1931 * If an interrupt came in just after the first while loop 1932 * and pushed the tail page forward, we will be left with 1933 * a dangling commit that will never go forward. 1934 */ 1935 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) 1936 goto again; 1937 } 1938 1939 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 1940 { 1941 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; 1942 cpu_buffer->reader_page->read = 0; 1943 } 1944 1945 static void rb_inc_iter(struct ring_buffer_iter *iter) 1946 { 1947 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1948 1949 /* 1950 * The iterator could be on the reader page (it starts there). 1951 * But the head could have moved, since the reader was 1952 * found. Check for this case and assign the iterator 1953 * to the head page instead of next. 1954 */ 1955 if (iter->head_page == cpu_buffer->reader_page) 1956 iter->head_page = rb_set_head_page(cpu_buffer); 1957 else 1958 rb_inc_page(cpu_buffer, &iter->head_page); 1959 1960 iter->read_stamp = iter->head_page->page->time_stamp; 1961 iter->head = 0; 1962 } 1963 1964 /* Slow path, do not inline */ 1965 static noinline struct ring_buffer_event * 1966 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) 1967 { 1968 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 1969 1970 /* Not the first event on the page? */ 1971 if (rb_event_index(event)) { 1972 event->time_delta = delta & TS_MASK; 1973 event->array[0] = delta >> TS_SHIFT; 1974 } else { 1975 /* nope, just zero it */ 1976 event->time_delta = 0; 1977 event->array[0] = 0; 1978 } 1979 1980 return skip_time_extend(event); 1981 } 1982 1983 /** 1984 * rb_update_event - update event type and data 1985 * @event: the even to update 1986 * @type: the type of event 1987 * @length: the size of the event field in the ring buffer 1988 * 1989 * Update the type and data fields of the event. The length 1990 * is the actual size that is written to the ring buffer, 1991 * and with this, we can determine what to place into the 1992 * data field. 
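 *
 * (Worked example, added; not part of the original source.)  Ignoring
 * the forced 8-byte alignment case, the encoding implemented below is:
 *
 *	payload <= 112 bytes:  type_len = DIV_ROUND_UP(length, 4), no array[0]
 *	payload  > 112 bytes:  type_len = 0, byte count stored in array[0]
 *
 * so, for example, a 12 byte payload is encoded as type_len == 3.
 *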
1993 */ 1994 static void 1995 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 1996 struct ring_buffer_event *event, unsigned length, 1997 int add_timestamp, u64 delta) 1998 { 1999 /* Only a commit updates the timestamp */ 2000 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) 2001 delta = 0; 2002 2003 /* 2004 * If we need to add a timestamp, then we 2005 * add it to the start of the resevered space. 2006 */ 2007 if (unlikely(add_timestamp)) { 2008 event = rb_add_time_stamp(event, delta); 2009 length -= RB_LEN_TIME_EXTEND; 2010 delta = 0; 2011 } 2012 2013 event->time_delta = delta; 2014 length -= RB_EVNT_HDR_SIZE; 2015 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2016 event->type_len = 0; 2017 event->array[0] = length; 2018 } else 2019 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2020 } 2021 2022 /* 2023 * rb_handle_head_page - writer hit the head page 2024 * 2025 * Returns: +1 to retry page 2026 * 0 to continue 2027 * -1 on error 2028 */ 2029 static int 2030 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2031 struct buffer_page *tail_page, 2032 struct buffer_page *next_page) 2033 { 2034 struct buffer_page *new_head; 2035 int entries; 2036 int type; 2037 int ret; 2038 2039 entries = rb_page_entries(next_page); 2040 2041 /* 2042 * The hard part is here. We need to move the head 2043 * forward, and protect against both readers on 2044 * other CPUs and writers coming in via interrupts. 2045 */ 2046 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2047 RB_PAGE_HEAD); 2048 2049 /* 2050 * type can be one of four: 2051 * NORMAL - an interrupt already moved it for us 2052 * HEAD - we are the first to get here. 2053 * UPDATE - we are the interrupt interrupting 2054 * a current move. 2055 * MOVED - a reader on another CPU moved the next 2056 * pointer to its reader page. Give up 2057 * and try again. 2058 */ 2059 2060 switch (type) { 2061 case RB_PAGE_HEAD: 2062 /* 2063 * We changed the head to UPDATE, thus 2064 * it is our responsibility to update 2065 * the counters. 2066 */ 2067 local_add(entries, &cpu_buffer->overrun); 2068 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2069 2070 /* 2071 * The entries will be zeroed out when we move the 2072 * tail page. 2073 */ 2074 2075 /* still more to do */ 2076 break; 2077 2078 case RB_PAGE_UPDATE: 2079 /* 2080 * This is an interrupt that interrupt the 2081 * previous update. Still more to do. 2082 */ 2083 break; 2084 case RB_PAGE_NORMAL: 2085 /* 2086 * An interrupt came in before the update 2087 * and processed this for us. 2088 * Nothing left to do. 2089 */ 2090 return 1; 2091 case RB_PAGE_MOVED: 2092 /* 2093 * The reader is on another CPU and just did 2094 * a swap with our next_page. 2095 * Try again. 2096 */ 2097 return 1; 2098 default: 2099 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2100 return -1; 2101 } 2102 2103 /* 2104 * Now that we are here, the old head pointer is 2105 * set to UPDATE. This will keep the reader from 2106 * swapping the head page with the reader page. 2107 * The reader (on another CPU) will spin till 2108 * we are finished. 2109 * 2110 * We just need to protect against interrupts 2111 * doing the job. We will set the next pointer 2112 * to HEAD. After that, we set the old pointer 2113 * to NORMAL, but only if it was HEAD before. 2114 * otherwise we are an interrupt, and only 2115 * want the outer most commit to reset it. 
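 *
 * [Editor's illustrative aside, not in the original source: the type_len
 *  encoding chosen by rb_update_event() above can be shown with numbers.
 *  With RB_ALIGNMENT at 4 and no forced 8-byte alignment, a 12-byte
 *  payload becomes a "small" event:
 *
 *	payload length    = 12
 *	event->type_len   = DIV_ROUND_UP(12, 4) = 3
 *	bytes on the page = RB_EVNT_HDR_SIZE + 12 = 16
 *
 *  A payload above RB_MAX_SMALL_DATA (112 bytes under these assumptions)
 *  instead gets type_len = 0 with its length stored in array[0], which is
 *  what rb_event_data_length() reads back.]
 *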
2116 */ 2117 new_head = next_page; 2118 rb_inc_page(cpu_buffer, &new_head); 2119 2120 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2121 RB_PAGE_NORMAL); 2122 2123 /* 2124 * Valid returns are: 2125 * HEAD - an interrupt came in and already set it. 2126 * NORMAL - One of two things: 2127 * 1) We really set it. 2128 * 2) A bunch of interrupts came in and moved 2129 * the page forward again. 2130 */ 2131 switch (ret) { 2132 case RB_PAGE_HEAD: 2133 case RB_PAGE_NORMAL: 2134 /* OK */ 2135 break; 2136 default: 2137 RB_WARN_ON(cpu_buffer, 1); 2138 return -1; 2139 } 2140 2141 /* 2142 * It is possible that an interrupt came in, 2143 * set the head up, then more interrupts came in 2144 * and moved it again. When we get back here, 2145 * the page would have been set to NORMAL but we 2146 * just set it back to HEAD. 2147 * 2148 * How do you detect this? Well, if that happened 2149 * the tail page would have moved. 2150 */ 2151 if (ret == RB_PAGE_NORMAL) { 2152 /* 2153 * If the tail had moved passed next, then we need 2154 * to reset the pointer. 2155 */ 2156 if (cpu_buffer->tail_page != tail_page && 2157 cpu_buffer->tail_page != next_page) 2158 rb_head_page_set_normal(cpu_buffer, new_head, 2159 next_page, 2160 RB_PAGE_HEAD); 2161 } 2162 2163 /* 2164 * If this was the outer most commit (the one that 2165 * changed the original pointer from HEAD to UPDATE), 2166 * then it is up to us to reset it to NORMAL. 2167 */ 2168 if (type == RB_PAGE_HEAD) { 2169 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2170 tail_page, 2171 RB_PAGE_UPDATE); 2172 if (RB_WARN_ON(cpu_buffer, 2173 ret != RB_PAGE_UPDATE)) 2174 return -1; 2175 } 2176 2177 return 0; 2178 } 2179 2180 static unsigned rb_calculate_event_length(unsigned length) 2181 { 2182 struct ring_buffer_event event; /* Used only for sizeof array */ 2183 2184 /* zero length can cause confusions */ 2185 if (!length) 2186 length = 1; 2187 2188 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2189 length += sizeof(event.array[0]); 2190 2191 length += RB_EVNT_HDR_SIZE; 2192 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2193 2194 return length; 2195 } 2196 2197 static inline void 2198 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2199 struct buffer_page *tail_page, 2200 unsigned long tail, unsigned long length) 2201 { 2202 struct ring_buffer_event *event; 2203 2204 /* 2205 * Only the event that crossed the page boundary 2206 * must fill the old tail_page with padding. 2207 */ 2208 if (tail >= BUF_PAGE_SIZE) { 2209 /* 2210 * If the page was filled, then we still need 2211 * to update the real_end. Reset it to zero 2212 * and the reader will ignore it. 2213 */ 2214 if (tail == BUF_PAGE_SIZE) 2215 tail_page->real_end = 0; 2216 2217 local_sub(length, &tail_page->write); 2218 return; 2219 } 2220 2221 event = __rb_page_index(tail_page, tail); 2222 kmemcheck_annotate_bitfield(event, bitfield); 2223 2224 /* account for padding bytes */ 2225 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2226 2227 /* 2228 * Save the original length to the meta data. 2229 * This will be used by the reader to add lost event 2230 * counter. 2231 */ 2232 tail_page->real_end = tail; 2233 2234 /* 2235 * If this event is bigger than the minimum size, then 2236 * we need to be careful that we don't subtract the 2237 * write counter enough to allow another writer to slip 2238 * in on this page. 2239 * We put in a discarded commit instead, to make sure 2240 * that this space is not used again. 
2241 * 2242 * If we are less than the minimum size, we don't need to 2243 * worry about it. 2244 */ 2245 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2246 /* No room for any events */ 2247 2248 /* Mark the rest of the page with padding */ 2249 rb_event_set_padding(event); 2250 2251 /* Set the write back to the previous setting */ 2252 local_sub(length, &tail_page->write); 2253 return; 2254 } 2255 2256 /* Put in a discarded event */ 2257 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2258 event->type_len = RINGBUF_TYPE_PADDING; 2259 /* time delta must be non zero */ 2260 event->time_delta = 1; 2261 2262 /* Set write to end of buffer */ 2263 length = (tail + length) - BUF_PAGE_SIZE; 2264 local_sub(length, &tail_page->write); 2265 } 2266 2267 /* 2268 * This is the slow path, force gcc not to inline it. 2269 */ 2270 static noinline struct ring_buffer_event * 2271 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2272 unsigned long length, unsigned long tail, 2273 struct buffer_page *tail_page, u64 ts) 2274 { 2275 struct buffer_page *commit_page = cpu_buffer->commit_page; 2276 struct ring_buffer *buffer = cpu_buffer->buffer; 2277 struct buffer_page *next_page; 2278 int ret; 2279 2280 next_page = tail_page; 2281 2282 rb_inc_page(cpu_buffer, &next_page); 2283 2284 /* 2285 * If for some reason, we had an interrupt storm that made 2286 * it all the way around the buffer, bail, and warn 2287 * about it. 2288 */ 2289 if (unlikely(next_page == commit_page)) { 2290 local_inc(&cpu_buffer->commit_overrun); 2291 goto out_reset; 2292 } 2293 2294 /* 2295 * This is where the fun begins! 2296 * 2297 * We are fighting against races between a reader that 2298 * could be on another CPU trying to swap its reader 2299 * page with the buffer head. 2300 * 2301 * We are also fighting against interrupts coming in and 2302 * moving the head or tail on us as well. 2303 * 2304 * If the next page is the head page then we have filled 2305 * the buffer, unless the commit page is still on the 2306 * reader page. 2307 */ 2308 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { 2309 2310 /* 2311 * If the commit is not on the reader page, then 2312 * move the header page. 2313 */ 2314 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2315 /* 2316 * If we are not in overwrite mode, 2317 * this is easy, just stop here. 2318 */ 2319 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2320 local_inc(&cpu_buffer->dropped_events); 2321 goto out_reset; 2322 } 2323 2324 ret = rb_handle_head_page(cpu_buffer, 2325 tail_page, 2326 next_page); 2327 if (ret < 0) 2328 goto out_reset; 2329 if (ret) 2330 goto out_again; 2331 } else { 2332 /* 2333 * We need to be careful here too. The 2334 * commit page could still be on the reader 2335 * page. We could have a small buffer, and 2336 * have filled up the buffer with events 2337 * from interrupts and such, and wrapped. 2338 * 2339 * Note, if the tail page is also the on the 2340 * reader_page, we let it move out. 
2341 */ 2342 if (unlikely((cpu_buffer->commit_page != 2343 cpu_buffer->tail_page) && 2344 (cpu_buffer->commit_page == 2345 cpu_buffer->reader_page))) { 2346 local_inc(&cpu_buffer->commit_overrun); 2347 goto out_reset; 2348 } 2349 } 2350 } 2351 2352 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); 2353 if (ret) { 2354 /* 2355 * Nested commits always have zero deltas, so 2356 * just reread the time stamp 2357 */ 2358 ts = rb_time_stamp(buffer); 2359 next_page->page->time_stamp = ts; 2360 } 2361 2362 out_again: 2363 2364 rb_reset_tail(cpu_buffer, tail_page, tail, length); 2365 2366 /* fail and let the caller try again */ 2367 return ERR_PTR(-EAGAIN); 2368 2369 out_reset: 2370 /* reset write */ 2371 rb_reset_tail(cpu_buffer, tail_page, tail, length); 2372 2373 return NULL; 2374 } 2375 2376 static struct ring_buffer_event * 2377 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 2378 unsigned long length, u64 ts, 2379 u64 delta, int add_timestamp) 2380 { 2381 struct buffer_page *tail_page; 2382 struct ring_buffer_event *event; 2383 unsigned long tail, write; 2384 2385 /* 2386 * If the time delta since the last event is too big to 2387 * hold in the time field of the event, then we append a 2388 * TIME EXTEND event ahead of the data event. 2389 */ 2390 if (unlikely(add_timestamp)) 2391 length += RB_LEN_TIME_EXTEND; 2392 2393 tail_page = cpu_buffer->tail_page; 2394 write = local_add_return(length, &tail_page->write); 2395 2396 /* set write to only the index of the write */ 2397 write &= RB_WRITE_MASK; 2398 tail = write - length; 2399 2400 /* See if we shot pass the end of this buffer page */ 2401 if (unlikely(write > BUF_PAGE_SIZE)) 2402 return rb_move_tail(cpu_buffer, length, tail, 2403 tail_page, ts); 2404 2405 /* We reserved something on the buffer */ 2406 2407 event = __rb_page_index(tail_page, tail); 2408 kmemcheck_annotate_bitfield(event, bitfield); 2409 rb_update_event(cpu_buffer, event, length, add_timestamp, delta); 2410 2411 local_inc(&tail_page->entries); 2412 2413 /* 2414 * If this is the first commit on the page, then update 2415 * its timestamp. 2416 */ 2417 if (!tail) 2418 tail_page->page->time_stamp = ts; 2419 2420 /* account for these added bytes */ 2421 local_add(length, &cpu_buffer->entries_bytes); 2422 2423 return event; 2424 } 2425 2426 static inline int 2427 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2428 struct ring_buffer_event *event) 2429 { 2430 unsigned long new_index, old_index; 2431 struct buffer_page *bpage; 2432 unsigned long index; 2433 unsigned long addr; 2434 2435 new_index = rb_event_index(event); 2436 old_index = new_index + rb_event_ts_length(event); 2437 addr = (unsigned long)event; 2438 addr &= PAGE_MASK; 2439 2440 bpage = cpu_buffer->tail_page; 2441 2442 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 2443 unsigned long write_mask = 2444 local_read(&bpage->write) & ~RB_WRITE_MASK; 2445 unsigned long event_length = rb_event_length(event); 2446 /* 2447 * This is on the tail page. It is possible that 2448 * a write could come in and move the tail page 2449 * and write to the next page. That is fine 2450 * because we just shorten what is on this page. 
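 *
 * [Editor's illustrative aside, not in the original source: the reserve
 *  arithmetic in __rb_reserve_next() above is easiest to follow with
 *  numbers. Assume BUF_PAGE_SIZE is 4080 (a 4K page minus a 16-byte page
 *  header), the page's write counter currently sits at 4072 and a 24-byte
 *  event is reserved:
 *
 *	write = local_add_return(24, &tail_page->write) = 4096
 *	tail  = write - length                          = 4072
 *
 *  Since write > BUF_PAGE_SIZE the event does not fit on this page;
 *  rb_move_tail() advances to the next page and rb_reset_tail() turns the
 *  8 bytes left at offset 4072 into padding so readers skip them.]
 *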
2451 */ 2452 old_index += write_mask; 2453 new_index += write_mask; 2454 index = local_cmpxchg(&bpage->write, old_index, new_index); 2455 if (index == old_index) { 2456 /* update counters */ 2457 local_sub(event_length, &cpu_buffer->entries_bytes); 2458 return 1; 2459 } 2460 } 2461 2462 /* could not discard */ 2463 return 0; 2464 } 2465 2466 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 2467 { 2468 local_inc(&cpu_buffer->committing); 2469 local_inc(&cpu_buffer->commits); 2470 } 2471 2472 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 2473 { 2474 unsigned long commits; 2475 2476 if (RB_WARN_ON(cpu_buffer, 2477 !local_read(&cpu_buffer->committing))) 2478 return; 2479 2480 again: 2481 commits = local_read(&cpu_buffer->commits); 2482 /* synchronize with interrupts */ 2483 barrier(); 2484 if (local_read(&cpu_buffer->committing) == 1) 2485 rb_set_commit_to_write(cpu_buffer); 2486 2487 local_dec(&cpu_buffer->committing); 2488 2489 /* synchronize with interrupts */ 2490 barrier(); 2491 2492 /* 2493 * Need to account for interrupts coming in between the 2494 * updating of the commit page and the clearing of the 2495 * committing counter. 2496 */ 2497 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 2498 !local_read(&cpu_buffer->committing)) { 2499 local_inc(&cpu_buffer->committing); 2500 goto again; 2501 } 2502 } 2503 2504 static struct ring_buffer_event * 2505 rb_reserve_next_event(struct ring_buffer *buffer, 2506 struct ring_buffer_per_cpu *cpu_buffer, 2507 unsigned long length) 2508 { 2509 struct ring_buffer_event *event; 2510 u64 ts, delta; 2511 int nr_loops = 0; 2512 int add_timestamp; 2513 u64 diff; 2514 2515 rb_start_commit(cpu_buffer); 2516 2517 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 2518 /* 2519 * Due to the ability to swap a cpu buffer from a buffer 2520 * it is possible it was swapped before we committed. 2521 * (committing stops a swap). We check for it here and 2522 * if it happened, we have to fail the write. 2523 */ 2524 barrier(); 2525 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { 2526 local_dec(&cpu_buffer->committing); 2527 local_dec(&cpu_buffer->commits); 2528 return NULL; 2529 } 2530 #endif 2531 2532 length = rb_calculate_event_length(length); 2533 again: 2534 add_timestamp = 0; 2535 delta = 0; 2536 2537 /* 2538 * We allow for interrupts to reenter here and do a trace. 2539 * If one does, it will cause this original code to loop 2540 * back here. Even with heavy interrupts happening, this 2541 * should only happen a few times in a row. If this happens 2542 * 1000 times in a row, there must be either an interrupt 2543 * storm or we have something buggy. 2544 * Bail! 2545 */ 2546 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2547 goto out_fail; 2548 2549 ts = rb_time_stamp(cpu_buffer->buffer); 2550 diff = ts - cpu_buffer->write_stamp; 2551 2552 /* make sure this diff is calculated here */ 2553 barrier(); 2554 2555 /* Did the write stamp get updated already? */ 2556 if (likely(ts >= cpu_buffer->write_stamp)) { 2557 delta = diff; 2558 if (unlikely(test_time_stamp(delta))) { 2559 int local_clock_stable = 1; 2560 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2561 local_clock_stable = sched_clock_stable; 2562 #endif 2563 WARN_ONCE(delta > (1ULL << 59), 2564 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", 2565 (unsigned long long)delta, 2566 (unsigned long long)ts, 2567 (unsigned long long)cpu_buffer->write_stamp, 2568 local_clock_stable ? 
"" : 2569 "If you just came from a suspend/resume,\n" 2570 "please switch to the trace global clock:\n" 2571 " echo global > /sys/kernel/debug/tracing/trace_clock\n"); 2572 add_timestamp = 1; 2573 } 2574 } 2575 2576 event = __rb_reserve_next(cpu_buffer, length, ts, 2577 delta, add_timestamp); 2578 if (unlikely(PTR_ERR(event) == -EAGAIN)) 2579 goto again; 2580 2581 if (!event) 2582 goto out_fail; 2583 2584 return event; 2585 2586 out_fail: 2587 rb_end_commit(cpu_buffer); 2588 return NULL; 2589 } 2590 2591 #ifdef CONFIG_TRACING 2592 2593 /* 2594 * The lock and unlock are done within a preempt disable section. 2595 * The current_context per_cpu variable can only be modified 2596 * by the current task between lock and unlock. But it can 2597 * be modified more than once via an interrupt. To pass this 2598 * information from the lock to the unlock without having to 2599 * access the 'in_interrupt()' functions again (which do show 2600 * a bit of overhead in something as critical as function tracing, 2601 * we use a bitmask trick. 2602 * 2603 * bit 0 = NMI context 2604 * bit 1 = IRQ context 2605 * bit 2 = SoftIRQ context 2606 * bit 3 = normal context. 2607 * 2608 * This works because this is the order of contexts that can 2609 * preempt other contexts. A SoftIRQ never preempts an IRQ 2610 * context. 2611 * 2612 * When the context is determined, the corresponding bit is 2613 * checked and set (if it was set, then a recursion of that context 2614 * happened). 2615 * 2616 * On unlock, we need to clear this bit. To do so, just subtract 2617 * 1 from the current_context and AND it to itself. 2618 * 2619 * (binary) 2620 * 101 - 1 = 100 2621 * 101 & 100 = 100 (clearing bit zero) 2622 * 2623 * 1010 - 1 = 1001 2624 * 1010 & 1001 = 1000 (clearing bit 1) 2625 * 2626 * The least significant bit can be cleared this way, and it 2627 * just so happens that it is the same bit corresponding to 2628 * the current context. 2629 */ 2630 static DEFINE_PER_CPU(unsigned int, current_context); 2631 2632 static __always_inline int trace_recursive_lock(void) 2633 { 2634 unsigned int val = this_cpu_read(current_context); 2635 int bit; 2636 2637 if (in_interrupt()) { 2638 if (in_nmi()) 2639 bit = 0; 2640 else if (in_irq()) 2641 bit = 1; 2642 else 2643 bit = 2; 2644 } else 2645 bit = 3; 2646 2647 if (unlikely(val & (1 << bit))) 2648 return 1; 2649 2650 val |= (1 << bit); 2651 this_cpu_write(current_context, val); 2652 2653 return 0; 2654 } 2655 2656 static __always_inline void trace_recursive_unlock(void) 2657 { 2658 unsigned int val = this_cpu_read(current_context); 2659 2660 val--; 2661 val &= this_cpu_read(current_context); 2662 this_cpu_write(current_context, val); 2663 } 2664 2665 #else 2666 2667 #define trace_recursive_lock() (0) 2668 #define trace_recursive_unlock() do { } while (0) 2669 2670 #endif 2671 2672 /** 2673 * ring_buffer_lock_reserve - reserve a part of the buffer 2674 * @buffer: the ring buffer to reserve from 2675 * @length: the length of the data to reserve (excluding event header) 2676 * 2677 * Returns a reseverd event on the ring buffer to copy directly to. 2678 * The user of this interface will need to get the body to write into 2679 * and can use the ring_buffer_event_data() interface. 2680 * 2681 * The length is the length of the data needed, not the event length 2682 * which also includes the event header. 2683 * 2684 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 2685 * If NULL is returned, then nothing has been allocated or locked. 
2686 */ 2687 struct ring_buffer_event * 2688 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) 2689 { 2690 struct ring_buffer_per_cpu *cpu_buffer; 2691 struct ring_buffer_event *event; 2692 int cpu; 2693 2694 if (ring_buffer_flags != RB_BUFFERS_ON) 2695 return NULL; 2696 2697 /* If we are tracing schedule, we don't want to recurse */ 2698 preempt_disable_notrace(); 2699 2700 if (atomic_read(&buffer->record_disabled)) 2701 goto out_nocheck; 2702 2703 if (trace_recursive_lock()) 2704 goto out_nocheck; 2705 2706 cpu = raw_smp_processor_id(); 2707 2708 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2709 goto out; 2710 2711 cpu_buffer = buffer->buffers[cpu]; 2712 2713 if (atomic_read(&cpu_buffer->record_disabled)) 2714 goto out; 2715 2716 if (length > BUF_MAX_DATA_SIZE) 2717 goto out; 2718 2719 event = rb_reserve_next_event(buffer, cpu_buffer, length); 2720 if (!event) 2721 goto out; 2722 2723 return event; 2724 2725 out: 2726 trace_recursive_unlock(); 2727 2728 out_nocheck: 2729 preempt_enable_notrace(); 2730 return NULL; 2731 } 2732 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 2733 2734 static void 2735 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, 2736 struct ring_buffer_event *event) 2737 { 2738 u64 delta; 2739 2740 /* 2741 * The event first in the commit queue updates the 2742 * time stamp. 2743 */ 2744 if (rb_event_is_commit(cpu_buffer, event)) { 2745 /* 2746 * A commit event that is first on a page 2747 * updates the write timestamp with the page stamp 2748 */ 2749 if (!rb_event_index(event)) 2750 cpu_buffer->write_stamp = 2751 cpu_buffer->commit_page->page->time_stamp; 2752 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 2753 delta = event->array[0]; 2754 delta <<= TS_SHIFT; 2755 delta += event->time_delta; 2756 cpu_buffer->write_stamp += delta; 2757 } else 2758 cpu_buffer->write_stamp += event->time_delta; 2759 } 2760 } 2761 2762 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 2763 struct ring_buffer_event *event) 2764 { 2765 local_inc(&cpu_buffer->entries); 2766 rb_update_write_stamp(cpu_buffer, event); 2767 rb_end_commit(cpu_buffer); 2768 } 2769 2770 static __always_inline void 2771 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 2772 { 2773 if (buffer->irq_work.waiters_pending) { 2774 buffer->irq_work.waiters_pending = false; 2775 /* irq_work_queue() supplies it's own memory barriers */ 2776 irq_work_queue(&buffer->irq_work.work); 2777 } 2778 2779 if (cpu_buffer->irq_work.waiters_pending) { 2780 cpu_buffer->irq_work.waiters_pending = false; 2781 /* irq_work_queue() supplies it's own memory barriers */ 2782 irq_work_queue(&cpu_buffer->irq_work.work); 2783 } 2784 } 2785 2786 /** 2787 * ring_buffer_unlock_commit - commit a reserved 2788 * @buffer: The buffer to commit to 2789 * @event: The event pointer to commit. 2790 * 2791 * This commits the data to the ring buffer, and releases any locks held. 2792 * 2793 * Must be paired with ring_buffer_lock_reserve. 
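 *
 * A rough usage sketch (editor's addition, not from the original source;
 * "struct my_entry" and "fill_my_entry()" are made-up names):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	fill_my_entry(entry);
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * If the data already sits in a local buffer, ring_buffer_write() further
 * down performs the reserve, copy and commit in a single call.
 *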
2794 */ 2795 int ring_buffer_unlock_commit(struct ring_buffer *buffer, 2796 struct ring_buffer_event *event) 2797 { 2798 struct ring_buffer_per_cpu *cpu_buffer; 2799 int cpu = raw_smp_processor_id(); 2800 2801 cpu_buffer = buffer->buffers[cpu]; 2802 2803 rb_commit(cpu_buffer, event); 2804 2805 rb_wakeups(buffer, cpu_buffer); 2806 2807 trace_recursive_unlock(); 2808 2809 preempt_enable_notrace(); 2810 2811 return 0; 2812 } 2813 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 2814 2815 static inline void rb_event_discard(struct ring_buffer_event *event) 2816 { 2817 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 2818 event = skip_time_extend(event); 2819 2820 /* array[0] holds the actual length for the discarded event */ 2821 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 2822 event->type_len = RINGBUF_TYPE_PADDING; 2823 /* time delta must be non zero */ 2824 if (!event->time_delta) 2825 event->time_delta = 1; 2826 } 2827 2828 /* 2829 * Decrement the entries to the page that an event is on. 2830 * The event does not even need to exist, only the pointer 2831 * to the page it is on. This may only be called before the commit 2832 * takes place. 2833 */ 2834 static inline void 2835 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 2836 struct ring_buffer_event *event) 2837 { 2838 unsigned long addr = (unsigned long)event; 2839 struct buffer_page *bpage = cpu_buffer->commit_page; 2840 struct buffer_page *start; 2841 2842 addr &= PAGE_MASK; 2843 2844 /* Do the likely case first */ 2845 if (likely(bpage->page == (void *)addr)) { 2846 local_dec(&bpage->entries); 2847 return; 2848 } 2849 2850 /* 2851 * Because the commit page may be on the reader page we 2852 * start with the next page and check the end loop there. 2853 */ 2854 rb_inc_page(cpu_buffer, &bpage); 2855 start = bpage; 2856 do { 2857 if (bpage->page == (void *)addr) { 2858 local_dec(&bpage->entries); 2859 return; 2860 } 2861 rb_inc_page(cpu_buffer, &bpage); 2862 } while (bpage != start); 2863 2864 /* commit not part of this buffer?? */ 2865 RB_WARN_ON(cpu_buffer, 1); 2866 } 2867 2868 /** 2869 * ring_buffer_commit_discard - discard an event that has not been committed 2870 * @buffer: the ring buffer 2871 * @event: non committed event to discard 2872 * 2873 * Sometimes an event that is in the ring buffer needs to be ignored. 2874 * This function lets the user discard an event in the ring buffer 2875 * and then that event will not be read later. 2876 * 2877 * This function only works if it is called before the the item has been 2878 * committed. It will try to free the event from the ring buffer 2879 * if another event has not been added behind it. 2880 * 2881 * If another event has been added behind it, it will set the event 2882 * up as discarded, and perform the commit. 2883 * 2884 * If this function is called, do not call ring_buffer_unlock_commit on 2885 * the event. 2886 */ 2887 void ring_buffer_discard_commit(struct ring_buffer *buffer, 2888 struct ring_buffer_event *event) 2889 { 2890 struct ring_buffer_per_cpu *cpu_buffer; 2891 int cpu; 2892 2893 /* The event is discarded regardless */ 2894 rb_event_discard(event); 2895 2896 cpu = smp_processor_id(); 2897 cpu_buffer = buffer->buffers[cpu]; 2898 2899 /* 2900 * This must only be called if the event has not been 2901 * committed yet. Thus we can assume that preemption 2902 * is still disabled. 
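 *
 * [Editor's usage sketch, not from the original source; "should_drop()" is
 *  a made-up predicate. A typical caller pairs the discard with a reserve:
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	...fill in the event data...
 *	if (should_drop(event))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 *
 *  Exactly one of the two calls must follow every successful reserve.]
 *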
2903 */ 2904 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2905 2906 rb_decrement_entry(cpu_buffer, event); 2907 if (rb_try_to_discard(cpu_buffer, event)) 2908 goto out; 2909 2910 /* 2911 * The commit is still visible by the reader, so we 2912 * must still update the timestamp. 2913 */ 2914 rb_update_write_stamp(cpu_buffer, event); 2915 out: 2916 rb_end_commit(cpu_buffer); 2917 2918 trace_recursive_unlock(); 2919 2920 preempt_enable_notrace(); 2921 2922 } 2923 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 2924 2925 /** 2926 * ring_buffer_write - write data to the buffer without reserving 2927 * @buffer: The ring buffer to write to. 2928 * @length: The length of the data being written (excluding the event header) 2929 * @data: The data to write to the buffer. 2930 * 2931 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 2932 * one function. If you already have the data to write to the buffer, it 2933 * may be easier to simply call this function. 2934 * 2935 * Note, like ring_buffer_lock_reserve, the length is the length of the data 2936 * and not the length of the event which would hold the header. 2937 */ 2938 int ring_buffer_write(struct ring_buffer *buffer, 2939 unsigned long length, 2940 void *data) 2941 { 2942 struct ring_buffer_per_cpu *cpu_buffer; 2943 struct ring_buffer_event *event; 2944 void *body; 2945 int ret = -EBUSY; 2946 int cpu; 2947 2948 if (ring_buffer_flags != RB_BUFFERS_ON) 2949 return -EBUSY; 2950 2951 preempt_disable_notrace(); 2952 2953 if (atomic_read(&buffer->record_disabled)) 2954 goto out; 2955 2956 cpu = raw_smp_processor_id(); 2957 2958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2959 goto out; 2960 2961 cpu_buffer = buffer->buffers[cpu]; 2962 2963 if (atomic_read(&cpu_buffer->record_disabled)) 2964 goto out; 2965 2966 if (length > BUF_MAX_DATA_SIZE) 2967 goto out; 2968 2969 event = rb_reserve_next_event(buffer, cpu_buffer, length); 2970 if (!event) 2971 goto out; 2972 2973 body = rb_event_data(event); 2974 2975 memcpy(body, data, length); 2976 2977 rb_commit(cpu_buffer, event); 2978 2979 rb_wakeups(buffer, cpu_buffer); 2980 2981 ret = 0; 2982 out: 2983 preempt_enable_notrace(); 2984 2985 return ret; 2986 } 2987 EXPORT_SYMBOL_GPL(ring_buffer_write); 2988 2989 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 2990 { 2991 struct buffer_page *reader = cpu_buffer->reader_page; 2992 struct buffer_page *head = rb_set_head_page(cpu_buffer); 2993 struct buffer_page *commit = cpu_buffer->commit_page; 2994 2995 /* In case of error, head will be NULL */ 2996 if (unlikely(!head)) 2997 return 1; 2998 2999 return reader->read == rb_page_commit(reader) && 3000 (commit == reader || 3001 (commit == head && 3002 head->read == rb_page_commit(commit))); 3003 } 3004 3005 /** 3006 * ring_buffer_record_disable - stop all writes into the buffer 3007 * @buffer: The ring buffer to stop writes to. 3008 * 3009 * This prevents all writes to the buffer. Any attempt to write 3010 * to the buffer after this will fail and return NULL. 3011 * 3012 * The caller should call synchronize_sched() after this. 3013 */ 3014 void ring_buffer_record_disable(struct ring_buffer *buffer) 3015 { 3016 atomic_inc(&buffer->record_disabled); 3017 } 3018 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 3019 3020 /** 3021 * ring_buffer_record_enable - enable writes to the buffer 3022 * @buffer: The ring buffer to enable writes 3023 * 3024 * Note, multiple disables will need the same number of enables 3025 * to truly enable the writing (much like preempt_disable). 
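 *
 * A short usage sketch (editor's addition, not from the original source):
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	...read or dump the buffer without racing against writers...
 *	ring_buffer_record_enable(buffer);
 *
 * Because the disable is a counter, nested disable/enable pairs on other
 * paths cannot re-enable recording early.
 *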
3026 */ 3027 void ring_buffer_record_enable(struct ring_buffer *buffer) 3028 { 3029 atomic_dec(&buffer->record_disabled); 3030 } 3031 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 3032 3033 /** 3034 * ring_buffer_record_off - stop all writes into the buffer 3035 * @buffer: The ring buffer to stop writes to. 3036 * 3037 * This prevents all writes to the buffer. Any attempt to write 3038 * to the buffer after this will fail and return NULL. 3039 * 3040 * This is different than ring_buffer_record_disable() as 3041 * it works like an on/off switch, whereas the disable() version 3042 * must be paired with an enable(). 3043 */ 3044 void ring_buffer_record_off(struct ring_buffer *buffer) 3045 { 3046 unsigned int rd; 3047 unsigned int new_rd; 3048 3049 do { 3050 rd = atomic_read(&buffer->record_disabled); 3051 new_rd = rd | RB_BUFFER_OFF; 3052 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3053 } 3054 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 3055 3056 /** 3057 * ring_buffer_record_on - restart writes into the buffer 3058 * @buffer: The ring buffer to start writes to. 3059 * 3060 * This enables all writes to the buffer that were disabled by 3061 * ring_buffer_record_off(). 3062 * 3063 * This is different than ring_buffer_record_enable() as 3064 * it works like an on/off switch, whereas the enable() version 3065 * must be paired with a disable(). 3066 */ 3067 void ring_buffer_record_on(struct ring_buffer *buffer) 3068 { 3069 unsigned int rd; 3070 unsigned int new_rd; 3071 3072 do { 3073 rd = atomic_read(&buffer->record_disabled); 3074 new_rd = rd & ~RB_BUFFER_OFF; 3075 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3076 } 3077 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 3078 3079 /** 3080 * ring_buffer_record_is_on - return true if the ring buffer can write 3081 * @buffer: The ring buffer to see if write is enabled 3082 * 3083 * Returns true if the ring buffer is in a state where it accepts writes. 3084 */ 3085 int ring_buffer_record_is_on(struct ring_buffer *buffer) 3086 { 3087 return !atomic_read(&buffer->record_disabled); 3088 } 3089 3090 /** 3091 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 3092 * @buffer: The ring buffer to stop writes to. 3093 * @cpu: The CPU buffer to stop 3094 * 3095 * This prevents all writes to the buffer. Any attempt to write 3096 * to the buffer after this will fail and return NULL. 3097 * 3098 * The caller should call synchronize_sched() after this. 3099 */ 3100 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) 3101 { 3102 struct ring_buffer_per_cpu *cpu_buffer; 3103 3104 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3105 return; 3106 3107 cpu_buffer = buffer->buffers[cpu]; 3108 atomic_inc(&cpu_buffer->record_disabled); 3109 } 3110 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 3111 3112 /** 3113 * ring_buffer_record_enable_cpu - enable writes to the buffer 3114 * @buffer: The ring buffer to enable writes 3115 * @cpu: The CPU to enable. 3116 * 3117 * Note, multiple disables will need the same number of enables 3118 * to truly enable the writing (much like preempt_disable).
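 *
 * [Editor's aside, not in the original source: the buffer-wide
 *  ring_buffer_record_off()/ring_buffer_record_on() switch above works on
 *  the same record_disabled word but flips a dedicated bit (RB_BUFFER_OFF)
 *  with cmpxchg instead of counting. While that bit is set the word stays
 *  nonzero, so writers remain blocked no matter how many counted
 *  disable/enable pairs run in between.]
 *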
3119 */ 3120 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) 3121 { 3122 struct ring_buffer_per_cpu *cpu_buffer; 3123 3124 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3125 return; 3126 3127 cpu_buffer = buffer->buffers[cpu]; 3128 atomic_dec(&cpu_buffer->record_disabled); 3129 } 3130 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 3131 3132 /* 3133 * The total entries in the ring buffer is the running counter 3134 * of entries entered into the ring buffer, minus the sum of 3135 * the entries read from the ring buffer and the number of 3136 * entries that were overwritten. 3137 */ 3138 static inline unsigned long 3139 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 3140 { 3141 return local_read(&cpu_buffer->entries) - 3142 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 3143 } 3144 3145 /** 3146 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 3147 * @buffer: The ring buffer 3148 * @cpu: The per CPU buffer to read from. 3149 */ 3150 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) 3151 { 3152 unsigned long flags; 3153 struct ring_buffer_per_cpu *cpu_buffer; 3154 struct buffer_page *bpage; 3155 u64 ret = 0; 3156 3157 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3158 return 0; 3159 3160 cpu_buffer = buffer->buffers[cpu]; 3161 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3162 /* 3163 * if the tail is on reader_page, oldest time stamp is on the reader 3164 * page 3165 */ 3166 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 3167 bpage = cpu_buffer->reader_page; 3168 else 3169 bpage = rb_set_head_page(cpu_buffer); 3170 if (bpage) 3171 ret = bpage->page->time_stamp; 3172 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3173 3174 return ret; 3175 } 3176 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 3177 3178 /** 3179 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 3180 * @buffer: The ring buffer 3181 * @cpu: The per CPU buffer to read from. 3182 */ 3183 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) 3184 { 3185 struct ring_buffer_per_cpu *cpu_buffer; 3186 unsigned long ret; 3187 3188 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3189 return 0; 3190 3191 cpu_buffer = buffer->buffers[cpu]; 3192 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 3193 3194 return ret; 3195 } 3196 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 3197 3198 /** 3199 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 3200 * @buffer: The ring buffer 3201 * @cpu: The per CPU buffer to get the entries from. 3202 */ 3203 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 3204 { 3205 struct ring_buffer_per_cpu *cpu_buffer; 3206 3207 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3208 return 0; 3209 3210 cpu_buffer = buffer->buffers[cpu]; 3211 3212 return rb_num_of_entries(cpu_buffer); 3213 } 3214 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 3215 3216 /** 3217 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 3218 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
3219 * @buffer: The ring buffer 3220 * @cpu: The per CPU buffer to get the number of overruns from 3221 */ 3222 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 3223 { 3224 struct ring_buffer_per_cpu *cpu_buffer; 3225 unsigned long ret; 3226 3227 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3228 return 0; 3229 3230 cpu_buffer = buffer->buffers[cpu]; 3231 ret = local_read(&cpu_buffer->overrun); 3232 3233 return ret; 3234 } 3235 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 3236 3237 /** 3238 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 3239 * commits failing due to the buffer wrapping around while there are uncommitted 3240 * events, such as during an interrupt storm. 3241 * @buffer: The ring buffer 3242 * @cpu: The per CPU buffer to get the number of overruns from 3243 */ 3244 unsigned long 3245 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) 3246 { 3247 struct ring_buffer_per_cpu *cpu_buffer; 3248 unsigned long ret; 3249 3250 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3251 return 0; 3252 3253 cpu_buffer = buffer->buffers[cpu]; 3254 ret = local_read(&cpu_buffer->commit_overrun); 3255 3256 return ret; 3257 } 3258 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 3259 3260 /** 3261 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 3262 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 3263 * @buffer: The ring buffer 3264 * @cpu: The per CPU buffer to get the number of overruns from 3265 */ 3266 unsigned long 3267 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) 3268 { 3269 struct ring_buffer_per_cpu *cpu_buffer; 3270 unsigned long ret; 3271 3272 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3273 return 0; 3274 3275 cpu_buffer = buffer->buffers[cpu]; 3276 ret = local_read(&cpu_buffer->dropped_events); 3277 3278 return ret; 3279 } 3280 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 3281 3282 /** 3283 * ring_buffer_read_events_cpu - get the number of events successfully read 3284 * @buffer: The ring buffer 3285 * @cpu: The per CPU buffer to get the number of events read 3286 */ 3287 unsigned long 3288 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) 3289 { 3290 struct ring_buffer_per_cpu *cpu_buffer; 3291 3292 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3293 return 0; 3294 3295 cpu_buffer = buffer->buffers[cpu]; 3296 return cpu_buffer->read; 3297 } 3298 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 3299 3300 /** 3301 * ring_buffer_entries - get the number of entries in a buffer 3302 * @buffer: The ring buffer 3303 * 3304 * Returns the total number of entries in the ring buffer 3305 * (all CPU entries) 3306 */ 3307 unsigned long ring_buffer_entries(struct ring_buffer *buffer) 3308 { 3309 struct ring_buffer_per_cpu *cpu_buffer; 3310 unsigned long entries = 0; 3311 int cpu; 3312 3313 /* if you care about this being correct, lock the buffer */ 3314 for_each_buffer_cpu(buffer, cpu) { 3315 cpu_buffer = buffer->buffers[cpu]; 3316 entries += rb_num_of_entries(cpu_buffer); 3317 } 3318 3319 return entries; 3320 } 3321 EXPORT_SYMBOL_GPL(ring_buffer_entries); 3322 3323 /** 3324 * ring_buffer_overruns - get the number of overruns in buffer 3325 * @buffer: The ring buffer 3326 * 3327 * Returns the total number of overruns in the ring buffer 3328 * (all CPU entries) 3329 */ 3330 unsigned long ring_buffer_overruns(struct ring_buffer *buffer) 3331 { 3332 struct ring_buffer_per_cpu *cpu_buffer; 3333 unsigned long overruns = 0; 3334 int cpu; 3335 3336 /* if 
you care about this being correct, lock the buffer */ 3337 for_each_buffer_cpu(buffer, cpu) { 3338 cpu_buffer = buffer->buffers[cpu]; 3339 overruns += local_read(&cpu_buffer->overrun); 3340 } 3341 3342 return overruns; 3343 } 3344 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 3345 3346 static void rb_iter_reset(struct ring_buffer_iter *iter) 3347 { 3348 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3349 3350 /* Iterator usage is expected to have record disabled */ 3351 if (list_empty(&cpu_buffer->reader_page->list)) { 3352 iter->head_page = rb_set_head_page(cpu_buffer); 3353 if (unlikely(!iter->head_page)) 3354 return; 3355 iter->head = iter->head_page->read; 3356 } else { 3357 iter->head_page = cpu_buffer->reader_page; 3358 iter->head = cpu_buffer->reader_page->read; 3359 } 3360 if (iter->head) 3361 iter->read_stamp = cpu_buffer->read_stamp; 3362 else 3363 iter->read_stamp = iter->head_page->page->time_stamp; 3364 iter->cache_reader_page = cpu_buffer->reader_page; 3365 iter->cache_read = cpu_buffer->read; 3366 } 3367 3368 /** 3369 * ring_buffer_iter_reset - reset an iterator 3370 * @iter: The iterator to reset 3371 * 3372 * Resets the iterator, so that it will start from the beginning 3373 * again. 3374 */ 3375 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 3376 { 3377 struct ring_buffer_per_cpu *cpu_buffer; 3378 unsigned long flags; 3379 3380 if (!iter) 3381 return; 3382 3383 cpu_buffer = iter->cpu_buffer; 3384 3385 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3386 rb_iter_reset(iter); 3387 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3388 } 3389 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 3390 3391 /** 3392 * ring_buffer_iter_empty - check if an iterator has no more to read 3393 * @iter: The iterator to check 3394 */ 3395 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 3396 { 3397 struct ring_buffer_per_cpu *cpu_buffer; 3398 3399 cpu_buffer = iter->cpu_buffer; 3400 3401 return iter->head_page == cpu_buffer->commit_page && 3402 iter->head == rb_commit_index(cpu_buffer); 3403 } 3404 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 3405 3406 static void 3407 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 3408 struct ring_buffer_event *event) 3409 { 3410 u64 delta; 3411 3412 switch (event->type_len) { 3413 case RINGBUF_TYPE_PADDING: 3414 return; 3415 3416 case RINGBUF_TYPE_TIME_EXTEND: 3417 delta = event->array[0]; 3418 delta <<= TS_SHIFT; 3419 delta += event->time_delta; 3420 cpu_buffer->read_stamp += delta; 3421 return; 3422 3423 case RINGBUF_TYPE_TIME_STAMP: 3424 /* FIXME: not implemented */ 3425 return; 3426 3427 case RINGBUF_TYPE_DATA: 3428 cpu_buffer->read_stamp += event->time_delta; 3429 return; 3430 3431 default: 3432 BUG(); 3433 } 3434 return; 3435 } 3436 3437 static void 3438 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 3439 struct ring_buffer_event *event) 3440 { 3441 u64 delta; 3442 3443 switch (event->type_len) { 3444 case RINGBUF_TYPE_PADDING: 3445 return; 3446 3447 case RINGBUF_TYPE_TIME_EXTEND: 3448 delta = event->array[0]; 3449 delta <<= TS_SHIFT; 3450 delta += event->time_delta; 3451 iter->read_stamp += delta; 3452 return; 3453 3454 case RINGBUF_TYPE_TIME_STAMP: 3455 /* FIXME: not implemented */ 3456 return; 3457 3458 case RINGBUF_TYPE_DATA: 3459 iter->read_stamp += event->time_delta; 3460 return; 3461 3462 default: 3463 BUG(); 3464 } 3465 return; 3466 } 3467 3468 static struct buffer_page * 3469 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 3470 { 3471 struct buffer_page *reader = 
NULL; 3472 unsigned long overwrite; 3473 unsigned long flags; 3474 int nr_loops = 0; 3475 int ret; 3476 3477 local_irq_save(flags); 3478 arch_spin_lock(&cpu_buffer->lock); 3479 3480 again: 3481 /* 3482 * This should normally only loop twice. But because the 3483 * start of the reader inserts an empty page, it causes 3484 * a case where we will loop three times. There should be no 3485 * reason to loop four times (that I know of). 3486 */ 3487 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 3488 reader = NULL; 3489 goto out; 3490 } 3491 3492 reader = cpu_buffer->reader_page; 3493 3494 /* If there's more to read, return this page */ 3495 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 3496 goto out; 3497 3498 /* Never should we have an index greater than the size */ 3499 if (RB_WARN_ON(cpu_buffer, 3500 cpu_buffer->reader_page->read > rb_page_size(reader))) 3501 goto out; 3502 3503 /* check if we caught up to the tail */ 3504 reader = NULL; 3505 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 3506 goto out; 3507 3508 /* Don't bother swapping if the ring buffer is empty */ 3509 if (rb_num_of_entries(cpu_buffer) == 0) 3510 goto out; 3511 3512 /* 3513 * Reset the reader page to size zero. 3514 */ 3515 local_set(&cpu_buffer->reader_page->write, 0); 3516 local_set(&cpu_buffer->reader_page->entries, 0); 3517 local_set(&cpu_buffer->reader_page->page->commit, 0); 3518 cpu_buffer->reader_page->real_end = 0; 3519 3520 spin: 3521 /* 3522 * Splice the empty reader page into the list around the head. 3523 */ 3524 reader = rb_set_head_page(cpu_buffer); 3525 if (!reader) 3526 goto out; 3527 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 3528 cpu_buffer->reader_page->list.prev = reader->list.prev; 3529 3530 /* 3531 * cpu_buffer->pages just needs to point to the buffer, it 3532 * has no specific buffer page to point to. Lets move it out 3533 * of our way so we don't accidentally swap it. 3534 */ 3535 cpu_buffer->pages = reader->list.prev; 3536 3537 /* The reader page will be pointing to the new head */ 3538 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); 3539 3540 /* 3541 * We want to make sure we read the overruns after we set up our 3542 * pointers to the next object. The writer side does a 3543 * cmpxchg to cross pages which acts as the mb on the writer 3544 * side. Note, the reader will constantly fail the swap 3545 * while the writer is updating the pointers, so this 3546 * guarantees that the overwrite recorded here is the one we 3547 * want to compare with the last_overrun. 3548 */ 3549 smp_mb(); 3550 overwrite = local_read(&(cpu_buffer->overrun)); 3551 3552 /* 3553 * Here's the tricky part. 3554 * 3555 * We need to move the pointer past the header page. 3556 * But we can only do that if a writer is not currently 3557 * moving it. The page before the header page has the 3558 * flag bit '1' set if it is pointing to the page we want. 3559 * but if the writer is in the process of moving it 3560 * than it will be '2' or already moved '0'. 3561 */ 3562 3563 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 3564 3565 /* 3566 * If we did not convert it, then we must try again. 3567 */ 3568 if (!ret) 3569 goto spin; 3570 3571 /* 3572 * Yeah! We succeeded in replacing the page. 3573 * 3574 * Now make the new head point back to the reader page. 
3575 */ 3576 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 3577 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 3578 3579 /* Finally update the reader page to the new head */ 3580 cpu_buffer->reader_page = reader; 3581 rb_reset_reader_page(cpu_buffer); 3582 3583 if (overwrite != cpu_buffer->last_overrun) { 3584 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 3585 cpu_buffer->last_overrun = overwrite; 3586 } 3587 3588 goto again; 3589 3590 out: 3591 arch_spin_unlock(&cpu_buffer->lock); 3592 local_irq_restore(flags); 3593 3594 return reader; 3595 } 3596 3597 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 3598 { 3599 struct ring_buffer_event *event; 3600 struct buffer_page *reader; 3601 unsigned length; 3602 3603 reader = rb_get_reader_page(cpu_buffer); 3604 3605 /* This function should not be called when buffer is empty */ 3606 if (RB_WARN_ON(cpu_buffer, !reader)) 3607 return; 3608 3609 event = rb_reader_event(cpu_buffer); 3610 3611 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 3612 cpu_buffer->read++; 3613 3614 rb_update_read_stamp(cpu_buffer, event); 3615 3616 length = rb_event_length(event); 3617 cpu_buffer->reader_page->read += length; 3618 } 3619 3620 static void rb_advance_iter(struct ring_buffer_iter *iter) 3621 { 3622 struct ring_buffer_per_cpu *cpu_buffer; 3623 struct ring_buffer_event *event; 3624 unsigned length; 3625 3626 cpu_buffer = iter->cpu_buffer; 3627 3628 /* 3629 * Check if we are at the end of the buffer. 3630 */ 3631 if (iter->head >= rb_page_size(iter->head_page)) { 3632 /* discarded commits can make the page empty */ 3633 if (iter->head_page == cpu_buffer->commit_page) 3634 return; 3635 rb_inc_iter(iter); 3636 return; 3637 } 3638 3639 event = rb_iter_head_event(iter); 3640 3641 length = rb_event_length(event); 3642 3643 /* 3644 * This should not be called to advance the header if we are 3645 * at the tail of the buffer. 3646 */ 3647 if (RB_WARN_ON(cpu_buffer, 3648 (iter->head_page == cpu_buffer->commit_page) && 3649 (iter->head + length > rb_commit_index(cpu_buffer)))) 3650 return; 3651 3652 rb_update_iter_read_stamp(iter, event); 3653 3654 iter->head += length; 3655 3656 /* check for end of page padding */ 3657 if ((iter->head >= rb_page_size(iter->head_page)) && 3658 (iter->head_page != cpu_buffer->commit_page)) 3659 rb_inc_iter(iter); 3660 } 3661 3662 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 3663 { 3664 return cpu_buffer->lost_events; 3665 } 3666 3667 static struct ring_buffer_event * 3668 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 3669 unsigned long *lost_events) 3670 { 3671 struct ring_buffer_event *event; 3672 struct buffer_page *reader; 3673 int nr_loops = 0; 3674 3675 again: 3676 /* 3677 * We repeat when a time extend is encountered. 3678 * Since the time extend is always attached to a data event, 3679 * we should never loop more than once. 3680 * (We never hit the following condition more than twice). 
3681 */ 3682 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3683 return NULL; 3684 3685 reader = rb_get_reader_page(cpu_buffer); 3686 if (!reader) 3687 return NULL; 3688 3689 event = rb_reader_event(cpu_buffer); 3690 3691 switch (event->type_len) { 3692 case RINGBUF_TYPE_PADDING: 3693 if (rb_null_event(event)) 3694 RB_WARN_ON(cpu_buffer, 1); 3695 /* 3696 * Because the writer could be discarding every 3697 * event it creates (which would probably be bad) 3698 * if we were to go back to "again" then we may never 3699 * catch up, and will trigger the warn on, or lock 3700 * the box. Return the padding, and we will release 3701 * the current locks, and try again. 3702 */ 3703 return event; 3704 3705 case RINGBUF_TYPE_TIME_EXTEND: 3706 /* Internal data, OK to advance */ 3707 rb_advance_reader(cpu_buffer); 3708 goto again; 3709 3710 case RINGBUF_TYPE_TIME_STAMP: 3711 /* FIXME: not implemented */ 3712 rb_advance_reader(cpu_buffer); 3713 goto again; 3714 3715 case RINGBUF_TYPE_DATA: 3716 if (ts) { 3717 *ts = cpu_buffer->read_stamp + event->time_delta; 3718 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 3719 cpu_buffer->cpu, ts); 3720 } 3721 if (lost_events) 3722 *lost_events = rb_lost_events(cpu_buffer); 3723 return event; 3724 3725 default: 3726 BUG(); 3727 } 3728 3729 return NULL; 3730 } 3731 EXPORT_SYMBOL_GPL(ring_buffer_peek); 3732 3733 static struct ring_buffer_event * 3734 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3735 { 3736 struct ring_buffer *buffer; 3737 struct ring_buffer_per_cpu *cpu_buffer; 3738 struct ring_buffer_event *event; 3739 int nr_loops = 0; 3740 3741 cpu_buffer = iter->cpu_buffer; 3742 buffer = cpu_buffer->buffer; 3743 3744 /* 3745 * Check if someone performed a consuming read to 3746 * the buffer. A consuming read invalidates the iterator 3747 * and we need to reset the iterator in this case. 3748 */ 3749 if (unlikely(iter->cache_read != cpu_buffer->read || 3750 iter->cache_reader_page != cpu_buffer->reader_page)) 3751 rb_iter_reset(iter); 3752 3753 again: 3754 if (ring_buffer_iter_empty(iter)) 3755 return NULL; 3756 3757 /* 3758 * We repeat when a time extend is encountered. 3759 * Since the time extend is always attached to a data event, 3760 * we should never loop more than once. 3761 * (We never hit the following condition more than twice). 3762 */ 3763 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3764 return NULL; 3765 3766 if (rb_per_cpu_empty(cpu_buffer)) 3767 return NULL; 3768 3769 if (iter->head >= local_read(&iter->head_page->page->commit)) { 3770 rb_inc_iter(iter); 3771 goto again; 3772 } 3773 3774 event = rb_iter_head_event(iter); 3775 3776 switch (event->type_len) { 3777 case RINGBUF_TYPE_PADDING: 3778 if (rb_null_event(event)) { 3779 rb_inc_iter(iter); 3780 goto again; 3781 } 3782 rb_advance_iter(iter); 3783 return event; 3784 3785 case RINGBUF_TYPE_TIME_EXTEND: 3786 /* Internal data, OK to advance */ 3787 rb_advance_iter(iter); 3788 goto again; 3789 3790 case RINGBUF_TYPE_TIME_STAMP: 3791 /* FIXME: not implemented */ 3792 rb_advance_iter(iter); 3793 goto again; 3794 3795 case RINGBUF_TYPE_DATA: 3796 if (ts) { 3797 *ts = iter->read_stamp + event->time_delta; 3798 ring_buffer_normalize_time_stamp(buffer, 3799 cpu_buffer->cpu, ts); 3800 } 3801 return event; 3802 3803 default: 3804 BUG(); 3805 } 3806 3807 return NULL; 3808 } 3809 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 3810 3811 static inline int rb_ok_to_lock(void) 3812 { 3813 /* 3814 * If an NMI die dumps out the content of the ring buffer 3815 * do not grab locks. 
We also permanently disable the ring 3816 * buffer. A one-time deal is all you get from reading 3817 * the ring buffer from an NMI. 3818 */ 3819 if (likely(!in_nmi())) 3820 return 1; 3821 3822 tracing_off_permanent(); 3823 return 0; 3824 } 3825 3826 /** 3827 * ring_buffer_peek - peek at the next event to be read 3828 * @buffer: The ring buffer to read 3829 * @cpu: The cpu to peek at 3830 * @ts: The timestamp counter of this event. 3831 * @lost_events: a variable to store if events were lost (may be NULL) 3832 * 3833 * This will return the event that will be read next, but does 3834 * not consume the data. 3835 */ 3836 struct ring_buffer_event * 3837 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, 3838 unsigned long *lost_events) 3839 { 3840 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 3841 struct ring_buffer_event *event; 3842 unsigned long flags; 3843 int dolock; 3844 3845 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3846 return NULL; 3847 3848 dolock = rb_ok_to_lock(); 3849 again: 3850 local_irq_save(flags); 3851 if (dolock) 3852 raw_spin_lock(&cpu_buffer->reader_lock); 3853 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3854 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3855 rb_advance_reader(cpu_buffer); 3856 if (dolock) 3857 raw_spin_unlock(&cpu_buffer->reader_lock); 3858 local_irq_restore(flags); 3859 3860 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3861 goto again; 3862 3863 return event; 3864 } 3865 3866 /** 3867 * ring_buffer_iter_peek - peek at the next event to be read 3868 * @iter: The ring buffer iterator 3869 * @ts: The timestamp counter of this event. 3870 * 3871 * This will return the event that will be read next, but does 3872 * not increment the iterator. 3873 */ 3874 struct ring_buffer_event * 3875 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3876 { 3877 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3878 struct ring_buffer_event *event; 3879 unsigned long flags; 3880 3881 again: 3882 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3883 event = rb_iter_peek(iter, ts); 3884 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3885 3886 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3887 goto again; 3888 3889 return event; 3890 } 3891 3892 /** 3893 * ring_buffer_consume - return an event and consume it 3894 * @buffer: The ring buffer to get the next event from 3895 * @cpu: the cpu to read the buffer from 3896 * @ts: a variable to store the timestamp (may be NULL) 3897 * @lost_events: a variable to store if events were lost (may be NULL) 3898 * 3899 * Returns the next event in the ring buffer, and that event is consumed. 3900 * Meaning that sequential reads will keep returning a different event, 3901 * and eventually empty the ring buffer if the producer is slower.
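 *
 * A rough consumer-side sketch (editor's addition, not from the original
 * source; "handle()" is a made-up callback):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL)
 *		handle(ring_buffer_event_data(event), ts, lost);
 *
 * Each call hands back one event and advances the reader, so the loop
 * drains the cpu buffer as long as it keeps up with the writer.
 *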
3902 */ 3903 struct ring_buffer_event * 3904 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, 3905 unsigned long *lost_events) 3906 { 3907 struct ring_buffer_per_cpu *cpu_buffer; 3908 struct ring_buffer_event *event = NULL; 3909 unsigned long flags; 3910 int dolock; 3911 3912 dolock = rb_ok_to_lock(); 3913 3914 again: 3915 /* might be called in atomic */ 3916 preempt_disable(); 3917 3918 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3919 goto out; 3920 3921 cpu_buffer = buffer->buffers[cpu]; 3922 local_irq_save(flags); 3923 if (dolock) 3924 raw_spin_lock(&cpu_buffer->reader_lock); 3925 3926 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3927 if (event) { 3928 cpu_buffer->lost_events = 0; 3929 rb_advance_reader(cpu_buffer); 3930 } 3931 3932 if (dolock) 3933 raw_spin_unlock(&cpu_buffer->reader_lock); 3934 local_irq_restore(flags); 3935 3936 out: 3937 preempt_enable(); 3938 3939 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3940 goto again; 3941 3942 return event; 3943 } 3944 EXPORT_SYMBOL_GPL(ring_buffer_consume); 3945 3946 /** 3947 * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer 3948 * @buffer: The ring buffer to read from 3949 * @cpu: The cpu buffer to iterate over 3950 * 3951 * This performs the initial preparations necessary to iterate 3952 * through the buffer. Memory is allocated, buffer recording 3953 * is disabled, and the iterator pointer is returned to the caller. 3954 * 3955 * Disabling buffer recording prevents the reading from being 3956 * corrupted. This is not a consuming read, so a producer is not 3957 * expected. 3958 * 3959 * After a sequence of ring_buffer_read_prepare calls, the user is 3960 * expected to make at least one call to ring_buffer_read_prepare_sync. 3961 * Afterwards, ring_buffer_read_start is invoked to get things going 3962 * for real. 3963 * 3964 * This overall must be paired with ring_buffer_read_finish. 3965 */ 3966 struct ring_buffer_iter * 3967 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) 3968 { 3969 struct ring_buffer_per_cpu *cpu_buffer; 3970 struct ring_buffer_iter *iter; 3971 3972 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3973 return NULL; 3974 3975 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3976 if (!iter) 3977 return NULL; 3978 3979 cpu_buffer = buffer->buffers[cpu]; 3980 3981 iter->cpu_buffer = cpu_buffer; 3982 3983 atomic_inc(&buffer->resize_disabled); 3984 atomic_inc(&cpu_buffer->record_disabled); 3985 3986 return iter; 3987 } 3988 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 3989 3990 /** 3991 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 3992 * 3993 * All previously invoked ring_buffer_read_prepare calls to prepare 3994 * iterators will be synchronized. Afterwards, ring_buffer_read_start 3995 * calls on those iterators are allowed. 3996 */ 3997 void 3998 ring_buffer_read_prepare_sync(void) 3999 { 4000 synchronize_sched(); 4001 } 4002 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 4003 4004 /** 4005 * ring_buffer_read_start - start a non-consuming read of the buffer 4006 * @iter: The iterator returned by ring_buffer_read_prepare 4007 * 4008 * This finalizes the startup of an iteration through the buffer. 4009 * The iterator comes from a call to ring_buffer_read_prepare and 4010 * an intervening ring_buffer_read_prepare_sync must have been 4011 * performed. 4012 * 4013 * Must be paired with ring_buffer_read_finish.
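 *
 * The whole non-consuming sequence, as a rough sketch (editor's addition,
 * not from the original source):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		...examine the event without consuming it...
 *	ring_buffer_read_finish(iter);
 *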
4014 */ 4015 void 4016 ring_buffer_read_start(struct ring_buffer_iter *iter) 4017 { 4018 struct ring_buffer_per_cpu *cpu_buffer; 4019 unsigned long flags; 4020 4021 if (!iter) 4022 return; 4023 4024 cpu_buffer = iter->cpu_buffer; 4025 4026 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4027 arch_spin_lock(&cpu_buffer->lock); 4028 rb_iter_reset(iter); 4029 arch_spin_unlock(&cpu_buffer->lock); 4030 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4031 } 4032 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 4033 4034 /** 4035 * ring_buffer_read_finish - finish reading the iterator of the buffer 4036 * @iter: The iterator retrieved by ring_buffer_start 4037 * 4038 * This re-enables the recording to the buffer, and frees the 4039 * iterator. 4040 */ 4041 void 4042 ring_buffer_read_finish(struct ring_buffer_iter *iter) 4043 { 4044 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4045 unsigned long flags; 4046 4047 /* 4048 * Ring buffer is disabled from recording, here's a good place 4049 * to check the integrity of the ring buffer. 4050 * Must prevent readers from trying to read, as the check 4051 * clears the HEAD page and readers require it. 4052 */ 4053 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4054 rb_check_pages(cpu_buffer); 4055 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4056 4057 atomic_dec(&cpu_buffer->record_disabled); 4058 atomic_dec(&cpu_buffer->buffer->resize_disabled); 4059 kfree(iter); 4060 } 4061 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 4062 4063 /** 4064 * ring_buffer_read - read the next item in the ring buffer by the iterator 4065 * @iter: The ring buffer iterator 4066 * @ts: The time stamp of the event read. 4067 * 4068 * This reads the next event in the ring buffer and increments the iterator. 4069 */ 4070 struct ring_buffer_event * 4071 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 4072 { 4073 struct ring_buffer_event *event; 4074 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4075 unsigned long flags; 4076 4077 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4078 again: 4079 event = rb_iter_peek(iter, ts); 4080 if (!event) 4081 goto out; 4082 4083 if (event->type_len == RINGBUF_TYPE_PADDING) 4084 goto again; 4085 4086 rb_advance_iter(iter); 4087 out: 4088 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4089 4090 return event; 4091 } 4092 EXPORT_SYMBOL_GPL(ring_buffer_read); 4093 4094 /** 4095 * ring_buffer_size - return the size of the ring buffer (in bytes) 4096 * @buffer: The ring buffer. 4097 */ 4098 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) 4099 { 4100 /* 4101 * Earlier, this method returned 4102 * BUF_PAGE_SIZE * buffer->nr_pages 4103 * Since the nr_pages field is now removed, we have converted this to 4104 * return the per cpu buffer value. 
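 * A caller that still wants the old total can sum the per cpu sizes
 * itself, e.g. (using this file's for_each_buffer_cpu() helper):
 *	for_each_buffer_cpu(buffer, cpu)
 *		total += ring_buffer_size(buffer, cpu);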
4105 */ 4106 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4107 return 0; 4108 4109 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 4110 } 4111 EXPORT_SYMBOL_GPL(ring_buffer_size); 4112 4113 static void 4114 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 4115 { 4116 rb_head_page_deactivate(cpu_buffer); 4117 4118 cpu_buffer->head_page 4119 = list_entry(cpu_buffer->pages, struct buffer_page, list); 4120 local_set(&cpu_buffer->head_page->write, 0); 4121 local_set(&cpu_buffer->head_page->entries, 0); 4122 local_set(&cpu_buffer->head_page->page->commit, 0); 4123 4124 cpu_buffer->head_page->read = 0; 4125 4126 cpu_buffer->tail_page = cpu_buffer->head_page; 4127 cpu_buffer->commit_page = cpu_buffer->head_page; 4128 4129 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 4130 INIT_LIST_HEAD(&cpu_buffer->new_pages); 4131 local_set(&cpu_buffer->reader_page->write, 0); 4132 local_set(&cpu_buffer->reader_page->entries, 0); 4133 local_set(&cpu_buffer->reader_page->page->commit, 0); 4134 cpu_buffer->reader_page->read = 0; 4135 4136 local_set(&cpu_buffer->entries_bytes, 0); 4137 local_set(&cpu_buffer->overrun, 0); 4138 local_set(&cpu_buffer->commit_overrun, 0); 4139 local_set(&cpu_buffer->dropped_events, 0); 4140 local_set(&cpu_buffer->entries, 0); 4141 local_set(&cpu_buffer->committing, 0); 4142 local_set(&cpu_buffer->commits, 0); 4143 cpu_buffer->read = 0; 4144 cpu_buffer->read_bytes = 0; 4145 4146 cpu_buffer->write_stamp = 0; 4147 cpu_buffer->read_stamp = 0; 4148 4149 cpu_buffer->lost_events = 0; 4150 cpu_buffer->last_overrun = 0; 4151 4152 rb_head_page_activate(cpu_buffer); 4153 } 4154 4155 /** 4156 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 4157 * @buffer: The ring buffer to reset a per cpu buffer of 4158 * @cpu: The CPU buffer to be reset 4159 */ 4160 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) 4161 { 4162 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4163 unsigned long flags; 4164 4165 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4166 return; 4167 4168 atomic_inc(&buffer->resize_disabled); 4169 atomic_inc(&cpu_buffer->record_disabled); 4170 4171 /* Make sure all commits have finished */ 4172 synchronize_sched(); 4173 4174 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4175 4176 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 4177 goto out; 4178 4179 arch_spin_lock(&cpu_buffer->lock); 4180 4181 rb_reset_cpu(cpu_buffer); 4182 4183 arch_spin_unlock(&cpu_buffer->lock); 4184 4185 out: 4186 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4187 4188 atomic_dec(&cpu_buffer->record_disabled); 4189 atomic_dec(&buffer->resize_disabled); 4190 } 4191 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 4192 4193 /** 4194 * ring_buffer_reset - reset a ring buffer 4195 * @buffer: The ring buffer to reset all cpu buffers 4196 */ 4197 void ring_buffer_reset(struct ring_buffer *buffer) 4198 { 4199 int cpu; 4200 4201 for_each_buffer_cpu(buffer, cpu) 4202 ring_buffer_reset_cpu(buffer, cpu); 4203 } 4204 EXPORT_SYMBOL_GPL(ring_buffer_reset); 4205 4206 /** 4207 * rind_buffer_empty - is the ring buffer empty? 
4208 * @buffer: The ring buffer to test 4209 */ 4210 int ring_buffer_empty(struct ring_buffer *buffer) 4211 { 4212 struct ring_buffer_per_cpu *cpu_buffer; 4213 unsigned long flags; 4214 int dolock; 4215 int cpu; 4216 int ret; 4217 4218 dolock = rb_ok_to_lock(); 4219 4220 /* yes this is racy, but if you don't like the race, lock the buffer */ 4221 for_each_buffer_cpu(buffer, cpu) { 4222 cpu_buffer = buffer->buffers[cpu]; 4223 local_irq_save(flags); 4224 if (dolock) 4225 raw_spin_lock(&cpu_buffer->reader_lock); 4226 ret = rb_per_cpu_empty(cpu_buffer); 4227 if (dolock) 4228 raw_spin_unlock(&cpu_buffer->reader_lock); 4229 local_irq_restore(flags); 4230 4231 if (!ret) 4232 return 0; 4233 } 4234 4235 return 1; 4236 } 4237 EXPORT_SYMBOL_GPL(ring_buffer_empty); 4238 4239 /** 4240 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 4241 * @buffer: The ring buffer 4242 * @cpu: The CPU buffer to test 4243 */ 4244 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 4245 { 4246 struct ring_buffer_per_cpu *cpu_buffer; 4247 unsigned long flags; 4248 int dolock; 4249 int ret; 4250 4251 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4252 return 1; 4253 4254 dolock = rb_ok_to_lock(); 4255 4256 cpu_buffer = buffer->buffers[cpu]; 4257 local_irq_save(flags); 4258 if (dolock) 4259 raw_spin_lock(&cpu_buffer->reader_lock); 4260 ret = rb_per_cpu_empty(cpu_buffer); 4261 if (dolock) 4262 raw_spin_unlock(&cpu_buffer->reader_lock); 4263 local_irq_restore(flags); 4264 4265 return ret; 4266 } 4267 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 4268 4269 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4270 /** 4271 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 4272 * @buffer_a: One buffer to swap with 4273 * @buffer_b: The other buffer to swap with 4274 * 4275 * This function is useful for tracers that want to take a "snapshot" 4276 * of a CPU buffer and has another back up buffer lying around. 4277 * it is expected that the tracer handles the cpu buffer not being 4278 * used at the moment. 4279 */ 4280 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 4281 struct ring_buffer *buffer_b, int cpu) 4282 { 4283 struct ring_buffer_per_cpu *cpu_buffer_a; 4284 struct ring_buffer_per_cpu *cpu_buffer_b; 4285 int ret = -EINVAL; 4286 4287 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 4288 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 4289 goto out; 4290 4291 cpu_buffer_a = buffer_a->buffers[cpu]; 4292 cpu_buffer_b = buffer_b->buffers[cpu]; 4293 4294 /* At least make sure the two buffers are somewhat the same */ 4295 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 4296 goto out; 4297 4298 ret = -EAGAIN; 4299 4300 if (ring_buffer_flags != RB_BUFFERS_ON) 4301 goto out; 4302 4303 if (atomic_read(&buffer_a->record_disabled)) 4304 goto out; 4305 4306 if (atomic_read(&buffer_b->record_disabled)) 4307 goto out; 4308 4309 if (atomic_read(&cpu_buffer_a->record_disabled)) 4310 goto out; 4311 4312 if (atomic_read(&cpu_buffer_b->record_disabled)) 4313 goto out; 4314 4315 /* 4316 * We can't do a synchronize_sched here because this 4317 * function can be called in atomic context. 4318 * Normally this will be called from the same CPU as cpu. 4319 * If not it's up to the caller to protect this. 
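 *
 * Instead, recording on both per cpu buffers is disabled for the
 * duration of the swap below, and the swap is refused with -EBUSY if
 * a commit is still in progress on either side.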
4320 */ 4321 atomic_inc(&cpu_buffer_a->record_disabled); 4322 atomic_inc(&cpu_buffer_b->record_disabled); 4323 4324 ret = -EBUSY; 4325 if (local_read(&cpu_buffer_a->committing)) 4326 goto out_dec; 4327 if (local_read(&cpu_buffer_b->committing)) 4328 goto out_dec; 4329 4330 buffer_a->buffers[cpu] = cpu_buffer_b; 4331 buffer_b->buffers[cpu] = cpu_buffer_a; 4332 4333 cpu_buffer_b->buffer = buffer_a; 4334 cpu_buffer_a->buffer = buffer_b; 4335 4336 ret = 0; 4337 4338 out_dec: 4339 atomic_dec(&cpu_buffer_a->record_disabled); 4340 atomic_dec(&cpu_buffer_b->record_disabled); 4341 out: 4342 return ret; 4343 } 4344 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 4345 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 4346 4347 /** 4348 * ring_buffer_alloc_read_page - allocate a page to read from buffer 4349 * @buffer: the buffer to allocate for. 4350 * @cpu: the cpu buffer to allocate. 4351 * 4352 * This function is used in conjunction with ring_buffer_read_page. 4353 * When reading a full page from the ring buffer, these functions 4354 * can be used to speed up the process. The calling function should 4355 * allocate a few pages first with this function. Then when it 4356 * needs to get pages from the ring buffer, it passes the result 4357 * of this function into ring_buffer_read_page, which will swap 4358 * the page that was allocated, with the read page of the buffer. 4359 * 4360 * Returns: 4361 * The page allocated, or NULL on error. 4362 */ 4363 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) 4364 { 4365 struct buffer_data_page *bpage; 4366 struct page *page; 4367 4368 page = alloc_pages_node(cpu_to_node(cpu), 4369 GFP_KERNEL | __GFP_NORETRY, 0); 4370 if (!page) 4371 return NULL; 4372 4373 bpage = page_address(page); 4374 4375 rb_init_page(bpage); 4376 4377 return bpage; 4378 } 4379 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 4380 4381 /** 4382 * ring_buffer_free_read_page - free an allocated read page 4383 * @buffer: the buffer the page was allocate for 4384 * @data: the page to free 4385 * 4386 * Free a page allocated from ring_buffer_alloc_read_page. 4387 */ 4388 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) 4389 { 4390 free_page((unsigned long)data); 4391 } 4392 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 4393 4394 /** 4395 * ring_buffer_read_page - extract a page from the ring buffer 4396 * @buffer: buffer to extract from 4397 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 4398 * @len: amount to extract 4399 * @cpu: the cpu of the buffer to extract 4400 * @full: should the extraction only happen when the page is full. 4401 * 4402 * This function will pull out a page from the ring buffer and consume it. 4403 * @data_page must be the address of the variable that was returned 4404 * from ring_buffer_alloc_read_page. This is because the page might be used 4405 * to swap with a page in the ring buffer. 4406 * 4407 * for example: 4408 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 4409 * if (!rpage) 4410 * return error; 4411 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 4412 * if (ret >= 0) 4413 * process_page(rpage, ret); 4414 * 4415 * When @full is set, the function will not return true unless 4416 * the writer is off the reader page. 4417 * 4418 * Note: it is up to the calling functions to handle sleeps and wakeups. 4419 * The ring buffer can be used anywhere in the kernel and can not 4420 * blindly call wake_up. The layer that uses the ring buffer must be 4421 * responsible for that. 
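 *
 * Extending the example above, a caller draining all currently
 * available data would loop and then release the page (sketch only,
 * process_page() is a placeholder):
 *
 *	while ((ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0)) >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);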
4422 * 4423 * Returns: 4424 * >=0 if data has been transferred, returns the offset of consumed data. 4425 * <0 if no data has been transferred. 4426 */ 4427 int ring_buffer_read_page(struct ring_buffer *buffer, 4428 void **data_page, size_t len, int cpu, int full) 4429 { 4430 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4431 struct ring_buffer_event *event; 4432 struct buffer_data_page *bpage; 4433 struct buffer_page *reader; 4434 unsigned long missed_events; 4435 unsigned long flags; 4436 unsigned int commit; 4437 unsigned int read; 4438 u64 save_timestamp; 4439 int ret = -1; 4440 4441 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4442 goto out; 4443 4444 /* 4445 * If len is not big enough to hold the page header, then 4446 * we can not copy anything. 4447 */ 4448 if (len <= BUF_PAGE_HDR_SIZE) 4449 goto out; 4450 4451 len -= BUF_PAGE_HDR_SIZE; 4452 4453 if (!data_page) 4454 goto out; 4455 4456 bpage = *data_page; 4457 if (!bpage) 4458 goto out; 4459 4460 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4461 4462 reader = rb_get_reader_page(cpu_buffer); 4463 if (!reader) 4464 goto out_unlock; 4465 4466 event = rb_reader_event(cpu_buffer); 4467 4468 read = reader->read; 4469 commit = rb_page_commit(reader); 4470 4471 /* Check if any events were dropped */ 4472 missed_events = cpu_buffer->lost_events; 4473 4474 /* 4475 * If this page has been partially read or 4476 * if len is not big enough to read the rest of the page or 4477 * a writer is still on the page, then 4478 * we must copy the data from the page to the buffer. 4479 * Otherwise, we can simply swap the page with the one passed in. 4480 */ 4481 if (read || (len < (commit - read)) || 4482 cpu_buffer->reader_page == cpu_buffer->commit_page) { 4483 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 4484 unsigned int rpos = read; 4485 unsigned int pos = 0; 4486 unsigned int size; 4487 4488 if (full) 4489 goto out_unlock; 4490 4491 if (len > (commit - read)) 4492 len = (commit - read); 4493 4494 /* Always keep the time extend and data together */ 4495 size = rb_event_ts_length(event); 4496 4497 if (len < size) 4498 goto out_unlock; 4499 4500 /* save the current timestamp, since the user will need it */ 4501 save_timestamp = cpu_buffer->read_stamp; 4502 4503 /* Need to copy one event at a time */ 4504 do { 4505 /* We need the size of one event, because 4506 * rb_advance_reader only advances by one event, 4507 * whereas rb_event_ts_length may include the size of 4508 * one or two events. 4509 * We have already ensured there's enough space if this 4510 * is a time extend. 
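 * (rb_event_ts_length() counts a time extend together with the data
 * event that follows it as one unit, while rb_event_length() is the
 * size of the current event alone.)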
*/ 4511 size = rb_event_length(event); 4512 memcpy(bpage->data + pos, rpage->data + rpos, size); 4513 4514 len -= size; 4515 4516 rb_advance_reader(cpu_buffer); 4517 rpos = reader->read; 4518 pos += size; 4519 4520 if (rpos >= commit) 4521 break; 4522 4523 event = rb_reader_event(cpu_buffer); 4524 /* Always keep the time extend and data together */ 4525 size = rb_event_ts_length(event); 4526 } while (len >= size); 4527 4528 /* update bpage */ 4529 local_set(&bpage->commit, pos); 4530 bpage->time_stamp = save_timestamp; 4531 4532 /* we copied everything to the beginning */ 4533 read = 0; 4534 } else { 4535 /* update the entry counter */ 4536 cpu_buffer->read += rb_page_entries(reader); 4537 cpu_buffer->read_bytes += BUF_PAGE_SIZE; 4538 4539 /* swap the pages */ 4540 rb_init_page(bpage); 4541 bpage = reader->page; 4542 reader->page = *data_page; 4543 local_set(&reader->write, 0); 4544 local_set(&reader->entries, 0); 4545 reader->read = 0; 4546 *data_page = bpage; 4547 4548 /* 4549 * Use the real_end for the data size, 4550 * This gives us a chance to store the lost events 4551 * on the page. 4552 */ 4553 if (reader->real_end) 4554 local_set(&bpage->commit, reader->real_end); 4555 } 4556 ret = read; 4557 4558 cpu_buffer->lost_events = 0; 4559 4560 commit = local_read(&bpage->commit); 4561 /* 4562 * Set a flag in the commit field if we lost events 4563 */ 4564 if (missed_events) { 4565 /* If there is room at the end of the page to save the 4566 * missed events, then record it there. 4567 */ 4568 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 4569 memcpy(&bpage->data[commit], &missed_events, 4570 sizeof(missed_events)); 4571 local_add(RB_MISSED_STORED, &bpage->commit); 4572 commit += sizeof(missed_events); 4573 } 4574 local_add(RB_MISSED_EVENTS, &bpage->commit); 4575 } 4576 4577 /* 4578 * This page may be off to user land. Zero it out here. 4579 */ 4580 if (commit < BUF_PAGE_SIZE) 4581 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 4582 4583 out_unlock: 4584 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4585 4586 out: 4587 return ret; 4588 } 4589 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 4590 4591 #ifdef CONFIG_HOTPLUG_CPU 4592 static int rb_cpu_notify(struct notifier_block *self, 4593 unsigned long action, void *hcpu) 4594 { 4595 struct ring_buffer *buffer = 4596 container_of(self, struct ring_buffer, cpu_notify); 4597 long cpu = (long)hcpu; 4598 int cpu_i, nr_pages_same; 4599 unsigned int nr_pages; 4600 4601 switch (action) { 4602 case CPU_UP_PREPARE: 4603 case CPU_UP_PREPARE_FROZEN: 4604 if (cpumask_test_cpu(cpu, buffer->cpumask)) 4605 return NOTIFY_OK; 4606 4607 nr_pages = 0; 4608 nr_pages_same = 1; 4609 /* check if all cpu sizes are same */ 4610 for_each_buffer_cpu(buffer, cpu_i) { 4611 /* fill in the size from first enabled cpu */ 4612 if (nr_pages == 0) 4613 nr_pages = buffer->buffers[cpu_i]->nr_pages; 4614 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 4615 nr_pages_same = 0; 4616 break; 4617 } 4618 } 4619 /* allocate minimum pages, user can later expand it */ 4620 if (!nr_pages_same) 4621 nr_pages = 2; 4622 buffer->buffers[cpu] = 4623 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 4624 if (!buffer->buffers[cpu]) { 4625 WARN(1, "failed to allocate ring buffer on CPU %ld\n", 4626 cpu); 4627 return NOTIFY_OK; 4628 } 4629 smp_wmb(); 4630 cpumask_set_cpu(cpu, buffer->cpumask); 4631 break; 4632 case CPU_DOWN_PREPARE: 4633 case CPU_DOWN_PREPARE_FROZEN: 4634 /* 4635 * Do nothing. 
4636 * If we were to free the buffer, then the user would
4637 * lose any trace that was in the buffer.
4638 */
4639 break;
4640 default:
4641 break;
4642 }
4643 return NOTIFY_OK;
4644 }
4645 #endif
4646
4647 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4648 /*
4649 * This is a basic integrity check of the ring buffer.
4650 * Late in the boot cycle this test will run when configured in.
4651 * It will kick off a thread per CPU that will go into a loop
4652 * writing to the per cpu ring buffer various sizes of data.
4653 * Some of the data will be large items, some small.
4654 *
4655 * Another thread is created that goes into a spin, sending out
4656 * IPIs to the other CPUs to also write into the ring buffer.
4657 * This is to test the nesting ability of the buffer.
4658 *
4659 * Basic stats are recorded and reported. If something in the
4660 * ring buffer should happen that's not expected, a big warning
4661 * is displayed and all ring buffers are disabled.
4662 */
4663 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4664
4665 struct rb_test_data {
4666 struct ring_buffer *buffer;
4667 unsigned long events;
4668 unsigned long bytes_written;
4669 unsigned long bytes_alloc;
4670 unsigned long bytes_dropped;
4671 unsigned long events_nested;
4672 unsigned long bytes_written_nested;
4673 unsigned long bytes_alloc_nested;
4674 unsigned long bytes_dropped_nested;
4675 int min_size_nested;
4676 int max_size_nested;
4677 int max_size;
4678 int min_size;
4679 int cpu;
4680 int cnt;
4681 };
4682
4683 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4684
4685 /* 1 meg per cpu */
4686 #define RB_TEST_BUFFER_SIZE 1048576
4687
4688 static char rb_string[] __initdata =
4689 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4690 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4691 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4692
4693 static bool rb_test_started __initdata;
4694
4695 struct rb_item {
4696 int size;
4697 char str[];
4698 };
4699
4700 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4701 {
4702 struct ring_buffer_event *event;
4703 struct rb_item *item;
4704 bool started;
4705 int event_len;
4706 int size;
4707 int len;
4708 int cnt;
4709
4710 /* Have nested writes different than what is written */
4711 cnt = data->cnt + (nested ? 27 : 0);
4712
4713 /* Multiply cnt by ~e, to make some unique increment */
4714 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4715
4716 len = size + sizeof(struct rb_item);
4717
4718 started = rb_test_started;
4719 /* read rb_test_started before checking buffer enabled */
4720 smp_rmb();
4721
4722 event = ring_buffer_lock_reserve(data->buffer, len);
4723 if (!event) {
4724 /* Ignore dropped events before test starts.
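 * The buffer is kept record-off until all test threads are up, so
 * failed reserves before then are expected.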
*/
4725 if (started) {
4726 if (nested)
4727 data->bytes_dropped_nested += len;
4728 else
4729 data->bytes_dropped += len;
4730 }
4731 return len;
4732 }
4733
4734 event_len = ring_buffer_event_length(event);
4735
4736 if (RB_WARN_ON(data->buffer, event_len < len))
4737 goto out;
4738
4739 item = ring_buffer_event_data(event);
4740 item->size = size;
4741 memcpy(item->str, rb_string, size);
4742
4743 if (nested) {
4744 data->bytes_alloc_nested += event_len;
4745 data->bytes_written_nested += len;
4746 data->events_nested++;
4747 if (!data->min_size_nested || len < data->min_size_nested)
4748 data->min_size_nested = len;
4749 if (len > data->max_size_nested)
4750 data->max_size_nested = len;
4751 } else {
4752 data->bytes_alloc += event_len;
4753 data->bytes_written += len;
4754 data->events++;
4755 if (!data->min_size || len < data->min_size)
4756 data->min_size = len;
4757 if (len > data->max_size)
4758 data->max_size = len;
4759 }
4760
4761 out:
4762 ring_buffer_unlock_commit(data->buffer, event);
4763
4764 return 0;
4765 }
4766
4767 static __init int rb_test(void *arg)
4768 {
4769 struct rb_test_data *data = arg;
4770
4771 while (!kthread_should_stop()) {
4772 rb_write_something(data, false);
4773 data->cnt++;
4774
4775 set_current_state(TASK_INTERRUPTIBLE);
4776 /* Now sleep between a min of 100-300us and a max of 1ms */
4777 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4778 }
4779
4780 return 0;
4781 }
4782
4783 static __init void rb_ipi(void *ignore)
4784 {
4785 struct rb_test_data *data;
4786 int cpu = smp_processor_id();
4787
4788 data = &rb_data[cpu];
4789 rb_write_something(data, true);
4790 }
4791
4792 static __init int rb_hammer_test(void *arg)
4793 {
4794 while (!kthread_should_stop()) {
4795
4796 /* Send an IPI to all cpus to write data! */
4797 smp_call_function(rb_ipi, NULL, 1);
4798 /* No sleep, but for non-preempt, let others run */
4799 schedule();
4800 }
4801
4802 return 0;
4803 }
4804
4805 static __init int test_ringbuffer(void)
4806 {
4807 struct task_struct *rb_hammer;
4808 struct ring_buffer *buffer;
4809 int cpu;
4810 int ret = 0;
4811
4812 pr_info("Running ring buffer tests...\n");
4813
4814 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4815 if (WARN_ON(!buffer))
4816 return 0;
4817
4818 /* Disable buffer so that threads can't write to it yet */
4819 ring_buffer_record_off(buffer);
4820
4821 for_each_online_cpu(cpu) {
4822 rb_data[cpu].buffer = buffer;
4823 rb_data[cpu].cpu = cpu;
4824 rb_data[cpu].cnt = cpu;
4825 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4826 "rbtester/%d", cpu);
4827 if (WARN_ON(!rb_threads[cpu])) {
4828 pr_cont("FAILED\n");
4829 ret = -1;
4830 goto out_free;
4831 }
4832
4833 kthread_bind(rb_threads[cpu], cpu);
4834 wake_up_process(rb_threads[cpu]);
4835 }
4836
4837 /* Now create the rb hammer! */
4838 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4839 if (WARN_ON(!rb_hammer)) {
4840 pr_cont("FAILED\n");
4841 ret = -1;
4842 goto out_free;
4843 }
4844
4845 ring_buffer_record_on(buffer);
4846 /*
4847 * Show buffer is enabled before setting rb_test_started.
4848 * Yes there's a small race window where events could be
4849 * dropped and the thread won't catch it. But when a ring
4850 * buffer gets enabled, there will always be some kind of
4851 * delay before other CPUs see it. Thus, we don't care about
4852 * those dropped events. We care about events dropped after
4853 * the threads see that the buffer is active.
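 * The smp_wmb() below pairs with the smp_rmb() in rb_write_something()
 * so a writer that sees rb_test_started set will also see the buffer
 * as enabled.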
4854 */ 4855 smp_wmb(); 4856 rb_test_started = true; 4857 4858 set_current_state(TASK_INTERRUPTIBLE); 4859 /* Just run for 10 seconds */; 4860 schedule_timeout(10 * HZ); 4861 4862 kthread_stop(rb_hammer); 4863 4864 out_free: 4865 for_each_online_cpu(cpu) { 4866 if (!rb_threads[cpu]) 4867 break; 4868 kthread_stop(rb_threads[cpu]); 4869 } 4870 if (ret) { 4871 ring_buffer_free(buffer); 4872 return ret; 4873 } 4874 4875 /* Report! */ 4876 pr_info("finished\n"); 4877 for_each_online_cpu(cpu) { 4878 struct ring_buffer_event *event; 4879 struct rb_test_data *data = &rb_data[cpu]; 4880 struct rb_item *item; 4881 unsigned long total_events; 4882 unsigned long total_dropped; 4883 unsigned long total_written; 4884 unsigned long total_alloc; 4885 unsigned long total_read = 0; 4886 unsigned long total_size = 0; 4887 unsigned long total_len = 0; 4888 unsigned long total_lost = 0; 4889 unsigned long lost; 4890 int big_event_size; 4891 int small_event_size; 4892 4893 ret = -1; 4894 4895 total_events = data->events + data->events_nested; 4896 total_written = data->bytes_written + data->bytes_written_nested; 4897 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 4898 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 4899 4900 big_event_size = data->max_size + data->max_size_nested; 4901 small_event_size = data->min_size + data->min_size_nested; 4902 4903 pr_info("CPU %d:\n", cpu); 4904 pr_info(" events: %ld\n", total_events); 4905 pr_info(" dropped bytes: %ld\n", total_dropped); 4906 pr_info(" alloced bytes: %ld\n", total_alloc); 4907 pr_info(" written bytes: %ld\n", total_written); 4908 pr_info(" biggest event: %d\n", big_event_size); 4909 pr_info(" smallest event: %d\n", small_event_size); 4910 4911 if (RB_WARN_ON(buffer, total_dropped)) 4912 break; 4913 4914 ret = 0; 4915 4916 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 4917 total_lost += lost; 4918 item = ring_buffer_event_data(event); 4919 total_len += ring_buffer_event_length(event); 4920 total_size += item->size + sizeof(struct rb_item); 4921 if (memcmp(&item->str[0], rb_string, item->size) != 0) { 4922 pr_info("FAILED!\n"); 4923 pr_info("buffer had: %.*s\n", item->size, item->str); 4924 pr_info("expected: %.*s\n", item->size, rb_string); 4925 RB_WARN_ON(buffer, 1); 4926 ret = -1; 4927 break; 4928 } 4929 total_read++; 4930 } 4931 if (ret) 4932 break; 4933 4934 ret = -1; 4935 4936 pr_info(" read events: %ld\n", total_read); 4937 pr_info(" lost events: %ld\n", total_lost); 4938 pr_info(" total events: %ld\n", total_lost + total_read); 4939 pr_info(" recorded len bytes: %ld\n", total_len); 4940 pr_info(" recorded size bytes: %ld\n", total_size); 4941 if (total_lost) 4942 pr_info(" With dropped events, record len and size may not match\n" 4943 " alloced and written from above\n"); 4944 if (!total_lost) { 4945 if (RB_WARN_ON(buffer, total_len != total_alloc || 4946 total_size != total_written)) 4947 break; 4948 } 4949 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 4950 break; 4951 4952 ret = 0; 4953 } 4954 if (!ret) 4955 pr_info("Ring buffer PASSED!\n"); 4956 4957 ring_buffer_free(buffer); 4958 return 0; 4959 } 4960 4961 late_initcall(test_ringbuffer); 4962 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 4963
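/*
 * A minimal sketch of the write side of this API, as exercised by the
 * startup test above (struct my_event is illustrative, not part of
 * this file):
 *
 *	struct ring_buffer_event *event;
 *	struct my_event *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */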