// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                               |
 *      |                               |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
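
/*
 * Editor's illustrative note (not part of the original source): with the
 * compressed entry header described above, a small data event encodes its
 * payload length directly in type_len. For example, an event with
 * type_len == 3 describes 3 * RB_ALIGNMENT = 12 bytes of payload, and
 * rb_event_data_length() returns 12 + RB_EVNT_HDR_SIZE. When type_len is 0,
 * the length is held in array[0] and the payload starts at array[1] instead.
 */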
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data payload of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data payload of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)		\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		write;		/* index for next write */
	unsigned	read;		/* index for next read */
	local_t		entries;	/* entries on this page */
	unsigned long	real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
	wait_queue_head_t	full_waiters;
	long			wait_index;
	bool			waiters_pending;
	bool			full_waiters_pending;
	bool			wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time stamp
 * are the following:
 *
 *  - Reads may fail if it interrupted a modification of the time stamp.
 *      It will succeed if it did not interrupt another write even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half holds a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 *  top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top, msb or bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	return local_try_cmpxchg(l, &expect, set);
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}
#endif
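
/*
 * Editor's illustrative note (not part of the original source): as a worked
 * example of the 32 bit scheme above, rb_time_split() on val = (1ULL << 40)
 * yields bottom = 0, top = 1 << 10 and msb = 0. rb_time_val_cnt() then packs
 * the 2 bit update counter into bits 30-31 of each half, and __rb_time_read()
 * only accepts the value when the counters of top, bottom and msb all match.
 */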
/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);
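
/*
 * Editor's illustrative note (not part of the original source): for example,
 * if the previous full time stamp had bit 59 set (save_ts & TS_MSB != 0),
 * rb_fix_abs_ts() ORs that bit back into the 59 bit absolute stamp; if the
 * result is still below save_ts, the lower 59 bits must have wrapped since
 * the save, so 1ULL << 59 is added to carry into the restored MSBs.
 */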
/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, but really neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
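
/*
 * Editor's illustrative note (not part of the original source): full_hit()
 * above is a simple percentage check. With nr_pages = 10 and full = 50,
 * dirty * 100 > 50 * 10 first holds when 6 of the 10 pages have content, so
 * a waiter asking for 50% is woken only once more than half the buffer is
 * dirty.
 */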
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * In the case that a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on this.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
		if (!cpu_buffer->shortest_full ||
		    cpu_buffer->shortest_full > full)
			cpu_buffer->shortest_full = full;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */
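
/*
 * Editor's illustrative note (not part of the original source): because
 * buffer pages are cache-line aligned, the two least significant bits of a
 * ->next pointer are normally zero, so a pointer value such as 0x...40 can
 * carry RB_PAGE_HEAD (0x...41) or RB_PAGE_UPDATE (0x...42) in those bits;
 * rb_list_head() below masks RB_FLAG_MASK off before the pointer is
 * dereferenced.
 */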
#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static bool rb_head_page_replace(struct buffer_page *old,
				 struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			   struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return;
	}
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
		long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * A user thread may allocate too much even though si_mem_available()
	 * reported there was enough memory. Make sure the OOM killer kills
	 * this thread. This can happen even with RETRY_MAYFAIL because
	 * another task may be doing an allocation after this task has taken
	 * all memory. This is the task the OOM killer needs to take out
	 * during this loop, even if it was triggered by an allocation
	 * somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	irq_work_sync(&cpu_buffer->irq_work.work);

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	free_page((unsigned long)cpu_buffer->free_page);

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	irq_work_sync(&buffer->irq_work.work);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static bool
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * The tail page might be on the reader page; in that case remove
	 * the next page from the ring buffer instead.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}
	/* Read iterators need to reset themselves when some pages removed */
	cpu_buffer->pages_removed += nr_removed;

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
1930 * Make sure that we have head_bit value preserved for the 1931 * next page 1932 */ 1933 tail_page->next = (struct list_head *)((unsigned long)next_page | 1934 head_bit); 1935 next_page = rb_list_head(next_page); 1936 next_page->prev = tail_page; 1937 1938 /* make sure pages points to a valid page in the ring buffer */ 1939 cpu_buffer->pages = next_page; 1940 1941 /* update head page */ 1942 if (head_bit) 1943 cpu_buffer->head_page = list_entry(next_page, 1944 struct buffer_page, list); 1945 1946 /* pages are removed, resume tracing and then free the pages */ 1947 atomic_dec(&cpu_buffer->record_disabled); 1948 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1949 1950 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1951 1952 /* last buffer page to remove */ 1953 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1954 list); 1955 tmp_iter_page = first_page; 1956 1957 do { 1958 cond_resched(); 1959 1960 to_remove_page = tmp_iter_page; 1961 rb_inc_page(&tmp_iter_page); 1962 1963 /* update the counters */ 1964 page_entries = rb_page_entries(to_remove_page); 1965 if (page_entries) { 1966 /* 1967 * If something was added to this page, it was full 1968 * since it is not the tail page. So we deduct the 1969 * bytes consumed in ring buffer from here. 1970 * Increment overrun to account for the lost events. 1971 */ 1972 local_add(page_entries, &cpu_buffer->overrun); 1973 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); 1974 local_inc(&cpu_buffer->pages_lost); 1975 } 1976 1977 /* 1978 * We have already removed references to this list item, just 1979 * free up the buffer_page and its page 1980 */ 1981 free_buffer_page(to_remove_page); 1982 nr_removed--; 1983 1984 } while (to_remove_page != last_page); 1985 1986 RB_WARN_ON(cpu_buffer, nr_removed); 1987 1988 return nr_removed == 0; 1989 } 1990 1991 static bool 1992 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 1993 { 1994 struct list_head *pages = &cpu_buffer->new_pages; 1995 unsigned long flags; 1996 bool success; 1997 int retries; 1998 1999 /* Can be called at early boot up, where interrupts must not been enabled */ 2000 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2001 /* 2002 * We are holding the reader lock, so the reader page won't be swapped 2003 * in the ring buffer. Now we are racing with the writer trying to 2004 * move head page and the tail page. 2005 * We are going to adapt the reader page update process where: 2006 * 1. We first splice the start and end of list of new pages between 2007 * the head page and its previous page. 2008 * 2. We cmpxchg the prev_page->next to point from head page to the 2009 * start of new pages list. 2010 * 3. Finally, we update the head->prev to the end of new list. 2011 * 2012 * We will try this process 10 times, to make sure that we don't keep 2013 * spinning. 
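 *
 * Schematically (P = the page before head, H = the head page, whose
 * pointer carries the RB_PAGE_HEAD flag, N1..Nk = the new pages):
 *
 *	P -> H		becomes		P -> N1 -> ... -> Nk -> H
 *
 * where only step 2, the cmpxchg on P->next, makes the new pages
 * visible to a racing writer.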
2014 */ 2015 retries = 10; 2016 success = false; 2017 while (retries--) { 2018 struct list_head *head_page, *prev_page; 2019 struct list_head *last_page, *first_page; 2020 struct list_head *head_page_with_bit; 2021 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); 2022 2023 if (!hpage) 2024 break; 2025 head_page = &hpage->list; 2026 prev_page = head_page->prev; 2027 2028 first_page = pages->next; 2029 last_page = pages->prev; 2030 2031 head_page_with_bit = (struct list_head *) 2032 ((unsigned long)head_page | RB_PAGE_HEAD); 2033 2034 last_page->next = head_page_with_bit; 2035 first_page->prev = prev_page; 2036 2037 /* caution: head_page_with_bit gets updated on cmpxchg failure */ 2038 if (try_cmpxchg(&prev_page->next, 2039 &head_page_with_bit, first_page)) { 2040 /* 2041 * yay, we replaced the page pointer to our new list, 2042 * now, we just have to update to head page's prev 2043 * pointer to point to end of list 2044 */ 2045 head_page->prev = last_page; 2046 success = true; 2047 break; 2048 } 2049 } 2050 2051 if (success) 2052 INIT_LIST_HEAD(pages); 2053 /* 2054 * If we weren't successful in adding in new pages, warn and stop 2055 * tracing 2056 */ 2057 RB_WARN_ON(cpu_buffer, !success); 2058 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2059 2060 /* free pages if they weren't inserted */ 2061 if (!success) { 2062 struct buffer_page *bpage, *tmp; 2063 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2064 list) { 2065 list_del_init(&bpage->list); 2066 free_buffer_page(bpage); 2067 } 2068 } 2069 return success; 2070 } 2071 2072 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2073 { 2074 bool success; 2075 2076 if (cpu_buffer->nr_pages_to_update > 0) 2077 success = rb_insert_pages(cpu_buffer); 2078 else 2079 success = rb_remove_pages(cpu_buffer, 2080 -cpu_buffer->nr_pages_to_update); 2081 2082 if (success) 2083 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2084 } 2085 2086 static void update_pages_handler(struct work_struct *work) 2087 { 2088 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2089 struct ring_buffer_per_cpu, update_pages_work); 2090 rb_update_pages(cpu_buffer); 2091 complete(&cpu_buffer->update_done); 2092 } 2093 2094 /** 2095 * ring_buffer_resize - resize the ring buffer 2096 * @buffer: the buffer to resize. 2097 * @size: the new size. 2098 * @cpu_id: the cpu buffer to resize 2099 * 2100 * Minimum size is 2 * BUF_PAGE_SIZE. 2101 * 2102 * Returns 0 on success and < 0 on failure. 2103 */ 2104 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2105 int cpu_id) 2106 { 2107 struct ring_buffer_per_cpu *cpu_buffer; 2108 unsigned long nr_pages; 2109 int cpu, err; 2110 2111 /* 2112 * Always succeed at resizing a non-existent buffer: 2113 */ 2114 if (!buffer) 2115 return 0; 2116 2117 /* Make sure the requested buffer exists */ 2118 if (cpu_id != RING_BUFFER_ALL_CPUS && 2119 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2120 return 0; 2121 2122 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2123 2124 /* we need a minimum of two pages */ 2125 if (nr_pages < 2) 2126 nr_pages = 2; 2127 2128 /* prevent another thread from changing buffer sizes */ 2129 mutex_lock(&buffer->mutex); 2130 atomic_inc(&buffer->resizing); 2131 2132 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2133 /* 2134 * Don't succeed if resizing is disabled, as a reader might be 2135 * manipulating the ring buffer and is expecting a sane state while 2136 * this is true. 
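 * (resize_disabled is raised by code that has pinned the buffer, such
 * as a reader preparing an iterator or a per-CPU reset in progress, so
 * bail out with -EBUSY rather than pulling pages out from under it.)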
2137 */ 2138 for_each_buffer_cpu(buffer, cpu) { 2139 cpu_buffer = buffer->buffers[cpu]; 2140 if (atomic_read(&cpu_buffer->resize_disabled)) { 2141 err = -EBUSY; 2142 goto out_err_unlock; 2143 } 2144 } 2145 2146 /* calculate the pages to update */ 2147 for_each_buffer_cpu(buffer, cpu) { 2148 cpu_buffer = buffer->buffers[cpu]; 2149 2150 cpu_buffer->nr_pages_to_update = nr_pages - 2151 cpu_buffer->nr_pages; 2152 /* 2153 * nothing more to do for removing pages or no update 2154 */ 2155 if (cpu_buffer->nr_pages_to_update <= 0) 2156 continue; 2157 /* 2158 * to add pages, make sure all new pages can be 2159 * allocated without receiving ENOMEM 2160 */ 2161 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2162 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2163 &cpu_buffer->new_pages)) { 2164 /* not enough memory for new pages */ 2165 err = -ENOMEM; 2166 goto out_err; 2167 } 2168 2169 cond_resched(); 2170 } 2171 2172 cpus_read_lock(); 2173 /* 2174 * Fire off all the required work handlers 2175 * We can't schedule on offline CPUs, but it's not necessary 2176 * since we can change their buffer sizes without any race. 2177 */ 2178 for_each_buffer_cpu(buffer, cpu) { 2179 cpu_buffer = buffer->buffers[cpu]; 2180 if (!cpu_buffer->nr_pages_to_update) 2181 continue; 2182 2183 /* Can't run something on an offline CPU. */ 2184 if (!cpu_online(cpu)) { 2185 rb_update_pages(cpu_buffer); 2186 cpu_buffer->nr_pages_to_update = 0; 2187 } else { 2188 /* Run directly if possible. */ 2189 migrate_disable(); 2190 if (cpu != smp_processor_id()) { 2191 migrate_enable(); 2192 schedule_work_on(cpu, 2193 &cpu_buffer->update_pages_work); 2194 } else { 2195 update_pages_handler(&cpu_buffer->update_pages_work); 2196 migrate_enable(); 2197 } 2198 } 2199 } 2200 2201 /* wait for all the updates to complete */ 2202 for_each_buffer_cpu(buffer, cpu) { 2203 cpu_buffer = buffer->buffers[cpu]; 2204 if (!cpu_buffer->nr_pages_to_update) 2205 continue; 2206 2207 if (cpu_online(cpu)) 2208 wait_for_completion(&cpu_buffer->update_done); 2209 cpu_buffer->nr_pages_to_update = 0; 2210 } 2211 2212 cpus_read_unlock(); 2213 } else { 2214 cpu_buffer = buffer->buffers[cpu_id]; 2215 2216 if (nr_pages == cpu_buffer->nr_pages) 2217 goto out; 2218 2219 /* 2220 * Don't succeed if resizing is disabled, as a reader might be 2221 * manipulating the ring buffer and is expecting a sane state while 2222 * this is true. 2223 */ 2224 if (atomic_read(&cpu_buffer->resize_disabled)) { 2225 err = -EBUSY; 2226 goto out_err_unlock; 2227 } 2228 2229 cpu_buffer->nr_pages_to_update = nr_pages - 2230 cpu_buffer->nr_pages; 2231 2232 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2233 if (cpu_buffer->nr_pages_to_update > 0 && 2234 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2235 &cpu_buffer->new_pages)) { 2236 err = -ENOMEM; 2237 goto out_err; 2238 } 2239 2240 cpus_read_lock(); 2241 2242 /* Can't run something on an offline CPU. */ 2243 if (!cpu_online(cpu_id)) 2244 rb_update_pages(cpu_buffer); 2245 else { 2246 /* Run directly if possible. 
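 * migrate_disable() keeps us on the current CPU so that the
 * smp_processor_id() comparison below cannot be invalidated by a
 * migration before we decide whether to update in place or to
 * schedule the work on the target CPU.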
*/ 2247 migrate_disable(); 2248 if (cpu_id == smp_processor_id()) { 2249 rb_update_pages(cpu_buffer); 2250 migrate_enable(); 2251 } else { 2252 migrate_enable(); 2253 schedule_work_on(cpu_id, 2254 &cpu_buffer->update_pages_work); 2255 wait_for_completion(&cpu_buffer->update_done); 2256 } 2257 } 2258 2259 cpu_buffer->nr_pages_to_update = 0; 2260 cpus_read_unlock(); 2261 } 2262 2263 out: 2264 /* 2265 * The ring buffer resize can happen with the ring buffer 2266 * enabled, so that the update disturbs the tracing as little 2267 * as possible. But if the buffer is disabled, we do not need 2268 * to worry about that, and we can take the time to verify 2269 * that the buffer is not corrupt. 2270 */ 2271 if (atomic_read(&buffer->record_disabled)) { 2272 atomic_inc(&buffer->record_disabled); 2273 /* 2274 * Even though the buffer was disabled, we must make sure 2275 * that it is truly disabled before calling rb_check_pages. 2276 * There could have been a race between checking 2277 * record_disable and incrementing it. 2278 */ 2279 synchronize_rcu(); 2280 for_each_buffer_cpu(buffer, cpu) { 2281 cpu_buffer = buffer->buffers[cpu]; 2282 rb_check_pages(cpu_buffer); 2283 } 2284 atomic_dec(&buffer->record_disabled); 2285 } 2286 2287 atomic_dec(&buffer->resizing); 2288 mutex_unlock(&buffer->mutex); 2289 return 0; 2290 2291 out_err: 2292 for_each_buffer_cpu(buffer, cpu) { 2293 struct buffer_page *bpage, *tmp; 2294 2295 cpu_buffer = buffer->buffers[cpu]; 2296 cpu_buffer->nr_pages_to_update = 0; 2297 2298 if (list_empty(&cpu_buffer->new_pages)) 2299 continue; 2300 2301 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2302 list) { 2303 list_del_init(&bpage->list); 2304 free_buffer_page(bpage); 2305 } 2306 } 2307 out_err_unlock: 2308 atomic_dec(&buffer->resizing); 2309 mutex_unlock(&buffer->mutex); 2310 return err; 2311 } 2312 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2313 2314 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2315 { 2316 mutex_lock(&buffer->mutex); 2317 if (val) 2318 buffer->flags |= RB_FL_OVERWRITE; 2319 else 2320 buffer->flags &= ~RB_FL_OVERWRITE; 2321 mutex_unlock(&buffer->mutex); 2322 } 2323 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2324 2325 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2326 { 2327 return bpage->page->data + index; 2328 } 2329 2330 static __always_inline struct ring_buffer_event * 2331 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2332 { 2333 return __rb_page_index(cpu_buffer->reader_page, 2334 cpu_buffer->reader_page->read); 2335 } 2336 2337 static struct ring_buffer_event * 2338 rb_iter_head_event(struct ring_buffer_iter *iter) 2339 { 2340 struct ring_buffer_event *event; 2341 struct buffer_page *iter_head_page = iter->head_page; 2342 unsigned long commit; 2343 unsigned length; 2344 2345 if (iter->head != iter->next_event) 2346 return iter->event; 2347 2348 /* 2349 * When the writer goes across pages, it issues a cmpxchg which 2350 * is a mb(), which will synchronize with the rmb here. 2351 * (see rb_tail_page_update() and __rb_reserve_next()) 2352 */ 2353 commit = rb_page_commit(iter_head_page); 2354 smp_rmb(); 2355 2356 /* An event needs to be at least 8 bytes in size */ 2357 if (iter->head > commit - 8) 2358 goto reset; 2359 2360 event = __rb_page_index(iter_head_page, iter->head); 2361 length = rb_event_length(event); 2362 2363 /* 2364 * READ_ONCE() doesn't work on functions and we don't want the 2365 * compiler doing any crazy optimizations with length. 
2366 */ 2367 barrier(); 2368 2369 if ((iter->head + length) > commit || length > BUF_PAGE_SIZE) 2370 /* Writer corrupted the read? */ 2371 goto reset; 2372 2373 memcpy(iter->event, event, length); 2374 /* 2375 * If the page stamp is still the same after this rmb() then the 2376 * event was safely copied without the writer entering the page. 2377 */ 2378 smp_rmb(); 2379 2380 /* Make sure the page didn't change since we read this */ 2381 if (iter->page_stamp != iter_head_page->page->time_stamp || 2382 commit > rb_page_commit(iter_head_page)) 2383 goto reset; 2384 2385 iter->next_event = iter->head + length; 2386 return iter->event; 2387 reset: 2388 /* Reset to the beginning */ 2389 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2390 iter->head = 0; 2391 iter->next_event = 0; 2392 iter->missed_events = 1; 2393 return NULL; 2394 } 2395 2396 /* Size is determined by what has been committed */ 2397 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2398 { 2399 return rb_page_commit(bpage); 2400 } 2401 2402 static __always_inline unsigned 2403 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2404 { 2405 return rb_page_commit(cpu_buffer->commit_page); 2406 } 2407 2408 static __always_inline unsigned 2409 rb_event_index(struct ring_buffer_event *event) 2410 { 2411 unsigned long addr = (unsigned long)event; 2412 2413 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2414 } 2415 2416 static void rb_inc_iter(struct ring_buffer_iter *iter) 2417 { 2418 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2419 2420 /* 2421 * The iterator could be on the reader page (it starts there). 2422 * But the head could have moved, since the reader was 2423 * found. Check for this case and assign the iterator 2424 * to the head page instead of next. 2425 */ 2426 if (iter->head_page == cpu_buffer->reader_page) 2427 iter->head_page = rb_set_head_page(cpu_buffer); 2428 else 2429 rb_inc_page(&iter->head_page); 2430 2431 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2432 iter->head = 0; 2433 iter->next_event = 0; 2434 } 2435 2436 /* 2437 * rb_handle_head_page - writer hit the head page 2438 * 2439 * Returns: +1 to retry page 2440 * 0 to continue 2441 * -1 on error 2442 */ 2443 static int 2444 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2445 struct buffer_page *tail_page, 2446 struct buffer_page *next_page) 2447 { 2448 struct buffer_page *new_head; 2449 int entries; 2450 int type; 2451 int ret; 2452 2453 entries = rb_page_entries(next_page); 2454 2455 /* 2456 * The hard part is here. We need to move the head 2457 * forward, and protect against both readers on 2458 * other CPUs and writers coming in via interrupts. 2459 */ 2460 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2461 RB_PAGE_HEAD); 2462 2463 /* 2464 * type can be one of four: 2465 * NORMAL - an interrupt already moved it for us 2466 * HEAD - we are the first to get here. 2467 * UPDATE - we are the interrupt interrupting 2468 * a current move. 2469 * MOVED - a reader on another CPU moved the next 2470 * pointer to its reader page. Give up 2471 * and try again. 2472 */ 2473 2474 switch (type) { 2475 case RB_PAGE_HEAD: 2476 /* 2477 * We changed the head to UPDATE, thus 2478 * it is our responsibility to update 2479 * the counters. 
2480 */ 2481 local_add(entries, &cpu_buffer->overrun); 2482 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); 2483 local_inc(&cpu_buffer->pages_lost); 2484 2485 /* 2486 * The entries will be zeroed out when we move the 2487 * tail page. 2488 */ 2489 2490 /* still more to do */ 2491 break; 2492 2493 case RB_PAGE_UPDATE: 2494 /* 2495 * This is an interrupt that interrupt the 2496 * previous update. Still more to do. 2497 */ 2498 break; 2499 case RB_PAGE_NORMAL: 2500 /* 2501 * An interrupt came in before the update 2502 * and processed this for us. 2503 * Nothing left to do. 2504 */ 2505 return 1; 2506 case RB_PAGE_MOVED: 2507 /* 2508 * The reader is on another CPU and just did 2509 * a swap with our next_page. 2510 * Try again. 2511 */ 2512 return 1; 2513 default: 2514 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2515 return -1; 2516 } 2517 2518 /* 2519 * Now that we are here, the old head pointer is 2520 * set to UPDATE. This will keep the reader from 2521 * swapping the head page with the reader page. 2522 * The reader (on another CPU) will spin till 2523 * we are finished. 2524 * 2525 * We just need to protect against interrupts 2526 * doing the job. We will set the next pointer 2527 * to HEAD. After that, we set the old pointer 2528 * to NORMAL, but only if it was HEAD before. 2529 * otherwise we are an interrupt, and only 2530 * want the outer most commit to reset it. 2531 */ 2532 new_head = next_page; 2533 rb_inc_page(&new_head); 2534 2535 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2536 RB_PAGE_NORMAL); 2537 2538 /* 2539 * Valid returns are: 2540 * HEAD - an interrupt came in and already set it. 2541 * NORMAL - One of two things: 2542 * 1) We really set it. 2543 * 2) A bunch of interrupts came in and moved 2544 * the page forward again. 2545 */ 2546 switch (ret) { 2547 case RB_PAGE_HEAD: 2548 case RB_PAGE_NORMAL: 2549 /* OK */ 2550 break; 2551 default: 2552 RB_WARN_ON(cpu_buffer, 1); 2553 return -1; 2554 } 2555 2556 /* 2557 * It is possible that an interrupt came in, 2558 * set the head up, then more interrupts came in 2559 * and moved it again. When we get back here, 2560 * the page would have been set to NORMAL but we 2561 * just set it back to HEAD. 2562 * 2563 * How do you detect this? Well, if that happened 2564 * the tail page would have moved. 2565 */ 2566 if (ret == RB_PAGE_NORMAL) { 2567 struct buffer_page *buffer_tail_page; 2568 2569 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2570 /* 2571 * If the tail had moved passed next, then we need 2572 * to reset the pointer. 2573 */ 2574 if (buffer_tail_page != tail_page && 2575 buffer_tail_page != next_page) 2576 rb_head_page_set_normal(cpu_buffer, new_head, 2577 next_page, 2578 RB_PAGE_HEAD); 2579 } 2580 2581 /* 2582 * If this was the outer most commit (the one that 2583 * changed the original pointer from HEAD to UPDATE), 2584 * then it is up to us to reset it to NORMAL. 
2585 */ 2586 if (type == RB_PAGE_HEAD) { 2587 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2588 tail_page, 2589 RB_PAGE_UPDATE); 2590 if (RB_WARN_ON(cpu_buffer, 2591 ret != RB_PAGE_UPDATE)) 2592 return -1; 2593 } 2594 2595 return 0; 2596 } 2597 2598 static inline void 2599 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2600 unsigned long tail, struct rb_event_info *info) 2601 { 2602 struct buffer_page *tail_page = info->tail_page; 2603 struct ring_buffer_event *event; 2604 unsigned long length = info->length; 2605 2606 /* 2607 * Only the event that crossed the page boundary 2608 * must fill the old tail_page with padding. 2609 */ 2610 if (tail >= BUF_PAGE_SIZE) { 2611 /* 2612 * If the page was filled, then we still need 2613 * to update the real_end. Reset it to zero 2614 * and the reader will ignore it. 2615 */ 2616 if (tail == BUF_PAGE_SIZE) 2617 tail_page->real_end = 0; 2618 2619 local_sub(length, &tail_page->write); 2620 return; 2621 } 2622 2623 event = __rb_page_index(tail_page, tail); 2624 2625 /* 2626 * Save the original length to the meta data. 2627 * This will be used by the reader to add lost event 2628 * counter. 2629 */ 2630 tail_page->real_end = tail; 2631 2632 /* 2633 * If this event is bigger than the minimum size, then 2634 * we need to be careful that we don't subtract the 2635 * write counter enough to allow another writer to slip 2636 * in on this page. 2637 * We put in a discarded commit instead, to make sure 2638 * that this space is not used again, and this space will 2639 * not be accounted into 'entries_bytes'. 2640 * 2641 * If we are less than the minimum size, we don't need to 2642 * worry about it. 2643 */ 2644 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2645 /* No room for any events */ 2646 2647 /* Mark the rest of the page with padding */ 2648 rb_event_set_padding(event); 2649 2650 /* Make sure the padding is visible before the write update */ 2651 smp_wmb(); 2652 2653 /* Set the write back to the previous setting */ 2654 local_sub(length, &tail_page->write); 2655 return; 2656 } 2657 2658 /* Put in a discarded event */ 2659 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2660 event->type_len = RINGBUF_TYPE_PADDING; 2661 /* time delta must be non zero */ 2662 event->time_delta = 1; 2663 2664 /* account for padding bytes */ 2665 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2666 2667 /* Make sure the padding is visible before the tail_page->write update */ 2668 smp_wmb(); 2669 2670 /* Set write to end of buffer */ 2671 length = (tail + length) - BUF_PAGE_SIZE; 2672 local_sub(length, &tail_page->write); 2673 } 2674 2675 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2676 2677 /* 2678 * This is the slow path, force gcc not to inline it. 2679 */ 2680 static noinline struct ring_buffer_event * 2681 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2682 unsigned long tail, struct rb_event_info *info) 2683 { 2684 struct buffer_page *tail_page = info->tail_page; 2685 struct buffer_page *commit_page = cpu_buffer->commit_page; 2686 struct trace_buffer *buffer = cpu_buffer->buffer; 2687 struct buffer_page *next_page; 2688 int ret; 2689 2690 next_page = tail_page; 2691 2692 rb_inc_page(&next_page); 2693 2694 /* 2695 * If for some reason, we had an interrupt storm that made 2696 * it all the way around the buffer, bail, and warn 2697 * about it. 
2698 */ 2699 if (unlikely(next_page == commit_page)) { 2700 local_inc(&cpu_buffer->commit_overrun); 2701 goto out_reset; 2702 } 2703 2704 /* 2705 * This is where the fun begins! 2706 * 2707 * We are fighting against races between a reader that 2708 * could be on another CPU trying to swap its reader 2709 * page with the buffer head. 2710 * 2711 * We are also fighting against interrupts coming in and 2712 * moving the head or tail on us as well. 2713 * 2714 * If the next page is the head page then we have filled 2715 * the buffer, unless the commit page is still on the 2716 * reader page. 2717 */ 2718 if (rb_is_head_page(next_page, &tail_page->list)) { 2719 2720 /* 2721 * If the commit is not on the reader page, then 2722 * move the header page. 2723 */ 2724 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2725 /* 2726 * If we are not in overwrite mode, 2727 * this is easy, just stop here. 2728 */ 2729 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2730 local_inc(&cpu_buffer->dropped_events); 2731 goto out_reset; 2732 } 2733 2734 ret = rb_handle_head_page(cpu_buffer, 2735 tail_page, 2736 next_page); 2737 if (ret < 0) 2738 goto out_reset; 2739 if (ret) 2740 goto out_again; 2741 } else { 2742 /* 2743 * We need to be careful here too. The 2744 * commit page could still be on the reader 2745 * page. We could have a small buffer, and 2746 * have filled up the buffer with events 2747 * from interrupts and such, and wrapped. 2748 * 2749 * Note, if the tail page is also on the 2750 * reader_page, we let it move out. 2751 */ 2752 if (unlikely((cpu_buffer->commit_page != 2753 cpu_buffer->tail_page) && 2754 (cpu_buffer->commit_page == 2755 cpu_buffer->reader_page))) { 2756 local_inc(&cpu_buffer->commit_overrun); 2757 goto out_reset; 2758 } 2759 } 2760 } 2761 2762 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2763 2764 out_again: 2765 2766 rb_reset_tail(cpu_buffer, tail, info); 2767 2768 /* Commit what we have for now. */ 2769 rb_end_commit(cpu_buffer); 2770 /* rb_end_commit() decs committing */ 2771 local_inc(&cpu_buffer->committing); 2772 2773 /* fail and let the caller try again */ 2774 return ERR_PTR(-EAGAIN); 2775 2776 out_reset: 2777 /* reset write */ 2778 rb_reset_tail(cpu_buffer, tail, info); 2779 2780 return NULL; 2781 } 2782 2783 /* Slow path */ 2784 static struct ring_buffer_event * 2785 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2786 { 2787 if (abs) 2788 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2789 else 2790 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2791 2792 /* Not the first event on the page, or not delta? */ 2793 if (abs || rb_event_index(event)) { 2794 event->time_delta = delta & TS_MASK; 2795 event->array[0] = delta >> TS_SHIFT; 2796 } else { 2797 /* nope, just zero it */ 2798 event->time_delta = 0; 2799 event->array[0] = 0; 2800 } 2801 2802 return skip_time_extend(event); 2803 } 2804 2805 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2806 static inline bool sched_clock_stable(void) 2807 { 2808 return true; 2809 } 2810 #endif 2811 2812 static void 2813 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2814 struct rb_event_info *info) 2815 { 2816 u64 write_stamp; 2817 2818 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2819 (unsigned long long)info->delta, 2820 (unsigned long long)info->ts, 2821 (unsigned long long)info->before, 2822 (unsigned long long)info->after, 2823 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2824 sched_clock_stable() ? 
"" : 2825 "If you just came from a suspend/resume,\n" 2826 "please switch to the trace global clock:\n" 2827 " echo global > /sys/kernel/tracing/trace_clock\n" 2828 "or add trace_clock=global to the kernel command line\n"); 2829 } 2830 2831 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2832 struct ring_buffer_event **event, 2833 struct rb_event_info *info, 2834 u64 *delta, 2835 unsigned int *length) 2836 { 2837 bool abs = info->add_timestamp & 2838 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2839 2840 if (unlikely(info->delta > (1ULL << 59))) { 2841 /* 2842 * Some timers can use more than 59 bits, and when a timestamp 2843 * is added to the buffer, it will lose those bits. 2844 */ 2845 if (abs && (info->ts & TS_MSB)) { 2846 info->delta &= ABS_TS_MASK; 2847 2848 /* did the clock go backwards */ 2849 } else if (info->before == info->after && info->before > info->ts) { 2850 /* not interrupted */ 2851 static int once; 2852 2853 /* 2854 * This is possible with a recalibrating of the TSC. 2855 * Do not produce a call stack, but just report it. 2856 */ 2857 if (!once) { 2858 once++; 2859 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2860 info->before, info->ts); 2861 } 2862 } else 2863 rb_check_timestamp(cpu_buffer, info); 2864 if (!abs) 2865 info->delta = 0; 2866 } 2867 *event = rb_add_time_stamp(*event, info->delta, abs); 2868 *length -= RB_LEN_TIME_EXTEND; 2869 *delta = 0; 2870 } 2871 2872 /** 2873 * rb_update_event - update event type and data 2874 * @cpu_buffer: The per cpu buffer of the @event 2875 * @event: the event to update 2876 * @info: The info to update the @event with (contains length and delta) 2877 * 2878 * Update the type and data fields of the @event. The length 2879 * is the actual size that is written to the ring buffer, 2880 * and with this, we can determine what to place into the 2881 * data field. 2882 */ 2883 static void 2884 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2885 struct ring_buffer_event *event, 2886 struct rb_event_info *info) 2887 { 2888 unsigned length = info->length; 2889 u64 delta = info->delta; 2890 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2891 2892 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2893 cpu_buffer->event_stamp[nest] = info->ts; 2894 2895 /* 2896 * If we need to add a timestamp, then we 2897 * add it to the start of the reserved space. 2898 */ 2899 if (unlikely(info->add_timestamp)) 2900 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2901 2902 event->time_delta = delta; 2903 length -= RB_EVNT_HDR_SIZE; 2904 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2905 event->type_len = 0; 2906 event->array[0] = length; 2907 } else 2908 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2909 } 2910 2911 static unsigned rb_calculate_event_length(unsigned length) 2912 { 2913 struct ring_buffer_event event; /* Used only for sizeof array */ 2914 2915 /* zero length can cause confusions */ 2916 if (!length) 2917 length++; 2918 2919 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2920 length += sizeof(event.array[0]); 2921 2922 length += RB_EVNT_HDR_SIZE; 2923 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2924 2925 /* 2926 * In case the time delta is larger than the 27 bits for it 2927 * in the header, we need to add a timestamp. If another 2928 * event comes in when trying to discard this one to increase 2929 * the length, then the timestamp will be added in the allocated 2930 * space of this event. 
If length is bigger than the size needed 2931 * for the TIME_EXTEND, then padding has to be used. The event's 2932 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2933 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2934 * As length is a multiple of 4, we only need to worry if it 2935 * is 12 (RB_LEN_TIME_EXTEND + 4). 2936 */ 2937 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2938 length += RB_ALIGNMENT; 2939 2940 return length; 2941 } 2942 2943 static inline bool 2944 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2945 struct ring_buffer_event *event) 2946 { 2947 unsigned long new_index, old_index; 2948 struct buffer_page *bpage; 2949 unsigned long addr; 2950 2951 new_index = rb_event_index(event); 2952 old_index = new_index + rb_event_ts_length(event); 2953 addr = (unsigned long)event; 2954 addr &= PAGE_MASK; 2955 2956 bpage = READ_ONCE(cpu_buffer->tail_page); 2957 2958 /* 2959 * Make sure the tail_page is still the same and 2960 * the next write location is the end of this event 2961 */ 2962 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 2963 unsigned long write_mask = 2964 local_read(&bpage->write) & ~RB_WRITE_MASK; 2965 unsigned long event_length = rb_event_length(event); 2966 2967 /* 2968 * Make the before_stamp different from the write_stamp so that 2969 * the next event adds an absolute value and does not rely on 2970 * the saved write stamp, which is now going to 2971 * be bogus. 2972 * 2973 * By setting the before_stamp to zero, the next event 2974 * is not going to use the write_stamp and will instead 2975 * create an absolute timestamp. This means there's no 2976 * reason to update the write_stamp! 2977 */ 2978 rb_time_set(&cpu_buffer->before_stamp, 0); 2979 2980 /* 2981 * If an event were to come in now, it would see that the 2982 * write_stamp and the before_stamp are different, and assume 2983 * that this event just added itself before updating 2984 * the write stamp. The interrupting event will fix the 2985 * write stamp for us, and use an absolute timestamp. 2986 */ 2987 2988 /* 2989 * This is on the tail page. It is possible that 2990 * a write could come in and move the tail page 2991 * and write to the next page. That is fine 2992 * because we just shorten what is on this page. 2993 */ 2994 old_index += write_mask; 2995 new_index += write_mask; 2996 2997 /* caution: old_index gets updated on cmpxchg failure */ 2998 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { 2999 /* update counters */ 3000 local_sub(event_length, &cpu_buffer->entries_bytes); 3001 return true; 3002 } 3003 } 3004 3005 /* could not discard */ 3006 return false; 3007 } 3008 3009 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3010 { 3011 local_inc(&cpu_buffer->committing); 3012 local_inc(&cpu_buffer->commits); 3013 } 3014 3015 static __always_inline void 3016 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3017 { 3018 unsigned long max_count; 3019 3020 /* 3021 * We only race with interrupts and NMIs on this CPU. 3022 * If we own the commit event, then we can commit 3023 * all others that interrupted us, since the interruptions 3024 * are in stack format (they finish before they come 3025 * back to us). This allows us to do a simple loop to 3026 * assign the commit to the tail.
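 *
 * The max_count cap below (100 times the number of buffer pages) is
 * only a sanity check against a corrupted page list; hitting it
 * triggers RB_WARN_ON() instead of looping forever.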
3027 */ 3028 again: 3029 max_count = cpu_buffer->nr_pages * 100; 3030 3031 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3032 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3033 return; 3034 if (RB_WARN_ON(cpu_buffer, 3035 rb_is_reader_page(cpu_buffer->tail_page))) 3036 return; 3037 /* 3038 * No need for a memory barrier here, as the update 3039 * of the tail_page did it for this page. 3040 */ 3041 local_set(&cpu_buffer->commit_page->page->commit, 3042 rb_page_write(cpu_buffer->commit_page)); 3043 rb_inc_page(&cpu_buffer->commit_page); 3044 /* add barrier to keep gcc from optimizing too much */ 3045 barrier(); 3046 } 3047 while (rb_commit_index(cpu_buffer) != 3048 rb_page_write(cpu_buffer->commit_page)) { 3049 3050 /* Make sure the readers see the content of what is committed. */ 3051 smp_wmb(); 3052 local_set(&cpu_buffer->commit_page->page->commit, 3053 rb_page_write(cpu_buffer->commit_page)); 3054 RB_WARN_ON(cpu_buffer, 3055 local_read(&cpu_buffer->commit_page->page->commit) & 3056 ~RB_WRITE_MASK); 3057 barrier(); 3058 } 3059 3060 /* again, keep gcc from optimizing */ 3061 barrier(); 3062 3063 /* 3064 * If an interrupt came in just after the first while loop 3065 * and pushed the tail page forward, we will be left with 3066 * a dangling commit that will never go forward. 3067 */ 3068 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3069 goto again; 3070 } 3071 3072 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3073 { 3074 unsigned long commits; 3075 3076 if (RB_WARN_ON(cpu_buffer, 3077 !local_read(&cpu_buffer->committing))) 3078 return; 3079 3080 again: 3081 commits = local_read(&cpu_buffer->commits); 3082 /* synchronize with interrupts */ 3083 barrier(); 3084 if (local_read(&cpu_buffer->committing) == 1) 3085 rb_set_commit_to_write(cpu_buffer); 3086 3087 local_dec(&cpu_buffer->committing); 3088 3089 /* synchronize with interrupts */ 3090 barrier(); 3091 3092 /* 3093 * Need to account for interrupts coming in between the 3094 * updating of the commit page and the clearing of the 3095 * committing counter. 
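 *
 * If the commits counter moved while committing dropped to zero, an
 * interrupt slipped a commit in during that window; take the commit
 * back and go around again so its data is also pushed out to the
 * commit page.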
3096 */ 3097 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3098 !local_read(&cpu_buffer->committing)) { 3099 local_inc(&cpu_buffer->committing); 3100 goto again; 3101 } 3102 } 3103 3104 static inline void rb_event_discard(struct ring_buffer_event *event) 3105 { 3106 if (extended_time(event)) 3107 event = skip_time_extend(event); 3108 3109 /* array[0] holds the actual length for the discarded event */ 3110 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3111 event->type_len = RINGBUF_TYPE_PADDING; 3112 /* time delta must be non zero */ 3113 if (!event->time_delta) 3114 event->time_delta = 1; 3115 } 3116 3117 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3118 { 3119 local_inc(&cpu_buffer->entries); 3120 rb_end_commit(cpu_buffer); 3121 } 3122 3123 static __always_inline void 3124 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3125 { 3126 if (buffer->irq_work.waiters_pending) { 3127 buffer->irq_work.waiters_pending = false; 3128 /* irq_work_queue() supplies its own memory barriers */ 3129 irq_work_queue(&buffer->irq_work.work); 3130 } 3131 3132 if (cpu_buffer->irq_work.waiters_pending) { 3133 cpu_buffer->irq_work.waiters_pending = false; 3134 /* irq_work_queue() supplies its own memory barriers */ 3135 irq_work_queue(&cpu_buffer->irq_work.work); 3136 } 3137 3138 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3139 return; 3140 3141 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3142 return; 3143 3144 if (!cpu_buffer->irq_work.full_waiters_pending) 3145 return; 3146 3147 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3148 3149 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3150 return; 3151 3152 cpu_buffer->irq_work.wakeup_full = true; 3153 cpu_buffer->irq_work.full_waiters_pending = false; 3154 /* irq_work_queue() supplies its own memory barriers */ 3155 irq_work_queue(&cpu_buffer->irq_work.work); 3156 } 3157 3158 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3159 # define do_ring_buffer_record_recursion() \ 3160 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3161 #else 3162 # define do_ring_buffer_record_recursion() do { } while (0) 3163 #endif 3164 3165 /* 3166 * The lock and unlock are done within a preempt disable section. 3167 * The current_context per_cpu variable can only be modified 3168 * by the current task between lock and unlock. But it can 3169 * be modified more than once via an interrupt. To pass this 3170 * information from the lock to the unlock without having to 3171 * access the 'in_interrupt()' functions again (which do show 3172 * a bit of overhead in something as critical as function tracing), 3173 * we use a bitmask trick. 3174 * 3175 * bit 1 = NMI context 3176 * bit 2 = IRQ context 3177 * bit 3 = SoftIRQ context 3178 * bit 4 = normal context. 3179 * 3180 * This works because this is the order of contexts that can 3181 * preempt other contexts. A SoftIRQ never preempts an IRQ 3182 * context. 3183 * 3184 * When the context is determined, the corresponding bit is 3185 * checked and set (if it was set, then a recursion of that context 3186 * happened). 3187 * 3188 * On unlock, we need to clear this bit. To do so, just subtract 3189 * 1 from the current_context and AND it to itself.
3190 * 3191 * (binary) 3192 * 101 - 1 = 100 3193 * 101 & 100 = 100 (clearing bit zero) 3194 * 3195 * 1010 - 1 = 1001 3196 * 1010 & 1001 = 1000 (clearing bit 1) 3197 * 3198 * The least significant bit can be cleared this way, and it 3199 * just so happens that it is the same bit corresponding to 3200 * the current context. 3201 * 3202 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3203 * is set when a recursion is detected at the current context, and if 3204 * the TRANSITION bit is already set, it will fail the recursion. 3205 * This is needed because there's a lag between the changing of 3206 * interrupt context and updating the preempt count. In this case, 3207 * a false positive will be found. To handle this, one extra recursion 3208 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3209 * bit is already set, then it is considered a recursion and the function 3210 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3211 * 3212 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3213 * to be cleared. Even if it wasn't the context that set it. That is, 3214 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3215 * is called before preempt_count() is updated, since the check will 3216 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3217 * NMI then comes in, it will set the NMI bit, but when the NMI code 3218 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3219 * and leave the NMI bit set. But this is fine, because the interrupt 3220 * code that set the TRANSITION bit will then clear the NMI bit when it 3221 * calls trace_recursive_unlock(). If another NMI comes in, it will 3222 * set the TRANSITION bit and continue. 3223 * 3224 * Note: The TRANSITION bit only handles a single transition between context. 3225 */ 3226 3227 static __always_inline bool 3228 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3229 { 3230 unsigned int val = cpu_buffer->current_context; 3231 int bit = interrupt_context_level(); 3232 3233 bit = RB_CTX_NORMAL - bit; 3234 3235 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3236 /* 3237 * It is possible that this was called by transitioning 3238 * between interrupt context, and preempt_count() has not 3239 * been updated yet. In this case, use the TRANSITION bit. 3240 */ 3241 bit = RB_CTX_TRANSITION; 3242 if (val & (1 << (bit + cpu_buffer->nest))) { 3243 do_ring_buffer_record_recursion(); 3244 return true; 3245 } 3246 } 3247 3248 val |= (1 << (bit + cpu_buffer->nest)); 3249 cpu_buffer->current_context = val; 3250 3251 return false; 3252 } 3253 3254 static __always_inline void 3255 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3256 { 3257 cpu_buffer->current_context &= 3258 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3259 } 3260 3261 /* The recursive locking above uses 5 bits */ 3262 #define NESTED_BITS 5 3263 3264 /** 3265 * ring_buffer_nest_start - Allow to trace while nested 3266 * @buffer: The ring buffer to modify 3267 * 3268 * The ring buffer has a safety mechanism to prevent recursion. 3269 * But there may be a case where a trace needs to be done while 3270 * tracing something else. In this case, calling this function 3271 * will allow this function to nest within a currently active 3272 * ring_buffer_lock_reserve(). 
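 *
 * Rough sketch of the intended call order, assuming an outer
 * ring_buffer_lock_reserve() is already active (illustrative only):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, sizeof(data));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &data, sizeof(data));
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);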
3273 * 3274 * Call this function before calling another ring_buffer_lock_reserve() and 3275 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 3276 */ 3277 void ring_buffer_nest_start(struct trace_buffer *buffer) 3278 { 3279 struct ring_buffer_per_cpu *cpu_buffer; 3280 int cpu; 3281 3282 /* Enabled by ring_buffer_nest_end() */ 3283 preempt_disable_notrace(); 3284 cpu = raw_smp_processor_id(); 3285 cpu_buffer = buffer->buffers[cpu]; 3286 /* This is the shift value for the above recursive locking */ 3287 cpu_buffer->nest += NESTED_BITS; 3288 } 3289 3290 /** 3291 * ring_buffer_nest_end - Allow to trace while nested 3292 * @buffer: The ring buffer to modify 3293 * 3294 * Must be called after ring_buffer_nest_start() and after the 3295 * ring_buffer_unlock_commit(). 3296 */ 3297 void ring_buffer_nest_end(struct trace_buffer *buffer) 3298 { 3299 struct ring_buffer_per_cpu *cpu_buffer; 3300 int cpu; 3301 3302 /* disabled by ring_buffer_nest_start() */ 3303 cpu = raw_smp_processor_id(); 3304 cpu_buffer = buffer->buffers[cpu]; 3305 /* This is the shift value for the above recursive locking */ 3306 cpu_buffer->nest -= NESTED_BITS; 3307 preempt_enable_notrace(); 3308 } 3309 3310 /** 3311 * ring_buffer_unlock_commit - commit a reserved 3312 * @buffer: The buffer to commit to 3313 * 3314 * This commits the data to the ring buffer, and releases any locks held. 3315 * 3316 * Must be paired with ring_buffer_lock_reserve. 3317 */ 3318 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3319 { 3320 struct ring_buffer_per_cpu *cpu_buffer; 3321 int cpu = raw_smp_processor_id(); 3322 3323 cpu_buffer = buffer->buffers[cpu]; 3324 3325 rb_commit(cpu_buffer); 3326 3327 rb_wakeups(buffer, cpu_buffer); 3328 3329 trace_recursive_unlock(cpu_buffer); 3330 3331 preempt_enable_notrace(); 3332 3333 return 0; 3334 } 3335 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3336 3337 /* Special value to validate all deltas on a page. */ 3338 #define CHECK_FULL_PAGE 1L 3339 3340 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3341 static void dump_buffer_page(struct buffer_data_page *bpage, 3342 struct rb_event_info *info, 3343 unsigned long tail) 3344 { 3345 struct ring_buffer_event *event; 3346 u64 ts, delta; 3347 int e; 3348 3349 ts = bpage->time_stamp; 3350 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3351 3352 for (e = 0; e < tail; e += rb_event_length(event)) { 3353 3354 event = (struct ring_buffer_event *)(bpage->data + e); 3355 3356 switch (event->type_len) { 3357 3358 case RINGBUF_TYPE_TIME_EXTEND: 3359 delta = rb_event_time_stamp(event); 3360 ts += delta; 3361 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3362 break; 3363 3364 case RINGBUF_TYPE_TIME_STAMP: 3365 delta = rb_event_time_stamp(event); 3366 ts = rb_fix_abs_ts(delta, ts); 3367 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3368 break; 3369 3370 case RINGBUF_TYPE_PADDING: 3371 ts += event->time_delta; 3372 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3373 break; 3374 3375 case RINGBUF_TYPE_DATA: 3376 ts += event->time_delta; 3377 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3378 break; 3379 3380 default: 3381 break; 3382 } 3383 } 3384 } 3385 3386 static DEFINE_PER_CPU(atomic_t, checking); 3387 static atomic_t ts_dump; 3388 3389 /* 3390 * Check if the current event time stamp matches the deltas on 3391 * the buffer page. 
3392 */ 3393 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3394 struct rb_event_info *info, 3395 unsigned long tail) 3396 { 3397 struct ring_buffer_event *event; 3398 struct buffer_data_page *bpage; 3399 u64 ts, delta; 3400 bool full = false; 3401 int e; 3402 3403 bpage = info->tail_page->page; 3404 3405 if (tail == CHECK_FULL_PAGE) { 3406 full = true; 3407 tail = local_read(&bpage->commit); 3408 } else if (info->add_timestamp & 3409 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3410 /* Ignore events with absolute time stamps */ 3411 return; 3412 } 3413 3414 /* 3415 * Do not check the first event (skip possible extends too). 3416 * Also do not check if previous events have not been committed. 3417 */ 3418 if (tail <= 8 || tail > local_read(&bpage->commit)) 3419 return; 3420 3421 /* 3422 * If this interrupted another event, 3423 */ 3424 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3425 goto out; 3426 3427 ts = bpage->time_stamp; 3428 3429 for (e = 0; e < tail; e += rb_event_length(event)) { 3430 3431 event = (struct ring_buffer_event *)(bpage->data + e); 3432 3433 switch (event->type_len) { 3434 3435 case RINGBUF_TYPE_TIME_EXTEND: 3436 delta = rb_event_time_stamp(event); 3437 ts += delta; 3438 break; 3439 3440 case RINGBUF_TYPE_TIME_STAMP: 3441 delta = rb_event_time_stamp(event); 3442 ts = rb_fix_abs_ts(delta, ts); 3443 break; 3444 3445 case RINGBUF_TYPE_PADDING: 3446 if (event->time_delta == 1) 3447 break; 3448 fallthrough; 3449 case RINGBUF_TYPE_DATA: 3450 ts += event->time_delta; 3451 break; 3452 3453 default: 3454 RB_WARN_ON(cpu_buffer, 1); 3455 } 3456 } 3457 if ((full && ts > info->ts) || 3458 (!full && ts + info->delta != info->ts)) { 3459 /* If another report is happening, ignore this one */ 3460 if (atomic_inc_return(&ts_dump) != 1) { 3461 atomic_dec(&ts_dump); 3462 goto out; 3463 } 3464 atomic_inc(&cpu_buffer->record_disabled); 3465 /* There's some cases in boot up that this can happen */ 3466 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3467 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3468 cpu_buffer->cpu, 3469 ts + info->delta, info->ts, info->delta, 3470 info->before, info->after, 3471 full ? 
" (full)" : ""); 3472 dump_buffer_page(bpage, info, tail); 3473 atomic_dec(&ts_dump); 3474 /* Do not re-enable checking */ 3475 return; 3476 } 3477 out: 3478 atomic_dec(this_cpu_ptr(&checking)); 3479 } 3480 #else 3481 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3482 struct rb_event_info *info, 3483 unsigned long tail) 3484 { 3485 } 3486 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3487 3488 static struct ring_buffer_event * 3489 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3490 struct rb_event_info *info) 3491 { 3492 struct ring_buffer_event *event; 3493 struct buffer_page *tail_page; 3494 unsigned long tail, write, w; 3495 bool a_ok; 3496 bool b_ok; 3497 3498 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3499 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3500 3501 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3502 barrier(); 3503 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3504 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3505 barrier(); 3506 info->ts = rb_time_stamp(cpu_buffer->buffer); 3507 3508 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3509 info->delta = info->ts; 3510 } else { 3511 /* 3512 * If interrupting an event time update, we may need an 3513 * absolute timestamp. 3514 * Don't bother if this is the start of a new page (w == 0). 3515 */ 3516 if (!w) { 3517 /* Use the sub-buffer timestamp */ 3518 info->delta = 0; 3519 } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) { 3520 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3521 info->length += RB_LEN_TIME_EXTEND; 3522 } else { 3523 info->delta = info->ts - info->after; 3524 if (unlikely(test_time_stamp(info->delta))) { 3525 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3526 info->length += RB_LEN_TIME_EXTEND; 3527 } 3528 } 3529 } 3530 3531 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3532 3533 /*C*/ write = local_add_return(info->length, &tail_page->write); 3534 3535 /* set write to only the index of the write */ 3536 write &= RB_WRITE_MASK; 3537 3538 tail = write - info->length; 3539 3540 /* See if we shot pass the end of this buffer page */ 3541 if (unlikely(write > BUF_PAGE_SIZE)) { 3542 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3543 return rb_move_tail(cpu_buffer, tail, info); 3544 } 3545 3546 if (likely(tail == w)) { 3547 /* Nothing interrupted us between A and C */ 3548 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3549 /* 3550 * If something came in between C and D, the write stamp 3551 * may now not be in sync. But that's fine as the before_stamp 3552 * will be different and then next event will just be forced 3553 * to use an absolute timestamp. 3554 */ 3555 if (likely(!(info->add_timestamp & 3556 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3557 /* This did not interrupt any time update */ 3558 info->delta = info->ts - info->after; 3559 else 3560 /* Just use full timestamp for interrupting event */ 3561 info->delta = info->ts; 3562 check_buffer(cpu_buffer, info, tail); 3563 } else { 3564 u64 ts; 3565 /* SLOW PATH - Interrupted between A and C */ 3566 3567 /* Save the old before_stamp */ 3568 a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3569 RB_WARN_ON(cpu_buffer, !a_ok); 3570 3571 /* 3572 * Read a new timestamp and update the before_stamp to make 3573 * the next event after this one force using an absolute 3574 * timestamp. This is in case an interrupt were to come in 3575 * between E and F. 
3576 */ 3577 ts = rb_time_stamp(cpu_buffer->buffer); 3578 rb_time_set(&cpu_buffer->before_stamp, ts); 3579 3580 barrier(); 3581 /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3582 /* Was interrupted before here, write_stamp must be valid */ 3583 RB_WARN_ON(cpu_buffer, !a_ok); 3584 barrier(); 3585 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3586 info->after == info->before && info->after < ts) { 3587 /* 3588 * Nothing came after this event between C and F, it is 3589 * safe to use info->after for the delta as it 3590 * matched info->before and is still valid. 3591 */ 3592 info->delta = ts - info->after; 3593 } else { 3594 /* 3595 * Interrupted between C and F: 3596 * Lost the previous events time stamp. Just set the 3597 * delta to zero, and this will be the same time as 3598 * the event this event interrupted. And the events that 3599 * came after this will still be correct (as they would 3600 * have built their delta on the previous event. 3601 */ 3602 info->delta = 0; 3603 } 3604 info->ts = ts; 3605 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3606 } 3607 3608 /* 3609 * If this is the first commit on the page, then it has the same 3610 * timestamp as the page itself. 3611 */ 3612 if (unlikely(!tail && !(info->add_timestamp & 3613 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3614 info->delta = 0; 3615 3616 /* We reserved something on the buffer */ 3617 3618 event = __rb_page_index(tail_page, tail); 3619 rb_update_event(cpu_buffer, event, info); 3620 3621 local_inc(&tail_page->entries); 3622 3623 /* 3624 * If this is the first commit on the page, then update 3625 * its timestamp. 3626 */ 3627 if (unlikely(!tail)) 3628 tail_page->page->time_stamp = info->ts; 3629 3630 /* account for these added bytes */ 3631 local_add(info->length, &cpu_buffer->entries_bytes); 3632 3633 return event; 3634 } 3635 3636 static __always_inline struct ring_buffer_event * 3637 rb_reserve_next_event(struct trace_buffer *buffer, 3638 struct ring_buffer_per_cpu *cpu_buffer, 3639 unsigned long length) 3640 { 3641 struct ring_buffer_event *event; 3642 struct rb_event_info info; 3643 int nr_loops = 0; 3644 int add_ts_default; 3645 3646 /* ring buffer does cmpxchg, make sure it is safe in NMI context */ 3647 if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && 3648 (unlikely(in_nmi()))) { 3649 return NULL; 3650 } 3651 3652 rb_start_commit(cpu_buffer); 3653 /* The commit page can not change after this */ 3654 3655 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3656 /* 3657 * Due to the ability to swap a cpu buffer from a buffer 3658 * it is possible it was swapped before we committed. 3659 * (committing stops a swap). We check for it here and 3660 * if it happened, we have to fail the write. 3661 */ 3662 barrier(); 3663 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3664 local_dec(&cpu_buffer->committing); 3665 local_dec(&cpu_buffer->commits); 3666 return NULL; 3667 } 3668 #endif 3669 3670 info.length = rb_calculate_event_length(length); 3671 3672 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3673 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3674 info.length += RB_LEN_TIME_EXTEND; 3675 if (info.length > BUF_MAX_DATA_SIZE) 3676 goto out_fail; 3677 } else { 3678 add_ts_default = RB_ADD_STAMP_NONE; 3679 } 3680 3681 again: 3682 info.add_timestamp = add_ts_default; 3683 info.delta = 0; 3684 3685 /* 3686 * We allow for interrupts to reenter here and do a trace. 3687 * If one does, it will cause this original code to loop 3688 * back here. 
Even with heavy interrupts happening, this 3689 * should only happen a few times in a row. If this happens 3690 * 1000 times in a row, there must be either an interrupt 3691 * storm or we have something buggy. 3692 * Bail! 3693 */ 3694 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3695 goto out_fail; 3696 3697 event = __rb_reserve_next(cpu_buffer, &info); 3698 3699 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3700 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3701 info.length -= RB_LEN_TIME_EXTEND; 3702 goto again; 3703 } 3704 3705 if (likely(event)) 3706 return event; 3707 out_fail: 3708 rb_end_commit(cpu_buffer); 3709 return NULL; 3710 } 3711 3712 /** 3713 * ring_buffer_lock_reserve - reserve a part of the buffer 3714 * @buffer: the ring buffer to reserve from 3715 * @length: the length of the data to reserve (excluding event header) 3716 * 3717 * Returns a reserved event on the ring buffer to copy directly to. 3718 * The user of this interface will need to get the body to write into 3719 * and can use the ring_buffer_event_data() interface. 3720 * 3721 * The length is the length of the data needed, not the event length 3722 * which also includes the event header. 3723 * 3724 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3725 * If NULL is returned, then nothing has been allocated or locked. 3726 */ 3727 struct ring_buffer_event * 3728 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3729 { 3730 struct ring_buffer_per_cpu *cpu_buffer; 3731 struct ring_buffer_event *event; 3732 int cpu; 3733 3734 /* If we are tracing schedule, we don't want to recurse */ 3735 preempt_disable_notrace(); 3736 3737 if (unlikely(atomic_read(&buffer->record_disabled))) 3738 goto out; 3739 3740 cpu = raw_smp_processor_id(); 3741 3742 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3743 goto out; 3744 3745 cpu_buffer = buffer->buffers[cpu]; 3746 3747 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3748 goto out; 3749 3750 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3751 goto out; 3752 3753 if (unlikely(trace_recursive_lock(cpu_buffer))) 3754 goto out; 3755 3756 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3757 if (!event) 3758 goto out_unlock; 3759 3760 return event; 3761 3762 out_unlock: 3763 trace_recursive_unlock(cpu_buffer); 3764 out: 3765 preempt_enable_notrace(); 3766 return NULL; 3767 } 3768 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3769 3770 /* 3771 * Decrement the entries to the page that an event is on. 3772 * The event does not even need to exist, only the pointer 3773 * to the page it is on. This may only be called before the commit 3774 * takes place. 3775 */ 3776 static inline void 3777 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3778 struct ring_buffer_event *event) 3779 { 3780 unsigned long addr = (unsigned long)event; 3781 struct buffer_page *bpage = cpu_buffer->commit_page; 3782 struct buffer_page *start; 3783 3784 addr &= PAGE_MASK; 3785 3786 /* Do the likely case first */ 3787 if (likely(bpage->page == (void *)addr)) { 3788 local_dec(&bpage->entries); 3789 return; 3790 } 3791 3792 /* 3793 * Because the commit page may be on the reader page we 3794 * start with the next page and check the end loop there. 3795 */ 3796 rb_inc_page(&bpage); 3797 start = bpage; 3798 do { 3799 if (bpage->page == (void *)addr) { 3800 local_dec(&bpage->entries); 3801 return; 3802 } 3803 rb_inc_page(&bpage); 3804 } while (bpage != start); 3805 3806 /* commit not part of this buffer?? 
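 *
 * We walked every page without finding the event's page, which means
 * the caller handed us an event that does not belong to this per-CPU
 * buffer.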
*/ 3807 RB_WARN_ON(cpu_buffer, 1); 3808 } 3809 3810 /** 3811 * ring_buffer_discard_commit - discard an event that has not been committed 3812 * @buffer: the ring buffer 3813 * @event: non committed event to discard 3814 * 3815 * Sometimes an event that is in the ring buffer needs to be ignored. 3816 * This function lets the user discard an event in the ring buffer 3817 * and then that event will not be read later. 3818 * 3819 * This function only works if it is called before the item has been 3820 * committed. It will try to free the event from the ring buffer 3821 * if another event has not been added behind it. 3822 * 3823 * If another event has been added behind it, it will set the event 3824 * up as discarded, and perform the commit. 3825 * 3826 * If this function is called, do not call ring_buffer_unlock_commit on 3827 * the event. 3828 */ 3829 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3830 struct ring_buffer_event *event) 3831 { 3832 struct ring_buffer_per_cpu *cpu_buffer; 3833 int cpu; 3834 3835 /* The event is discarded regardless */ 3836 rb_event_discard(event); 3837 3838 cpu = smp_processor_id(); 3839 cpu_buffer = buffer->buffers[cpu]; 3840 3841 /* 3842 * This must only be called if the event has not been 3843 * committed yet. Thus we can assume that preemption 3844 * is still disabled. 3845 */ 3846 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3847 3848 rb_decrement_entry(cpu_buffer, event); 3849 if (rb_try_to_discard(cpu_buffer, event)) 3850 goto out; 3851 3852 out: 3853 rb_end_commit(cpu_buffer); 3854 3855 trace_recursive_unlock(cpu_buffer); 3856 3857 preempt_enable_notrace(); 3858 3859 } 3860 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3861 3862 /** 3863 * ring_buffer_write - write data to the buffer without reserving 3864 * @buffer: The ring buffer to write to. 3865 * @length: The length of the data being written (excluding the event header) 3866 * @data: The data to write to the buffer. 3867 * 3868 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3869 * one function. If you already have the data to write to the buffer, it 3870 * may be easier to simply call this function. 3871 * 3872 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3873 * and not the length of the event which would hold the header. 
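 *
 * A minimal usage sketch (illustrative only; "struct my_event" and the
 * error handling are made up for the example and are not part of this API):
 *
 *	struct my_event ev = { .val = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(ev), &ev))
 *		pr_debug("event dropped: recording disabled or buffer busy\n");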
3874 */ 3875 int ring_buffer_write(struct trace_buffer *buffer, 3876 unsigned long length, 3877 void *data) 3878 { 3879 struct ring_buffer_per_cpu *cpu_buffer; 3880 struct ring_buffer_event *event; 3881 void *body; 3882 int ret = -EBUSY; 3883 int cpu; 3884 3885 preempt_disable_notrace(); 3886 3887 if (atomic_read(&buffer->record_disabled)) 3888 goto out; 3889 3890 cpu = raw_smp_processor_id(); 3891 3892 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3893 goto out; 3894 3895 cpu_buffer = buffer->buffers[cpu]; 3896 3897 if (atomic_read(&cpu_buffer->record_disabled)) 3898 goto out; 3899 3900 if (length > BUF_MAX_DATA_SIZE) 3901 goto out; 3902 3903 if (unlikely(trace_recursive_lock(cpu_buffer))) 3904 goto out; 3905 3906 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3907 if (!event) 3908 goto out_unlock; 3909 3910 body = rb_event_data(event); 3911 3912 memcpy(body, data, length); 3913 3914 rb_commit(cpu_buffer); 3915 3916 rb_wakeups(buffer, cpu_buffer); 3917 3918 ret = 0; 3919 3920 out_unlock: 3921 trace_recursive_unlock(cpu_buffer); 3922 3923 out: 3924 preempt_enable_notrace(); 3925 3926 return ret; 3927 } 3928 EXPORT_SYMBOL_GPL(ring_buffer_write); 3929 3930 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3931 { 3932 struct buffer_page *reader = cpu_buffer->reader_page; 3933 struct buffer_page *head = rb_set_head_page(cpu_buffer); 3934 struct buffer_page *commit = cpu_buffer->commit_page; 3935 3936 /* In case of error, head will be NULL */ 3937 if (unlikely(!head)) 3938 return true; 3939 3940 /* Reader should exhaust content in reader page */ 3941 if (reader->read != rb_page_commit(reader)) 3942 return false; 3943 3944 /* 3945 * If writers are committing on the reader page, knowing all 3946 * committed content has been read, the ring buffer is empty. 3947 */ 3948 if (commit == reader) 3949 return true; 3950 3951 /* 3952 * If writers are committing on a page other than reader page 3953 * and head page, there should always be content to read. 3954 */ 3955 if (commit != head) 3956 return false; 3957 3958 /* 3959 * Writers are committing on the head page, we just need 3960 * to care about there're committed data, and the reader will 3961 * swap reader page with head page when it is to read data. 3962 */ 3963 return rb_page_commit(commit) == 0; 3964 } 3965 3966 /** 3967 * ring_buffer_record_disable - stop all writes into the buffer 3968 * @buffer: The ring buffer to stop writes to. 3969 * 3970 * This prevents all writes to the buffer. Any attempt to write 3971 * to the buffer after this will fail and return NULL. 3972 * 3973 * The caller should call synchronize_rcu() after this. 3974 */ 3975 void ring_buffer_record_disable(struct trace_buffer *buffer) 3976 { 3977 atomic_inc(&buffer->record_disabled); 3978 } 3979 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 3980 3981 /** 3982 * ring_buffer_record_enable - enable writes to the buffer 3983 * @buffer: The ring buffer to enable writes 3984 * 3985 * Note, multiple disables will need the same number of enables 3986 * to truly enable the writing (much like preempt_disable). 3987 */ 3988 void ring_buffer_record_enable(struct trace_buffer *buffer) 3989 { 3990 atomic_dec(&buffer->record_disabled); 3991 } 3992 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 3993 3994 /** 3995 * ring_buffer_record_off - stop all writes into the buffer 3996 * @buffer: The ring buffer to stop writes to. 3997 * 3998 * This prevents all writes to the buffer. Any attempt to write 3999 * to the buffer after this will fail and return NULL. 
4000 * 4001 * This is different than ring_buffer_record_disable() as 4002 * it works like an on/off switch, whereas the disable() version 4003 * must be paired with an enable(). 4004 */ 4005 void ring_buffer_record_off(struct trace_buffer *buffer) 4006 { 4007 unsigned int rd; 4008 unsigned int new_rd; 4009 4010 rd = atomic_read(&buffer->record_disabled); 4011 do { 4012 new_rd = rd | RB_BUFFER_OFF; 4013 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4014 } 4015 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 4016 4017 /** 4018 * ring_buffer_record_on - restart writes into the buffer 4019 * @buffer: The ring buffer to start writes to. 4020 * 4021 * This enables all writes to the buffer that was disabled by 4022 * ring_buffer_record_off(). 4023 * 4024 * This is different than ring_buffer_record_enable() as 4025 * it works like an on/off switch, whereas the enable() version 4026 * must be paired with a disable(). 4027 */ 4028 void ring_buffer_record_on(struct trace_buffer *buffer) 4029 { 4030 unsigned int rd; 4031 unsigned int new_rd; 4032 4033 rd = atomic_read(&buffer->record_disabled); 4034 do { 4035 new_rd = rd & ~RB_BUFFER_OFF; 4036 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4037 } 4038 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 4039 4040 /** 4041 * ring_buffer_record_is_on - return true if the ring buffer can write 4042 * @buffer: The ring buffer to see if write is enabled 4043 * 4044 * Returns true if the ring buffer is in a state that it accepts writes. 4045 */ 4046 bool ring_buffer_record_is_on(struct trace_buffer *buffer) 4047 { 4048 return !atomic_read(&buffer->record_disabled); 4049 } 4050 4051 /** 4052 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 4053 * @buffer: The ring buffer to see if write is set enabled 4054 * 4055 * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 4056 * Note that this does NOT mean it is in a writable state. 4057 * 4058 * It may return true when the ring buffer has been disabled by 4059 * ring_buffer_record_disable(), as that is a temporary disabling of 4060 * the ring buffer. 4061 */ 4062 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 4063 { 4064 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 4065 } 4066 4067 /** 4068 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 4069 * @buffer: The ring buffer to stop writes to. 4070 * @cpu: The CPU buffer to stop 4071 * 4072 * This prevents all writes to the buffer. Any attempt to write 4073 * to the buffer after this will fail and return NULL. 4074 * 4075 * The caller should call synchronize_rcu() after this. 4076 */ 4077 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 4078 { 4079 struct ring_buffer_per_cpu *cpu_buffer; 4080 4081 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4082 return; 4083 4084 cpu_buffer = buffer->buffers[cpu]; 4085 atomic_inc(&cpu_buffer->record_disabled); 4086 } 4087 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 4088 4089 /** 4090 * ring_buffer_record_enable_cpu - enable writes to the buffer 4091 * @buffer: The ring buffer to enable writes 4092 * @cpu: The CPU to enable. 4093 * 4094 * Note, multiple disables will need the same number of enables 4095 * to truly enable the writing (much like preempt_disable).
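 *
 * A typical pairing with ring_buffer_record_disable_cpu() might look
 * like this (sketch only; what is done while recording is disabled is
 * entirely up to the caller):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_rcu();
 *	... inspect or reset the per CPU buffer ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);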
4096 */ 4097 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4098 { 4099 struct ring_buffer_per_cpu *cpu_buffer; 4100 4101 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4102 return; 4103 4104 cpu_buffer = buffer->buffers[cpu]; 4105 atomic_dec(&cpu_buffer->record_disabled); 4106 } 4107 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4108 4109 /* 4110 * The total entries in the ring buffer is the running counter 4111 * of entries entered into the ring buffer, minus the sum of 4112 * the entries read from the ring buffer and the number of 4113 * entries that were overwritten. 4114 */ 4115 static inline unsigned long 4116 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4117 { 4118 return local_read(&cpu_buffer->entries) - 4119 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4120 } 4121 4122 /** 4123 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4124 * @buffer: The ring buffer 4125 * @cpu: The per CPU buffer to read from. 4126 */ 4127 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4128 { 4129 unsigned long flags; 4130 struct ring_buffer_per_cpu *cpu_buffer; 4131 struct buffer_page *bpage; 4132 u64 ret = 0; 4133 4134 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4135 return 0; 4136 4137 cpu_buffer = buffer->buffers[cpu]; 4138 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4139 /* 4140 * if the tail is on reader_page, oldest time stamp is on the reader 4141 * page 4142 */ 4143 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4144 bpage = cpu_buffer->reader_page; 4145 else 4146 bpage = rb_set_head_page(cpu_buffer); 4147 if (bpage) 4148 ret = bpage->page->time_stamp; 4149 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4150 4151 return ret; 4152 } 4153 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4154 4155 /** 4156 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer 4157 * @buffer: The ring buffer 4158 * @cpu: The per CPU buffer to read from. 4159 */ 4160 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4161 { 4162 struct ring_buffer_per_cpu *cpu_buffer; 4163 unsigned long ret; 4164 4165 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4166 return 0; 4167 4168 cpu_buffer = buffer->buffers[cpu]; 4169 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4170 4171 return ret; 4172 } 4173 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4174 4175 /** 4176 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4177 * @buffer: The ring buffer 4178 * @cpu: The per CPU buffer to get the entries from. 4179 */ 4180 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4181 { 4182 struct ring_buffer_per_cpu *cpu_buffer; 4183 4184 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4185 return 0; 4186 4187 cpu_buffer = buffer->buffers[cpu]; 4188 4189 return rb_num_of_entries(cpu_buffer); 4190 } 4191 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4192 4193 /** 4194 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4195 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4196 * @buffer: The ring buffer 4197 * @cpu: The per CPU buffer to get the number of overruns from 4198 */ 4199 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4200 { 4201 struct ring_buffer_per_cpu *cpu_buffer; 4202 unsigned long ret; 4203 4204 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4205 return 0; 4206 4207 cpu_buffer = buffer->buffers[cpu]; 4208 ret = local_read(&cpu_buffer->overrun); 4209 4210 return ret; 4211 } 4212 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4213 4214 /** 4215 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4216 * commits failing due to the buffer wrapping around while there are uncommitted 4217 * events, such as during an interrupt storm. 4218 * @buffer: The ring buffer 4219 * @cpu: The per CPU buffer to get the number of overruns from 4220 */ 4221 unsigned long 4222 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4223 { 4224 struct ring_buffer_per_cpu *cpu_buffer; 4225 unsigned long ret; 4226 4227 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4228 return 0; 4229 4230 cpu_buffer = buffer->buffers[cpu]; 4231 ret = local_read(&cpu_buffer->commit_overrun); 4232 4233 return ret; 4234 } 4235 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4236 4237 /** 4238 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4239 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4240 * @buffer: The ring buffer 4241 * @cpu: The per CPU buffer to get the number of overruns from 4242 */ 4243 unsigned long 4244 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4245 { 4246 struct ring_buffer_per_cpu *cpu_buffer; 4247 unsigned long ret; 4248 4249 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4250 return 0; 4251 4252 cpu_buffer = buffer->buffers[cpu]; 4253 ret = local_read(&cpu_buffer->dropped_events); 4254 4255 return ret; 4256 } 4257 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4258 4259 /** 4260 * ring_buffer_read_events_cpu - get the number of events successfully read 4261 * @buffer: The ring buffer 4262 * @cpu: The per CPU buffer to get the number of events read 4263 */ 4264 unsigned long 4265 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4266 { 4267 struct ring_buffer_per_cpu *cpu_buffer; 4268 4269 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4270 return 0; 4271 4272 cpu_buffer = buffer->buffers[cpu]; 4273 return cpu_buffer->read; 4274 } 4275 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4276 4277 /** 4278 * ring_buffer_entries - get the number of entries in a buffer 4279 * @buffer: The ring buffer 4280 * 4281 * Returns the total number of entries in the ring buffer 4282 * (all CPU entries) 4283 */ 4284 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4285 { 4286 struct ring_buffer_per_cpu *cpu_buffer; 4287 unsigned long entries = 0; 4288 int cpu; 4289 4290 /* if you care about this being correct, lock the buffer */ 4291 for_each_buffer_cpu(buffer, cpu) { 4292 cpu_buffer = buffer->buffers[cpu]; 4293 entries += rb_num_of_entries(cpu_buffer); 4294 } 4295 4296 return entries; 4297 } 4298 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4299 4300 /** 4301 * ring_buffer_overruns - get the number of overruns in buffer 4302 * @buffer: The ring buffer 4303 * 4304 * Returns the total number of overruns in the ring buffer 4305 * (all CPU entries) 4306 */ 4307 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4308 { 4309 struct ring_buffer_per_cpu *cpu_buffer; 4310 unsigned long overruns = 0; 4311 int cpu; 4312 4313 /* 
if you care about this being correct, lock the buffer */ 4314 for_each_buffer_cpu(buffer, cpu) { 4315 cpu_buffer = buffer->buffers[cpu]; 4316 overruns += local_read(&cpu_buffer->overrun); 4317 } 4318 4319 return overruns; 4320 } 4321 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4322 4323 static void rb_iter_reset(struct ring_buffer_iter *iter) 4324 { 4325 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4326 4327 /* Iterator usage is expected to have record disabled */ 4328 iter->head_page = cpu_buffer->reader_page; 4329 iter->head = cpu_buffer->reader_page->read; 4330 iter->next_event = iter->head; 4331 4332 iter->cache_reader_page = iter->head_page; 4333 iter->cache_read = cpu_buffer->read; 4334 iter->cache_pages_removed = cpu_buffer->pages_removed; 4335 4336 if (iter->head) { 4337 iter->read_stamp = cpu_buffer->read_stamp; 4338 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4339 } else { 4340 iter->read_stamp = iter->head_page->page->time_stamp; 4341 iter->page_stamp = iter->read_stamp; 4342 } 4343 } 4344 4345 /** 4346 * ring_buffer_iter_reset - reset an iterator 4347 * @iter: The iterator to reset 4348 * 4349 * Resets the iterator, so that it will start from the beginning 4350 * again. 4351 */ 4352 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4353 { 4354 struct ring_buffer_per_cpu *cpu_buffer; 4355 unsigned long flags; 4356 4357 if (!iter) 4358 return; 4359 4360 cpu_buffer = iter->cpu_buffer; 4361 4362 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4363 rb_iter_reset(iter); 4364 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4365 } 4366 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4367 4368 /** 4369 * ring_buffer_iter_empty - check if an iterator has no more to read 4370 * @iter: The iterator to check 4371 */ 4372 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4373 { 4374 struct ring_buffer_per_cpu *cpu_buffer; 4375 struct buffer_page *reader; 4376 struct buffer_page *head_page; 4377 struct buffer_page *commit_page; 4378 struct buffer_page *curr_commit_page; 4379 unsigned commit; 4380 u64 curr_commit_ts; 4381 u64 commit_ts; 4382 4383 cpu_buffer = iter->cpu_buffer; 4384 reader = cpu_buffer->reader_page; 4385 head_page = cpu_buffer->head_page; 4386 commit_page = cpu_buffer->commit_page; 4387 commit_ts = commit_page->page->time_stamp; 4388 4389 /* 4390 * When the writer goes across pages, it issues a cmpxchg which 4391 * is a mb(), which will synchronize with the rmb here. 
4392 * (see rb_tail_page_update()) 4393 */ 4394 smp_rmb(); 4395 commit = rb_page_commit(commit_page); 4396 /* We want to make sure that the commit page doesn't change */ 4397 smp_rmb(); 4398 4399 /* Make sure commit page didn't change */ 4400 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4401 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4402 4403 /* If the commit page changed, then there's more data */ 4404 if (curr_commit_page != commit_page || 4405 curr_commit_ts != commit_ts) 4406 return 0; 4407 4408 /* Still racy, as it may return a false positive, but that's OK */ 4409 return ((iter->head_page == commit_page && iter->head >= commit) || 4410 (iter->head_page == reader && commit_page == head_page && 4411 head_page->read == commit && 4412 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4413 } 4414 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4415 4416 static void 4417 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4418 struct ring_buffer_event *event) 4419 { 4420 u64 delta; 4421 4422 switch (event->type_len) { 4423 case RINGBUF_TYPE_PADDING: 4424 return; 4425 4426 case RINGBUF_TYPE_TIME_EXTEND: 4427 delta = rb_event_time_stamp(event); 4428 cpu_buffer->read_stamp += delta; 4429 return; 4430 4431 case RINGBUF_TYPE_TIME_STAMP: 4432 delta = rb_event_time_stamp(event); 4433 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4434 cpu_buffer->read_stamp = delta; 4435 return; 4436 4437 case RINGBUF_TYPE_DATA: 4438 cpu_buffer->read_stamp += event->time_delta; 4439 return; 4440 4441 default: 4442 RB_WARN_ON(cpu_buffer, 1); 4443 } 4444 } 4445 4446 static void 4447 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4448 struct ring_buffer_event *event) 4449 { 4450 u64 delta; 4451 4452 switch (event->type_len) { 4453 case RINGBUF_TYPE_PADDING: 4454 return; 4455 4456 case RINGBUF_TYPE_TIME_EXTEND: 4457 delta = rb_event_time_stamp(event); 4458 iter->read_stamp += delta; 4459 return; 4460 4461 case RINGBUF_TYPE_TIME_STAMP: 4462 delta = rb_event_time_stamp(event); 4463 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4464 iter->read_stamp = delta; 4465 return; 4466 4467 case RINGBUF_TYPE_DATA: 4468 iter->read_stamp += event->time_delta; 4469 return; 4470 4471 default: 4472 RB_WARN_ON(iter->cpu_buffer, 1); 4473 } 4474 } 4475 4476 static struct buffer_page * 4477 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4478 { 4479 struct buffer_page *reader = NULL; 4480 unsigned long overwrite; 4481 unsigned long flags; 4482 int nr_loops = 0; 4483 bool ret; 4484 4485 local_irq_save(flags); 4486 arch_spin_lock(&cpu_buffer->lock); 4487 4488 again: 4489 /* 4490 * This should normally only loop twice. But because the 4491 * start of the reader inserts an empty page, it causes 4492 * a case where we will loop three times. There should be no 4493 * reason to loop four times (that I know of). 
4494 */ 4495 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4496 reader = NULL; 4497 goto out; 4498 } 4499 4500 reader = cpu_buffer->reader_page; 4501 4502 /* If there's more to read, return this page */ 4503 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4504 goto out; 4505 4506 /* Never should we have an index greater than the size */ 4507 if (RB_WARN_ON(cpu_buffer, 4508 cpu_buffer->reader_page->read > rb_page_size(reader))) 4509 goto out; 4510 4511 /* check if we caught up to the tail */ 4512 reader = NULL; 4513 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4514 goto out; 4515 4516 /* Don't bother swapping if the ring buffer is empty */ 4517 if (rb_num_of_entries(cpu_buffer) == 0) 4518 goto out; 4519 4520 /* 4521 * Reset the reader page to size zero. 4522 */ 4523 local_set(&cpu_buffer->reader_page->write, 0); 4524 local_set(&cpu_buffer->reader_page->entries, 0); 4525 local_set(&cpu_buffer->reader_page->page->commit, 0); 4526 cpu_buffer->reader_page->real_end = 0; 4527 4528 spin: 4529 /* 4530 * Splice the empty reader page into the list around the head. 4531 */ 4532 reader = rb_set_head_page(cpu_buffer); 4533 if (!reader) 4534 goto out; 4535 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4536 cpu_buffer->reader_page->list.prev = reader->list.prev; 4537 4538 /* 4539 * cpu_buffer->pages just needs to point to the buffer, it 4540 * has no specific buffer page to point to. Let's move it out 4541 * of our way so we don't accidentally swap it. 4542 */ 4543 cpu_buffer->pages = reader->list.prev; 4544 4545 /* The reader page will be pointing to the new head */ 4546 rb_set_list_to_head(&cpu_buffer->reader_page->list); 4547 4548 /* 4549 * We want to make sure we read the overruns after we set up our 4550 * pointers to the next object. The writer side does a 4551 * cmpxchg to cross pages which acts as the mb on the writer 4552 * side. Note, the reader will constantly fail the swap 4553 * while the writer is updating the pointers, so this 4554 * guarantees that the overwrite recorded here is the one we 4555 * want to compare with the last_overrun. 4556 */ 4557 smp_mb(); 4558 overwrite = local_read(&(cpu_buffer->overrun)); 4559 4560 /* 4561 * Here's the tricky part. 4562 * 4563 * We need to move the pointer past the header page. 4564 * But we can only do that if a writer is not currently 4565 * moving it. The page before the header page has the 4566 * flag bit '1' set if it is pointing to the page we want. 4567 * But if the writer is in the process of moving it 4568 * then it will be '2' or already moved '0'. 4569 */ 4570 4571 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 4572 4573 /* 4574 * If we did not convert it, then we must try again. 4575 */ 4576 if (!ret) 4577 goto spin; 4578 4579 /* 4580 * Yay! We succeeded in replacing the page. 4581 * 4582 * Now make the new head point back to the reader page.
4583 */ 4584 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4585 rb_inc_page(&cpu_buffer->head_page); 4586 4587 local_inc(&cpu_buffer->pages_read); 4588 4589 /* Finally update the reader page to the new head */ 4590 cpu_buffer->reader_page = reader; 4591 cpu_buffer->reader_page->read = 0; 4592 4593 if (overwrite != cpu_buffer->last_overrun) { 4594 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4595 cpu_buffer->last_overrun = overwrite; 4596 } 4597 4598 goto again; 4599 4600 out: 4601 /* Update the read_stamp on the first event */ 4602 if (reader && reader->read == 0) 4603 cpu_buffer->read_stamp = reader->page->time_stamp; 4604 4605 arch_spin_unlock(&cpu_buffer->lock); 4606 local_irq_restore(flags); 4607 4608 /* 4609 * The writer has preempt disable, wait for it. But not forever 4610 * Although, 1 second is pretty much "forever" 4611 */ 4612 #define USECS_WAIT 1000000 4613 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4614 /* If the write is past the end of page, a writer is still updating it */ 4615 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4616 break; 4617 4618 udelay(1); 4619 4620 /* Get the latest version of the reader write value */ 4621 smp_rmb(); 4622 } 4623 4624 /* The writer is not moving forward? Something is wrong */ 4625 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4626 reader = NULL; 4627 4628 /* 4629 * Make sure we see any padding after the write update 4630 * (see rb_reset_tail()). 4631 * 4632 * In addition, a writer may be writing on the reader page 4633 * if the page has not been fully filled, so the read barrier 4634 * is also needed to make sure we see the content of what is 4635 * committed by the writer (see rb_set_commit_to_write()). 4636 */ 4637 smp_rmb(); 4638 4639 4640 return reader; 4641 } 4642 4643 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4644 { 4645 struct ring_buffer_event *event; 4646 struct buffer_page *reader; 4647 unsigned length; 4648 4649 reader = rb_get_reader_page(cpu_buffer); 4650 4651 /* This function should not be called when buffer is empty */ 4652 if (RB_WARN_ON(cpu_buffer, !reader)) 4653 return; 4654 4655 event = rb_reader_event(cpu_buffer); 4656 4657 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4658 cpu_buffer->read++; 4659 4660 rb_update_read_stamp(cpu_buffer, event); 4661 4662 length = rb_event_length(event); 4663 cpu_buffer->reader_page->read += length; 4664 cpu_buffer->read_bytes += length; 4665 } 4666 4667 static void rb_advance_iter(struct ring_buffer_iter *iter) 4668 { 4669 struct ring_buffer_per_cpu *cpu_buffer; 4670 4671 cpu_buffer = iter->cpu_buffer; 4672 4673 /* If head == next_event then we need to jump to the next event */ 4674 if (iter->head == iter->next_event) { 4675 /* If the event gets overwritten again, there's nothing to do */ 4676 if (rb_iter_head_event(iter) == NULL) 4677 return; 4678 } 4679 4680 iter->head = iter->next_event; 4681 4682 /* 4683 * Check if we are at the end of the buffer. 
4684 */ 4685 if (iter->next_event >= rb_page_size(iter->head_page)) { 4686 /* discarded commits can make the page empty */ 4687 if (iter->head_page == cpu_buffer->commit_page) 4688 return; 4689 rb_inc_iter(iter); 4690 return; 4691 } 4692 4693 rb_update_iter_read_stamp(iter, iter->event); 4694 } 4695 4696 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4697 { 4698 return cpu_buffer->lost_events; 4699 } 4700 4701 static struct ring_buffer_event * 4702 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4703 unsigned long *lost_events) 4704 { 4705 struct ring_buffer_event *event; 4706 struct buffer_page *reader; 4707 int nr_loops = 0; 4708 4709 if (ts) 4710 *ts = 0; 4711 again: 4712 /* 4713 * We repeat when a time extend is encountered. 4714 * Since the time extend is always attached to a data event, 4715 * we should never loop more than once. 4716 * (We never hit the following condition more than twice). 4717 */ 4718 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4719 return NULL; 4720 4721 reader = rb_get_reader_page(cpu_buffer); 4722 if (!reader) 4723 return NULL; 4724 4725 event = rb_reader_event(cpu_buffer); 4726 4727 switch (event->type_len) { 4728 case RINGBUF_TYPE_PADDING: 4729 if (rb_null_event(event)) 4730 RB_WARN_ON(cpu_buffer, 1); 4731 /* 4732 * Because the writer could be discarding every 4733 * event it creates (which would probably be bad) 4734 * if we were to go back to "again" then we may never 4735 * catch up, and will trigger the warn on, or lock 4736 * the box. Return the padding, and we will release 4737 * the current locks, and try again. 4738 */ 4739 return event; 4740 4741 case RINGBUF_TYPE_TIME_EXTEND: 4742 /* Internal data, OK to advance */ 4743 rb_advance_reader(cpu_buffer); 4744 goto again; 4745 4746 case RINGBUF_TYPE_TIME_STAMP: 4747 if (ts) { 4748 *ts = rb_event_time_stamp(event); 4749 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4750 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4751 cpu_buffer->cpu, ts); 4752 } 4753 /* Internal data, OK to advance */ 4754 rb_advance_reader(cpu_buffer); 4755 goto again; 4756 4757 case RINGBUF_TYPE_DATA: 4758 if (ts && !(*ts)) { 4759 *ts = cpu_buffer->read_stamp + event->time_delta; 4760 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4761 cpu_buffer->cpu, ts); 4762 } 4763 if (lost_events) 4764 *lost_events = rb_lost_events(cpu_buffer); 4765 return event; 4766 4767 default: 4768 RB_WARN_ON(cpu_buffer, 1); 4769 } 4770 4771 return NULL; 4772 } 4773 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4774 4775 static struct ring_buffer_event * 4776 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4777 { 4778 struct trace_buffer *buffer; 4779 struct ring_buffer_per_cpu *cpu_buffer; 4780 struct ring_buffer_event *event; 4781 int nr_loops = 0; 4782 4783 if (ts) 4784 *ts = 0; 4785 4786 cpu_buffer = iter->cpu_buffer; 4787 buffer = cpu_buffer->buffer; 4788 4789 /* 4790 * Check if someone performed a consuming read to the buffer 4791 * or removed some pages from the buffer. In these cases, 4792 * iterator was invalidated and we need to reset it. 4793 */ 4794 if (unlikely(iter->cache_read != cpu_buffer->read || 4795 iter->cache_reader_page != cpu_buffer->reader_page || 4796 iter->cache_pages_removed != cpu_buffer->pages_removed)) 4797 rb_iter_reset(iter); 4798 4799 again: 4800 if (ring_buffer_iter_empty(iter)) 4801 return NULL; 4802 4803 /* 4804 * As the writer can mess with what the iterator is trying 4805 * to read, just give up if we fail to get an event after 4806 * three tries. 
The iterator is not as reliable when reading 4807 * the ring buffer with an active write as the consumer is. 4808 * Do not warn if the three failures are reached. 4809 */ 4810 if (++nr_loops > 3) 4811 return NULL; 4812 4813 if (rb_per_cpu_empty(cpu_buffer)) 4814 return NULL; 4815 4816 if (iter->head >= rb_page_size(iter->head_page)) { 4817 rb_inc_iter(iter); 4818 goto again; 4819 } 4820 4821 event = rb_iter_head_event(iter); 4822 if (!event) 4823 goto again; 4824 4825 switch (event->type_len) { 4826 case RINGBUF_TYPE_PADDING: 4827 if (rb_null_event(event)) { 4828 rb_inc_iter(iter); 4829 goto again; 4830 } 4831 rb_advance_iter(iter); 4832 return event; 4833 4834 case RINGBUF_TYPE_TIME_EXTEND: 4835 /* Internal data, OK to advance */ 4836 rb_advance_iter(iter); 4837 goto again; 4838 4839 case RINGBUF_TYPE_TIME_STAMP: 4840 if (ts) { 4841 *ts = rb_event_time_stamp(event); 4842 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); 4843 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4844 cpu_buffer->cpu, ts); 4845 } 4846 /* Internal data, OK to advance */ 4847 rb_advance_iter(iter); 4848 goto again; 4849 4850 case RINGBUF_TYPE_DATA: 4851 if (ts && !(*ts)) { 4852 *ts = iter->read_stamp + event->time_delta; 4853 ring_buffer_normalize_time_stamp(buffer, 4854 cpu_buffer->cpu, ts); 4855 } 4856 return event; 4857 4858 default: 4859 RB_WARN_ON(cpu_buffer, 1); 4860 } 4861 4862 return NULL; 4863 } 4864 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 4865 4866 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) 4867 { 4868 if (likely(!in_nmi())) { 4869 raw_spin_lock(&cpu_buffer->reader_lock); 4870 return true; 4871 } 4872 4873 /* 4874 * If an NMI die dumps out the content of the ring buffer, 4875 * trylock must be used to prevent a deadlock if the NMI 4876 * preempted a task that holds the ring buffer locks. If 4877 * we get the lock then all is fine, if not, then continue 4878 * to do the read, but this can corrupt the ring buffer, 4879 * so it must be permanently disabled from future writes. 4880 * Reading from NMI is a one-shot deal. 4881 */ 4882 if (raw_spin_trylock(&cpu_buffer->reader_lock)) 4883 return true; 4884 4885 /* Continue without locking, but disable the ring buffer */ 4886 atomic_inc(&cpu_buffer->record_disabled); 4887 return false; 4888 } 4889 4890 static inline void 4891 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) 4892 { 4893 if (likely(locked)) 4894 raw_spin_unlock(&cpu_buffer->reader_lock); 4895 } 4896 4897 /** 4898 * ring_buffer_peek - peek at the next event to be read 4899 * @buffer: The ring buffer to read 4900 * @cpu: The cpu to peek at 4901 * @ts: The timestamp counter of this event. 4902 * @lost_events: a variable to store if events were lost (may be NULL) 4903 * 4904 * This will return the event that will be read next, but does 4905 * not consume the data.
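 *
 * A minimal sketch of peeking without consuming (illustrative only;
 * the pr_info() is just for the example):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next event is %u bytes\n",
 *			ring_buffer_event_length(event));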
4906 */ 4907 struct ring_buffer_event * 4908 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, 4909 unsigned long *lost_events) 4910 { 4911 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4912 struct ring_buffer_event *event; 4913 unsigned long flags; 4914 bool dolock; 4915 4916 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4917 return NULL; 4918 4919 again: 4920 local_irq_save(flags); 4921 dolock = rb_reader_lock(cpu_buffer); 4922 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4923 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4924 rb_advance_reader(cpu_buffer); 4925 rb_reader_unlock(cpu_buffer, dolock); 4926 local_irq_restore(flags); 4927 4928 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4929 goto again; 4930 4931 return event; 4932 } 4933 4934 /** ring_buffer_iter_dropped - report if there are dropped events 4935 * @iter: The ring buffer iterator 4936 * 4937 * Returns true if there were dropped events since the last peek. 4938 */ 4939 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) 4940 { 4941 bool ret = iter->missed_events != 0; 4942 4943 iter->missed_events = 0; 4944 return ret; 4945 } 4946 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); 4947 4948 /** 4949 * ring_buffer_iter_peek - peek at the next event to be read 4950 * @iter: The ring buffer iterator 4951 * @ts: The timestamp counter of this event. 4952 * 4953 * This will return the event that will be read next, but does 4954 * not increment the iterator. 4955 */ 4956 struct ring_buffer_event * 4957 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4958 { 4959 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4960 struct ring_buffer_event *event; 4961 unsigned long flags; 4962 4963 again: 4964 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4965 event = rb_iter_peek(iter, ts); 4966 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4967 4968 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4969 goto again; 4970 4971 return event; 4972 } 4973 4974 /** 4975 * ring_buffer_consume - return an event and consume it 4976 * @buffer: The ring buffer to get the next event from 4977 * @cpu: the cpu to read the buffer from 4978 * @ts: a variable to store the timestamp (may be NULL) 4979 * @lost_events: a variable to store if events were lost (may be NULL) 4980 * 4981 * Returns the next event in the ring buffer, and that event is consumed. 4982 * Meaning that sequential reads will keep returning a different event, 4983 * and eventually empty the ring buffer if the producer is slower.
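 *
 * A typical consuming read loop might look like this (sketch only;
 * process_event() is a made-up callback and waiting for new data is
 * left to the caller):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);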
4984 */ 4985 struct ring_buffer_event * 4986 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 4987 unsigned long *lost_events) 4988 { 4989 struct ring_buffer_per_cpu *cpu_buffer; 4990 struct ring_buffer_event *event = NULL; 4991 unsigned long flags; 4992 bool dolock; 4993 4994 again: 4995 /* might be called in atomic */ 4996 preempt_disable(); 4997 4998 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4999 goto out; 5000 5001 cpu_buffer = buffer->buffers[cpu]; 5002 local_irq_save(flags); 5003 dolock = rb_reader_lock(cpu_buffer); 5004 5005 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 5006 if (event) { 5007 cpu_buffer->lost_events = 0; 5008 rb_advance_reader(cpu_buffer); 5009 } 5010 5011 rb_reader_unlock(cpu_buffer, dolock); 5012 local_irq_restore(flags); 5013 5014 out: 5015 preempt_enable(); 5016 5017 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5018 goto again; 5019 5020 return event; 5021 } 5022 EXPORT_SYMBOL_GPL(ring_buffer_consume); 5023 5024 /** 5025 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 5026 * @buffer: The ring buffer to read from 5027 * @cpu: The cpu buffer to iterate over 5028 * @flags: gfp flags to use for memory allocation 5029 * 5030 * This performs the initial preparations necessary to iterate 5031 * through the buffer. Memory is allocated, buffer recording 5032 * is disabled, and the iterator pointer is returned to the caller. 5033 * 5034 * Disabling buffer recording prevents the reading from being 5035 * corrupted. This is not a consuming read, so a producer is not 5036 * expected. 5037 * 5038 * After a sequence of ring_buffer_read_prepare calls, the user is 5039 * expected to make at least one call to ring_buffer_read_prepare_sync. 5040 * Afterwards, ring_buffer_read_start is invoked to get things going 5041 * for real. 5042 * 5043 * This overall must be paired with ring_buffer_read_finish. 5044 */ 5045 struct ring_buffer_iter * 5046 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 5047 { 5048 struct ring_buffer_per_cpu *cpu_buffer; 5049 struct ring_buffer_iter *iter; 5050 5051 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5052 return NULL; 5053 5054 iter = kzalloc(sizeof(*iter), flags); 5055 if (!iter) 5056 return NULL; 5057 5058 /* Holds the entire event: data and meta data */ 5059 iter->event = kmalloc(BUF_PAGE_SIZE, flags); 5060 if (!iter->event) { 5061 kfree(iter); 5062 return NULL; 5063 } 5064 5065 cpu_buffer = buffer->buffers[cpu]; 5066 5067 iter->cpu_buffer = cpu_buffer; 5068 5069 atomic_inc(&cpu_buffer->resize_disabled); 5070 5071 return iter; 5072 } 5073 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 5074 5075 /** 5076 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 5077 * 5078 * All previously invoked ring_buffer_read_prepare calls to prepare 5079 * iterators will be synchronized. Afterwards, read_buffer_read_start 5080 * calls on those iterators are allowed. 5081 */ 5082 void 5083 ring_buffer_read_prepare_sync(void) 5084 { 5085 synchronize_rcu(); 5086 } 5087 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 5088 5089 /** 5090 * ring_buffer_read_start - start a non consuming read of the buffer 5091 * @iter: The iterator returned by ring_buffer_read_prepare 5092 * 5093 * This finalizes the startup of an iteration through the buffer. 5094 * The iterator comes from a call to ring_buffer_read_prepare and 5095 * an intervening ring_buffer_read_prepare_sync must have been 5096 * performed. 5097 * 5098 * Must be paired with ring_buffer_read_finish. 
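 *
 * The whole non consuming read sequence then looks roughly like this
 * (sketch only; variable declarations, NULL checks and the handling of
 * each event are left out):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);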
5099 */ 5100 void 5101 ring_buffer_read_start(struct ring_buffer_iter *iter) 5102 { 5103 struct ring_buffer_per_cpu *cpu_buffer; 5104 unsigned long flags; 5105 5106 if (!iter) 5107 return; 5108 5109 cpu_buffer = iter->cpu_buffer; 5110 5111 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5112 arch_spin_lock(&cpu_buffer->lock); 5113 rb_iter_reset(iter); 5114 arch_spin_unlock(&cpu_buffer->lock); 5115 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5116 } 5117 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 5118 5119 /** 5120 * ring_buffer_read_finish - finish reading the iterator of the buffer 5121 * @iter: The iterator retrieved by ring_buffer_start 5122 * 5123 * This re-enables the recording to the buffer, and frees the 5124 * iterator. 5125 */ 5126 void 5127 ring_buffer_read_finish(struct ring_buffer_iter *iter) 5128 { 5129 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5130 unsigned long flags; 5131 5132 /* 5133 * Ring buffer is disabled from recording, here's a good place 5134 * to check the integrity of the ring buffer. 5135 * Must prevent readers from trying to read, as the check 5136 * clears the HEAD page and readers require it. 5137 */ 5138 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5139 rb_check_pages(cpu_buffer); 5140 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5141 5142 atomic_dec(&cpu_buffer->resize_disabled); 5143 kfree(iter->event); 5144 kfree(iter); 5145 } 5146 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 5147 5148 /** 5149 * ring_buffer_iter_advance - advance the iterator to the next location 5150 * @iter: The ring buffer iterator 5151 * 5152 * Move the location of the iterator such that the next read will 5153 * be the next location of the iterator. 5154 */ 5155 void ring_buffer_iter_advance(struct ring_buffer_iter *iter) 5156 { 5157 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5158 unsigned long flags; 5159 5160 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5161 5162 rb_advance_iter(iter); 5163 5164 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5165 } 5166 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); 5167 5168 /** 5169 * ring_buffer_size - return the size of the ring buffer (in bytes) 5170 * @buffer: The ring buffer. 5171 * @cpu: The CPU to get ring buffer size from. 5172 */ 5173 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) 5174 { 5175 /* 5176 * Earlier, this method returned 5177 * BUF_PAGE_SIZE * buffer->nr_pages 5178 * Since the nr_pages field is now removed, we have converted this to 5179 * return the per cpu buffer value. 
5180 */ 5181 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5182 return 0; 5183 5184 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5185 } 5186 EXPORT_SYMBOL_GPL(ring_buffer_size); 5187 5188 static void rb_clear_buffer_page(struct buffer_page *page) 5189 { 5190 local_set(&page->write, 0); 5191 local_set(&page->entries, 0); 5192 rb_init_page(page->page); 5193 page->read = 0; 5194 } 5195 5196 static void 5197 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5198 { 5199 struct buffer_page *page; 5200 5201 rb_head_page_deactivate(cpu_buffer); 5202 5203 cpu_buffer->head_page 5204 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5205 rb_clear_buffer_page(cpu_buffer->head_page); 5206 list_for_each_entry(page, cpu_buffer->pages, list) { 5207 rb_clear_buffer_page(page); 5208 } 5209 5210 cpu_buffer->tail_page = cpu_buffer->head_page; 5211 cpu_buffer->commit_page = cpu_buffer->head_page; 5212 5213 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5214 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5215 rb_clear_buffer_page(cpu_buffer->reader_page); 5216 5217 local_set(&cpu_buffer->entries_bytes, 0); 5218 local_set(&cpu_buffer->overrun, 0); 5219 local_set(&cpu_buffer->commit_overrun, 0); 5220 local_set(&cpu_buffer->dropped_events, 0); 5221 local_set(&cpu_buffer->entries, 0); 5222 local_set(&cpu_buffer->committing, 0); 5223 local_set(&cpu_buffer->commits, 0); 5224 local_set(&cpu_buffer->pages_touched, 0); 5225 local_set(&cpu_buffer->pages_lost, 0); 5226 local_set(&cpu_buffer->pages_read, 0); 5227 cpu_buffer->last_pages_touch = 0; 5228 cpu_buffer->shortest_full = 0; 5229 cpu_buffer->read = 0; 5230 cpu_buffer->read_bytes = 0; 5231 5232 rb_time_set(&cpu_buffer->write_stamp, 0); 5233 rb_time_set(&cpu_buffer->before_stamp, 0); 5234 5235 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5236 5237 cpu_buffer->lost_events = 0; 5238 cpu_buffer->last_overrun = 0; 5239 5240 rb_head_page_activate(cpu_buffer); 5241 cpu_buffer->pages_removed = 0; 5242 } 5243 5244 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5245 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5246 { 5247 unsigned long flags; 5248 5249 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5250 5251 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5252 goto out; 5253 5254 arch_spin_lock(&cpu_buffer->lock); 5255 5256 rb_reset_cpu(cpu_buffer); 5257 5258 arch_spin_unlock(&cpu_buffer->lock); 5259 5260 out: 5261 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5262 } 5263 5264 /** 5265 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5266 * @buffer: The ring buffer to reset a per cpu buffer of 5267 * @cpu: The CPU buffer to be reset 5268 */ 5269 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5270 { 5271 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5272 5273 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5274 return; 5275 5276 /* prevent another thread from changing buffer sizes */ 5277 mutex_lock(&buffer->mutex); 5278 5279 atomic_inc(&cpu_buffer->resize_disabled); 5280 atomic_inc(&cpu_buffer->record_disabled); 5281 5282 /* Make sure all commits have finished */ 5283 synchronize_rcu(); 5284 5285 reset_disabled_cpu_buffer(cpu_buffer); 5286 5287 atomic_dec(&cpu_buffer->record_disabled); 5288 atomic_dec(&cpu_buffer->resize_disabled); 5289 5290 mutex_unlock(&buffer->mutex); 5291 } 5292 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5293 5294 /* Flag to ensure proper resetting of atomic variables */ 5295 
#define RESET_BIT (1 << 30) 5296 5297 /** 5298 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5299 * @buffer: The ring buffer to reset a per cpu buffer of 5300 */ 5301 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5302 { 5303 struct ring_buffer_per_cpu *cpu_buffer; 5304 int cpu; 5305 5306 /* prevent another thread from changing buffer sizes */ 5307 mutex_lock(&buffer->mutex); 5308 5309 for_each_online_buffer_cpu(buffer, cpu) { 5310 cpu_buffer = buffer->buffers[cpu]; 5311 5312 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); 5313 atomic_inc(&cpu_buffer->record_disabled); 5314 } 5315 5316 /* Make sure all commits have finished */ 5317 synchronize_rcu(); 5318 5319 for_each_buffer_cpu(buffer, cpu) { 5320 cpu_buffer = buffer->buffers[cpu]; 5321 5322 /* 5323 * If a CPU came online during the synchronize_rcu(), then 5324 * ignore it. 5325 */ 5326 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) 5327 continue; 5328 5329 reset_disabled_cpu_buffer(cpu_buffer); 5330 5331 atomic_dec(&cpu_buffer->record_disabled); 5332 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); 5333 } 5334 5335 mutex_unlock(&buffer->mutex); 5336 } 5337 5338 /** 5339 * ring_buffer_reset - reset a ring buffer 5340 * @buffer: The ring buffer to reset all cpu buffers 5341 */ 5342 void ring_buffer_reset(struct trace_buffer *buffer) 5343 { 5344 struct ring_buffer_per_cpu *cpu_buffer; 5345 int cpu; 5346 5347 /* prevent another thread from changing buffer sizes */ 5348 mutex_lock(&buffer->mutex); 5349 5350 for_each_buffer_cpu(buffer, cpu) { 5351 cpu_buffer = buffer->buffers[cpu]; 5352 5353 atomic_inc(&cpu_buffer->resize_disabled); 5354 atomic_inc(&cpu_buffer->record_disabled); 5355 } 5356 5357 /* Make sure all commits have finished */ 5358 synchronize_rcu(); 5359 5360 for_each_buffer_cpu(buffer, cpu) { 5361 cpu_buffer = buffer->buffers[cpu]; 5362 5363 reset_disabled_cpu_buffer(cpu_buffer); 5364 5365 atomic_dec(&cpu_buffer->record_disabled); 5366 atomic_dec(&cpu_buffer->resize_disabled); 5367 } 5368 5369 mutex_unlock(&buffer->mutex); 5370 } 5371 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5372 5373 /** 5374 * ring_buffer_empty - is the ring buffer empty? 5375 * @buffer: The ring buffer to test 5376 */ 5377 bool ring_buffer_empty(struct trace_buffer *buffer) 5378 { 5379 struct ring_buffer_per_cpu *cpu_buffer; 5380 unsigned long flags; 5381 bool dolock; 5382 bool ret; 5383 int cpu; 5384 5385 /* yes this is racy, but if you don't like the race, lock the buffer */ 5386 for_each_buffer_cpu(buffer, cpu) { 5387 cpu_buffer = buffer->buffers[cpu]; 5388 local_irq_save(flags); 5389 dolock = rb_reader_lock(cpu_buffer); 5390 ret = rb_per_cpu_empty(cpu_buffer); 5391 rb_reader_unlock(cpu_buffer, dolock); 5392 local_irq_restore(flags); 5393 5394 if (!ret) 5395 return false; 5396 } 5397 5398 return true; 5399 } 5400 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5401 5402 /** 5403 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 
5404 * @buffer: The ring buffer 5405 * @cpu: The CPU buffer to test 5406 */ 5407 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5408 { 5409 struct ring_buffer_per_cpu *cpu_buffer; 5410 unsigned long flags; 5411 bool dolock; 5412 bool ret; 5413 5414 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5415 return true; 5416 5417 cpu_buffer = buffer->buffers[cpu]; 5418 local_irq_save(flags); 5419 dolock = rb_reader_lock(cpu_buffer); 5420 ret = rb_per_cpu_empty(cpu_buffer); 5421 rb_reader_unlock(cpu_buffer, dolock); 5422 local_irq_restore(flags); 5423 5424 return ret; 5425 } 5426 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5427 5428 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5429 /** 5430 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5431 * @buffer_a: One buffer to swap with 5432 * @buffer_b: The other buffer to swap with 5433 * @cpu: the CPU of the buffers to swap 5434 * 5435 * This function is useful for tracers that want to take a "snapshot" 5436 * of a CPU buffer and has another back up buffer lying around. 5437 * it is expected that the tracer handles the cpu buffer not being 5438 * used at the moment. 5439 */ 5440 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5441 struct trace_buffer *buffer_b, int cpu) 5442 { 5443 struct ring_buffer_per_cpu *cpu_buffer_a; 5444 struct ring_buffer_per_cpu *cpu_buffer_b; 5445 int ret = -EINVAL; 5446 5447 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5448 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5449 goto out; 5450 5451 cpu_buffer_a = buffer_a->buffers[cpu]; 5452 cpu_buffer_b = buffer_b->buffers[cpu]; 5453 5454 /* At least make sure the two buffers are somewhat the same */ 5455 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5456 goto out; 5457 5458 ret = -EAGAIN; 5459 5460 if (atomic_read(&buffer_a->record_disabled)) 5461 goto out; 5462 5463 if (atomic_read(&buffer_b->record_disabled)) 5464 goto out; 5465 5466 if (atomic_read(&cpu_buffer_a->record_disabled)) 5467 goto out; 5468 5469 if (atomic_read(&cpu_buffer_b->record_disabled)) 5470 goto out; 5471 5472 /* 5473 * We can't do a synchronize_rcu here because this 5474 * function can be called in atomic context. 5475 * Normally this will be called from the same CPU as cpu. 5476 * If not it's up to the caller to protect this. 5477 */ 5478 atomic_inc(&cpu_buffer_a->record_disabled); 5479 atomic_inc(&cpu_buffer_b->record_disabled); 5480 5481 ret = -EBUSY; 5482 if (local_read(&cpu_buffer_a->committing)) 5483 goto out_dec; 5484 if (local_read(&cpu_buffer_b->committing)) 5485 goto out_dec; 5486 5487 /* 5488 * When resize is in progress, we cannot swap it because 5489 * it will mess the state of the cpu buffer. 5490 */ 5491 if (atomic_read(&buffer_a->resizing)) 5492 goto out_dec; 5493 if (atomic_read(&buffer_b->resizing)) 5494 goto out_dec; 5495 5496 buffer_a->buffers[cpu] = cpu_buffer_b; 5497 buffer_b->buffers[cpu] = cpu_buffer_a; 5498 5499 cpu_buffer_b->buffer = buffer_a; 5500 cpu_buffer_a->buffer = buffer_b; 5501 5502 ret = 0; 5503 5504 out_dec: 5505 atomic_dec(&cpu_buffer_a->record_disabled); 5506 atomic_dec(&cpu_buffer_b->record_disabled); 5507 out: 5508 return ret; 5509 } 5510 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5511 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5512 5513 /** 5514 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5515 * @buffer: the buffer to allocate for. 5516 * @cpu: the cpu buffer to allocate. 5517 * 5518 * This function is used in conjunction with ring_buffer_read_page. 
5519 * When reading a full page from the ring buffer, these functions 5520 * can be used to speed up the process. The calling function should 5521 * allocate a few pages first with this function. Then when it 5522 * needs to get pages from the ring buffer, it passes the result 5523 * of this function into ring_buffer_read_page, which will swap 5524 * the page that was allocated, with the read page of the buffer. 5525 * 5526 * Returns: 5527 * The page allocated, or ERR_PTR 5528 */ 5529 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5530 { 5531 struct ring_buffer_per_cpu *cpu_buffer; 5532 struct buffer_data_page *bpage = NULL; 5533 unsigned long flags; 5534 struct page *page; 5535 5536 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5537 return ERR_PTR(-ENODEV); 5538 5539 cpu_buffer = buffer->buffers[cpu]; 5540 local_irq_save(flags); 5541 arch_spin_lock(&cpu_buffer->lock); 5542 5543 if (cpu_buffer->free_page) { 5544 bpage = cpu_buffer->free_page; 5545 cpu_buffer->free_page = NULL; 5546 } 5547 5548 arch_spin_unlock(&cpu_buffer->lock); 5549 local_irq_restore(flags); 5550 5551 if (bpage) 5552 goto out; 5553 5554 page = alloc_pages_node(cpu_to_node(cpu), 5555 GFP_KERNEL | __GFP_NORETRY, 0); 5556 if (!page) 5557 return ERR_PTR(-ENOMEM); 5558 5559 bpage = page_address(page); 5560 5561 out: 5562 rb_init_page(bpage); 5563 5564 return bpage; 5565 } 5566 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5567 5568 /** 5569 * ring_buffer_free_read_page - free an allocated read page 5570 * @buffer: the buffer the page was allocate for 5571 * @cpu: the cpu buffer the page came from 5572 * @data: the page to free 5573 * 5574 * Free a page allocated from ring_buffer_alloc_read_page. 5575 */ 5576 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5577 { 5578 struct ring_buffer_per_cpu *cpu_buffer; 5579 struct buffer_data_page *bpage = data; 5580 struct page *page = virt_to_page(bpage); 5581 unsigned long flags; 5582 5583 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) 5584 return; 5585 5586 cpu_buffer = buffer->buffers[cpu]; 5587 5588 /* If the page is still in use someplace else, we can't reuse it */ 5589 if (page_ref_count(page) > 1) 5590 goto out; 5591 5592 local_irq_save(flags); 5593 arch_spin_lock(&cpu_buffer->lock); 5594 5595 if (!cpu_buffer->free_page) { 5596 cpu_buffer->free_page = bpage; 5597 bpage = NULL; 5598 } 5599 5600 arch_spin_unlock(&cpu_buffer->lock); 5601 local_irq_restore(flags); 5602 5603 out: 5604 free_page((unsigned long)bpage); 5605 } 5606 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5607 5608 /** 5609 * ring_buffer_read_page - extract a page from the ring buffer 5610 * @buffer: buffer to extract from 5611 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5612 * @len: amount to extract 5613 * @cpu: the cpu of the buffer to extract 5614 * @full: should the extraction only happen when the page is full. 5615 * 5616 * This function will pull out a page from the ring buffer and consume it. 5617 * @data_page must be the address of the variable that was returned 5618 * from ring_buffer_alloc_read_page. This is because the page might be used 5619 * to swap with a page in the ring buffer. 
5620 * 5621 * for example: 5622 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5623 * if (IS_ERR(rpage)) 5624 * return PTR_ERR(rpage); 5625 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5626 * if (ret >= 0) 5627 * process_page(rpage, ret); 5628 * 5629 * When @full is set, the function will not return true unless 5630 * the writer is off the reader page. 5631 * 5632 * Note: it is up to the calling functions to handle sleeps and wakeups. 5633 * The ring buffer can be used anywhere in the kernel and can not 5634 * blindly call wake_up. The layer that uses the ring buffer must be 5635 * responsible for that. 5636 * 5637 * Returns: 5638 * >=0 if data has been transferred, returns the offset of consumed data. 5639 * <0 if no data has been transferred. 5640 */ 5641 int ring_buffer_read_page(struct trace_buffer *buffer, 5642 void **data_page, size_t len, int cpu, int full) 5643 { 5644 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5645 struct ring_buffer_event *event; 5646 struct buffer_data_page *bpage; 5647 struct buffer_page *reader; 5648 unsigned long missed_events; 5649 unsigned long flags; 5650 unsigned int commit; 5651 unsigned int read; 5652 u64 save_timestamp; 5653 int ret = -1; 5654 5655 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5656 goto out; 5657 5658 /* 5659 * If len is not big enough to hold the page header, then 5660 * we can not copy anything. 5661 */ 5662 if (len <= BUF_PAGE_HDR_SIZE) 5663 goto out; 5664 5665 len -= BUF_PAGE_HDR_SIZE; 5666 5667 if (!data_page) 5668 goto out; 5669 5670 bpage = *data_page; 5671 if (!bpage) 5672 goto out; 5673 5674 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5675 5676 reader = rb_get_reader_page(cpu_buffer); 5677 if (!reader) 5678 goto out_unlock; 5679 5680 event = rb_reader_event(cpu_buffer); 5681 5682 read = reader->read; 5683 commit = rb_page_commit(reader); 5684 5685 /* Check if any events were dropped */ 5686 missed_events = cpu_buffer->lost_events; 5687 5688 /* 5689 * If this page has been partially read or 5690 * if len is not big enough to read the rest of the page or 5691 * a writer is still on the page, then 5692 * we must copy the data from the page to the buffer. 5693 * Otherwise, we can simply swap the page with the one passed in. 5694 */ 5695 if (read || (len < (commit - read)) || 5696 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5697 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5698 unsigned int rpos = read; 5699 unsigned int pos = 0; 5700 unsigned int size; 5701 5702 /* 5703 * If a full page is expected, this can still be returned 5704 * if there's been a previous partial read and the 5705 * rest of the page can be read and the commit page is off 5706 * the reader page. 5707 */ 5708 if (full && 5709 (!read || (len < (commit - read)) || 5710 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5711 goto out_unlock; 5712 5713 if (len > (commit - read)) 5714 len = (commit - read); 5715 5716 /* Always keep the time extend and data together */ 5717 size = rb_event_ts_length(event); 5718 5719 if (len < size) 5720 goto out_unlock; 5721 5722 /* save the current timestamp, since the user will need it */ 5723 save_timestamp = cpu_buffer->read_stamp; 5724 5725 /* Need to copy one event at a time */ 5726 do { 5727 /* We need the size of one event, because 5728 * rb_advance_reader only advances by one event, 5729 * whereas rb_event_ts_length may include the size of 5730 * one or two events. 
5731 * We have already ensured there's enough space if this 5732 * is a time extend. */ 5733 size = rb_event_length(event); 5734 memcpy(bpage->data + pos, rpage->data + rpos, size); 5735 5736 len -= size; 5737 5738 rb_advance_reader(cpu_buffer); 5739 rpos = reader->read; 5740 pos += size; 5741 5742 if (rpos >= commit) 5743 break; 5744 5745 event = rb_reader_event(cpu_buffer); 5746 /* Always keep the time extend and data together */ 5747 size = rb_event_ts_length(event); 5748 } while (len >= size); 5749 5750 /* update bpage */ 5751 local_set(&bpage->commit, pos); 5752 bpage->time_stamp = save_timestamp; 5753 5754 /* we copied everything to the beginning */ 5755 read = 0; 5756 } else { 5757 /* update the entry counter */ 5758 cpu_buffer->read += rb_page_entries(reader); 5759 cpu_buffer->read_bytes += rb_page_commit(reader); 5760 5761 /* swap the pages */ 5762 rb_init_page(bpage); 5763 bpage = reader->page; 5764 reader->page = *data_page; 5765 local_set(&reader->write, 0); 5766 local_set(&reader->entries, 0); 5767 reader->read = 0; 5768 *data_page = bpage; 5769 5770 /* 5771 * Use the real_end for the data size, 5772 * This gives us a chance to store the lost events 5773 * on the page. 5774 */ 5775 if (reader->real_end) 5776 local_set(&bpage->commit, reader->real_end); 5777 } 5778 ret = read; 5779 5780 cpu_buffer->lost_events = 0; 5781 5782 commit = local_read(&bpage->commit); 5783 /* 5784 * Set a flag in the commit field if we lost events 5785 */ 5786 if (missed_events) { 5787 /* If there is room at the end of the page to save the 5788 * missed events, then record it there. 5789 */ 5790 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 5791 memcpy(&bpage->data[commit], &missed_events, 5792 sizeof(missed_events)); 5793 local_add(RB_MISSED_STORED, &bpage->commit); 5794 commit += sizeof(missed_events); 5795 } 5796 local_add(RB_MISSED_EVENTS, &bpage->commit); 5797 } 5798 5799 /* 5800 * This page may be off to user land. Zero it out here. 5801 */ 5802 if (commit < BUF_PAGE_SIZE) 5803 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 5804 5805 out_unlock: 5806 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5807 5808 out: 5809 return ret; 5810 } 5811 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 5812 5813 /* 5814 * We only allocate new buffers, never free them if the CPU goes down. 5815 * If we were to free the buffer, then the user would lose any trace that was in 5816 * the buffer. 
5817 */ 5818 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) 5819 { 5820 struct trace_buffer *buffer; 5821 long nr_pages_same; 5822 int cpu_i; 5823 unsigned long nr_pages; 5824 5825 buffer = container_of(node, struct trace_buffer, node); 5826 if (cpumask_test_cpu(cpu, buffer->cpumask)) 5827 return 0; 5828 5829 nr_pages = 0; 5830 nr_pages_same = 1; 5831 /* check if all cpu sizes are same */ 5832 for_each_buffer_cpu(buffer, cpu_i) { 5833 /* fill in the size from first enabled cpu */ 5834 if (nr_pages == 0) 5835 nr_pages = buffer->buffers[cpu_i]->nr_pages; 5836 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 5837 nr_pages_same = 0; 5838 break; 5839 } 5840 } 5841 /* allocate minimum pages, user can later expand it */ 5842 if (!nr_pages_same) 5843 nr_pages = 2; 5844 buffer->buffers[cpu] = 5845 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 5846 if (!buffer->buffers[cpu]) { 5847 WARN(1, "failed to allocate ring buffer on CPU %u\n", 5848 cpu); 5849 return -ENOMEM; 5850 } 5851 smp_wmb(); 5852 cpumask_set_cpu(cpu, buffer->cpumask); 5853 return 0; 5854 } 5855 5856 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST 5857 /* 5858 * This is a basic integrity check of the ring buffer. 5859 * Late in the boot cycle this test will run when configured in. 5860 * It will kick off a thread per CPU that will go into a loop 5861 * writing to the per cpu ring buffer various sizes of data. 5862 * Some of the data will be large items, some small. 5863 * 5864 * Another thread is created that goes into a spin, sending out 5865 * IPIs to the other CPUs to also write into the ring buffer. 5866 * This is to test the nesting ability of the buffer. 5867 * 5868 * Basic stats are recorded and reported. If something unexpected 5869 * happens in the ring buffer, a big warning is displayed and 5870 * all ring buffers are disabled. 5871 */ 5872 static struct task_struct *rb_threads[NR_CPUS] __initdata; 5873 5874 struct rb_test_data { 5875 struct trace_buffer *buffer; 5876 unsigned long events; 5877 unsigned long bytes_written; 5878 unsigned long bytes_alloc; 5879 unsigned long bytes_dropped; 5880 unsigned long events_nested; 5881 unsigned long bytes_written_nested; 5882 unsigned long bytes_alloc_nested; 5883 unsigned long bytes_dropped_nested; 5884 int min_size_nested; 5885 int max_size_nested; 5886 int max_size; 5887 int min_size; 5888 int cpu; 5889 int cnt; 5890 }; 5891 5892 static struct rb_test_data rb_data[NR_CPUS] __initdata; 5893 5894 /* 1 meg per cpu */ 5895 #define RB_TEST_BUFFER_SIZE 1048576 5896 5897 static char rb_string[] __initdata = 5898 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 5899 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 5900 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 5901 5902 static bool rb_test_started __initdata; 5903 5904 struct rb_item { 5905 int size; 5906 char str[]; 5907 }; 5908 5909 static __init int rb_write_something(struct rb_test_data *data, bool nested) 5910 { 5911 struct ring_buffer_event *event; 5912 struct rb_item *item; 5913 bool started; 5914 int event_len; 5915 int size; 5916 int len; 5917 int cnt; 5918 5919 /* Have nested writes different than what is written */ 5920 cnt = data->cnt + (nested ?
27 : 0); 5921 5922 /* Multiply cnt by ~e, to make some unique increment */ 5923 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); 5924 5925 len = size + sizeof(struct rb_item); 5926 5927 started = rb_test_started; 5928 /* read rb_test_started before checking buffer enabled */ 5929 smp_rmb(); 5930 5931 event = ring_buffer_lock_reserve(data->buffer, len); 5932 if (!event) { 5933 /* Ignore dropped events before test starts. */ 5934 if (started) { 5935 if (nested) 5936 data->bytes_dropped_nested += len; 5937 else 5938 data->bytes_dropped += len; 5939 } 5940 return len; 5941 } 5942 5943 event_len = ring_buffer_event_length(event); 5944 5945 if (RB_WARN_ON(data->buffer, event_len < len)) 5946 goto out; 5947 5948 item = ring_buffer_event_data(event); 5949 item->size = size; 5950 memcpy(item->str, rb_string, size); 5951 5952 if (nested) { 5953 data->bytes_alloc_nested += event_len; 5954 data->bytes_written_nested += len; 5955 data->events_nested++; 5956 if (!data->min_size_nested || len < data->min_size_nested) 5957 data->min_size_nested = len; 5958 if (len > data->max_size_nested) 5959 data->max_size_nested = len; 5960 } else { 5961 data->bytes_alloc += event_len; 5962 data->bytes_written += len; 5963 data->events++; 5964 if (!data->min_size || len < data->min_size) 5965 data->min_size = len; 5966 if (len > data->max_size) 5967 data->max_size = len; 5968 } 5969 5970 out: 5971 ring_buffer_unlock_commit(data->buffer); 5972 5973 return 0; 5974 } 5975 5976 static __init int rb_test(void *arg) 5977 { 5978 struct rb_test_data *data = arg; 5979 5980 while (!kthread_should_stop()) { 5981 rb_write_something(data, false); 5982 data->cnt++; 5983 5984 set_current_state(TASK_INTERRUPTIBLE); 5985 /* Now sleep between a min of 100-300us and a max of 1ms */ 5986 usleep_range(((data->cnt % 3) + 1) * 100, 1000); 5987 } 5988 5989 return 0; 5990 } 5991 5992 static __init void rb_ipi(void *ignore) 5993 { 5994 struct rb_test_data *data; 5995 int cpu = smp_processor_id(); 5996 5997 data = &rb_data[cpu]; 5998 rb_write_something(data, true); 5999 } 6000 6001 static __init int rb_hammer_test(void *arg) 6002 { 6003 while (!kthread_should_stop()) { 6004 6005 /* Send an IPI to all cpus to write data! */ 6006 smp_call_function(rb_ipi, NULL, 1); 6007 /* No sleep, but for non-preempt, let others run */ 6008 schedule(); 6009 } 6010 6011 return 0; 6012 } 6013 6014 static __init int test_ringbuffer(void) 6015 { 6016 struct task_struct *rb_hammer; 6017 struct trace_buffer *buffer; 6018 int cpu; 6019 int ret = 0; 6020 6021 if (security_locked_down(LOCKDOWN_TRACEFS)) { 6022 pr_warn("Lockdown is enabled, skipping ring buffer tests\n"); 6023 return 0; 6024 } 6025 6026 pr_info("Running ring buffer tests...\n"); 6027 6028 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); 6029 if (WARN_ON(!buffer)) 6030 return 0; 6031 6032 /* Disable buffer so that threads can't write to it yet */ 6033 ring_buffer_record_off(buffer); 6034 6035 for_each_online_cpu(cpu) { 6036 rb_data[cpu].buffer = buffer; 6037 rb_data[cpu].cpu = cpu; 6038 rb_data[cpu].cnt = cpu; 6039 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], 6040 cpu, "rbtester/%u"); 6041 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { 6042 pr_cont("FAILED\n"); 6043 ret = PTR_ERR(rb_threads[cpu]); 6044 goto out_free; 6045 } 6046 } 6047 6048 /* Now create the rb hammer!
*/ 6049 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); 6050 if (WARN_ON(IS_ERR(rb_hammer))) { 6051 pr_cont("FAILED\n"); 6052 ret = PTR_ERR(rb_hammer); 6053 goto out_free; 6054 } 6055 6056 ring_buffer_record_on(buffer); 6057 /* 6058 * Show buffer is enabled before setting rb_test_started. 6059 * Yes, there's a small race window where events could be 6060 * dropped and the thread won't catch it. But when a ring 6061 * buffer gets enabled, there will always be some kind of 6062 * delay before other CPUs see it. Thus, we don't care about 6063 * those dropped events. We care about events dropped after 6064 * the threads see that the buffer is active. 6065 */ 6066 smp_wmb(); 6067 rb_test_started = true; 6068 6069 set_current_state(TASK_INTERRUPTIBLE); 6070 /* Just run for 10 seconds */ 6071 schedule_timeout(10 * HZ); 6072 6073 kthread_stop(rb_hammer); 6074 6075 out_free: 6076 for_each_online_cpu(cpu) { 6077 if (!rb_threads[cpu]) 6078 break; 6079 kthread_stop(rb_threads[cpu]); 6080 } 6081 if (ret) { 6082 ring_buffer_free(buffer); 6083 return ret; 6084 } 6085 6086 /* Report! */ 6087 pr_info("finished\n"); 6088 for_each_online_cpu(cpu) { 6089 struct ring_buffer_event *event; 6090 struct rb_test_data *data = &rb_data[cpu]; 6091 struct rb_item *item; 6092 unsigned long total_events; 6093 unsigned long total_dropped; 6094 unsigned long total_written; 6095 unsigned long total_alloc; 6096 unsigned long total_read = 0; 6097 unsigned long total_size = 0; 6098 unsigned long total_len = 0; 6099 unsigned long total_lost = 0; 6100 unsigned long lost; 6101 int big_event_size; 6102 int small_event_size; 6103 6104 ret = -1; 6105 6106 total_events = data->events + data->events_nested; 6107 total_written = data->bytes_written + data->bytes_written_nested; 6108 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 6109 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 6110 6111 big_event_size = data->max_size + data->max_size_nested; 6112 small_event_size = data->min_size + data->min_size_nested; 6113 6114 pr_info("CPU %d:\n", cpu); 6115 pr_info(" events: %ld\n", total_events); 6116 pr_info(" dropped bytes: %ld\n", total_dropped); 6117 pr_info(" alloced bytes: %ld\n", total_alloc); 6118 pr_info(" written bytes: %ld\n", total_written); 6119 pr_info(" biggest event: %d\n", big_event_size); 6120 pr_info(" smallest event: %d\n", small_event_size); 6121 6122 if (RB_WARN_ON(buffer, total_dropped)) 6123 break; 6124 6125 ret = 0; 6126 6127 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 6128 total_lost += lost; 6129 item = ring_buffer_event_data(event); 6130 total_len += ring_buffer_event_length(event); 6131 total_size += item->size + sizeof(struct rb_item); 6132 if (memcmp(&item->str[0], rb_string, item->size) != 0) { 6133 pr_info("FAILED!\n"); 6134 pr_info("buffer had: %.*s\n", item->size, item->str); 6135 pr_info("expected: %.*s\n", item->size, rb_string); 6136 RB_WARN_ON(buffer, 1); 6137 ret = -1; 6138 break; 6139 } 6140 total_read++; 6141 } 6142 if (ret) 6143 break; 6144 6145 ret = -1; 6146 6147 pr_info(" read events: %ld\n", total_read); 6148 pr_info(" lost events: %ld\n", total_lost); 6149 pr_info(" total events: %ld\n", total_lost + total_read); 6150 pr_info(" recorded len bytes: %ld\n", total_len); 6151 pr_info(" recorded size bytes: %ld\n", total_size); 6152 if (total_lost) { 6153 pr_info(" With dropped events, record len and size may not match\n" 6154 " alloced and written from above\n"); 6155 } else { 6156 if (RB_WARN_ON(buffer, total_len != total_alloc || 6157
total_size != total_written)) 6158 break; 6159 } 6160 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 6161 break; 6162 6163 ret = 0; 6164 } 6165 if (!ret) 6166 pr_info("Ring buffer PASSED!\n"); 6167 6168 ring_buffer_free(buffer); 6169 return 0; 6170 } 6171 6172 late_initcall(test_ringbuffer); 6173 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 6174
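/*
 * Editor's note: the block below is a minimal usage sketch, kept under
 * "#if 0" so it is never built, showing how a consumer might drain one
 * CPU's buffer a page at a time with the read-page interface defined
 * earlier in this file.  The function name rb_read_page_example() is
 * hypothetical and only for illustration; the ring_buffer_*_read_page()
 * calls and their return conventions are the ones documented above.
 * Passing PAGE_SIZE for @len allows the swap path to be used instead of
 * the copy path whenever the reader page qualifies for it.
 */
#if 0
static int rb_read_page_example(struct trace_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	/* Get a spare data page (possibly the cached cpu_buffer->free_page) */
	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(rpage))
		return PTR_ERR(rpage);

	/* Keep pulling pages until no more data is transferred */
	for (;;) {
		ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
		if (ret < 0)
			break;
		/* On success, @ret is the offset of already consumed data */
		pr_info("cpu %d: page read, data starts at offset %d\n",
			cpu, ret);
	}

	/* Hand the page back so it can be cached as the cpu's free_page */
	ring_buffer_free_read_page(buffer, cpu, rpage);
	return 0;
}
#endif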