// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually up keep it.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *                      +------+
 *                      |reader|          RING BUFFER
 *                      |page  |
 *                      +------+        +---+   +---+   +---+
 *                                      |   |-->|   |-->|   |
 *                                      +---+   +---+   +---+
 *                                        ^               |
 *                                        |               |
 *                                        +---------------+
 *
 *
 *                      +------+
 *                      |reader|          RING BUFFER
 *                      |page  |------------------v
 *                      +------+        +---+   +---+   +---+
 *                                      |   |-->|   |-->|   |
 *                                      +---+   +---+   +---+
 *                                        ^               |
 *                                        |               |
 *                                        +---------------+
 *
 *
 *                      +------+
 *                      |reader|          RING BUFFER
 *                      |page  |------------------v
 *                      +------+        +---+   +---+   +---+
 *                         ^            |   |-->|   |-->|   |
 *                         |            +---+   +---+   +---+
 *                         |                              |
 *                         |                              |
 *                         +------------------------------+
 *
 *
 *                      +------+
 *                      |buffer|          RING BUFFER
 *                      |page  |------------------v
 *                      +------+        +---+   +---+   +---+
 *                         ^            |   |   |   |-->|   |
 *                         |   New      +---+   +---+   +---+
 *                         |  Reader------^               |
 *                         |   page                       |
 *                         +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
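
/*
 * Illustrative sketch (kept under "#if 0", not built): how a reader of a
 * reserved event typically uses the two exported helpers above. The
 * struct example_payload type and example_show_payload() are hypothetical
 * and not part of the ring buffer API.
 */
#if 0
struct example_payload {
	u64 value;
};

static void example_show_payload(struct ring_buffer_event *event)
{
	/* Size of the data load only (header and time extend excluded) */
	unsigned int len = ring_buffer_event_length(event);
	/* Pointer to the start of the data load */
	struct example_payload *p = ring_buffer_event_data(event);

	if (len >= sizeof(*p))
		pr_info("example payload: %llu\n",
			(unsigned long long)p->value);
}
#endif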

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE		= 0,
	RB_ADD_STAMP_EXTEND		= BIT(1),
	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
	RB_ADD_STAMP_FORCE		= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 still uses local64_t).
 *
 * For the ring buffer, 64 bit required operations for the time is
 * the following:
 *
 *  - Reads may fail if it interrupted a modification of the time stamp.
 *      It will succeed if it did not interrupt another write even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half holds a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 *  top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	u64 val;
	val = local64_cmpxchg(&t->time, expect, set);
	return val == expect;
}
#endif
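
/*
 * Illustrative sketch (kept under "#if 0", not built): the calling
 * convention the ring buffer relies on for the rb_time helpers above.
 * On 64 bit these map straight to local64 operations and never fail;
 * on 32 bit a read can fail if it interrupted a write, so callers must
 * cope with a false return. The function name and the fallback value
 * here are made up for the example.
 */
#if 0
static void example_rb_time_usage(rb_time_t *stamp, u64 now)
{
	u64 ts;

	/* Writes always succeed */
	rb_time_set(stamp, now);

	/* Reads may fail on 32 bit; fall back to the caller's clock */
	if (!rb_time_read(stamp, &ts))
		ts = now;

	/* Only advance the stamp if nothing else touched it in between */
	(void)rb_time_cmpxchg(stamp, ts, ts + 100);
}
#endif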

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise current time is returned, but that really neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
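
/*
 * Illustrative sketch (kept under "#if 0", not built): how the "full"
 * percentage in full_hit() is meant to be read. With a hypothetical
 * 16 page cpu buffer and full = 25, a waiter is only woken once more
 * than a quarter of the pages carry unread data: 5 dirty pages gives
 * 5 * 100 = 500 > 25 * 16 = 400, so full_hit() reports true.
 */
#if 0
static bool example_full_hit(void)
{
	size_t nr_pages = 16, dirty = 5;
	int full = 25;

	return (dirty * 100) > (full * nr_pages);
}
#endif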

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * In the case of a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on this.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worse
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next	bit 1	  bit 0
 *				-------	  -------
 * Normal page			  0	    0
 * Points to head page		  0	    1
 * New head page		  1	    0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 *  You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return -1;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return -1;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return -1;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return -1;
	}

	return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
		long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much, and si_mem_available()
	 * reports there's enough memory, even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
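
/*
 * Illustrative sketch (kept under "#if 0", not built): typical allocation
 * and release of a buffer by a tracer, using the ring_buffer_alloc()
 * wrapper from <linux/ring_buffer.h> that supplies the lock class key.
 * The function name and the 1 MB per-cpu size are arbitrary example
 * values.
 */
#if 0
static int example_buffer_setup(void)
{
	struct trace_buffer *buf;

	buf = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
	if (!buf)
		return -ENOMEM;

	/* ... reserve, commit and read events here ... */

	ring_buffer_free(buf);
	return 0;
}
#endif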

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
1967 * Make sure that the head_bit value is preserved for the 1968 * next page. 1969 */ 1970 tail_page->next = (struct list_head *)((unsigned long)next_page | 1971 head_bit); 1972 next_page = rb_list_head(next_page); 1973 next_page->prev = tail_page; 1974 1975 /* make sure pages points to a valid page in the ring buffer */ 1976 cpu_buffer->pages = next_page; 1977 1978 /* update head page */ 1979 if (head_bit) 1980 cpu_buffer->head_page = list_entry(next_page, 1981 struct buffer_page, list); 1982 1983 /* 1984 * change read pointer to make sure any read iterators reset 1985 * themselves 1986 */ 1987 cpu_buffer->read = 0; 1988
1989 /* pages are removed, resume tracing and then free the pages */ 1990 atomic_dec(&cpu_buffer->record_disabled); 1991 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1992 1993 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1994 1995 /* last buffer page to remove */ 1996 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1997 list); 1998 tmp_iter_page = first_page; 1999 2000 do { 2001 cond_resched(); 2002 2003 to_remove_page = tmp_iter_page; 2004 rb_inc_page(&tmp_iter_page); 2005
2006 /* update the counters */ 2007 page_entries = rb_page_entries(to_remove_page); 2008 if (page_entries) { 2009 /* 2010 * If something was added to this page, it was full 2011 * since it is not the tail page. So we deduct the 2012 * bytes consumed in the ring buffer from here. 2013 * Increment overrun to account for the lost events. 2014 */ 2015 local_add(page_entries, &cpu_buffer->overrun); 2016 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2017 local_inc(&cpu_buffer->pages_lost); 2018 } 2019 2020 /* 2021 * We have already removed references to this list item, just 2022 * free up the buffer_page and its page 2023 */ 2024 free_buffer_page(to_remove_page); 2025 nr_removed--; 2026 2027 } while (to_remove_page != last_page); 2028 2029 RB_WARN_ON(cpu_buffer, nr_removed); 2030 2031 return nr_removed == 0; 2032 } 2033
2034 static int 2035 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2036 { 2037 struct list_head *pages = &cpu_buffer->new_pages; 2038 int retries, success; 2039 unsigned long flags; 2040 2041 /* Can be called at early boot up, where interrupts may not yet be enabled */ 2042 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2043 /* 2044 * We are holding the reader lock, so the reader page won't be swapped 2045 * in the ring buffer. Now we are racing with the writer trying to 2046 * move the head page and the tail page. 2047 * We are going to adapt the reader page update process where: 2048 * 1. We first splice the start and end of the list of new pages between 2049 * the head page and its previous page. 2050 * 2. We cmpxchg the prev_page->next to point from the head page to the 2051 * start of the new pages list. 2052 * 3. Finally, we update the head->prev to the end of the new list. 2053 * 2054 * We will try this process 10 times, to make sure that we don't keep 2055 * spinning.
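 *
 * In short, with the RB_PAGE_HEAD bit that is OR'd into the pointers
 * left out for clarity, each attempt below boils down to:
 *
 *	last_page->next  = head_page;
 *	first_page->prev = prev_page;
 *	if (cmpxchg(&prev_page->next, head_page, first_page) == head_page)
 *		head_page->prev = last_page;	(success)
 *	else
 *		retry, a writer moved the head page under us.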
2056 */ 2057 retries = 10; 2058 success = 0; 2059 while (retries--) { 2060 struct list_head *head_page, *prev_page, *r; 2061 struct list_head *last_page, *first_page; 2062 struct list_head *head_page_with_bit; 2063 2064 head_page = &rb_set_head_page(cpu_buffer)->list; 2065 if (!head_page) 2066 break; 2067 prev_page = head_page->prev; 2068 2069 first_page = pages->next; 2070 last_page = pages->prev; 2071 2072 head_page_with_bit = (struct list_head *) 2073 ((unsigned long)head_page | RB_PAGE_HEAD); 2074 2075 last_page->next = head_page_with_bit; 2076 first_page->prev = prev_page; 2077 2078 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 2079 2080 if (r == head_page_with_bit) { 2081 /* 2082 * yay, we replaced the page pointer to our new list, 2083 * now, we just have to update to head page's prev 2084 * pointer to point to end of list 2085 */ 2086 head_page->prev = last_page; 2087 success = 1; 2088 break; 2089 } 2090 } 2091 2092 if (success) 2093 INIT_LIST_HEAD(pages); 2094 /* 2095 * If we weren't successful in adding in new pages, warn and stop 2096 * tracing 2097 */ 2098 RB_WARN_ON(cpu_buffer, !success); 2099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2100 2101 /* free pages if they weren't inserted */ 2102 if (!success) { 2103 struct buffer_page *bpage, *tmp; 2104 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2105 list) { 2106 list_del_init(&bpage->list); 2107 free_buffer_page(bpage); 2108 } 2109 } 2110 return success; 2111 } 2112 2113 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2114 { 2115 int success; 2116 2117 if (cpu_buffer->nr_pages_to_update > 0) 2118 success = rb_insert_pages(cpu_buffer); 2119 else 2120 success = rb_remove_pages(cpu_buffer, 2121 -cpu_buffer->nr_pages_to_update); 2122 2123 if (success) 2124 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2125 } 2126 2127 static void update_pages_handler(struct work_struct *work) 2128 { 2129 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2130 struct ring_buffer_per_cpu, update_pages_work); 2131 rb_update_pages(cpu_buffer); 2132 complete(&cpu_buffer->update_done); 2133 } 2134 2135 /** 2136 * ring_buffer_resize - resize the ring buffer 2137 * @buffer: the buffer to resize. 2138 * @size: the new size. 2139 * @cpu_id: the cpu buffer to resize 2140 * 2141 * Minimum size is 2 * BUF_PAGE_SIZE. 2142 * 2143 * Returns 0 on success and < 0 on failure. 2144 */ 2145 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2146 int cpu_id) 2147 { 2148 struct ring_buffer_per_cpu *cpu_buffer; 2149 unsigned long nr_pages; 2150 int cpu, err; 2151 2152 /* 2153 * Always succeed at resizing a non-existent buffer: 2154 */ 2155 if (!buffer) 2156 return 0; 2157 2158 /* Make sure the requested buffer exists */ 2159 if (cpu_id != RING_BUFFER_ALL_CPUS && 2160 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2161 return 0; 2162 2163 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2164 2165 /* we need a minimum of two pages */ 2166 if (nr_pages < 2) 2167 nr_pages = 2; 2168 2169 /* prevent another thread from changing buffer sizes */ 2170 mutex_lock(&buffer->mutex); 2171 2172 2173 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2174 /* 2175 * Don't succeed if resizing is disabled, as a reader might be 2176 * manipulating the ring buffer and is expecting a sane state while 2177 * this is true. 
2178 */ 2179 for_each_buffer_cpu(buffer, cpu) { 2180 cpu_buffer = buffer->buffers[cpu]; 2181 if (atomic_read(&cpu_buffer->resize_disabled)) { 2182 err = -EBUSY; 2183 goto out_err_unlock; 2184 } 2185 } 2186 2187 /* calculate the pages to update */ 2188 for_each_buffer_cpu(buffer, cpu) { 2189 cpu_buffer = buffer->buffers[cpu]; 2190 2191 cpu_buffer->nr_pages_to_update = nr_pages - 2192 cpu_buffer->nr_pages; 2193 /* 2194 * nothing more to do for removing pages or no update 2195 */ 2196 if (cpu_buffer->nr_pages_to_update <= 0) 2197 continue; 2198 /* 2199 * to add pages, make sure all new pages can be 2200 * allocated without receiving ENOMEM 2201 */ 2202 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2203 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2204 &cpu_buffer->new_pages)) { 2205 /* not enough memory for new pages */ 2206 err = -ENOMEM; 2207 goto out_err; 2208 } 2209 } 2210 2211 cpus_read_lock(); 2212 /* 2213 * Fire off all the required work handlers 2214 * We can't schedule on offline CPUs, but it's not necessary 2215 * since we can change their buffer sizes without any race. 2216 */ 2217 for_each_buffer_cpu(buffer, cpu) { 2218 cpu_buffer = buffer->buffers[cpu]; 2219 if (!cpu_buffer->nr_pages_to_update) 2220 continue; 2221 2222 /* Can't run something on an offline CPU. */ 2223 if (!cpu_online(cpu)) { 2224 rb_update_pages(cpu_buffer); 2225 cpu_buffer->nr_pages_to_update = 0; 2226 } else { 2227 /* Run directly if possible. */ 2228 migrate_disable(); 2229 if (cpu != smp_processor_id()) { 2230 migrate_enable(); 2231 schedule_work_on(cpu, 2232 &cpu_buffer->update_pages_work); 2233 } else { 2234 update_pages_handler(&cpu_buffer->update_pages_work); 2235 migrate_enable(); 2236 } 2237 } 2238 } 2239 2240 /* wait for all the updates to complete */ 2241 for_each_buffer_cpu(buffer, cpu) { 2242 cpu_buffer = buffer->buffers[cpu]; 2243 if (!cpu_buffer->nr_pages_to_update) 2244 continue; 2245 2246 if (cpu_online(cpu)) 2247 wait_for_completion(&cpu_buffer->update_done); 2248 cpu_buffer->nr_pages_to_update = 0; 2249 } 2250 2251 cpus_read_unlock(); 2252 } else { 2253 cpu_buffer = buffer->buffers[cpu_id]; 2254 2255 if (nr_pages == cpu_buffer->nr_pages) 2256 goto out; 2257 2258 /* 2259 * Don't succeed if resizing is disabled, as a reader might be 2260 * manipulating the ring buffer and is expecting a sane state while 2261 * this is true. 2262 */ 2263 if (atomic_read(&cpu_buffer->resize_disabled)) { 2264 err = -EBUSY; 2265 goto out_err_unlock; 2266 } 2267 2268 cpu_buffer->nr_pages_to_update = nr_pages - 2269 cpu_buffer->nr_pages; 2270 2271 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2272 if (cpu_buffer->nr_pages_to_update > 0 && 2273 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2274 &cpu_buffer->new_pages)) { 2275 err = -ENOMEM; 2276 goto out_err; 2277 } 2278 2279 cpus_read_lock(); 2280 2281 /* Can't run something on an offline CPU. */ 2282 if (!cpu_online(cpu_id)) 2283 rb_update_pages(cpu_buffer); 2284 else { 2285 /* Run directly if possible. */ 2286 migrate_disable(); 2287 if (cpu_id == smp_processor_id()) { 2288 rb_update_pages(cpu_buffer); 2289 migrate_enable(); 2290 } else { 2291 migrate_enable(); 2292 schedule_work_on(cpu_id, 2293 &cpu_buffer->update_pages_work); 2294 wait_for_completion(&cpu_buffer->update_done); 2295 } 2296 } 2297 2298 cpu_buffer->nr_pages_to_update = 0; 2299 cpus_read_unlock(); 2300 } 2301 2302 out: 2303 /* 2304 * The ring buffer resize can happen with the ring buffer 2305 * enabled, so that the update disturbs the tracing as little 2306 * as possible. 
But if the buffer is disabled, we do not need 2307 * to worry about that, and we can take the time to verify 2308 * that the buffer is not corrupt. 2309 */ 2310 if (atomic_read(&buffer->record_disabled)) { 2311 atomic_inc(&buffer->record_disabled); 2312 /* 2313 * Even though the buffer was disabled, we must make sure 2314 * that it is truly disabled before calling rb_check_pages. 2315 * There could have been a race between checking 2316 * record_disable and incrementing it. 2317 */ 2318 synchronize_rcu(); 2319 for_each_buffer_cpu(buffer, cpu) { 2320 cpu_buffer = buffer->buffers[cpu]; 2321 rb_check_pages(cpu_buffer); 2322 } 2323 atomic_dec(&buffer->record_disabled); 2324 } 2325 2326 mutex_unlock(&buffer->mutex); 2327 return 0; 2328 2329 out_err: 2330 for_each_buffer_cpu(buffer, cpu) { 2331 struct buffer_page *bpage, *tmp; 2332 2333 cpu_buffer = buffer->buffers[cpu]; 2334 cpu_buffer->nr_pages_to_update = 0; 2335 2336 if (list_empty(&cpu_buffer->new_pages)) 2337 continue; 2338 2339 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2340 list) { 2341 list_del_init(&bpage->list); 2342 free_buffer_page(bpage); 2343 } 2344 } 2345 out_err_unlock: 2346 mutex_unlock(&buffer->mutex); 2347 return err; 2348 } 2349 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2350 2351 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2352 { 2353 mutex_lock(&buffer->mutex); 2354 if (val) 2355 buffer->flags |= RB_FL_OVERWRITE; 2356 else 2357 buffer->flags &= ~RB_FL_OVERWRITE; 2358 mutex_unlock(&buffer->mutex); 2359 } 2360 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2361 2362 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2363 { 2364 return bpage->page->data + index; 2365 } 2366 2367 static __always_inline struct ring_buffer_event * 2368 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2369 { 2370 return __rb_page_index(cpu_buffer->reader_page, 2371 cpu_buffer->reader_page->read); 2372 } 2373 2374 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2375 { 2376 return local_read(&bpage->page->commit); 2377 } 2378 2379 static struct ring_buffer_event * 2380 rb_iter_head_event(struct ring_buffer_iter *iter) 2381 { 2382 struct ring_buffer_event *event; 2383 struct buffer_page *iter_head_page = iter->head_page; 2384 unsigned long commit; 2385 unsigned length; 2386 2387 if (iter->head != iter->next_event) 2388 return iter->event; 2389 2390 /* 2391 * When the writer goes across pages, it issues a cmpxchg which 2392 * is a mb(), which will synchronize with the rmb here. 2393 * (see rb_tail_page_update() and __rb_reserve_next()) 2394 */ 2395 commit = rb_page_commit(iter_head_page); 2396 smp_rmb(); 2397 event = __rb_page_index(iter_head_page, iter->head); 2398 length = rb_event_length(event); 2399 2400 /* 2401 * READ_ONCE() doesn't work on functions and we don't want the 2402 * compiler doing any crazy optimizations with length. 2403 */ 2404 barrier(); 2405 2406 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2407 /* Writer corrupted the read? */ 2408 goto reset; 2409 2410 memcpy(iter->event, event, length); 2411 /* 2412 * If the page stamp is still the same after this rmb() then the 2413 * event was safely copied without the writer entering the page. 
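 *
 * In other words, the copy is only trusted if nothing changed across
 * the read-copy-recheck sequence:
 *
 *	commit = rb_page_commit(page);
 *	smp_rmb();
 *	memcpy(iter->event, event, length);
 *	smp_rmb();
 *	recheck page->time_stamp and rb_page_commit(page)
 *
 * If either moved, the writer won the race and the iterator is reset.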
2414 */ 2415 smp_rmb(); 2416 2417 /* Make sure the page didn't change since we read this */ 2418 if (iter->page_stamp != iter_head_page->page->time_stamp || 2419 commit > rb_page_commit(iter_head_page)) 2420 goto reset; 2421 2422 iter->next_event = iter->head + length; 2423 return iter->event; 2424 reset: 2425 /* Reset to the beginning */ 2426 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2427 iter->head = 0; 2428 iter->next_event = 0; 2429 iter->missed_events = 1; 2430 return NULL; 2431 } 2432 2433 /* Size is determined by what has been committed */ 2434 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2435 { 2436 return rb_page_commit(bpage); 2437 } 2438 2439 static __always_inline unsigned 2440 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2441 { 2442 return rb_page_commit(cpu_buffer->commit_page); 2443 } 2444 2445 static __always_inline unsigned 2446 rb_event_index(struct ring_buffer_event *event) 2447 { 2448 unsigned long addr = (unsigned long)event; 2449 2450 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2451 } 2452 2453 static void rb_inc_iter(struct ring_buffer_iter *iter) 2454 { 2455 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2456 2457 /* 2458 * The iterator could be on the reader page (it starts there). 2459 * But the head could have moved, since the reader was 2460 * found. Check for this case and assign the iterator 2461 * to the head page instead of next. 2462 */ 2463 if (iter->head_page == cpu_buffer->reader_page) 2464 iter->head_page = rb_set_head_page(cpu_buffer); 2465 else 2466 rb_inc_page(&iter->head_page); 2467 2468 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2469 iter->head = 0; 2470 iter->next_event = 0; 2471 } 2472 2473 /* 2474 * rb_handle_head_page - writer hit the head page 2475 * 2476 * Returns: +1 to retry page 2477 * 0 to continue 2478 * -1 on error 2479 */ 2480 static int 2481 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2482 struct buffer_page *tail_page, 2483 struct buffer_page *next_page) 2484 { 2485 struct buffer_page *new_head; 2486 int entries; 2487 int type; 2488 int ret; 2489 2490 entries = rb_page_entries(next_page); 2491 2492 /* 2493 * The hard part is here. We need to move the head 2494 * forward, and protect against both readers on 2495 * other CPUs and writers coming in via interrupts. 2496 */ 2497 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2498 RB_PAGE_HEAD); 2499 2500 /* 2501 * type can be one of four: 2502 * NORMAL - an interrupt already moved it for us 2503 * HEAD - we are the first to get here. 2504 * UPDATE - we are the interrupt interrupting 2505 * a current move. 2506 * MOVED - a reader on another CPU moved the next 2507 * pointer to its reader page. Give up 2508 * and try again. 2509 */ 2510 2511 switch (type) { 2512 case RB_PAGE_HEAD: 2513 /* 2514 * We changed the head to UPDATE, thus 2515 * it is our responsibility to update 2516 * the counters. 2517 */ 2518 local_add(entries, &cpu_buffer->overrun); 2519 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2520 local_inc(&cpu_buffer->pages_lost); 2521 2522 /* 2523 * The entries will be zeroed out when we move the 2524 * tail page. 2525 */ 2526 2527 /* still more to do */ 2528 break; 2529 2530 case RB_PAGE_UPDATE: 2531 /* 2532 * This is an interrupt that interrupt the 2533 * previous update. Still more to do. 2534 */ 2535 break; 2536 case RB_PAGE_NORMAL: 2537 /* 2538 * An interrupt came in before the update 2539 * and processed this for us. 
2540 * Nothing left to do. 2541 */ 2542 return 1; 2543 case RB_PAGE_MOVED: 2544 /* 2545 * The reader is on another CPU and just did 2546 * a swap with our next_page. 2547 * Try again. 2548 */ 2549 return 1; 2550 default: 2551 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2552 return -1; 2553 } 2554
2555 /* 2556 * Now that we are here, the old head pointer is 2557 * set to UPDATE. This will keep the reader from 2558 * swapping the head page with the reader page. 2559 * The reader (on another CPU) will spin till 2560 * we are finished. 2561 * 2562 * We just need to protect against interrupts 2563 * doing the job. We will set the next pointer 2564 * to HEAD. After that, we set the old pointer 2565 * to NORMAL, but only if it was HEAD before. 2566 * Otherwise we are an interrupt, and only 2567 * want the outermost commit to reset it. 2568 */ 2569 new_head = next_page; 2570 rb_inc_page(&new_head); 2571 2572 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2573 RB_PAGE_NORMAL); 2574
2575 /* 2576 * Valid returns are: 2577 * HEAD - an interrupt came in and already set it. 2578 * NORMAL - One of two things: 2579 * 1) We really set it. 2580 * 2) A bunch of interrupts came in and moved 2581 * the page forward again. 2582 */ 2583 switch (ret) { 2584 case RB_PAGE_HEAD: 2585 case RB_PAGE_NORMAL: 2586 /* OK */ 2587 break; 2588 default: 2589 RB_WARN_ON(cpu_buffer, 1); 2590 return -1; 2591 } 2592
2593 /* 2594 * It is possible that an interrupt came in, 2595 * set the head up, then more interrupts came in 2596 * and moved it again. When we get back here, 2597 * the page would have been set to NORMAL but we 2598 * just set it back to HEAD. 2599 * 2600 * How do you detect this? Well, if that happened 2601 * the tail page would have moved. 2602 */ 2603 if (ret == RB_PAGE_NORMAL) { 2604 struct buffer_page *buffer_tail_page; 2605 2606 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2607 /* 2608 * If the tail had moved past next, then we need 2609 * to reset the pointer. 2610 */ 2611 if (buffer_tail_page != tail_page && 2612 buffer_tail_page != next_page) 2613 rb_head_page_set_normal(cpu_buffer, new_head, 2614 next_page, 2615 RB_PAGE_HEAD); 2616 } 2617
2618 /* 2619 * If this was the outermost commit (the one that 2620 * changed the original pointer from HEAD to UPDATE), 2621 * then it is up to us to reset it to NORMAL. 2622 */ 2623 if (type == RB_PAGE_HEAD) { 2624 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2625 tail_page, 2626 RB_PAGE_UPDATE); 2627 if (RB_WARN_ON(cpu_buffer, 2628 ret != RB_PAGE_UPDATE)) 2629 return -1; 2630 } 2631 2632 return 0; 2633 } 2634
2635 static inline void 2636 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2637 unsigned long tail, struct rb_event_info *info) 2638 { 2639 struct buffer_page *tail_page = info->tail_page; 2640 struct ring_buffer_event *event; 2641 unsigned long length = info->length; 2642 2643 /* 2644 * Only the event that crossed the page boundary 2645 * must fill the old tail_page with padding. 2646 */ 2647 if (tail >= BUF_PAGE_SIZE) { 2648 /* 2649 * If the page was filled, then we still need 2650 * to update the real_end. Reset it to zero 2651 * and the reader will ignore it. 2652 */ 2653 if (tail == BUF_PAGE_SIZE) 2654 tail_page->real_end = 0; 2655 2656 local_sub(length, &tail_page->write); 2657 return; 2658 } 2659 2660 event = __rb_page_index(tail_page, tail); 2661 2662 /* account for padding bytes */ 2663 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2664 2665 /* 2666 * Save the original length to the meta data.
2667 * This will be used by the reader to add lost event 2668 * counter. 2669 */ 2670 tail_page->real_end = tail; 2671 2672 /* 2673 * If this event is bigger than the minimum size, then 2674 * we need to be careful that we don't subtract the 2675 * write counter enough to allow another writer to slip 2676 * in on this page. 2677 * We put in a discarded commit instead, to make sure 2678 * that this space is not used again. 2679 * 2680 * If we are less than the minimum size, we don't need to 2681 * worry about it. 2682 */ 2683 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2684 /* No room for any events */ 2685 2686 /* Mark the rest of the page with padding */ 2687 rb_event_set_padding(event); 2688 2689 /* Make sure the padding is visible before the write update */ 2690 smp_wmb(); 2691 2692 /* Set the write back to the previous setting */ 2693 local_sub(length, &tail_page->write); 2694 return; 2695 } 2696 2697 /* Put in a discarded event */ 2698 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2699 event->type_len = RINGBUF_TYPE_PADDING; 2700 /* time delta must be non zero */ 2701 event->time_delta = 1; 2702 2703 /* Make sure the padding is visible before the tail_page->write update */ 2704 smp_wmb(); 2705 2706 /* Set write to end of buffer */ 2707 length = (tail + length) - BUF_PAGE_SIZE; 2708 local_sub(length, &tail_page->write); 2709 } 2710 2711 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2712 2713 /* 2714 * This is the slow path, force gcc not to inline it. 2715 */ 2716 static noinline struct ring_buffer_event * 2717 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2718 unsigned long tail, struct rb_event_info *info) 2719 { 2720 struct buffer_page *tail_page = info->tail_page; 2721 struct buffer_page *commit_page = cpu_buffer->commit_page; 2722 struct trace_buffer *buffer = cpu_buffer->buffer; 2723 struct buffer_page *next_page; 2724 int ret; 2725 2726 next_page = tail_page; 2727 2728 rb_inc_page(&next_page); 2729 2730 /* 2731 * If for some reason, we had an interrupt storm that made 2732 * it all the way around the buffer, bail, and warn 2733 * about it. 2734 */ 2735 if (unlikely(next_page == commit_page)) { 2736 local_inc(&cpu_buffer->commit_overrun); 2737 goto out_reset; 2738 } 2739 2740 /* 2741 * This is where the fun begins! 2742 * 2743 * We are fighting against races between a reader that 2744 * could be on another CPU trying to swap its reader 2745 * page with the buffer head. 2746 * 2747 * We are also fighting against interrupts coming in and 2748 * moving the head or tail on us as well. 2749 * 2750 * If the next page is the head page then we have filled 2751 * the buffer, unless the commit page is still on the 2752 * reader page. 2753 */ 2754 if (rb_is_head_page(next_page, &tail_page->list)) { 2755 2756 /* 2757 * If the commit is not on the reader page, then 2758 * move the header page. 2759 */ 2760 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2761 /* 2762 * If we are not in overwrite mode, 2763 * this is easy, just stop here. 2764 */ 2765 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2766 local_inc(&cpu_buffer->dropped_events); 2767 goto out_reset; 2768 } 2769 2770 ret = rb_handle_head_page(cpu_buffer, 2771 tail_page, 2772 next_page); 2773 if (ret < 0) 2774 goto out_reset; 2775 if (ret) 2776 goto out_again; 2777 } else { 2778 /* 2779 * We need to be careful here too. The 2780 * commit page could still be on the reader 2781 * page. 
We could have a small buffer, and 2782 * have filled up the buffer with events 2783 * from interrupts and such, and wrapped. 2784 * 2785 * Note, if the tail page is also on the 2786 * reader_page, we let it move out. 2787 */ 2788 if (unlikely((cpu_buffer->commit_page != 2789 cpu_buffer->tail_page) && 2790 (cpu_buffer->commit_page == 2791 cpu_buffer->reader_page))) { 2792 local_inc(&cpu_buffer->commit_overrun); 2793 goto out_reset; 2794 } 2795 } 2796 } 2797 2798 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2799 2800 out_again: 2801 2802 rb_reset_tail(cpu_buffer, tail, info); 2803 2804 /* Commit what we have for now. */ 2805 rb_end_commit(cpu_buffer); 2806 /* rb_end_commit() decs committing */ 2807 local_inc(&cpu_buffer->committing); 2808 2809 /* fail and let the caller try again */ 2810 return ERR_PTR(-EAGAIN); 2811 2812 out_reset: 2813 /* reset write */ 2814 rb_reset_tail(cpu_buffer, tail, info); 2815 2816 return NULL; 2817 } 2818 2819 /* Slow path */ 2820 static struct ring_buffer_event * 2821 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2822 { 2823 if (abs) 2824 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2825 else 2826 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2827 2828 /* Not the first event on the page, or not delta? */ 2829 if (abs || rb_event_index(event)) { 2830 event->time_delta = delta & TS_MASK; 2831 event->array[0] = delta >> TS_SHIFT; 2832 } else { 2833 /* nope, just zero it */ 2834 event->time_delta = 0; 2835 event->array[0] = 0; 2836 } 2837 2838 return skip_time_extend(event); 2839 } 2840 2841 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2842 static inline bool sched_clock_stable(void) 2843 { 2844 return true; 2845 } 2846 #endif 2847 2848 static void 2849 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2850 struct rb_event_info *info) 2851 { 2852 u64 write_stamp; 2853 2854 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2855 (unsigned long long)info->delta, 2856 (unsigned long long)info->ts, 2857 (unsigned long long)info->before, 2858 (unsigned long long)info->after, 2859 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2860 sched_clock_stable() ? "" : 2861 "If you just came from a suspend/resume,\n" 2862 "please switch to the trace global clock:\n" 2863 " echo global > /sys/kernel/tracing/trace_clock\n" 2864 "or add trace_clock=global to the kernel command line\n"); 2865 } 2866 2867 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2868 struct ring_buffer_event **event, 2869 struct rb_event_info *info, 2870 u64 *delta, 2871 unsigned int *length) 2872 { 2873 bool abs = info->add_timestamp & 2874 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2875 2876 if (unlikely(info->delta > (1ULL << 59))) { 2877 /* 2878 * Some timers can use more than 59 bits, and when a timestamp 2879 * is added to the buffer, it will lose those bits. 2880 */ 2881 if (abs && (info->ts & TS_MSB)) { 2882 info->delta &= ABS_TS_MASK; 2883 2884 /* did the clock go backwards */ 2885 } else if (info->before == info->after && info->before > info->ts) { 2886 /* not interrupted */ 2887 static int once; 2888 2889 /* 2890 * This is possible with a recalibrating of the TSC. 2891 * Do not produce a call stack, but just report it. 
2892 */ 2893 if (!once) { 2894 once++; 2895 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2896 info->before, info->ts); 2897 } 2898 } else 2899 rb_check_timestamp(cpu_buffer, info); 2900 if (!abs) 2901 info->delta = 0; 2902 } 2903 *event = rb_add_time_stamp(*event, info->delta, abs); 2904 *length -= RB_LEN_TIME_EXTEND; 2905 *delta = 0; 2906 } 2907 2908 /** 2909 * rb_update_event - update event type and data 2910 * @cpu_buffer: The per cpu buffer of the @event 2911 * @event: the event to update 2912 * @info: The info to update the @event with (contains length and delta) 2913 * 2914 * Update the type and data fields of the @event. The length 2915 * is the actual size that is written to the ring buffer, 2916 * and with this, we can determine what to place into the 2917 * data field. 2918 */ 2919 static void 2920 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2921 struct ring_buffer_event *event, 2922 struct rb_event_info *info) 2923 { 2924 unsigned length = info->length; 2925 u64 delta = info->delta; 2926 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2927 2928 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2929 cpu_buffer->event_stamp[nest] = info->ts; 2930 2931 /* 2932 * If we need to add a timestamp, then we 2933 * add it to the start of the reserved space. 2934 */ 2935 if (unlikely(info->add_timestamp)) 2936 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2937 2938 event->time_delta = delta; 2939 length -= RB_EVNT_HDR_SIZE; 2940 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2941 event->type_len = 0; 2942 event->array[0] = length; 2943 } else 2944 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2945 } 2946 2947 static unsigned rb_calculate_event_length(unsigned length) 2948 { 2949 struct ring_buffer_event event; /* Used only for sizeof array */ 2950 2951 /* zero length can cause confusions */ 2952 if (!length) 2953 length++; 2954 2955 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2956 length += sizeof(event.array[0]); 2957 2958 length += RB_EVNT_HDR_SIZE; 2959 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2960 2961 /* 2962 * In case the time delta is larger than the 27 bits for it 2963 * in the header, we need to add a timestamp. If another 2964 * event comes in when trying to discard this one to increase 2965 * the length, then the timestamp will be added in the allocated 2966 * space of this event. If length is bigger than the size needed 2967 * for the TIME_EXTEND, then padding has to be used. The events 2968 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2969 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2970 * As length is a multiple of 4, we only need to worry if it 2971 * is 12 (RB_LEN_TIME_EXTEND + 4). 
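 *
 * A worked example (assuming the usual 4-byte event header and an
 * architecture without RB_FORCE_8BYTE_ALIGNMENT): a request for 8 bytes
 * of data becomes 8 + RB_EVNT_HDR_SIZE = 12, which is exactly
 * RB_LEN_TIME_EXTEND + 4, so it gets bumped to 16 below.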
2972 */ 2973 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2974 length += RB_ALIGNMENT; 2975 2976 return length; 2977 } 2978 2979 static u64 rb_time_delta(struct ring_buffer_event *event) 2980 { 2981 switch (event->type_len) { 2982 case RINGBUF_TYPE_PADDING: 2983 return 0; 2984 2985 case RINGBUF_TYPE_TIME_EXTEND: 2986 return rb_event_time_stamp(event); 2987 2988 case RINGBUF_TYPE_TIME_STAMP: 2989 return 0; 2990 2991 case RINGBUF_TYPE_DATA: 2992 return event->time_delta; 2993 default: 2994 return 0; 2995 } 2996 } 2997 2998 static inline int 2999 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 3000 struct ring_buffer_event *event) 3001 { 3002 unsigned long new_index, old_index; 3003 struct buffer_page *bpage; 3004 unsigned long index; 3005 unsigned long addr; 3006 u64 write_stamp; 3007 u64 delta; 3008 3009 new_index = rb_event_index(event); 3010 old_index = new_index + rb_event_ts_length(event); 3011 addr = (unsigned long)event; 3012 addr &= PAGE_MASK; 3013 3014 bpage = READ_ONCE(cpu_buffer->tail_page); 3015 3016 delta = rb_time_delta(event); 3017 3018 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 3019 return 0; 3020 3021 /* Make sure the write stamp is read before testing the location */ 3022 barrier(); 3023 3024 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 3025 unsigned long write_mask = 3026 local_read(&bpage->write) & ~RB_WRITE_MASK; 3027 unsigned long event_length = rb_event_length(event); 3028 3029 /* Something came in, can't discard */ 3030 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 3031 write_stamp, write_stamp - delta)) 3032 return 0; 3033 3034 /* 3035 * It's possible that the event time delta is zero 3036 * (has the same time stamp as the previous event) 3037 * in which case write_stamp and before_stamp could 3038 * be the same. In such a case, force before_stamp 3039 * to be different than write_stamp. It doesn't 3040 * matter what it is, as long as its different. 3041 */ 3042 if (!delta) 3043 rb_time_set(&cpu_buffer->before_stamp, 0); 3044 3045 /* 3046 * If an event were to come in now, it would see that the 3047 * write_stamp and the before_stamp are different, and assume 3048 * that this event just added itself before updating 3049 * the write stamp. The interrupting event will fix the 3050 * write stamp for us, and use the before stamp as its delta. 3051 */ 3052 3053 /* 3054 * This is on the tail page. It is possible that 3055 * a write could come in and move the tail page 3056 * and write to the next page. That is fine 3057 * because we just shorten what is on this page. 3058 */ 3059 old_index += write_mask; 3060 new_index += write_mask; 3061 index = local_cmpxchg(&bpage->write, old_index, new_index); 3062 if (index == old_index) { 3063 /* update counters */ 3064 local_sub(event_length, &cpu_buffer->entries_bytes); 3065 return 1; 3066 } 3067 } 3068 3069 /* could not discard */ 3070 return 0; 3071 } 3072 3073 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3074 { 3075 local_inc(&cpu_buffer->committing); 3076 local_inc(&cpu_buffer->commits); 3077 } 3078 3079 static __always_inline void 3080 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3081 { 3082 unsigned long max_count; 3083 3084 /* 3085 * We only race with interrupts and NMIs on this CPU. 3086 * If we own the commit event, then we can commit 3087 * all others that interrupted us, since the interruptions 3088 * are in stack format (they finish before they come 3089 * back to us). 
This allows us to do a simple loop to 3090 * assign the commit to the tail. 3091 */ 3092 again: 3093 max_count = cpu_buffer->nr_pages * 100; 3094 3095 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3096 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3097 return; 3098 if (RB_WARN_ON(cpu_buffer, 3099 rb_is_reader_page(cpu_buffer->tail_page))) 3100 return; 3101 local_set(&cpu_buffer->commit_page->page->commit, 3102 rb_page_write(cpu_buffer->commit_page)); 3103 rb_inc_page(&cpu_buffer->commit_page); 3104 /* add barrier to keep gcc from optimizing too much */ 3105 barrier(); 3106 } 3107 while (rb_commit_index(cpu_buffer) != 3108 rb_page_write(cpu_buffer->commit_page)) { 3109 3110 local_set(&cpu_buffer->commit_page->page->commit, 3111 rb_page_write(cpu_buffer->commit_page)); 3112 RB_WARN_ON(cpu_buffer, 3113 local_read(&cpu_buffer->commit_page->page->commit) & 3114 ~RB_WRITE_MASK); 3115 barrier(); 3116 } 3117 3118 /* again, keep gcc from optimizing */ 3119 barrier(); 3120 3121 /* 3122 * If an interrupt came in just after the first while loop 3123 * and pushed the tail page forward, we will be left with 3124 * a dangling commit that will never go forward. 3125 */ 3126 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3127 goto again; 3128 } 3129 3130 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3131 { 3132 unsigned long commits; 3133 3134 if (RB_WARN_ON(cpu_buffer, 3135 !local_read(&cpu_buffer->committing))) 3136 return; 3137 3138 again: 3139 commits = local_read(&cpu_buffer->commits); 3140 /* synchronize with interrupts */ 3141 barrier(); 3142 if (local_read(&cpu_buffer->committing) == 1) 3143 rb_set_commit_to_write(cpu_buffer); 3144 3145 local_dec(&cpu_buffer->committing); 3146 3147 /* synchronize with interrupts */ 3148 barrier(); 3149 3150 /* 3151 * Need to account for interrupts coming in between the 3152 * updating of the commit page and the clearing of the 3153 * committing counter. 
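 *
 * That is, if "commits" changed while "committing" was being dropped to
 * zero, an interrupt slipped a whole new commit into that window. In
 * that case committing is bumped again and the pass is retaken below so
 * the interrupt's commit is not left dangling.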
3154 */ 3155 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3156 !local_read(&cpu_buffer->committing)) { 3157 local_inc(&cpu_buffer->committing); 3158 goto again; 3159 } 3160 } 3161
3162 static inline void rb_event_discard(struct ring_buffer_event *event) 3163 { 3164 if (extended_time(event)) 3165 event = skip_time_extend(event); 3166 3167 /* array[0] holds the actual length for the discarded event */ 3168 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3169 event->type_len = RINGBUF_TYPE_PADDING; 3170 /* time delta must be non-zero */ 3171 if (!event->time_delta) 3172 event->time_delta = 1; 3173 } 3174
3175 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3176 { 3177 local_inc(&cpu_buffer->entries); 3178 rb_end_commit(cpu_buffer); 3179 } 3180 3181 static __always_inline void 3182 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3183 { 3184 if (buffer->irq_work.waiters_pending) { 3185 buffer->irq_work.waiters_pending = false; 3186 /* irq_work_queue() supplies its own memory barriers */ 3187 irq_work_queue(&buffer->irq_work.work); 3188 } 3189 3190 if (cpu_buffer->irq_work.waiters_pending) { 3191 cpu_buffer->irq_work.waiters_pending = false; 3192 /* irq_work_queue() supplies its own memory barriers */ 3193 irq_work_queue(&cpu_buffer->irq_work.work); 3194 } 3195
3196 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3197 return; 3198 3199 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3200 return; 3201 3202 if (!cpu_buffer->irq_work.full_waiters_pending) 3203 return; 3204 3205 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3206 3207 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3208 return; 3209 3210 cpu_buffer->irq_work.wakeup_full = true; 3211 cpu_buffer->irq_work.full_waiters_pending = false; 3212 /* irq_work_queue() supplies its own memory barriers */ 3213 irq_work_queue(&cpu_buffer->irq_work.work); 3214 } 3215
3216 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3217 # define do_ring_buffer_record_recursion() \ 3218 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3219 #else 3220 # define do_ring_buffer_record_recursion() do { } while (0) 3221 #endif 3222
3223 /* 3224 * The lock and unlock are done within a preempt disable section. 3225 * The current_context per_cpu variable can only be modified 3226 * by the current task between lock and unlock. But it can 3227 * be modified more than once via an interrupt. To pass this 3228 * information from the lock to the unlock without having to 3229 * access the 'in_interrupt()' functions again (which do show 3230 * a bit of overhead in something as critical as function tracing), 3231 * we use a bitmask trick. 3232 * 3233 * bit 1 = NMI context 3234 * bit 2 = IRQ context 3235 * bit 3 = SoftIRQ context 3236 * bit 4 = normal context. 3237 * 3238 * This works because this is the order of contexts that can 3239 * preempt other contexts. A SoftIRQ never preempts an IRQ 3240 * context. 3241 * 3242 * When the context is determined, the corresponding bit is 3243 * checked and set (if it was set, then a recursion of that context 3244 * happened). 3245 * 3246 * On unlock, we need to clear this bit. To do so, just subtract 3247 * 1 from the current_context and AND it with itself.
3248 * 3249 * (binary) 3250 * 101 - 1 = 100 3251 * 101 & 100 = 100 (clearing bit zero) 3252 * 3253 * 1010 - 1 = 1001 3254 * 1010 & 1001 = 1000 (clearing bit 1) 3255 * 3256 * The least significant bit can be cleared this way, and it 3257 * just so happens that it is the same bit corresponding to 3258 * the current context. 3259 * 3260 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3261 * is set when a recursion is detected at the current context, and if 3262 * the TRANSITION bit is already set, it will fail the recursion. 3263 * This is needed because there's a lag between the changing of 3264 * interrupt context and updating the preempt count. In this case, 3265 * a false positive will be found. To handle this, one extra recursion 3266 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3267 * bit is already set, then it is considered a recursion and the function 3268 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3269 * 3270 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3271 * to be cleared. Even if it wasn't the context that set it. That is, 3272 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3273 * is called before preempt_count() is updated, since the check will 3274 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3275 * NMI then comes in, it will set the NMI bit, but when the NMI code 3276 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3277 * and leave the NMI bit set. But this is fine, because the interrupt 3278 * code that set the TRANSITION bit will then clear the NMI bit when it 3279 * calls trace_recursive_unlock(). If another NMI comes in, it will 3280 * set the TRANSITION bit and continue. 3281 * 3282 * Note: The TRANSITION bit only handles a single transition between context. 3283 */ 3284 3285 static __always_inline int 3286 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3287 { 3288 unsigned int val = cpu_buffer->current_context; 3289 int bit = interrupt_context_level(); 3290 3291 bit = RB_CTX_NORMAL - bit; 3292 3293 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3294 /* 3295 * It is possible that this was called by transitioning 3296 * between interrupt context, and preempt_count() has not 3297 * been updated yet. In this case, use the TRANSITION bit. 3298 */ 3299 bit = RB_CTX_TRANSITION; 3300 if (val & (1 << (bit + cpu_buffer->nest))) { 3301 do_ring_buffer_record_recursion(); 3302 return 1; 3303 } 3304 } 3305 3306 val |= (1 << (bit + cpu_buffer->nest)); 3307 cpu_buffer->current_context = val; 3308 3309 return 0; 3310 } 3311 3312 static __always_inline void 3313 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3314 { 3315 cpu_buffer->current_context &= 3316 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3317 } 3318 3319 /* The recursive locking above uses 5 bits */ 3320 #define NESTED_BITS 5 3321 3322 /** 3323 * ring_buffer_nest_start - Allow to trace while nested 3324 * @buffer: The ring buffer to modify 3325 * 3326 * The ring buffer has a safety mechanism to prevent recursion. 3327 * But there may be a case where a trace needs to be done while 3328 * tracing something else. In this case, calling this function 3329 * will allow this function to nest within a currently active 3330 * ring_buffer_lock_reserve(). 3331 * 3332 * Call this function before calling another ring_buffer_lock_reserve() and 3333 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 
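 *
 * A minimal usage sketch (illustrative only; "data" stands in for
 * whatever the nested trace wants to record):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, sizeof(data));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &data, sizeof(data));
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);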
3334 */ 3335 void ring_buffer_nest_start(struct trace_buffer *buffer) 3336 { 3337 struct ring_buffer_per_cpu *cpu_buffer; 3338 int cpu; 3339 3340 /* Enabled by ring_buffer_nest_end() */ 3341 preempt_disable_notrace(); 3342 cpu = raw_smp_processor_id(); 3343 cpu_buffer = buffer->buffers[cpu]; 3344 /* This is the shift value for the above recursive locking */ 3345 cpu_buffer->nest += NESTED_BITS; 3346 } 3347
3348 /** 3349 * ring_buffer_nest_end - Allow to trace while nested 3350 * @buffer: The ring buffer to modify 3351 * 3352 * Must be called after ring_buffer_nest_start() and after the 3353 * ring_buffer_unlock_commit(). 3354 */ 3355 void ring_buffer_nest_end(struct trace_buffer *buffer) 3356 { 3357 struct ring_buffer_per_cpu *cpu_buffer; 3358 int cpu; 3359 3360 /* disabled by ring_buffer_nest_start() */ 3361 cpu = raw_smp_processor_id(); 3362 cpu_buffer = buffer->buffers[cpu]; 3363 /* This is the shift value for the above recursive locking */ 3364 cpu_buffer->nest -= NESTED_BITS; 3365 preempt_enable_notrace(); 3366 } 3367
3368 /** 3369 * ring_buffer_unlock_commit - commit a reserved event 3370 * @buffer: The buffer to commit to 3372 * 3373 * This commits the data to the ring buffer, and releases any locks held. 3374 * 3375 * Must be paired with ring_buffer_lock_reserve. 3376 */ 3377 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3378 { 3379 struct ring_buffer_per_cpu *cpu_buffer; 3380 int cpu = raw_smp_processor_id(); 3381 3382 cpu_buffer = buffer->buffers[cpu]; 3383 3384 rb_commit(cpu_buffer); 3385 3386 rb_wakeups(buffer, cpu_buffer); 3387 3388 trace_recursive_unlock(cpu_buffer); 3389 3390 preempt_enable_notrace(); 3391 3392 return 0; 3393 } 3394 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3395
3396 /* Special value to validate all deltas on a page. */ 3397 #define CHECK_FULL_PAGE 1L 3398 3399 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3400 static void dump_buffer_page(struct buffer_data_page *bpage, 3401 struct rb_event_info *info, 3402 unsigned long tail) 3403 { 3404 struct ring_buffer_event *event; 3405 u64 ts, delta; 3406 int e; 3407 3408 ts = bpage->time_stamp; 3409 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3410 3411 for (e = 0; e < tail; e += rb_event_length(event)) { 3412 3413 event = (struct ring_buffer_event *)(bpage->data + e); 3414 3415 switch (event->type_len) { 3416
3417 case RINGBUF_TYPE_TIME_EXTEND: 3418 delta = rb_event_time_stamp(event); 3419 ts += delta; 3420 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3421 break; 3422 3423 case RINGBUF_TYPE_TIME_STAMP: 3424 delta = rb_event_time_stamp(event); 3425 ts = rb_fix_abs_ts(delta, ts); 3426 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3427 break; 3428 3429 case RINGBUF_TYPE_PADDING: 3430 ts += event->time_delta; 3431 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3432 break; 3433 3434 case RINGBUF_TYPE_DATA: 3435 ts += event->time_delta; 3436 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3437 break; 3438 3439 default: 3440 break; 3441 } 3442 } 3443 } 3444
3445 static DEFINE_PER_CPU(atomic_t, checking); 3446 static atomic_t ts_dump; 3447 3448 /* 3449 * Check if the current event time stamp matches the deltas on 3450 * the buffer page.
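 * That is, the page's time_stamp plus the accumulated deltas of the
 * events already on the page, plus the delta of the event being added,
 * should equal that event's time stamp (and for a full-page check, the
 * accumulated time must not run past it).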
3451 */ 3452 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3453 struct rb_event_info *info, 3454 unsigned long tail) 3455 { 3456 struct ring_buffer_event *event; 3457 struct buffer_data_page *bpage; 3458 u64 ts, delta; 3459 bool full = false; 3460 int e; 3461 3462 bpage = info->tail_page->page; 3463
3464 if (tail == CHECK_FULL_PAGE) { 3465 full = true; 3466 tail = local_read(&bpage->commit); 3467 } else if (info->add_timestamp & 3468 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3469 /* Ignore events with absolute time stamps */ 3470 return; 3471 } 3472 3473 /* 3474 * Do not check the first event (skip possible extends too). 3475 * Also do not check if previous events have not been committed. 3476 */ 3477 if (tail <= 8 || tail > local_read(&bpage->commit)) 3478 return; 3479 3480 /* 3481 * If this interrupted another event, skip the check. 3482 */ 3483 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3484 goto out; 3485 3486 ts = bpage->time_stamp; 3487
3488 for (e = 0; e < tail; e += rb_event_length(event)) { 3489 3490 event = (struct ring_buffer_event *)(bpage->data + e); 3491 3492 switch (event->type_len) { 3493 3494 case RINGBUF_TYPE_TIME_EXTEND: 3495 delta = rb_event_time_stamp(event); 3496 ts += delta; 3497 break; 3498 3499 case RINGBUF_TYPE_TIME_STAMP: 3500 delta = rb_event_time_stamp(event); 3501 ts = rb_fix_abs_ts(delta, ts); 3502 break; 3503 3504 case RINGBUF_TYPE_PADDING: 3505 if (event->time_delta == 1) 3506 break; 3507 fallthrough; 3508 case RINGBUF_TYPE_DATA: 3509 ts += event->time_delta; 3510 break; 3511 3512 default: 3513 RB_WARN_ON(cpu_buffer, 1); 3514 } 3515 } 3516
if ((full && ts > info->ts) || 3517 (!full && ts + info->delta != info->ts)) { 3518 /* If another report is happening, ignore this one */ 3519 if (atomic_inc_return(&ts_dump) != 1) { 3520 atomic_dec(&ts_dump); 3521 goto out; 3522 } 3523 atomic_inc(&cpu_buffer->record_disabled); 3524 /* There are some cases during boot up where this can happen */ 3525 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3526 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3527 cpu_buffer->cpu, 3528 ts + info->delta, info->ts, info->delta, 3529 info->before, info->after, 3530 full ?
" (full)" : ""); 3531 dump_buffer_page(bpage, info, tail); 3532 atomic_dec(&ts_dump); 3533 /* Do not re-enable checking */ 3534 return; 3535 } 3536 out: 3537 atomic_dec(this_cpu_ptr(&checking)); 3538 } 3539 #else 3540 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3541 struct rb_event_info *info, 3542 unsigned long tail) 3543 { 3544 } 3545 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3546 3547 static struct ring_buffer_event * 3548 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3549 struct rb_event_info *info) 3550 { 3551 struct ring_buffer_event *event; 3552 struct buffer_page *tail_page; 3553 unsigned long tail, write, w; 3554 bool a_ok; 3555 bool b_ok; 3556 3557 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3558 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3559 3560 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3561 barrier(); 3562 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3563 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3564 barrier(); 3565 info->ts = rb_time_stamp(cpu_buffer->buffer); 3566 3567 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3568 info->delta = info->ts; 3569 } else { 3570 /* 3571 * If interrupting an event time update, we may need an 3572 * absolute timestamp. 3573 * Don't bother if this is the start of a new page (w == 0). 3574 */ 3575 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 3576 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3577 info->length += RB_LEN_TIME_EXTEND; 3578 } else { 3579 info->delta = info->ts - info->after; 3580 if (unlikely(test_time_stamp(info->delta))) { 3581 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3582 info->length += RB_LEN_TIME_EXTEND; 3583 } 3584 } 3585 } 3586 3587 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3588 3589 /*C*/ write = local_add_return(info->length, &tail_page->write); 3590 3591 /* set write to only the index of the write */ 3592 write &= RB_WRITE_MASK; 3593 3594 tail = write - info->length; 3595 3596 /* See if we shot pass the end of this buffer page */ 3597 if (unlikely(write > BUF_PAGE_SIZE)) { 3598 /* before and after may now different, fix it up*/ 3599 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3600 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3601 if (a_ok && b_ok && info->before != info->after) 3602 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 3603 info->before, info->after); 3604 if (a_ok && b_ok) 3605 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3606 return rb_move_tail(cpu_buffer, tail, info); 3607 } 3608 3609 if (likely(tail == w)) { 3610 u64 save_before; 3611 bool s_ok; 3612 3613 /* Nothing interrupted us between A and C */ 3614 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3615 barrier(); 3616 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 3617 RB_WARN_ON(cpu_buffer, !s_ok); 3618 if (likely(!(info->add_timestamp & 3619 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3620 /* This did not interrupt any time update */ 3621 info->delta = info->ts - info->after; 3622 else 3623 /* Just use full timestamp for interrupting event */ 3624 info->delta = info->ts; 3625 barrier(); 3626 check_buffer(cpu_buffer, info, tail); 3627 if (unlikely(info->ts != save_before)) { 3628 /* SLOW PATH - Interrupted between C and E */ 3629 3630 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3631 RB_WARN_ON(cpu_buffer, !a_ok); 3632 3633 /* Write stamp must only go 
forward */ 3634 if (save_before > info->after) { 3635 /* 3636 * We do not care about the result, only that 3637 * it gets updated atomically. 3638 */ 3639 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 3640 info->after, save_before); 3641 } 3642 } 3643 } else { 3644 u64 ts; 3645 /* SLOW PATH - Interrupted between A and C */ 3646 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3647 /* Was interrupted before here, write_stamp must be valid */ 3648 RB_WARN_ON(cpu_buffer, !a_ok); 3649 ts = rb_time_stamp(cpu_buffer->buffer); 3650 barrier(); 3651 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3652 info->after < ts && 3653 rb_time_cmpxchg(&cpu_buffer->write_stamp, 3654 info->after, ts)) { 3655 /* Nothing came after this event between C and E */ 3656 info->delta = ts - info->after; 3657 } else { 3658 /* 3659 * Interrupted between C and E: 3660 * Lost the previous events time stamp. Just set the 3661 * delta to zero, and this will be the same time as 3662 * the event this event interrupted. And the events that 3663 * came after this will still be correct (as they would 3664 * have built their delta on the previous event. 3665 */ 3666 info->delta = 0; 3667 } 3668 info->ts = ts; 3669 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3670 } 3671 3672 /* 3673 * If this is the first commit on the page, then it has the same 3674 * timestamp as the page itself. 3675 */ 3676 if (unlikely(!tail && !(info->add_timestamp & 3677 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3678 info->delta = 0; 3679 3680 /* We reserved something on the buffer */ 3681 3682 event = __rb_page_index(tail_page, tail); 3683 rb_update_event(cpu_buffer, event, info); 3684 3685 local_inc(&tail_page->entries); 3686 3687 /* 3688 * If this is the first commit on the page, then update 3689 * its timestamp. 3690 */ 3691 if (unlikely(!tail)) 3692 tail_page->page->time_stamp = info->ts; 3693 3694 /* account for these added bytes */ 3695 local_add(info->length, &cpu_buffer->entries_bytes); 3696 3697 return event; 3698 } 3699 3700 static __always_inline struct ring_buffer_event * 3701 rb_reserve_next_event(struct trace_buffer *buffer, 3702 struct ring_buffer_per_cpu *cpu_buffer, 3703 unsigned long length) 3704 { 3705 struct ring_buffer_event *event; 3706 struct rb_event_info info; 3707 int nr_loops = 0; 3708 int add_ts_default; 3709 3710 rb_start_commit(cpu_buffer); 3711 /* The commit page can not change after this */ 3712 3713 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3714 /* 3715 * Due to the ability to swap a cpu buffer from a buffer 3716 * it is possible it was swapped before we committed. 3717 * (committing stops a swap). We check for it here and 3718 * if it happened, we have to fail the write. 3719 */ 3720 barrier(); 3721 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3722 local_dec(&cpu_buffer->committing); 3723 local_dec(&cpu_buffer->commits); 3724 return NULL; 3725 } 3726 #endif 3727 3728 info.length = rb_calculate_event_length(length); 3729 3730 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3731 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3732 info.length += RB_LEN_TIME_EXTEND; 3733 } else { 3734 add_ts_default = RB_ADD_STAMP_NONE; 3735 } 3736 3737 again: 3738 info.add_timestamp = add_ts_default; 3739 info.delta = 0; 3740 3741 /* 3742 * We allow for interrupts to reenter here and do a trace. 3743 * If one does, it will cause this original code to loop 3744 * back here. Even with heavy interrupts happening, this 3745 * should only happen a few times in a row. 
If this happens 3746 * 1000 times in a row, there must be either an interrupt 3747 * storm or we have something buggy. 3748 * Bail! 3749 */ 3750 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3751 goto out_fail; 3752 3753 event = __rb_reserve_next(cpu_buffer, &info); 3754 3755 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3756 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3757 info.length -= RB_LEN_TIME_EXTEND; 3758 goto again; 3759 } 3760 3761 if (likely(event)) 3762 return event; 3763 out_fail: 3764 rb_end_commit(cpu_buffer); 3765 return NULL; 3766 } 3767 3768 /** 3769 * ring_buffer_lock_reserve - reserve a part of the buffer 3770 * @buffer: the ring buffer to reserve from 3771 * @length: the length of the data to reserve (excluding event header) 3772 * 3773 * Returns a reserved event on the ring buffer to copy directly to. 3774 * The user of this interface will need to get the body to write into 3775 * and can use the ring_buffer_event_data() interface. 3776 * 3777 * The length is the length of the data needed, not the event length 3778 * which also includes the event header. 3779 * 3780 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3781 * If NULL is returned, then nothing has been allocated or locked. 3782 */ 3783 struct ring_buffer_event * 3784 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3785 { 3786 struct ring_buffer_per_cpu *cpu_buffer; 3787 struct ring_buffer_event *event; 3788 int cpu; 3789 3790 /* If we are tracing schedule, we don't want to recurse */ 3791 preempt_disable_notrace(); 3792 3793 if (unlikely(atomic_read(&buffer->record_disabled))) 3794 goto out; 3795 3796 cpu = raw_smp_processor_id(); 3797 3798 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3799 goto out; 3800 3801 cpu_buffer = buffer->buffers[cpu]; 3802 3803 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3804 goto out; 3805 3806 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3807 goto out; 3808 3809 if (unlikely(trace_recursive_lock(cpu_buffer))) 3810 goto out; 3811 3812 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3813 if (!event) 3814 goto out_unlock; 3815 3816 return event; 3817 3818 out_unlock: 3819 trace_recursive_unlock(cpu_buffer); 3820 out: 3821 preempt_enable_notrace(); 3822 return NULL; 3823 } 3824 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3825 3826 /* 3827 * Decrement the entries to the page that an event is on. 3828 * The event does not even need to exist, only the pointer 3829 * to the page it is on. This may only be called before the commit 3830 * takes place. 3831 */ 3832 static inline void 3833 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3834 struct ring_buffer_event *event) 3835 { 3836 unsigned long addr = (unsigned long)event; 3837 struct buffer_page *bpage = cpu_buffer->commit_page; 3838 struct buffer_page *start; 3839 3840 addr &= PAGE_MASK; 3841 3842 /* Do the likely case first */ 3843 if (likely(bpage->page == (void *)addr)) { 3844 local_dec(&bpage->entries); 3845 return; 3846 } 3847 3848 /* 3849 * Because the commit page may be on the reader page we 3850 * start with the next page and check the end loop there. 3851 */ 3852 rb_inc_page(&bpage); 3853 start = bpage; 3854 do { 3855 if (bpage->page == (void *)addr) { 3856 local_dec(&bpage->entries); 3857 return; 3858 } 3859 rb_inc_page(&bpage); 3860 } while (bpage != start); 3861 3862 /* commit not part of this buffer?? 
*/ 3863 RB_WARN_ON(cpu_buffer, 1); 3864 } 3865 3866 /** 3867 * ring_buffer_discard_commit - discard an event that has not been committed 3868 * @buffer: the ring buffer 3869 * @event: non committed event to discard 3870 * 3871 * Sometimes an event that is in the ring buffer needs to be ignored. 3872 * This function lets the user discard an event in the ring buffer 3873 * and then that event will not be read later. 3874 * 3875 * This function only works if it is called before the item has been 3876 * committed. It will try to free the event from the ring buffer 3877 * if another event has not been added behind it. 3878 * 3879 * If another event has been added behind it, it will set the event 3880 * up as discarded, and perform the commit. 3881 * 3882 * If this function is called, do not call ring_buffer_unlock_commit on 3883 * the event. 3884 */ 3885 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3886 struct ring_buffer_event *event) 3887 { 3888 struct ring_buffer_per_cpu *cpu_buffer; 3889 int cpu; 3890 3891 /* The event is discarded regardless */ 3892 rb_event_discard(event); 3893 3894 cpu = smp_processor_id(); 3895 cpu_buffer = buffer->buffers[cpu]; 3896 3897 /* 3898 * This must only be called if the event has not been 3899 * committed yet. Thus we can assume that preemption 3900 * is still disabled. 3901 */ 3902 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3903 3904 rb_decrement_entry(cpu_buffer, event); 3905 if (rb_try_to_discard(cpu_buffer, event)) 3906 goto out; 3907 3908 out: 3909 rb_end_commit(cpu_buffer); 3910 3911 trace_recursive_unlock(cpu_buffer); 3912 3913 preempt_enable_notrace(); 3914 3915 } 3916 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3917 3918 /** 3919 * ring_buffer_write - write data to the buffer without reserving 3920 * @buffer: The ring buffer to write to. 3921 * @length: The length of the data being written (excluding the event header) 3922 * @data: The data to write to the buffer. 3923 * 3924 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3925 * one function. If you already have the data to write to the buffer, it 3926 * may be easier to simply call this function. 3927 * 3928 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3929 * and not the length of the event which would hold the header. 
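 *
 * As a rough sketch of the equivalence (illustrative only; "ev" is a
 * made-up payload, error handling is omitted, and the exact
 * ring_buffer_unlock_commit() prototype should be checked as it has
 * changed across kernel versions):
 *
 *	ring_buffer_write(buffer, sizeof(ev), &ev);
 *
 * behaves much like:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(ev));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &ev, sizeof(ev));
 *		ring_buffer_unlock_commit(buffer);
 *	}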
3930 */ 3931 int ring_buffer_write(struct trace_buffer *buffer, 3932 unsigned long length, 3933 void *data) 3934 { 3935 struct ring_buffer_per_cpu *cpu_buffer; 3936 struct ring_buffer_event *event; 3937 void *body; 3938 int ret = -EBUSY; 3939 int cpu; 3940 3941 preempt_disable_notrace(); 3942 3943 if (atomic_read(&buffer->record_disabled)) 3944 goto out; 3945 3946 cpu = raw_smp_processor_id(); 3947 3948 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3949 goto out; 3950 3951 cpu_buffer = buffer->buffers[cpu]; 3952 3953 if (atomic_read(&cpu_buffer->record_disabled)) 3954 goto out; 3955 3956 if (length > BUF_MAX_DATA_SIZE) 3957 goto out; 3958 3959 if (unlikely(trace_recursive_lock(cpu_buffer))) 3960 goto out; 3961 3962 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3963 if (!event) 3964 goto out_unlock; 3965 3966 body = rb_event_data(event); 3967 3968 memcpy(body, data, length); 3969 3970 rb_commit(cpu_buffer); 3971 3972 rb_wakeups(buffer, cpu_buffer); 3973 3974 ret = 0; 3975 3976 out_unlock: 3977 trace_recursive_unlock(cpu_buffer); 3978 3979 out: 3980 preempt_enable_notrace(); 3981 3982 return ret; 3983 } 3984 EXPORT_SYMBOL_GPL(ring_buffer_write); 3985 3986 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3987 { 3988 struct buffer_page *reader = cpu_buffer->reader_page; 3989 struct buffer_page *head = rb_set_head_page(cpu_buffer); 3990 struct buffer_page *commit = cpu_buffer->commit_page; 3991 3992 /* In case of error, head will be NULL */ 3993 if (unlikely(!head)) 3994 return true; 3995 3996 /* Reader should exhaust content in reader page */ 3997 if (reader->read != rb_page_commit(reader)) 3998 return false; 3999 4000 /* 4001 * If writers are committing on the reader page, knowing all 4002 * committed content has been read, the ring buffer is empty. 4003 */ 4004 if (commit == reader) 4005 return true; 4006 4007 /* 4008 * If writers are committing on a page other than reader page 4009 * and head page, there should always be content to read. 4010 */ 4011 if (commit != head) 4012 return false; 4013 4014 /* 4015 * Writers are committing on the head page, we just need 4016 * to care about there're committed data, and the reader will 4017 * swap reader page with head page when it is to read data. 4018 */ 4019 return rb_page_commit(commit) == 0; 4020 } 4021 4022 /** 4023 * ring_buffer_record_disable - stop all writes into the buffer 4024 * @buffer: The ring buffer to stop writes to. 4025 * 4026 * This prevents all writes to the buffer. Any attempt to write 4027 * to the buffer after this will fail and return NULL. 4028 * 4029 * The caller should call synchronize_rcu() after this. 4030 */ 4031 void ring_buffer_record_disable(struct trace_buffer *buffer) 4032 { 4033 atomic_inc(&buffer->record_disabled); 4034 } 4035 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 4036 4037 /** 4038 * ring_buffer_record_enable - enable writes to the buffer 4039 * @buffer: The ring buffer to enable writes 4040 * 4041 * Note, multiple disables will need the same number of enables 4042 * to truly enable the writing (much like preempt_disable). 4043 */ 4044 void ring_buffer_record_enable(struct trace_buffer *buffer) 4045 { 4046 atomic_dec(&buffer->record_disabled); 4047 } 4048 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 4049 4050 /** 4051 * ring_buffer_record_off - stop all writes into the buffer 4052 * @buffer: The ring buffer to stop writes to. 4053 * 4054 * This prevents all writes to the buffer. Any attempt to write 4055 * to the buffer after this will fail and return NULL. 
4056 * 4057 * This is different than ring_buffer_record_disable() as 4058 * it works like an on/off switch, where as the disable() version 4059 * must be paired with a enable(). 4060 */ 4061 void ring_buffer_record_off(struct trace_buffer *buffer) 4062 { 4063 unsigned int rd; 4064 unsigned int new_rd; 4065 4066 do { 4067 rd = atomic_read(&buffer->record_disabled); 4068 new_rd = rd | RB_BUFFER_OFF; 4069 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 4070 } 4071 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 4072 4073 /** 4074 * ring_buffer_record_on - restart writes into the buffer 4075 * @buffer: The ring buffer to start writes to. 4076 * 4077 * This enables all writes to the buffer that was disabled by 4078 * ring_buffer_record_off(). 4079 * 4080 * This is different than ring_buffer_record_enable() as 4081 * it works like an on/off switch, where as the enable() version 4082 * must be paired with a disable(). 4083 */ 4084 void ring_buffer_record_on(struct trace_buffer *buffer) 4085 { 4086 unsigned int rd; 4087 unsigned int new_rd; 4088 4089 do { 4090 rd = atomic_read(&buffer->record_disabled); 4091 new_rd = rd & ~RB_BUFFER_OFF; 4092 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 4093 } 4094 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 4095 4096 /** 4097 * ring_buffer_record_is_on - return true if the ring buffer can write 4098 * @buffer: The ring buffer to see if write is enabled 4099 * 4100 * Returns true if the ring buffer is in a state that it accepts writes. 4101 */ 4102 bool ring_buffer_record_is_on(struct trace_buffer *buffer) 4103 { 4104 return !atomic_read(&buffer->record_disabled); 4105 } 4106 4107 /** 4108 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 4109 * @buffer: The ring buffer to see if write is set enabled 4110 * 4111 * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 4112 * Note that this does NOT mean it is in a writable state. 4113 * 4114 * It may return true when the ring buffer has been disabled by 4115 * ring_buffer_record_disable(), as that is a temporary disabling of 4116 * the ring buffer. 4117 */ 4118 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 4119 { 4120 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 4121 } 4122 4123 /** 4124 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 4125 * @buffer: The ring buffer to stop writes to. 4126 * @cpu: The CPU buffer to stop 4127 * 4128 * This prevents all writes to the buffer. Any attempt to write 4129 * to the buffer after this will fail and return NULL. 4130 * 4131 * The caller should call synchronize_rcu() after this. 4132 */ 4133 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 4134 { 4135 struct ring_buffer_per_cpu *cpu_buffer; 4136 4137 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4138 return; 4139 4140 cpu_buffer = buffer->buffers[cpu]; 4141 atomic_inc(&cpu_buffer->record_disabled); 4142 } 4143 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 4144 4145 /** 4146 * ring_buffer_record_enable_cpu - enable writes to the buffer 4147 * @buffer: The ring buffer to enable writes 4148 * @cpu: The CPU to enable. 4149 * 4150 * Note, multiple disables will need the same number of enables 4151 * to truly enable the writing (much like preempt_disable). 
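 *
 * A minimal sketch of the expected pairing (illustrative only; it is
 * assumed @cpu is a valid CPU for this buffer):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_rcu();
 *	... inspect the cpu buffer without new writes racing in ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);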
4152 */ 4153 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4154 { 4155 struct ring_buffer_per_cpu *cpu_buffer; 4156 4157 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4158 return; 4159 4160 cpu_buffer = buffer->buffers[cpu]; 4161 atomic_dec(&cpu_buffer->record_disabled); 4162 } 4163 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4164 4165 /* 4166 * The total entries in the ring buffer is the running counter 4167 * of entries entered into the ring buffer, minus the sum of 4168 * the entries read from the ring buffer and the number of 4169 * entries that were overwritten. 4170 */ 4171 static inline unsigned long 4172 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4173 { 4174 return local_read(&cpu_buffer->entries) - 4175 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4176 } 4177 4178 /** 4179 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4180 * @buffer: The ring buffer 4181 * @cpu: The per CPU buffer to read from. 4182 */ 4183 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4184 { 4185 unsigned long flags; 4186 struct ring_buffer_per_cpu *cpu_buffer; 4187 struct buffer_page *bpage; 4188 u64 ret = 0; 4189 4190 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4191 return 0; 4192 4193 cpu_buffer = buffer->buffers[cpu]; 4194 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4195 /* 4196 * if the tail is on reader_page, oldest time stamp is on the reader 4197 * page 4198 */ 4199 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4200 bpage = cpu_buffer->reader_page; 4201 else 4202 bpage = rb_set_head_page(cpu_buffer); 4203 if (bpage) 4204 ret = bpage->page->time_stamp; 4205 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4206 4207 return ret; 4208 } 4209 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4210 4211 /** 4212 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4213 * @buffer: The ring buffer 4214 * @cpu: The per CPU buffer to read from. 4215 */ 4216 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4217 { 4218 struct ring_buffer_per_cpu *cpu_buffer; 4219 unsigned long ret; 4220 4221 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4222 return 0; 4223 4224 cpu_buffer = buffer->buffers[cpu]; 4225 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4226 4227 return ret; 4228 } 4229 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4230 4231 /** 4232 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4233 * @buffer: The ring buffer 4234 * @cpu: The per CPU buffer to get the entries from. 4235 */ 4236 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4237 { 4238 struct ring_buffer_per_cpu *cpu_buffer; 4239 4240 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4241 return 0; 4242 4243 cpu_buffer = buffer->buffers[cpu]; 4244 4245 return rb_num_of_entries(cpu_buffer); 4246 } 4247 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4248 4249 /** 4250 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4251 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4252 * @buffer: The ring buffer 4253 * @cpu: The per CPU buffer to get the number of overruns from 4254 */ 4255 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4256 { 4257 struct ring_buffer_per_cpu *cpu_buffer; 4258 unsigned long ret; 4259 4260 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4261 return 0; 4262 4263 cpu_buffer = buffer->buffers[cpu]; 4264 ret = local_read(&cpu_buffer->overrun); 4265 4266 return ret; 4267 } 4268 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4269 4270 /** 4271 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4272 * commits failing due to the buffer wrapping around while there are uncommitted 4273 * events, such as during an interrupt storm. 4274 * @buffer: The ring buffer 4275 * @cpu: The per CPU buffer to get the number of overruns from 4276 */ 4277 unsigned long 4278 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4279 { 4280 struct ring_buffer_per_cpu *cpu_buffer; 4281 unsigned long ret; 4282 4283 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4284 return 0; 4285 4286 cpu_buffer = buffer->buffers[cpu]; 4287 ret = local_read(&cpu_buffer->commit_overrun); 4288 4289 return ret; 4290 } 4291 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4292 4293 /** 4294 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4295 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4296 * @buffer: The ring buffer 4297 * @cpu: The per CPU buffer to get the number of overruns from 4298 */ 4299 unsigned long 4300 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4301 { 4302 struct ring_buffer_per_cpu *cpu_buffer; 4303 unsigned long ret; 4304 4305 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4306 return 0; 4307 4308 cpu_buffer = buffer->buffers[cpu]; 4309 ret = local_read(&cpu_buffer->dropped_events); 4310 4311 return ret; 4312 } 4313 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4314 4315 /** 4316 * ring_buffer_read_events_cpu - get the number of events successfully read 4317 * @buffer: The ring buffer 4318 * @cpu: The per CPU buffer to get the number of events read 4319 */ 4320 unsigned long 4321 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4322 { 4323 struct ring_buffer_per_cpu *cpu_buffer; 4324 4325 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4326 return 0; 4327 4328 cpu_buffer = buffer->buffers[cpu]; 4329 return cpu_buffer->read; 4330 } 4331 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4332 4333 /** 4334 * ring_buffer_entries - get the number of entries in a buffer 4335 * @buffer: The ring buffer 4336 * 4337 * Returns the total number of entries in the ring buffer 4338 * (all CPU entries) 4339 */ 4340 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4341 { 4342 struct ring_buffer_per_cpu *cpu_buffer; 4343 unsigned long entries = 0; 4344 int cpu; 4345 4346 /* if you care about this being correct, lock the buffer */ 4347 for_each_buffer_cpu(buffer, cpu) { 4348 cpu_buffer = buffer->buffers[cpu]; 4349 entries += rb_num_of_entries(cpu_buffer); 4350 } 4351 4352 return entries; 4353 } 4354 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4355 4356 /** 4357 * ring_buffer_overruns - get the number of overruns in buffer 4358 * @buffer: The ring buffer 4359 * 4360 * Returns the total number of overruns in the ring buffer 4361 * (all CPU entries) 4362 */ 4363 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4364 { 4365 struct ring_buffer_per_cpu *cpu_buffer; 4366 unsigned long overruns = 0; 4367 int cpu; 4368 4369 /* 
if you care about this being correct, lock the buffer */ 4370 for_each_buffer_cpu(buffer, cpu) { 4371 cpu_buffer = buffer->buffers[cpu]; 4372 overruns += local_read(&cpu_buffer->overrun); 4373 } 4374 4375 return overruns; 4376 } 4377 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4378 4379 static void rb_iter_reset(struct ring_buffer_iter *iter) 4380 { 4381 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4382 4383 /* Iterator usage is expected to have record disabled */ 4384 iter->head_page = cpu_buffer->reader_page; 4385 iter->head = cpu_buffer->reader_page->read; 4386 iter->next_event = iter->head; 4387 4388 iter->cache_reader_page = iter->head_page; 4389 iter->cache_read = cpu_buffer->read; 4390 4391 if (iter->head) { 4392 iter->read_stamp = cpu_buffer->read_stamp; 4393 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4394 } else { 4395 iter->read_stamp = iter->head_page->page->time_stamp; 4396 iter->page_stamp = iter->read_stamp; 4397 } 4398 } 4399 4400 /** 4401 * ring_buffer_iter_reset - reset an iterator 4402 * @iter: The iterator to reset 4403 * 4404 * Resets the iterator, so that it will start from the beginning 4405 * again. 4406 */ 4407 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4408 { 4409 struct ring_buffer_per_cpu *cpu_buffer; 4410 unsigned long flags; 4411 4412 if (!iter) 4413 return; 4414 4415 cpu_buffer = iter->cpu_buffer; 4416 4417 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4418 rb_iter_reset(iter); 4419 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4420 } 4421 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4422 4423 /** 4424 * ring_buffer_iter_empty - check if an iterator has no more to read 4425 * @iter: The iterator to check 4426 */ 4427 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4428 { 4429 struct ring_buffer_per_cpu *cpu_buffer; 4430 struct buffer_page *reader; 4431 struct buffer_page *head_page; 4432 struct buffer_page *commit_page; 4433 struct buffer_page *curr_commit_page; 4434 unsigned commit; 4435 u64 curr_commit_ts; 4436 u64 commit_ts; 4437 4438 cpu_buffer = iter->cpu_buffer; 4439 reader = cpu_buffer->reader_page; 4440 head_page = cpu_buffer->head_page; 4441 commit_page = cpu_buffer->commit_page; 4442 commit_ts = commit_page->page->time_stamp; 4443 4444 /* 4445 * When the writer goes across pages, it issues a cmpxchg which 4446 * is a mb(), which will synchronize with the rmb here. 
4447 * (see rb_tail_page_update()) 4448 */ 4449 smp_rmb(); 4450 commit = rb_page_commit(commit_page); 4451 /* We want to make sure that the commit page doesn't change */ 4452 smp_rmb(); 4453 4454 /* Make sure commit page didn't change */ 4455 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4456 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4457 4458 /* If the commit page changed, then there's more data */ 4459 if (curr_commit_page != commit_page || 4460 curr_commit_ts != commit_ts) 4461 return 0; 4462 4463 /* Still racy, as it may return a false positive, but that's OK */ 4464 return ((iter->head_page == commit_page && iter->head >= commit) || 4465 (iter->head_page == reader && commit_page == head_page && 4466 head_page->read == commit && 4467 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4468 } 4469 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4470 4471 static void 4472 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4473 struct ring_buffer_event *event) 4474 { 4475 u64 delta; 4476 4477 switch (event->type_len) { 4478 case RINGBUF_TYPE_PADDING: 4479 return; 4480 4481 case RINGBUF_TYPE_TIME_EXTEND: 4482 delta = rb_event_time_stamp(event); 4483 cpu_buffer->read_stamp += delta; 4484 return; 4485 4486 case RINGBUF_TYPE_TIME_STAMP: 4487 delta = rb_event_time_stamp(event); 4488 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4489 cpu_buffer->read_stamp = delta; 4490 return; 4491 4492 case RINGBUF_TYPE_DATA: 4493 cpu_buffer->read_stamp += event->time_delta; 4494 return; 4495 4496 default: 4497 RB_WARN_ON(cpu_buffer, 1); 4498 } 4499 return; 4500 } 4501 4502 static void 4503 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4504 struct ring_buffer_event *event) 4505 { 4506 u64 delta; 4507 4508 switch (event->type_len) { 4509 case RINGBUF_TYPE_PADDING: 4510 return; 4511 4512 case RINGBUF_TYPE_TIME_EXTEND: 4513 delta = rb_event_time_stamp(event); 4514 iter->read_stamp += delta; 4515 return; 4516 4517 case RINGBUF_TYPE_TIME_STAMP: 4518 delta = rb_event_time_stamp(event); 4519 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4520 iter->read_stamp = delta; 4521 return; 4522 4523 case RINGBUF_TYPE_DATA: 4524 iter->read_stamp += event->time_delta; 4525 return; 4526 4527 default: 4528 RB_WARN_ON(iter->cpu_buffer, 1); 4529 } 4530 return; 4531 } 4532 4533 static struct buffer_page * 4534 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4535 { 4536 struct buffer_page *reader = NULL; 4537 unsigned long overwrite; 4538 unsigned long flags; 4539 int nr_loops = 0; 4540 int ret; 4541 4542 local_irq_save(flags); 4543 arch_spin_lock(&cpu_buffer->lock); 4544 4545 again: 4546 /* 4547 * This should normally only loop twice. But because the 4548 * start of the reader inserts an empty page, it causes 4549 * a case where we will loop three times. There should be no 4550 * reason to loop four times (that I know of). 
4551 */ 4552 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4553 reader = NULL; 4554 goto out; 4555 } 4556 4557 reader = cpu_buffer->reader_page; 4558 4559 /* If there's more to read, return this page */ 4560 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4561 goto out; 4562 4563 /* Never should we have an index greater than the size */ 4564 if (RB_WARN_ON(cpu_buffer, 4565 cpu_buffer->reader_page->read > rb_page_size(reader))) 4566 goto out; 4567 4568 /* check if we caught up to the tail */ 4569 reader = NULL; 4570 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4571 goto out; 4572 4573 /* Don't bother swapping if the ring buffer is empty */ 4574 if (rb_num_of_entries(cpu_buffer) == 0) 4575 goto out; 4576 4577 /* 4578 * Reset the reader page to size zero. 4579 */ 4580 local_set(&cpu_buffer->reader_page->write, 0); 4581 local_set(&cpu_buffer->reader_page->entries, 0); 4582 local_set(&cpu_buffer->reader_page->page->commit, 0); 4583 cpu_buffer->reader_page->real_end = 0; 4584 4585 spin: 4586 /* 4587 * Splice the empty reader page into the list around the head. 4588 */ 4589 reader = rb_set_head_page(cpu_buffer); 4590 if (!reader) 4591 goto out; 4592 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4593 cpu_buffer->reader_page->list.prev = reader->list.prev; 4594 4595 /* 4596 * cpu_buffer->pages just needs to point to the buffer, it 4597 * has no specific buffer page to point to. Lets move it out 4598 * of our way so we don't accidentally swap it. 4599 */ 4600 cpu_buffer->pages = reader->list.prev; 4601 4602 /* The reader page will be pointing to the new head */ 4603 rb_set_list_to_head(&cpu_buffer->reader_page->list); 4604 4605 /* 4606 * We want to make sure we read the overruns after we set up our 4607 * pointers to the next object. The writer side does a 4608 * cmpxchg to cross pages which acts as the mb on the writer 4609 * side. Note, the reader will constantly fail the swap 4610 * while the writer is updating the pointers, so this 4611 * guarantees that the overwrite recorded here is the one we 4612 * want to compare with the last_overrun. 4613 */ 4614 smp_mb(); 4615 overwrite = local_read(&(cpu_buffer->overrun)); 4616 4617 /* 4618 * Here's the tricky part. 4619 * 4620 * We need to move the pointer past the header page. 4621 * But we can only do that if a writer is not currently 4622 * moving it. The page before the header page has the 4623 * flag bit '1' set if it is pointing to the page we want. 4624 * but if the writer is in the process of moving it 4625 * than it will be '2' or already moved '0'. 4626 */ 4627 4628 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 4629 4630 /* 4631 * If we did not convert it, then we must try again. 4632 */ 4633 if (!ret) 4634 goto spin; 4635 4636 /* 4637 * Yay! We succeeded in replacing the page. 4638 * 4639 * Now make the new head point back to the reader page. 
4640 */ 4641 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4642 rb_inc_page(&cpu_buffer->head_page); 4643 4644 local_inc(&cpu_buffer->pages_read); 4645 4646 /* Finally update the reader page to the new head */ 4647 cpu_buffer->reader_page = reader; 4648 cpu_buffer->reader_page->read = 0; 4649 4650 if (overwrite != cpu_buffer->last_overrun) { 4651 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4652 cpu_buffer->last_overrun = overwrite; 4653 } 4654 4655 goto again; 4656 4657 out: 4658 /* Update the read_stamp on the first event */ 4659 if (reader && reader->read == 0) 4660 cpu_buffer->read_stamp = reader->page->time_stamp; 4661 4662 arch_spin_unlock(&cpu_buffer->lock); 4663 local_irq_restore(flags); 4664 4665 /* 4666 * The writer has preempt disable, wait for it. But not forever 4667 * Although, 1 second is pretty much "forever" 4668 */ 4669 #define USECS_WAIT 1000000 4670 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4671 /* If the write is past the end of page, a writer is still updating it */ 4672 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4673 break; 4674 4675 udelay(1); 4676 4677 /* Get the latest version of the reader write value */ 4678 smp_rmb(); 4679 } 4680 4681 /* The writer is not moving forward? Something is wrong */ 4682 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4683 reader = NULL; 4684 4685 /* 4686 * Make sure we see any padding after the write update 4687 * (see rb_reset_tail()) 4688 */ 4689 smp_rmb(); 4690 4691 4692 return reader; 4693 } 4694 4695 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4696 { 4697 struct ring_buffer_event *event; 4698 struct buffer_page *reader; 4699 unsigned length; 4700 4701 reader = rb_get_reader_page(cpu_buffer); 4702 4703 /* This function should not be called when buffer is empty */ 4704 if (RB_WARN_ON(cpu_buffer, !reader)) 4705 return; 4706 4707 event = rb_reader_event(cpu_buffer); 4708 4709 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4710 cpu_buffer->read++; 4711 4712 rb_update_read_stamp(cpu_buffer, event); 4713 4714 length = rb_event_length(event); 4715 cpu_buffer->reader_page->read += length; 4716 } 4717 4718 static void rb_advance_iter(struct ring_buffer_iter *iter) 4719 { 4720 struct ring_buffer_per_cpu *cpu_buffer; 4721 4722 cpu_buffer = iter->cpu_buffer; 4723 4724 /* If head == next_event then we need to jump to the next event */ 4725 if (iter->head == iter->next_event) { 4726 /* If the event gets overwritten again, there's nothing to do */ 4727 if (rb_iter_head_event(iter) == NULL) 4728 return; 4729 } 4730 4731 iter->head = iter->next_event; 4732 4733 /* 4734 * Check if we are at the end of the buffer. 4735 */ 4736 if (iter->next_event >= rb_page_size(iter->head_page)) { 4737 /* discarded commits can make the page empty */ 4738 if (iter->head_page == cpu_buffer->commit_page) 4739 return; 4740 rb_inc_iter(iter); 4741 return; 4742 } 4743 4744 rb_update_iter_read_stamp(iter, iter->event); 4745 } 4746 4747 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4748 { 4749 return cpu_buffer->lost_events; 4750 } 4751 4752 static struct ring_buffer_event * 4753 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4754 unsigned long *lost_events) 4755 { 4756 struct ring_buffer_event *event; 4757 struct buffer_page *reader; 4758 int nr_loops = 0; 4759 4760 if (ts) 4761 *ts = 0; 4762 again: 4763 /* 4764 * We repeat when a time extend is encountered. 
4765 * Since the time extend is always attached to a data event, 4766 * we should never loop more than once. 4767 * (We never hit the following condition more than twice). 4768 */ 4769 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4770 return NULL; 4771 4772 reader = rb_get_reader_page(cpu_buffer); 4773 if (!reader) 4774 return NULL; 4775 4776 event = rb_reader_event(cpu_buffer); 4777 4778 switch (event->type_len) { 4779 case RINGBUF_TYPE_PADDING: 4780 if (rb_null_event(event)) 4781 RB_WARN_ON(cpu_buffer, 1); 4782 /* 4783 * Because the writer could be discarding every 4784 * event it creates (which would probably be bad) 4785 * if we were to go back to "again" then we may never 4786 * catch up, and will trigger the warn on, or lock 4787 * the box. Return the padding, and we will release 4788 * the current locks, and try again. 4789 */ 4790 return event; 4791 4792 case RINGBUF_TYPE_TIME_EXTEND: 4793 /* Internal data, OK to advance */ 4794 rb_advance_reader(cpu_buffer); 4795 goto again; 4796 4797 case RINGBUF_TYPE_TIME_STAMP: 4798 if (ts) { 4799 *ts = rb_event_time_stamp(event); 4800 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4801 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4802 cpu_buffer->cpu, ts); 4803 } 4804 /* Internal data, OK to advance */ 4805 rb_advance_reader(cpu_buffer); 4806 goto again; 4807 4808 case RINGBUF_TYPE_DATA: 4809 if (ts && !(*ts)) { 4810 *ts = cpu_buffer->read_stamp + event->time_delta; 4811 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4812 cpu_buffer->cpu, ts); 4813 } 4814 if (lost_events) 4815 *lost_events = rb_lost_events(cpu_buffer); 4816 return event; 4817 4818 default: 4819 RB_WARN_ON(cpu_buffer, 1); 4820 } 4821 4822 return NULL; 4823 } 4824 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4825 4826 static struct ring_buffer_event * 4827 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4828 { 4829 struct trace_buffer *buffer; 4830 struct ring_buffer_per_cpu *cpu_buffer; 4831 struct ring_buffer_event *event; 4832 int nr_loops = 0; 4833 4834 if (ts) 4835 *ts = 0; 4836 4837 cpu_buffer = iter->cpu_buffer; 4838 buffer = cpu_buffer->buffer; 4839 4840 /* 4841 * Check if someone performed a consuming read to 4842 * the buffer. A consuming read invalidates the iterator 4843 * and we need to reset the iterator in this case. 4844 */ 4845 if (unlikely(iter->cache_read != cpu_buffer->read || 4846 iter->cache_reader_page != cpu_buffer->reader_page)) 4847 rb_iter_reset(iter); 4848 4849 again: 4850 if (ring_buffer_iter_empty(iter)) 4851 return NULL; 4852 4853 /* 4854 * As the writer can mess with what the iterator is trying 4855 * to read, just give up if we fail to get an event after 4856 * three tries. The iterator is not as reliable when reading 4857 * the ring buffer with an active write as the consumer is. 4858 * Do not warn if the three failures is reached. 
4859 */ 4860 if (++nr_loops > 3) 4861 return NULL; 4862 4863 if (rb_per_cpu_empty(cpu_buffer)) 4864 return NULL; 4865 4866 if (iter->head >= rb_page_size(iter->head_page)) { 4867 rb_inc_iter(iter); 4868 goto again; 4869 } 4870 4871 event = rb_iter_head_event(iter); 4872 if (!event) 4873 goto again; 4874 4875 switch (event->type_len) { 4876 case RINGBUF_TYPE_PADDING: 4877 if (rb_null_event(event)) { 4878 rb_inc_iter(iter); 4879 goto again; 4880 } 4881 rb_advance_iter(iter); 4882 return event; 4883 4884 case RINGBUF_TYPE_TIME_EXTEND: 4885 /* Internal data, OK to advance */ 4886 rb_advance_iter(iter); 4887 goto again; 4888 4889 case RINGBUF_TYPE_TIME_STAMP: 4890 if (ts) { 4891 *ts = rb_event_time_stamp(event); 4892 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); 4893 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4894 cpu_buffer->cpu, ts); 4895 } 4896 /* Internal data, OK to advance */ 4897 rb_advance_iter(iter); 4898 goto again; 4899 4900 case RINGBUF_TYPE_DATA: 4901 if (ts && !(*ts)) { 4902 *ts = iter->read_stamp + event->time_delta; 4903 ring_buffer_normalize_time_stamp(buffer, 4904 cpu_buffer->cpu, ts); 4905 } 4906 return event; 4907 4908 default: 4909 RB_WARN_ON(cpu_buffer, 1); 4910 } 4911 4912 return NULL; 4913 } 4914 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 4915 4916 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) 4917 { 4918 if (likely(!in_nmi())) { 4919 raw_spin_lock(&cpu_buffer->reader_lock); 4920 return true; 4921 } 4922 4923 /* 4924 * If an NMI die dumps out the content of the ring buffer, 4925 * trylock must be used to prevent a deadlock if the NMI 4926 * preempted a task that holds the ring buffer locks. If 4927 * we get the lock then all is fine, if not, then continue 4928 * to do the read, but this can corrupt the ring buffer, 4929 * so it must be permanently disabled from future writes. 4930 * Reading from NMI is a one-shot deal. 4931 */ 4932 if (raw_spin_trylock(&cpu_buffer->reader_lock)) 4933 return true; 4934 4935 /* Continue without locking, but disable the ring buffer */ 4936 atomic_inc(&cpu_buffer->record_disabled); 4937 return false; 4938 } 4939 4940 static inline void 4941 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) 4942 { 4943 if (likely(locked)) 4944 raw_spin_unlock(&cpu_buffer->reader_lock); 4945 return; 4946 } 4947 4948 /** 4949 * ring_buffer_peek - peek at the next event to be read 4950 * @buffer: The ring buffer to read 4951 * @cpu: The cpu to peek at 4952 * @ts: The timestamp counter of this event. 4953 * @lost_events: a variable to store if events were lost (may be NULL) 4954 * 4955 * This will return the event that will be read next, but does 4956 * not consume the data.
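 *
 * A rough usage sketch (illustrative only):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		pr_info("next event: %u bytes\n",
 *			ring_buffer_event_length(event));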
4957 */ 4958 struct ring_buffer_event * 4959 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, 4960 unsigned long *lost_events) 4961 { 4962 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4963 struct ring_buffer_event *event; 4964 unsigned long flags; 4965 bool dolock; 4966 4967 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4968 return NULL; 4969 4970 again: 4971 local_irq_save(flags); 4972 dolock = rb_reader_lock(cpu_buffer); 4973 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4974 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4975 rb_advance_reader(cpu_buffer); 4976 rb_reader_unlock(cpu_buffer, dolock); 4977 local_irq_restore(flags); 4978 4979 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4980 goto again; 4981 4982 return event; 4983 } 4984 4985 /** ring_buffer_iter_dropped - report if there are dropped events 4986 * @iter: The ring buffer iterator 4987 * 4988 * Returns true if there were dropped events since the last peek. 4989 */ 4990 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) 4991 { 4992 bool ret = iter->missed_events != 0; 4993 4994 iter->missed_events = 0; 4995 return ret; 4996 } 4997 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); 4998 4999 /** 5000 * ring_buffer_iter_peek - peek at the next event to be read 5001 * @iter: The ring buffer iterator 5002 * @ts: The timestamp counter of this event. 5003 * 5004 * This will return the event that will be read next, but does 5005 * not increment the iterator. 5006 */ 5007 struct ring_buffer_event * 5008 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 5009 { 5010 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5011 struct ring_buffer_event *event; 5012 unsigned long flags; 5013 5014 again: 5015 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5016 event = rb_iter_peek(iter, ts); 5017 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5018 5019 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5020 goto again; 5021 5022 return event; 5023 } 5024 5025 /** 5026 * ring_buffer_consume - return an event and consume it 5027 * @buffer: The ring buffer to get the next event from 5028 * @cpu: the cpu to read the buffer from 5029 * @ts: a variable to store the timestamp (may be NULL) 5030 * @lost_events: a variable to store if events were lost (may be NULL) 5031 * 5032 * Returns the next event in the ring buffer, and that event is consumed. 5033 * Meaning that sequential reads will keep returning a different event, 5034 * and eventually empty the ring buffer if the producer is slower.
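 *
 * A typical drain loop looks roughly like this (illustrative only;
 * process_event() stands in for whatever the caller does with the data):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);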
5035 */ 5036 struct ring_buffer_event * 5037 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 5038 unsigned long *lost_events) 5039 { 5040 struct ring_buffer_per_cpu *cpu_buffer; 5041 struct ring_buffer_event *event = NULL; 5042 unsigned long flags; 5043 bool dolock; 5044 5045 again: 5046 /* might be called in atomic */ 5047 preempt_disable(); 5048 5049 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5050 goto out; 5051 5052 cpu_buffer = buffer->buffers[cpu]; 5053 local_irq_save(flags); 5054 dolock = rb_reader_lock(cpu_buffer); 5055 5056 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 5057 if (event) { 5058 cpu_buffer->lost_events = 0; 5059 rb_advance_reader(cpu_buffer); 5060 } 5061 5062 rb_reader_unlock(cpu_buffer, dolock); 5063 local_irq_restore(flags); 5064 5065 out: 5066 preempt_enable(); 5067 5068 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5069 goto again; 5070 5071 return event; 5072 } 5073 EXPORT_SYMBOL_GPL(ring_buffer_consume); 5074 5075 /** 5076 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 5077 * @buffer: The ring buffer to read from 5078 * @cpu: The cpu buffer to iterate over 5079 * @flags: gfp flags to use for memory allocation 5080 * 5081 * This performs the initial preparations necessary to iterate 5082 * through the buffer. Memory is allocated, buffer recording 5083 * is disabled, and the iterator pointer is returned to the caller. 5084 * 5085 * Disabling buffer recording prevents the reading from being 5086 * corrupted. This is not a consuming read, so a producer is not 5087 * expected. 5088 * 5089 * After a sequence of ring_buffer_read_prepare calls, the user is 5090 * expected to make at least one call to ring_buffer_read_prepare_sync. 5091 * Afterwards, ring_buffer_read_start is invoked to get things going 5092 * for real. 5093 * 5094 * This overall must be paired with ring_buffer_read_finish. 5095 */ 5096 struct ring_buffer_iter * 5097 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 5098 { 5099 struct ring_buffer_per_cpu *cpu_buffer; 5100 struct ring_buffer_iter *iter; 5101 5102 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5103 return NULL; 5104 5105 iter = kzalloc(sizeof(*iter), flags); 5106 if (!iter) 5107 return NULL; 5108 5109 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); 5110 if (!iter->event) { 5111 kfree(iter); 5112 return NULL; 5113 } 5114 5115 cpu_buffer = buffer->buffers[cpu]; 5116 5117 iter->cpu_buffer = cpu_buffer; 5118 5119 atomic_inc(&cpu_buffer->resize_disabled); 5120 5121 return iter; 5122 } 5123 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 5124 5125 /** 5126 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 5127 * 5128 * All previously invoked ring_buffer_read_prepare calls to prepare 5129 * iterators will be synchronized. Afterwards, ring_buffer_read_start 5130 * calls on those iterators are allowed. 5131 */ 5132 void 5133 ring_buffer_read_prepare_sync(void) 5134 { 5135 synchronize_rcu(); 5136 } 5137 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 5138 5139 /** 5140 * ring_buffer_read_start - start a non consuming read of the buffer 5141 * @iter: The iterator returned by ring_buffer_read_prepare 5142 * 5143 * This finalizes the startup of an iteration through the buffer. 5144 * The iterator comes from a call to ring_buffer_read_prepare and 5145 * an intervening ring_buffer_read_prepare_sync must have been 5146 * performed. 5147 * 5148 * Must be paired with ring_buffer_read_finish.
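 *
 * The overall non-consuming read sequence is roughly (illustrative
 * only; error handling trimmed):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		... handle the event ...
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);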
5149 */ 5150 void 5151 ring_buffer_read_start(struct ring_buffer_iter *iter) 5152 { 5153 struct ring_buffer_per_cpu *cpu_buffer; 5154 unsigned long flags; 5155 5156 if (!iter) 5157 return; 5158 5159 cpu_buffer = iter->cpu_buffer; 5160 5161 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5162 arch_spin_lock(&cpu_buffer->lock); 5163 rb_iter_reset(iter); 5164 arch_spin_unlock(&cpu_buffer->lock); 5165 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5166 } 5167 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 5168 5169 /** 5170 * ring_buffer_read_finish - finish reading the iterator of the buffer 5171 * @iter: The iterator retrieved by ring_buffer_read_prepare 5172 * 5173 * This re-enables the recording to the buffer, and frees the 5174 * iterator. 5175 */ 5176 void 5177 ring_buffer_read_finish(struct ring_buffer_iter *iter) 5178 { 5179 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5180 unsigned long flags; 5181 5182 /* 5183 * Ring buffer is disabled from recording, here's a good place 5184 * to check the integrity of the ring buffer. 5185 * Must prevent readers from trying to read, as the check 5186 * clears the HEAD page and readers require it. 5187 */ 5188 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5189 rb_check_pages(cpu_buffer); 5190 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5191 5192 atomic_dec(&cpu_buffer->resize_disabled); 5193 kfree(iter->event); 5194 kfree(iter); 5195 } 5196 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 5197 5198 /** 5199 * ring_buffer_iter_advance - advance the iterator to the next location 5200 * @iter: The ring buffer iterator 5201 * 5202 * Move the location of the iterator such that the next read will 5203 * be the next location of the iterator. 5204 */ 5205 void ring_buffer_iter_advance(struct ring_buffer_iter *iter) 5206 { 5207 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5208 unsigned long flags; 5209 5210 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5211 5212 rb_advance_iter(iter); 5213 5214 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5215 } 5216 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); 5217 5218 /** 5219 * ring_buffer_size - return the size of the ring buffer (in bytes) 5220 * @buffer: The ring buffer. 5221 * @cpu: The CPU to get ring buffer size from. 5222 */ 5223 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) 5224 { 5225 /* 5226 * Earlier, this method returned 5227 * BUF_PAGE_SIZE * buffer->nr_pages 5228 * Since the nr_pages field is now removed, we have converted this to 5229 * return the per cpu buffer value.
5230 */ 5231 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5232 return 0; 5233 5234 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5235 } 5236 EXPORT_SYMBOL_GPL(ring_buffer_size); 5237 5238 static void 5239 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5240 { 5241 rb_head_page_deactivate(cpu_buffer); 5242 5243 cpu_buffer->head_page 5244 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5245 local_set(&cpu_buffer->head_page->write, 0); 5246 local_set(&cpu_buffer->head_page->entries, 0); 5247 local_set(&cpu_buffer->head_page->page->commit, 0); 5248 5249 cpu_buffer->head_page->read = 0; 5250 5251 cpu_buffer->tail_page = cpu_buffer->head_page; 5252 cpu_buffer->commit_page = cpu_buffer->head_page; 5253 5254 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5255 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5256 local_set(&cpu_buffer->reader_page->write, 0); 5257 local_set(&cpu_buffer->reader_page->entries, 0); 5258 local_set(&cpu_buffer->reader_page->page->commit, 0); 5259 cpu_buffer->reader_page->read = 0; 5260 5261 local_set(&cpu_buffer->entries_bytes, 0); 5262 local_set(&cpu_buffer->overrun, 0); 5263 local_set(&cpu_buffer->commit_overrun, 0); 5264 local_set(&cpu_buffer->dropped_events, 0); 5265 local_set(&cpu_buffer->entries, 0); 5266 local_set(&cpu_buffer->committing, 0); 5267 local_set(&cpu_buffer->commits, 0); 5268 local_set(&cpu_buffer->pages_touched, 0); 5269 local_set(&cpu_buffer->pages_lost, 0); 5270 local_set(&cpu_buffer->pages_read, 0); 5271 cpu_buffer->last_pages_touch = 0; 5272 cpu_buffer->shortest_full = 0; 5273 cpu_buffer->read = 0; 5274 cpu_buffer->read_bytes = 0; 5275 5276 rb_time_set(&cpu_buffer->write_stamp, 0); 5277 rb_time_set(&cpu_buffer->before_stamp, 0); 5278 5279 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5280 5281 cpu_buffer->lost_events = 0; 5282 cpu_buffer->last_overrun = 0; 5283 5284 rb_head_page_activate(cpu_buffer); 5285 } 5286 5287 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5288 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5289 { 5290 unsigned long flags; 5291 5292 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5293 5294 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5295 goto out; 5296 5297 arch_spin_lock(&cpu_buffer->lock); 5298 5299 rb_reset_cpu(cpu_buffer); 5300 5301 arch_spin_unlock(&cpu_buffer->lock); 5302 5303 out: 5304 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5305 } 5306 5307 /** 5308 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5309 * @buffer: The ring buffer to reset a per cpu buffer of 5310 * @cpu: The CPU buffer to be reset 5311 */ 5312 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5313 { 5314 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5315 5316 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5317 return; 5318 5319 /* prevent another thread from changing buffer sizes */ 5320 mutex_lock(&buffer->mutex); 5321 5322 atomic_inc(&cpu_buffer->resize_disabled); 5323 atomic_inc(&cpu_buffer->record_disabled); 5324 5325 /* Make sure all commits have finished */ 5326 synchronize_rcu(); 5327 5328 reset_disabled_cpu_buffer(cpu_buffer); 5329 5330 atomic_dec(&cpu_buffer->record_disabled); 5331 atomic_dec(&cpu_buffer->resize_disabled); 5332 5333 mutex_unlock(&buffer->mutex); 5334 } 5335 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5336 5337 /** 5338 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5339 * @buffer: The ring buffer to reset a per cpu buffer 
of 5341 */ 5342 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5343 { 5344 struct ring_buffer_per_cpu *cpu_buffer; 5345 int cpu; 5346 5347 /* prevent another thread from changing buffer sizes */ 5348 mutex_lock(&buffer->mutex); 5349 5350 for_each_online_buffer_cpu(buffer, cpu) { 5351 cpu_buffer = buffer->buffers[cpu]; 5352 5353 atomic_inc(&cpu_buffer->resize_disabled); 5354 atomic_inc(&cpu_buffer->record_disabled); 5355 } 5356 5357 /* Make sure all commits have finished */ 5358 synchronize_rcu(); 5359 5360 for_each_online_buffer_cpu(buffer, cpu) { 5361 cpu_buffer = buffer->buffers[cpu]; 5362 5363 reset_disabled_cpu_buffer(cpu_buffer); 5364 5365 atomic_dec(&cpu_buffer->record_disabled); 5366 atomic_dec(&cpu_buffer->resize_disabled); 5367 } 5368 5369 mutex_unlock(&buffer->mutex); 5370 } 5371 5372 /** 5373 * ring_buffer_reset - reset a ring buffer 5374 * @buffer: The ring buffer to reset all cpu buffers 5375 */ 5376 void ring_buffer_reset(struct trace_buffer *buffer) 5377 { 5378 struct ring_buffer_per_cpu *cpu_buffer; 5379 int cpu; 5380 5381 /* prevent another thread from changing buffer sizes */ 5382 mutex_lock(&buffer->mutex); 5383 5384 for_each_buffer_cpu(buffer, cpu) { 5385 cpu_buffer = buffer->buffers[cpu]; 5386 5387 atomic_inc(&cpu_buffer->resize_disabled); 5388 atomic_inc(&cpu_buffer->record_disabled); 5389 } 5390 5391 /* Make sure all commits have finished */ 5392 synchronize_rcu(); 5393 5394 for_each_buffer_cpu(buffer, cpu) { 5395 cpu_buffer = buffer->buffers[cpu]; 5396 5397 reset_disabled_cpu_buffer(cpu_buffer); 5398 5399 atomic_dec(&cpu_buffer->record_disabled); 5400 atomic_dec(&cpu_buffer->resize_disabled); 5401 } 5402 5403 mutex_unlock(&buffer->mutex); 5404 } 5405 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5406 5407 /** 5408 * ring_buffer_empty - is the ring buffer empty? 5409 * @buffer: The ring buffer to test 5410 */ 5411 bool ring_buffer_empty(struct trace_buffer *buffer) 5412 { 5413 struct ring_buffer_per_cpu *cpu_buffer; 5414 unsigned long flags; 5415 bool dolock; 5416 int cpu; 5417 int ret; 5418 5419 /* yes this is racy, but if you don't like the race, lock the buffer */ 5420 for_each_buffer_cpu(buffer, cpu) { 5421 cpu_buffer = buffer->buffers[cpu]; 5422 local_irq_save(flags); 5423 dolock = rb_reader_lock(cpu_buffer); 5424 ret = rb_per_cpu_empty(cpu_buffer); 5425 rb_reader_unlock(cpu_buffer, dolock); 5426 local_irq_restore(flags); 5427 5428 if (!ret) 5429 return false; 5430 } 5431 5432 return true; 5433 } 5434 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5435 5436 /** 5437 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5438 * @buffer: The ring buffer 5439 * @cpu: The CPU buffer to test 5440 */ 5441 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5442 { 5443 struct ring_buffer_per_cpu *cpu_buffer; 5444 unsigned long flags; 5445 bool dolock; 5446 int ret; 5447 5448 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5449 return true; 5450 5451 cpu_buffer = buffer->buffers[cpu]; 5452 local_irq_save(flags); 5453 dolock = rb_reader_lock(cpu_buffer); 5454 ret = rb_per_cpu_empty(cpu_buffer); 5455 rb_reader_unlock(cpu_buffer, dolock); 5456 local_irq_restore(flags); 5457 5458 return ret; 5459 } 5460 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5461 5462 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5463 /** 5464 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5465 * @buffer_a: One buffer to swap with 5466 * @buffer_b: The other buffer to swap with 5467 * @cpu: the CPU of the buffers to swap 5468 * 5469 * This function is useful for tracers that want to take a "snapshot" 5470 * of a CPU buffer and has another back up buffer lying around. 5471 * it is expected that the tracer handles the cpu buffer not being 5472 * used at the moment. 5473 */ 5474 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5475 struct trace_buffer *buffer_b, int cpu) 5476 { 5477 struct ring_buffer_per_cpu *cpu_buffer_a; 5478 struct ring_buffer_per_cpu *cpu_buffer_b; 5479 int ret = -EINVAL; 5480 5481 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5482 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5483 goto out; 5484 5485 cpu_buffer_a = buffer_a->buffers[cpu]; 5486 cpu_buffer_b = buffer_b->buffers[cpu]; 5487 5488 /* At least make sure the two buffers are somewhat the same */ 5489 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5490 goto out; 5491 5492 ret = -EAGAIN; 5493 5494 if (atomic_read(&buffer_a->record_disabled)) 5495 goto out; 5496 5497 if (atomic_read(&buffer_b->record_disabled)) 5498 goto out; 5499 5500 if (atomic_read(&cpu_buffer_a->record_disabled)) 5501 goto out; 5502 5503 if (atomic_read(&cpu_buffer_b->record_disabled)) 5504 goto out; 5505 5506 /* 5507 * We can't do a synchronize_rcu here because this 5508 * function can be called in atomic context. 5509 * Normally this will be called from the same CPU as cpu. 5510 * If not it's up to the caller to protect this. 5511 */ 5512 atomic_inc(&cpu_buffer_a->record_disabled); 5513 atomic_inc(&cpu_buffer_b->record_disabled); 5514 5515 ret = -EBUSY; 5516 if (local_read(&cpu_buffer_a->committing)) 5517 goto out_dec; 5518 if (local_read(&cpu_buffer_b->committing)) 5519 goto out_dec; 5520 5521 buffer_a->buffers[cpu] = cpu_buffer_b; 5522 buffer_b->buffers[cpu] = cpu_buffer_a; 5523 5524 cpu_buffer_b->buffer = buffer_a; 5525 cpu_buffer_a->buffer = buffer_b; 5526 5527 ret = 0; 5528 5529 out_dec: 5530 atomic_dec(&cpu_buffer_a->record_disabled); 5531 atomic_dec(&cpu_buffer_b->record_disabled); 5532 out: 5533 return ret; 5534 } 5535 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5536 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5537 5538 /** 5539 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5540 * @buffer: the buffer to allocate for. 5541 * @cpu: the cpu buffer to allocate. 5542 * 5543 * This function is used in conjunction with ring_buffer_read_page. 5544 * When reading a full page from the ring buffer, these functions 5545 * can be used to speed up the process. The calling function should 5546 * allocate a few pages first with this function. 
Then when it 5547 * needs to get pages from the ring buffer, it passes the result 5548 * of this function into ring_buffer_read_page, which will swap 5549 * the page that was allocated, with the read page of the buffer. 5550 * 5551 * Returns: 5552 * The page allocated, or ERR_PTR 5553 */ 5554 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5555 { 5556 struct ring_buffer_per_cpu *cpu_buffer; 5557 struct buffer_data_page *bpage = NULL; 5558 unsigned long flags; 5559 struct page *page; 5560 5561 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5562 return ERR_PTR(-ENODEV); 5563 5564 cpu_buffer = buffer->buffers[cpu]; 5565 local_irq_save(flags); 5566 arch_spin_lock(&cpu_buffer->lock); 5567 5568 if (cpu_buffer->free_page) { 5569 bpage = cpu_buffer->free_page; 5570 cpu_buffer->free_page = NULL; 5571 } 5572 5573 arch_spin_unlock(&cpu_buffer->lock); 5574 local_irq_restore(flags); 5575 5576 if (bpage) 5577 goto out; 5578 5579 page = alloc_pages_node(cpu_to_node(cpu), 5580 GFP_KERNEL | __GFP_NORETRY, 0); 5581 if (!page) 5582 return ERR_PTR(-ENOMEM); 5583 5584 bpage = page_address(page); 5585 5586 out: 5587 rb_init_page(bpage); 5588 5589 return bpage; 5590 } 5591 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5592 5593 /** 5594 * ring_buffer_free_read_page - free an allocated read page 5595 * @buffer: the buffer the page was allocate for 5596 * @cpu: the cpu buffer the page came from 5597 * @data: the page to free 5598 * 5599 * Free a page allocated from ring_buffer_alloc_read_page. 5600 */ 5601 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5602 { 5603 struct ring_buffer_per_cpu *cpu_buffer; 5604 struct buffer_data_page *bpage = data; 5605 struct page *page = virt_to_page(bpage); 5606 unsigned long flags; 5607 5608 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) 5609 return; 5610 5611 cpu_buffer = buffer->buffers[cpu]; 5612 5613 /* If the page is still in use someplace else, we can't reuse it */ 5614 if (page_ref_count(page) > 1) 5615 goto out; 5616 5617 local_irq_save(flags); 5618 arch_spin_lock(&cpu_buffer->lock); 5619 5620 if (!cpu_buffer->free_page) { 5621 cpu_buffer->free_page = bpage; 5622 bpage = NULL; 5623 } 5624 5625 arch_spin_unlock(&cpu_buffer->lock); 5626 local_irq_restore(flags); 5627 5628 out: 5629 free_page((unsigned long)bpage); 5630 } 5631 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5632 5633 /** 5634 * ring_buffer_read_page - extract a page from the ring buffer 5635 * @buffer: buffer to extract from 5636 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5637 * @len: amount to extract 5638 * @cpu: the cpu of the buffer to extract 5639 * @full: should the extraction only happen when the page is full. 5640 * 5641 * This function will pull out a page from the ring buffer and consume it. 5642 * @data_page must be the address of the variable that was returned 5643 * from ring_buffer_alloc_read_page. This is because the page might be used 5644 * to swap with a page in the ring buffer. 5645 * 5646 * for example: 5647 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5648 * if (IS_ERR(rpage)) 5649 * return PTR_ERR(rpage); 5650 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5651 * if (ret >= 0) 5652 * process_page(rpage, ret); 5653 * 5654 * When @full is set, the function will not return true unless 5655 * the writer is off the reader page. 5656 * 5657 * Note: it is up to the calling functions to handle sleeps and wakeups. 
5658 * The ring buffer can be used anywhere in the kernel and can not 5659 * blindly call wake_up. The layer that uses the ring buffer must be 5660 * responsible for that. 5661 * 5662 * Returns: 5663 * >=0 if data has been transferred, returns the offset of consumed data. 5664 * <0 if no data has been transferred. 5665 */ 5666 int ring_buffer_read_page(struct trace_buffer *buffer, 5667 void **data_page, size_t len, int cpu, int full) 5668 { 5669 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5670 struct ring_buffer_event *event; 5671 struct buffer_data_page *bpage; 5672 struct buffer_page *reader; 5673 unsigned long missed_events; 5674 unsigned long flags; 5675 unsigned int commit; 5676 unsigned int read; 5677 u64 save_timestamp; 5678 int ret = -1; 5679 5680 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5681 goto out; 5682 5683 /* 5684 * If len is not big enough to hold the page header, then 5685 * we can not copy anything. 5686 */ 5687 if (len <= BUF_PAGE_HDR_SIZE) 5688 goto out; 5689 5690 len -= BUF_PAGE_HDR_SIZE; 5691 5692 if (!data_page) 5693 goto out; 5694 5695 bpage = *data_page; 5696 if (!bpage) 5697 goto out; 5698 5699 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5700 5701 reader = rb_get_reader_page(cpu_buffer); 5702 if (!reader) 5703 goto out_unlock; 5704 5705 event = rb_reader_event(cpu_buffer); 5706 5707 read = reader->read; 5708 commit = rb_page_commit(reader); 5709 5710 /* Check if any events were dropped */ 5711 missed_events = cpu_buffer->lost_events; 5712 5713 /* 5714 * If this page has been partially read or 5715 * if len is not big enough to read the rest of the page or 5716 * a writer is still on the page, then 5717 * we must copy the data from the page to the buffer. 5718 * Otherwise, we can simply swap the page with the one passed in. 5719 */ 5720 if (read || (len < (commit - read)) || 5721 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5722 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5723 unsigned int rpos = read; 5724 unsigned int pos = 0; 5725 unsigned int size; 5726 5727 /* 5728 * If a full page is expected, this can still be returned 5729 * if there's been a previous partial read and the 5730 * rest of the page can be read and the commit page is off 5731 * the reader page. 5732 */ 5733 if (full && 5734 (!read || (len < (commit - read)) || 5735 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5736 goto out_unlock; 5737 5738 if (len > (commit - read)) 5739 len = (commit - read); 5740 5741 /* Always keep the time extend and data together */ 5742 size = rb_event_ts_length(event); 5743 5744 if (len < size) 5745 goto out_unlock; 5746 5747 /* save the current timestamp, since the user will need it */ 5748 save_timestamp = cpu_buffer->read_stamp; 5749 5750 /* Need to copy one event at a time */ 5751 do { 5752 /* We need the size of one event, because 5753 * rb_advance_reader only advances by one event, 5754 * whereas rb_event_ts_length may include the size of 5755 * one or two events. 5756 * We have already ensured there's enough space if this 5757 * is a time extend. 
*/ 5758 size = rb_event_length(event); 5759 memcpy(bpage->data + pos, rpage->data + rpos, size); 5760 5761 len -= size; 5762 5763 rb_advance_reader(cpu_buffer); 5764 rpos = reader->read; 5765 pos += size; 5766 5767 if (rpos >= commit) 5768 break; 5769 5770 event = rb_reader_event(cpu_buffer); 5771 /* Always keep the time extend and data together */ 5772 size = rb_event_ts_length(event); 5773 } while (len >= size); 5774 5775 /* update bpage */ 5776 local_set(&bpage->commit, pos); 5777 bpage->time_stamp = save_timestamp; 5778 5779 /* we copied everything to the beginning */ 5780 read = 0; 5781 } else { 5782 /* update the entry counter */ 5783 cpu_buffer->read += rb_page_entries(reader); 5784 cpu_buffer->read_bytes += BUF_PAGE_SIZE; 5785 5786 /* swap the pages */ 5787 rb_init_page(bpage); 5788 bpage = reader->page; 5789 reader->page = *data_page; 5790 local_set(&reader->write, 0); 5791 local_set(&reader->entries, 0); 5792 reader->read = 0; 5793 *data_page = bpage; 5794 5795 /* 5796 * Use the real_end for the data size, 5797 * This gives us a chance to store the lost events 5798 * on the page. 5799 */ 5800 if (reader->real_end) 5801 local_set(&bpage->commit, reader->real_end); 5802 } 5803 ret = read; 5804 5805 cpu_buffer->lost_events = 0; 5806 5807 commit = local_read(&bpage->commit); 5808 /* 5809 * Set a flag in the commit field if we lost events 5810 */ 5811 if (missed_events) { 5812 /* If there is room at the end of the page to save the 5813 * missed events, then record it there. 5814 */ 5815 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 5816 memcpy(&bpage->data[commit], &missed_events, 5817 sizeof(missed_events)); 5818 local_add(RB_MISSED_STORED, &bpage->commit); 5819 commit += sizeof(missed_events); 5820 } 5821 local_add(RB_MISSED_EVENTS, &bpage->commit); 5822 } 5823 5824 /* 5825 * This page may be off to user land. Zero it out here. 5826 */ 5827 if (commit < BUF_PAGE_SIZE) 5828 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 5829 5830 out_unlock: 5831 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5832 5833 out: 5834 return ret; 5835 } 5836 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 5837 5838 /* 5839 * We only allocate new buffers, never free them if the CPU goes down. 5840 * If we were to free the buffer, then the user would lose any trace that was in 5841 * the buffer. 5842 */ 5843 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) 5844 { 5845 struct trace_buffer *buffer; 5846 long nr_pages_same; 5847 int cpu_i; 5848 unsigned long nr_pages; 5849 5850 buffer = container_of(node, struct trace_buffer, node); 5851 if (cpumask_test_cpu(cpu, buffer->cpumask)) 5852 return 0; 5853 5854 nr_pages = 0; 5855 nr_pages_same = 1; 5856 /* check if all cpu sizes are same */ 5857 for_each_buffer_cpu(buffer, cpu_i) { 5858 /* fill in the size from first enabled cpu */ 5859 if (nr_pages == 0) 5860 nr_pages = buffer->buffers[cpu_i]->nr_pages; 5861 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 5862 nr_pages_same = 0; 5863 break; 5864 } 5865 } 5866 /* allocate minimum pages, user can later expand it */ 5867 if (!nr_pages_same) 5868 nr_pages = 2; 5869 buffer->buffers[cpu] = 5870 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 5871 if (!buffer->buffers[cpu]) { 5872 WARN(1, "failed to allocate ring buffer on CPU %u\n", 5873 cpu); 5874 return -ENOMEM; 5875 } 5876 smp_wmb(); 5877 cpumask_set_cpu(cpu, buffer->cpumask); 5878 return 0; 5879 } 5880 5881 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST 5882 /* 5883 * This is a basic integrity check of the ring buffer. 
5884 * Late in the boot cycle this test will run when configured in. 5885 * It will kick off a thread per CPU that will go into a loop 5886 * writing to the per cpu ring buffer various sizes of data. 5887 * Some of the data will be large items, some small. 5888 * 5889 * Another thread is created that goes into a spin, sending out 5890 * IPIs to the other CPUs to also write into the ring buffer. 5891 * This is to test the nesting ability of the buffer. 5892 * 5893 * Basic stats are recorded and reported. If something unexpected 5894 * happens in the ring buffer, a big warning 5895 * is displayed and all ring buffers are disabled. 5896 */ 5897 static struct task_struct *rb_threads[NR_CPUS] __initdata; 5898 5899 struct rb_test_data { 5900 struct trace_buffer *buffer; 5901 unsigned long events; 5902 unsigned long bytes_written; 5903 unsigned long bytes_alloc; 5904 unsigned long bytes_dropped; 5905 unsigned long events_nested; 5906 unsigned long bytes_written_nested; 5907 unsigned long bytes_alloc_nested; 5908 unsigned long bytes_dropped_nested; 5909 int min_size_nested; 5910 int max_size_nested; 5911 int max_size; 5912 int min_size; 5913 int cpu; 5914 int cnt; 5915 }; 5916 5917 static struct rb_test_data rb_data[NR_CPUS] __initdata; 5918 5919 /* 1 meg per cpu */ 5920 #define RB_TEST_BUFFER_SIZE 1048576 5921 5922 static char rb_string[] __initdata = 5923 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 5924 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 5925 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 5926 5927 static bool rb_test_started __initdata; 5928 5929 struct rb_item { 5930 int size; 5931 char str[]; 5932 }; 5933 5934 static __init int rb_write_something(struct rb_test_data *data, bool nested) 5935 { 5936 struct ring_buffer_event *event; 5937 struct rb_item *item; 5938 bool started; 5939 int event_len; 5940 int size; 5941 int len; 5942 int cnt; 5943 5944 /* Have nested writes different from what is written */ 5945 cnt = data->cnt + (nested ? 27 : 0); 5946 5947 /* Multiply cnt by ~e, to make some unique increment */ 5948 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); 5949 5950 len = size + sizeof(struct rb_item); 5951 5952 started = rb_test_started; 5953 /* read rb_test_started before checking buffer enabled */ 5954 smp_rmb(); 5955 5956 event = ring_buffer_lock_reserve(data->buffer, len); 5957 if (!event) { 5958 /* Ignore dropped events before test starts.
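		 * The buffer is created with recording switched off and is
		 * only turned back on in test_ringbuffer() after all the
		 * writer threads have been created, so reserves can
		 * legitimately fail until then.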
*/ 5959 if (started) { 5960 if (nested) 5961 data->bytes_dropped_nested += len; 5962 else 5963 data->bytes_dropped += len; 5964 } 5965 return len; 5966 } 5967 5968 event_len = ring_buffer_event_length(event); 5969 5970 if (RB_WARN_ON(data->buffer, event_len < len)) 5971 goto out; 5972 5973 item = ring_buffer_event_data(event); 5974 item->size = size; 5975 memcpy(item->str, rb_string, size); 5976 5977 if (nested) { 5978 data->bytes_alloc_nested += event_len; 5979 data->bytes_written_nested += len; 5980 data->events_nested++; 5981 if (!data->min_size_nested || len < data->min_size_nested) 5982 data->min_size_nested = len; 5983 if (len > data->max_size_nested) 5984 data->max_size_nested = len; 5985 } else { 5986 data->bytes_alloc += event_len; 5987 data->bytes_written += len; 5988 data->events++; 5989 if (!data->min_size || len < data->min_size) 5990 data->min_size = len; 5991 if (len > data->max_size) 5992 data->max_size = len; 5993 } 5994 5995 out: 5996 ring_buffer_unlock_commit(data->buffer); 5997 5998 return 0; 5999 } 6000 6001 static __init int rb_test(void *arg) 6002 { 6003 struct rb_test_data *data = arg; 6004 6005 while (!kthread_should_stop()) { 6006 rb_write_something(data, false); 6007 data->cnt++; 6008 6009 set_current_state(TASK_INTERRUPTIBLE); 6010 /* Now sleep between a min of 100-300us and a max of 1ms */ 6011 usleep_range(((data->cnt % 3) + 1) * 100, 1000); 6012 } 6013 6014 return 0; 6015 } 6016 6017 static __init void rb_ipi(void *ignore) 6018 { 6019 struct rb_test_data *data; 6020 int cpu = smp_processor_id(); 6021 6022 data = &rb_data[cpu]; 6023 rb_write_something(data, true); 6024 } 6025 6026 static __init int rb_hammer_test(void *arg) 6027 { 6028 while (!kthread_should_stop()) { 6029 6030 /* Send an IPI to all cpus to write data! */ 6031 smp_call_function(rb_ipi, NULL, 1); 6032 /* No sleep, but for non-preempt, let others run */ 6033 schedule(); 6034 } 6035 6036 return 0; 6037 } 6038 6039 static __init int test_ringbuffer(void) 6040 { 6041 struct task_struct *rb_hammer; 6042 struct trace_buffer *buffer; 6043 int cpu; 6044 int ret = 0; 6045 6046 if (security_locked_down(LOCKDOWN_TRACEFS)) { 6047 pr_warn("Lockdown is enabled, skipping ring buffer tests\n"); 6048 return 0; 6049 } 6050 6051 pr_info("Running ring buffer tests...\n"); 6052 6053 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); 6054 if (WARN_ON(!buffer)) 6055 return 0; 6056 6057 /* Disable buffer so that threads can't write to it yet */ 6058 ring_buffer_record_off(buffer); 6059 6060 for_each_online_cpu(cpu) { 6061 rb_data[cpu].buffer = buffer; 6062 rb_data[cpu].cpu = cpu; 6063 rb_data[cpu].cnt = cpu; 6064 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], 6065 cpu, "rbtester/%u"); 6066 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { 6067 pr_cont("FAILED\n"); 6068 ret = PTR_ERR(rb_threads[cpu]); 6069 goto out_free; 6070 } 6071 } 6072 6073 /* Now create the rb hammer! */ 6074 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); 6075 if (WARN_ON(IS_ERR(rb_hammer))) { 6076 pr_cont("FAILED\n"); 6077 ret = PTR_ERR(rb_hammer); 6078 goto out_free; 6079 } 6080 6081 ring_buffer_record_on(buffer); 6082 /* 6083 * Show the buffer is enabled before setting rb_test_started. 6084 * Yes, there's a small race window where events could be 6085 * dropped and the thread won't catch it. But when a ring 6086 * buffer gets enabled, there will always be some kind of 6087 * delay before other CPUs see it. Thus, we don't care about 6088 * those dropped events.
We care about events dropped after 6089 * the threads see that the buffer is active. 6090 */ 6091 smp_wmb(); 6092 rb_test_started = true; 6093 6094 set_current_state(TASK_INTERRUPTIBLE); 6095 /* Just run for 10 seconds */; 6096 schedule_timeout(10 * HZ); 6097 6098 kthread_stop(rb_hammer); 6099 6100 out_free: 6101 for_each_online_cpu(cpu) { 6102 if (!rb_threads[cpu]) 6103 break; 6104 kthread_stop(rb_threads[cpu]); 6105 } 6106 if (ret) { 6107 ring_buffer_free(buffer); 6108 return ret; 6109 } 6110 6111 /* Report! */ 6112 pr_info("finished\n"); 6113 for_each_online_cpu(cpu) { 6114 struct ring_buffer_event *event; 6115 struct rb_test_data *data = &rb_data[cpu]; 6116 struct rb_item *item; 6117 unsigned long total_events; 6118 unsigned long total_dropped; 6119 unsigned long total_written; 6120 unsigned long total_alloc; 6121 unsigned long total_read = 0; 6122 unsigned long total_size = 0; 6123 unsigned long total_len = 0; 6124 unsigned long total_lost = 0; 6125 unsigned long lost; 6126 int big_event_size; 6127 int small_event_size; 6128 6129 ret = -1; 6130 6131 total_events = data->events + data->events_nested; 6132 total_written = data->bytes_written + data->bytes_written_nested; 6133 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 6134 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 6135 6136 big_event_size = data->max_size + data->max_size_nested; 6137 small_event_size = data->min_size + data->min_size_nested; 6138 6139 pr_info("CPU %d:\n", cpu); 6140 pr_info(" events: %ld\n", total_events); 6141 pr_info(" dropped bytes: %ld\n", total_dropped); 6142 pr_info(" alloced bytes: %ld\n", total_alloc); 6143 pr_info(" written bytes: %ld\n", total_written); 6144 pr_info(" biggest event: %d\n", big_event_size); 6145 pr_info(" smallest event: %d\n", small_event_size); 6146 6147 if (RB_WARN_ON(buffer, total_dropped)) 6148 break; 6149 6150 ret = 0; 6151 6152 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 6153 total_lost += lost; 6154 item = ring_buffer_event_data(event); 6155 total_len += ring_buffer_event_length(event); 6156 total_size += item->size + sizeof(struct rb_item); 6157 if (memcmp(&item->str[0], rb_string, item->size) != 0) { 6158 pr_info("FAILED!\n"); 6159 pr_info("buffer had: %.*s\n", item->size, item->str); 6160 pr_info("expected: %.*s\n", item->size, rb_string); 6161 RB_WARN_ON(buffer, 1); 6162 ret = -1; 6163 break; 6164 } 6165 total_read++; 6166 } 6167 if (ret) 6168 break; 6169 6170 ret = -1; 6171 6172 pr_info(" read events: %ld\n", total_read); 6173 pr_info(" lost events: %ld\n", total_lost); 6174 pr_info(" total events: %ld\n", total_lost + total_read); 6175 pr_info(" recorded len bytes: %ld\n", total_len); 6176 pr_info(" recorded size bytes: %ld\n", total_size); 6177 if (total_lost) { 6178 pr_info(" With dropped events, record len and size may not match\n" 6179 " alloced and written from above\n"); 6180 } else { 6181 if (RB_WARN_ON(buffer, total_len != total_alloc || 6182 total_size != total_written)) 6183 break; 6184 } 6185 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 6186 break; 6187 6188 ret = 0; 6189 } 6190 if (!ret) 6191 pr_info("Ring buffer PASSED!\n"); 6192 6193 ring_buffer_free(buffer); 6194 return 0; 6195 } 6196 6197 late_initcall(test_ringbuffer); 6198 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 6199
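/*
 * Illustrative sketch only: how a user of ring_buffer_read_page() could
 * check the page it gets back (rpage below is the page handed back through
 * @data_page) for lost events, mirroring the flags set in that function.
 * This assumes, as in this file, that RB_MISSED_EVENTS and RB_MISSED_STORED
 * sit in bits above any valid commit offset; when RB_MISSED_EVENTS is set
 * events were lost, and when RB_MISSED_STORED is also set the count of lost
 * events was written just past the real data:
 *
 *	struct buffer_data_page *bpage = rpage;
 *	unsigned long commit = local_read(&bpage->commit);
 *	unsigned long data_len = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	unsigned long missed = 0;
 *
 *	if ((commit & RB_MISSED_EVENTS) && (commit & RB_MISSED_STORED))
 *		memcpy(&missed, &bpage->data[data_len], sizeof(missed));
 */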