1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Generic ring buffer 4 * 5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 6 */ 7 #include <linux/trace_recursion.h> 8 #include <linux/trace_events.h> 9 #include <linux/ring_buffer.h> 10 #include <linux/trace_clock.h> 11 #include <linux/sched/clock.h> 12 #include <linux/cacheflush.h> 13 #include <linux/trace_seq.h> 14 #include <linux/spinlock.h> 15 #include <linux/irq_work.h> 16 #include <linux/security.h> 17 #include <linux/uaccess.h> 18 #include <linux/hardirq.h> 19 #include <linux/kthread.h> /* for self test */ 20 #include <linux/module.h> 21 #include <linux/percpu.h> 22 #include <linux/mutex.h> 23 #include <linux/delay.h> 24 #include <linux/slab.h> 25 #include <linux/init.h> 26 #include <linux/hash.h> 27 #include <linux/list.h> 28 #include <linux/cpu.h> 29 #include <linux/oom.h> 30 #include <linux/mm.h> 31 32 #include <asm/local64.h> 33 #include <asm/local.h> 34 35 #include "trace.h" 36 37 /* 38 * The "absolute" timestamp in the buffer is only 59 bits. 39 * If a clock has the 5 MSBs set, it needs to be saved and 40 * reinserted. 41 */ 42 #define TS_MSB (0xf8ULL << 56) 43 #define ABS_TS_MASK (~TS_MSB) 44 45 static void update_pages_handler(struct work_struct *work); 46 47 #define RING_BUFFER_META_MAGIC 0xBADFEED 48 49 struct ring_buffer_meta { 50 int magic; 51 int struct_size; 52 unsigned long text_addr; 53 unsigned long data_addr; 54 unsigned long first_buffer; 55 unsigned long head_buffer; 56 unsigned long commit_buffer; 57 __u32 subbuf_size; 58 __u32 nr_subbufs; 59 int buffers[]; 60 }; 61 62 /* 63 * The ring buffer header is special. We must manually up keep it. 64 */ 65 int ring_buffer_print_entry_header(struct trace_seq *s) 66 { 67 trace_seq_puts(s, "# compressed entry header\n"); 68 trace_seq_puts(s, "\ttype_len : 5 bits\n"); 69 trace_seq_puts(s, "\ttime_delta : 27 bits\n"); 70 trace_seq_puts(s, "\tarray : 32 bits\n"); 71 trace_seq_putc(s, '\n'); 72 trace_seq_printf(s, "\tpadding : type == %d\n", 73 RINGBUF_TYPE_PADDING); 74 trace_seq_printf(s, "\ttime_extend : type == %d\n", 75 RINGBUF_TYPE_TIME_EXTEND); 76 trace_seq_printf(s, "\ttime_stamp : type == %d\n", 77 RINGBUF_TYPE_TIME_STAMP); 78 trace_seq_printf(s, "\tdata max type_len == %d\n", 79 RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 80 81 return !trace_seq_has_overflowed(s); 82 } 83 84 /* 85 * The ring buffer is made up of a list of pages. A separate list of pages is 86 * allocated for each CPU. A writer may only write to a buffer that is 87 * associated with the CPU it is currently executing on. A reader may read 88 * from any per cpu buffer. 89 * 90 * The reader is special. For each per cpu buffer, the reader has its own 91 * reader page. When a reader has read the entire reader page, this reader 92 * page is swapped with another page in the ring buffer. 93 * 94 * Now, as long as the writer is off the reader page, the reader can do what 95 * ever it wants with that page. The writer will never write to that page 96 * again (as long as it is out of the ring buffer). 97 * 98 * Here's some silly ASCII art. 
99 * 100 * +------+ 101 * |reader| RING BUFFER 102 * |page | 103 * +------+ +---+ +---+ +---+ 104 * | |-->| |-->| | 105 * +---+ +---+ +---+ 106 * ^ | 107 * | | 108 * +---------------+ 109 * 110 * 111 * +------+ 112 * |reader| RING BUFFER 113 * |page |------------------v 114 * +------+ +---+ +---+ +---+ 115 * | |-->| |-->| | 116 * +---+ +---+ +---+ 117 * ^ | 118 * | | 119 * +---------------+ 120 * 121 * 122 * +------+ 123 * |reader| RING BUFFER 124 * |page |------------------v 125 * +------+ +---+ +---+ +---+ 126 * ^ | |-->| |-->| | 127 * | +---+ +---+ +---+ 128 * | | 129 * | | 130 * +------------------------------+ 131 * 132 * 133 * +------+ 134 * |buffer| RING BUFFER 135 * |page |------------------v 136 * +------+ +---+ +---+ +---+ 137 * ^ | | | |-->| | 138 * | New +---+ +---+ +---+ 139 * | Reader------^ | 140 * | page | 141 * +------------------------------+ 142 * 143 * 144 * After we make this swap, the reader can hand this page off to the splice 145 * code and be done with it. It can even allocate a new page if it needs to 146 * and swap that into the ring buffer. 147 * 148 * We will be using cmpxchg soon to make all this lockless. 149 * 150 */ 151 152 /* Used for individual buffers (after the counter) */ 153 #define RB_BUFFER_OFF (1 << 20) 154 155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 156 157 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 158 #define RB_ALIGNMENT 4U 159 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 160 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 161 162 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS 163 # define RB_FORCE_8BYTE_ALIGNMENT 0 164 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT 165 #else 166 # define RB_FORCE_8BYTE_ALIGNMENT 1 167 # define RB_ARCH_ALIGNMENT 8U 168 #endif 169 170 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) 171 172 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 173 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 174 175 enum { 176 RB_LEN_TIME_EXTEND = 8, 177 RB_LEN_TIME_STAMP = 8, 178 }; 179 180 #define skip_time_extend(event) \ 181 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) 182 183 #define extended_time(event) \ 184 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND) 185 186 static inline bool rb_null_event(struct ring_buffer_event *event) 187 { 188 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; 189 } 190 191 static void rb_event_set_padding(struct ring_buffer_event *event) 192 { 193 /* padding has a NULL time_delta */ 194 event->type_len = RINGBUF_TYPE_PADDING; 195 event->time_delta = 0; 196 } 197 198 static unsigned 199 rb_event_data_length(struct ring_buffer_event *event) 200 { 201 unsigned length; 202 203 if (event->type_len) 204 length = event->type_len * RB_ALIGNMENT; 205 else 206 length = event->array[0]; 207 return length + RB_EVNT_HDR_SIZE; 208 } 209 210 /* 211 * Return the length of the given event. Will return 212 * the length of the time extend if the event is a 213 * time extend. 
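 *
 * As a rough worked example (a sketch of the math, not the function
 * below): a data event with type_len == 3 occupies
 * 3 * RB_ALIGNMENT + RB_EVNT_HDR_SIZE == 16 bytes, while a data event
 * with type_len == 0 keeps its length in array[0]:
 *
 *	if (event->type_len)
 *		len = event->type_len * RB_ALIGNMENT;
 *	else
 *		len = event->array[0];
 *	len += RB_EVNT_HDR_SIZE;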
214 */ 215 static inline unsigned 216 rb_event_length(struct ring_buffer_event *event) 217 { 218 switch (event->type_len) { 219 case RINGBUF_TYPE_PADDING: 220 if (rb_null_event(event)) 221 /* undefined */ 222 return -1; 223 return event->array[0] + RB_EVNT_HDR_SIZE; 224 225 case RINGBUF_TYPE_TIME_EXTEND: 226 return RB_LEN_TIME_EXTEND; 227 228 case RINGBUF_TYPE_TIME_STAMP: 229 return RB_LEN_TIME_STAMP; 230 231 case RINGBUF_TYPE_DATA: 232 return rb_event_data_length(event); 233 default: 234 WARN_ON_ONCE(1); 235 } 236 /* not hit */ 237 return 0; 238 } 239 240 /* 241 * Return total length of time extend and data, 242 * or just the event length for all other events. 243 */ 244 static inline unsigned 245 rb_event_ts_length(struct ring_buffer_event *event) 246 { 247 unsigned len = 0; 248 249 if (extended_time(event)) { 250 /* time extends include the data event after it */ 251 len = RB_LEN_TIME_EXTEND; 252 event = skip_time_extend(event); 253 } 254 return len + rb_event_length(event); 255 } 256 257 /** 258 * ring_buffer_event_length - return the length of the event 259 * @event: the event to get the length of 260 * 261 * Returns the size of the data load of a data event. 262 * If the event is something other than a data event, it 263 * returns the size of the event itself. With the exception 264 * of a TIME EXTEND, where it still returns the size of the 265 * data load of the data event after it. 266 */ 267 unsigned ring_buffer_event_length(struct ring_buffer_event *event) 268 { 269 unsigned length; 270 271 if (extended_time(event)) 272 event = skip_time_extend(event); 273 274 length = rb_event_length(event); 275 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 276 return length; 277 length -= RB_EVNT_HDR_SIZE; 278 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 279 length -= sizeof(event->array[0]); 280 return length; 281 } 282 EXPORT_SYMBOL_GPL(ring_buffer_event_length); 283 284 /* inline for ring buffer fast paths */ 285 static __always_inline void * 286 rb_event_data(struct ring_buffer_event *event) 287 { 288 if (extended_time(event)) 289 event = skip_time_extend(event); 290 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 291 /* If length is in len field, then array[0] has the data */ 292 if (event->type_len) 293 return (void *)&event->array[0]; 294 /* Otherwise length is in array[0] and array[1] has the data */ 295 return (void *)&event->array[1]; 296 } 297 298 /** 299 * ring_buffer_event_data - return the data of the event 300 * @event: the event to get the data from 301 */ 302 void *ring_buffer_event_data(struct ring_buffer_event *event) 303 { 304 return rb_event_data(event); 305 } 306 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 307 308 #define for_each_buffer_cpu(buffer, cpu) \ 309 for_each_cpu(cpu, buffer->cpumask) 310 311 #define for_each_online_buffer_cpu(buffer, cpu) \ 312 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask) 313 314 #define TS_SHIFT 27 315 #define TS_MASK ((1ULL << TS_SHIFT) - 1) 316 #define TS_DELTA_TEST (~TS_MASK) 317 318 static u64 rb_event_time_stamp(struct ring_buffer_event *event) 319 { 320 u64 ts; 321 322 ts = event->array[0]; 323 ts <<= TS_SHIFT; 324 ts += event->time_delta; 325 326 return ts; 327 } 328 329 /* Flag when events were overwritten */ 330 #define RB_MISSED_EVENTS (1 << 31) 331 /* Missed count stored at end */ 332 #define RB_MISSED_STORED (1 << 30) 333 334 #define RB_MISSED_MASK (3 << 30) 335 336 struct buffer_data_page { 337 u64 time_stamp; /* page time stamp */ 338 local_t commit; /* write committed index */ 339 
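	/*
	 * A rough orientation (illustrative only; "bpage" is just a
	 * pointer to this structure): everything above data[] is the
	 * sub-buffer header, BUF_PAGE_HDR_SIZE bytes, so the first event
	 * starts at data[0] and the committed payload ends at
	 * data[commit]:
	 *
	 *	unsigned int commit = local_read(&bpage->commit);
	 *	void *first_event   = bpage->data;
	 *	void *commit_end    = bpage->data + commit;
	 */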
unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ 340 }; 341 342 struct buffer_data_read_page { 343 unsigned order; /* order of the page */ 344 struct buffer_data_page *data; /* actual data, stored in this page */ 345 }; 346 347 /* 348 * Note, the buffer_page list must be first. The buffer pages 349 * are allocated in cache lines, which means that each buffer 350 * page will be at the beginning of a cache line, and thus 351 * the least significant bits will be zero. We use this to 352 * add flags in the list struct pointers, to make the ring buffer 353 * lockless. 354 */ 355 struct buffer_page { 356 struct list_head list; /* list of buffer pages */ 357 local_t write; /* index for next write */ 358 unsigned read; /* index for next read */ 359 local_t entries; /* entries on this page */ 360 unsigned long real_end; /* real end of data */ 361 unsigned order; /* order of the page */ 362 u32 id:30; /* ID for external mapping */ 363 u32 range:1; /* Mapped via a range */ 364 struct buffer_data_page *page; /* Actual data page */ 365 }; 366 367 /* 368 * The buffer page counters, write and entries, must be reset 369 * atomically when crossing page boundaries. To synchronize this 370 * update, two counters are inserted into the number. One is 371 * the actual counter for the write position or count on the page. 372 * 373 * The other is a counter of updaters. Before an update happens 374 * the update partition of the counter is incremented. This will 375 * allow the updater to update the counter atomically. 376 * 377 * The counter is 20 bits, and the state data is 12. 378 */ 379 #define RB_WRITE_MASK 0xfffff 380 #define RB_WRITE_INTCNT (1 << 20) 381 382 static void rb_init_page(struct buffer_data_page *bpage) 383 { 384 local_set(&bpage->commit, 0); 385 } 386 387 static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage) 388 { 389 return local_read(&bpage->page->commit); 390 } 391 392 static void free_buffer_page(struct buffer_page *bpage) 393 { 394 /* Range pages are not to be freed */ 395 if (!bpage->range) 396 free_pages((unsigned long)bpage->page, bpage->order); 397 kfree(bpage); 398 } 399 400 /* 401 * We need to fit the time_stamp delta into 27 bits. 402 */ 403 static inline bool test_time_stamp(u64 delta) 404 { 405 return !!(delta & TS_DELTA_TEST); 406 } 407 408 struct rb_irq_work { 409 struct irq_work work; 410 wait_queue_head_t waiters; 411 wait_queue_head_t full_waiters; 412 atomic_t seq; 413 bool waiters_pending; 414 bool full_waiters_pending; 415 bool wakeup_full; 416 }; 417 418 /* 419 * Structure to hold event state and handle nested events. 420 */ 421 struct rb_event_info { 422 u64 ts; 423 u64 delta; 424 u64 before; 425 u64 after; 426 unsigned long length; 427 struct buffer_page *tail_page; 428 int add_timestamp; 429 }; 430 431 /* 432 * Used for the add_timestamp 433 * NONE 434 * EXTEND - wants a time extend 435 * ABSOLUTE - the buffer requests all events to have absolute time stamps 436 * FORCE - force a full time stamp. 437 */ 438 enum { 439 RB_ADD_STAMP_NONE = 0, 440 RB_ADD_STAMP_EXTEND = BIT(1), 441 RB_ADD_STAMP_ABSOLUTE = BIT(2), 442 RB_ADD_STAMP_FORCE = BIT(3) 443 }; 444 /* 445 * Used for which event context the event is in. 446 * TRANSITION = 0 447 * NMI = 1 448 * IRQ = 2 449 * SOFTIRQ = 3 450 * NORMAL = 4 451 * 452 * See trace_recursive_lock() comment below for more details. 
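 *
 * As a rough sketch of how the context level is derived (the real
 * check lives in trace_recursive_lock(); this is illustrative only):
 *
 *	unsigned long pc = preempt_count();
 *	int ctx;
 *
 *	if (pc & NMI_MASK)
 *		ctx = RB_CTX_NMI;
 *	else if (pc & HARDIRQ_MASK)
 *		ctx = RB_CTX_IRQ;
 *	else if (pc & SOFTIRQ_OFFSET)
 *		ctx = RB_CTX_SOFTIRQ;
 *	else
 *		ctx = RB_CTX_NORMAL;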
453 */ 454 enum { 455 RB_CTX_TRANSITION, 456 RB_CTX_NMI, 457 RB_CTX_IRQ, 458 RB_CTX_SOFTIRQ, 459 RB_CTX_NORMAL, 460 RB_CTX_MAX 461 }; 462 463 struct rb_time_struct { 464 local64_t time; 465 }; 466 typedef struct rb_time_struct rb_time_t; 467 468 #define MAX_NEST 5 469 470 /* 471 * head_page == tail_page && head == tail then buffer is empty. 472 */ 473 struct ring_buffer_per_cpu { 474 int cpu; 475 atomic_t record_disabled; 476 atomic_t resize_disabled; 477 struct trace_buffer *buffer; 478 raw_spinlock_t reader_lock; /* serialize readers */ 479 arch_spinlock_t lock; 480 struct lock_class_key lock_key; 481 struct buffer_data_page *free_page; 482 unsigned long nr_pages; 483 unsigned int current_context; 484 struct list_head *pages; 485 /* pages generation counter, incremented when the list changes */ 486 unsigned long cnt; 487 struct buffer_page *head_page; /* read from head */ 488 struct buffer_page *tail_page; /* write to tail */ 489 struct buffer_page *commit_page; /* committed pages */ 490 struct buffer_page *reader_page; 491 unsigned long lost_events; 492 unsigned long last_overrun; 493 unsigned long nest; 494 local_t entries_bytes; 495 local_t entries; 496 local_t overrun; 497 local_t commit_overrun; 498 local_t dropped_events; 499 local_t committing; 500 local_t commits; 501 local_t pages_touched; 502 local_t pages_lost; 503 local_t pages_read; 504 long last_pages_touch; 505 size_t shortest_full; 506 unsigned long read; 507 unsigned long read_bytes; 508 rb_time_t write_stamp; 509 rb_time_t before_stamp; 510 u64 event_stamp[MAX_NEST]; 511 u64 read_stamp; 512 /* pages removed since last reset */ 513 unsigned long pages_removed; 514 515 unsigned int mapped; 516 unsigned int user_mapped; /* user space mapping */ 517 struct mutex mapping_lock; 518 unsigned long *subbuf_ids; /* ID to subbuf VA */ 519 struct trace_buffer_meta *meta_page; 520 struct ring_buffer_meta *ring_meta; 521 522 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 523 long nr_pages_to_update; 524 struct list_head new_pages; /* new pages to add */ 525 struct work_struct update_pages_work; 526 struct completion update_done; 527 528 struct rb_irq_work irq_work; 529 }; 530 531 struct trace_buffer { 532 unsigned flags; 533 int cpus; 534 atomic_t record_disabled; 535 atomic_t resizing; 536 cpumask_var_t cpumask; 537 538 struct lock_class_key *reader_lock_key; 539 540 struct mutex mutex; 541 542 struct ring_buffer_per_cpu **buffers; 543 544 struct hlist_node node; 545 u64 (*clock)(void); 546 547 struct rb_irq_work irq_work; 548 bool time_stamp_abs; 549 550 unsigned long range_addr_start; 551 unsigned long range_addr_end; 552 553 long last_text_delta; 554 long last_data_delta; 555 556 unsigned int subbuf_size; 557 unsigned int subbuf_order; 558 unsigned int max_data_size; 559 }; 560 561 struct ring_buffer_iter { 562 struct ring_buffer_per_cpu *cpu_buffer; 563 unsigned long head; 564 unsigned long next_event; 565 struct buffer_page *head_page; 566 struct buffer_page *cache_reader_page; 567 unsigned long cache_read; 568 unsigned long cache_pages_removed; 569 u64 read_stamp; 570 u64 page_stamp; 571 struct ring_buffer_event *event; 572 size_t event_size; 573 int missed_events; 574 }; 575 576 int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s) 577 { 578 struct buffer_data_page field; 579 580 trace_seq_printf(s, "\tfield: u64 timestamp;\t" 581 "offset:0;\tsize:%u;\tsigned:%u;\n", 582 (unsigned int)sizeof(field.time_stamp), 583 (unsigned int)is_signed_type(u64)); 584 585 trace_seq_printf(s, 
"\tfield: local_t commit;\t" 586 "offset:%u;\tsize:%u;\tsigned:%u;\n", 587 (unsigned int)offsetof(typeof(field), commit), 588 (unsigned int)sizeof(field.commit), 589 (unsigned int)is_signed_type(long)); 590 591 trace_seq_printf(s, "\tfield: int overwrite;\t" 592 "offset:%u;\tsize:%u;\tsigned:%u;\n", 593 (unsigned int)offsetof(typeof(field), commit), 594 1, 595 (unsigned int)is_signed_type(long)); 596 597 trace_seq_printf(s, "\tfield: char data;\t" 598 "offset:%u;\tsize:%u;\tsigned:%u;\n", 599 (unsigned int)offsetof(typeof(field), data), 600 (unsigned int)buffer->subbuf_size, 601 (unsigned int)is_signed_type(char)); 602 603 return !trace_seq_has_overflowed(s); 604 } 605 606 static inline void rb_time_read(rb_time_t *t, u64 *ret) 607 { 608 *ret = local64_read(&t->time); 609 } 610 static void rb_time_set(rb_time_t *t, u64 val) 611 { 612 local64_set(&t->time, val); 613 } 614 615 /* 616 * Enable this to make sure that the event passed to 617 * ring_buffer_event_time_stamp() is not committed and also 618 * is on the buffer that it passed in. 619 */ 620 //#define RB_VERIFY_EVENT 621 #ifdef RB_VERIFY_EVENT 622 static struct list_head *rb_list_head(struct list_head *list); 623 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, 624 void *event) 625 { 626 struct buffer_page *page = cpu_buffer->commit_page; 627 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); 628 struct list_head *next; 629 long commit, write; 630 unsigned long addr = (unsigned long)event; 631 bool done = false; 632 int stop = 0; 633 634 /* Make sure the event exists and is not committed yet */ 635 do { 636 if (page == tail_page || WARN_ON_ONCE(stop++ > 100)) 637 done = true; 638 commit = local_read(&page->page->commit); 639 write = local_read(&page->write); 640 if (addr >= (unsigned long)&page->page->data[commit] && 641 addr < (unsigned long)&page->page->data[write]) 642 return; 643 644 next = rb_list_head(page->list.next); 645 page = list_entry(next, struct buffer_page, list); 646 } while (!done); 647 WARN_ON_ONCE(1); 648 } 649 #else 650 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, 651 void *event) 652 { 653 } 654 #endif 655 656 /* 657 * The absolute time stamp drops the 5 MSBs and some clocks may 658 * require them. The rb_fix_abs_ts() will take a previous full 659 * time stamp, and add the 5 MSB of that time stamp on to the 660 * saved absolute time stamp. Then they are compared in case of 661 * the unlikely event that the latest time stamp incremented 662 * the 5 MSB. 663 */ 664 static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts) 665 { 666 if (save_ts & TS_MSB) { 667 abs |= save_ts & TS_MSB; 668 /* Check for overflow */ 669 if (unlikely(abs < save_ts)) 670 abs += 1ULL << 59; 671 } 672 return abs; 673 } 674 675 static inline u64 rb_time_stamp(struct trace_buffer *buffer); 676 677 /** 678 * ring_buffer_event_time_stamp - return the event's current time stamp 679 * @buffer: The buffer that the event is on 680 * @event: the event to get the time stamp of 681 * 682 * Note, this must be called after @event is reserved, and before it is 683 * committed to the ring buffer. And must be called from the same 684 * context where the event was reserved (normal, softirq, irq, etc). 685 * 686 * Returns the time stamp associated with the current event. 687 * If the event has an extended time stamp, then that is used as 688 * the time stamp to return. 
689 * In the highly unlikely case that the event was nested more than 690 * the max nesting, then the write_stamp of the buffer is returned, 691 * otherwise current time is returned, but that really neither of 692 * the last two cases should ever happen. 693 */ 694 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, 695 struct ring_buffer_event *event) 696 { 697 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; 698 unsigned int nest; 699 u64 ts; 700 701 /* If the event includes an absolute time, then just use that */ 702 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { 703 ts = rb_event_time_stamp(event); 704 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); 705 } 706 707 nest = local_read(&cpu_buffer->committing); 708 verify_event(cpu_buffer, event); 709 if (WARN_ON_ONCE(!nest)) 710 goto fail; 711 712 /* Read the current saved nesting level time stamp */ 713 if (likely(--nest < MAX_NEST)) 714 return cpu_buffer->event_stamp[nest]; 715 716 /* Shouldn't happen, warn if it does */ 717 WARN_ONCE(1, "nest (%d) greater than max", nest); 718 719 fail: 720 rb_time_read(&cpu_buffer->write_stamp, &ts); 721 722 return ts; 723 } 724 725 /** 726 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer 727 * @buffer: The ring_buffer to get the number of pages from 728 * @cpu: The cpu of the ring_buffer to get the number of pages from 729 * 730 * Returns the number of pages that have content in the ring buffer. 731 */ 732 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) 733 { 734 size_t read; 735 size_t lost; 736 size_t cnt; 737 738 read = local_read(&buffer->buffers[cpu]->pages_read); 739 lost = local_read(&buffer->buffers[cpu]->pages_lost); 740 cnt = local_read(&buffer->buffers[cpu]->pages_touched); 741 742 if (WARN_ON_ONCE(cnt < lost)) 743 return 0; 744 745 cnt -= lost; 746 747 /* The reader can read an empty page, but not more than that */ 748 if (cnt < read) { 749 WARN_ON_ONCE(read > cnt + 1); 750 return 0; 751 } 752 753 return cnt - read; 754 } 755 756 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) 757 { 758 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 759 size_t nr_pages; 760 size_t dirty; 761 762 nr_pages = cpu_buffer->nr_pages; 763 if (!nr_pages || !full) 764 return true; 765 766 /* 767 * Add one as dirty will never equal nr_pages, as the sub-buffer 768 * that the writer is on is not counted as dirty. 769 * This is needed if "buffer_percent" is set to 100. 770 */ 771 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; 772 773 return (dirty * 100) >= (full * nr_pages); 774 } 775 776 /* 777 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input 778 * 779 * Schedules a delayed work to wake up any task that is blocked on the 780 * ring buffer waiters queue. 
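 *
 * Tasks on the full_waiters queue are only woken once the
 * buffer_percent watermark is met (see full_hit() above). As a worked
 * example with illustrative numbers: with nr_pages == 8 and full == 75,
 * the wake up condition
 *
 *	dirty * 100 >= full * nr_pages
 *
 * becomes dirty * 100 >= 600, i.e. at least 6 dirty sub-buffers, where
 * dirty is ring_buffer_nr_dirty_pages() + 1.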
781 */ 782 static void rb_wake_up_waiters(struct irq_work *work) 783 { 784 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); 785 786 /* For waiters waiting for the first wake up */ 787 (void)atomic_fetch_inc_release(&rbwork->seq); 788 789 wake_up_all(&rbwork->waiters); 790 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { 791 /* Only cpu_buffer sets the above flags */ 792 struct ring_buffer_per_cpu *cpu_buffer = 793 container_of(rbwork, struct ring_buffer_per_cpu, irq_work); 794 795 /* Called from interrupt context */ 796 raw_spin_lock(&cpu_buffer->reader_lock); 797 rbwork->wakeup_full = false; 798 rbwork->full_waiters_pending = false; 799 800 /* Waking up all waiters, they will reset the shortest full */ 801 cpu_buffer->shortest_full = 0; 802 raw_spin_unlock(&cpu_buffer->reader_lock); 803 804 wake_up_all(&rbwork->full_waiters); 805 } 806 } 807 808 /** 809 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer 810 * @buffer: The ring buffer to wake waiters on 811 * @cpu: The CPU buffer to wake waiters on 812 * 813 * In the case of a file that represents a ring buffer is closing, 814 * it is prudent to wake up any waiters that are on this. 815 */ 816 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) 817 { 818 struct ring_buffer_per_cpu *cpu_buffer; 819 struct rb_irq_work *rbwork; 820 821 if (!buffer) 822 return; 823 824 if (cpu == RING_BUFFER_ALL_CPUS) { 825 826 /* Wake up individual ones too. One level recursion */ 827 for_each_buffer_cpu(buffer, cpu) 828 ring_buffer_wake_waiters(buffer, cpu); 829 830 rbwork = &buffer->irq_work; 831 } else { 832 if (WARN_ON_ONCE(!buffer->buffers)) 833 return; 834 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) 835 return; 836 837 cpu_buffer = buffer->buffers[cpu]; 838 /* The CPU buffer may not have been initialized yet */ 839 if (!cpu_buffer) 840 return; 841 rbwork = &cpu_buffer->irq_work; 842 } 843 844 /* This can be called in any context */ 845 irq_work_queue(&rbwork->work); 846 } 847 848 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) 849 { 850 struct ring_buffer_per_cpu *cpu_buffer; 851 bool ret = false; 852 853 /* Reads of all CPUs always waits for any data */ 854 if (cpu == RING_BUFFER_ALL_CPUS) 855 return !ring_buffer_empty(buffer); 856 857 cpu_buffer = buffer->buffers[cpu]; 858 859 if (!ring_buffer_empty_cpu(buffer, cpu)) { 860 unsigned long flags; 861 bool pagebusy; 862 863 if (!full) 864 return true; 865 866 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 867 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; 868 ret = !pagebusy && full_hit(buffer, cpu, full); 869 870 if (!ret && (!cpu_buffer->shortest_full || 871 cpu_buffer->shortest_full > full)) { 872 cpu_buffer->shortest_full = full; 873 } 874 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 875 } 876 return ret; 877 } 878 879 static inline bool 880 rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer, 881 int cpu, int full, ring_buffer_cond_fn cond, void *data) 882 { 883 if (rb_watermark_hit(buffer, cpu, full)) 884 return true; 885 886 if (cond(data)) 887 return true; 888 889 /* 890 * The events can happen in critical sections where 891 * checking a work queue can cause deadlocks. 892 * After adding a task to the queue, this flag is set 893 * only to notify events to try to wake up the queue 894 * using irq_work. 895 * 896 * We don't clear it even if the buffer is no longer 897 * empty. 
The flag only causes the next event to run 898 * irq_work to do the work queue wake up. The worse 899 * that can happen if we race with !trace_empty() is that 900 * an event will cause an irq_work to try to wake up 901 * an empty queue. 902 * 903 * There's no reason to protect this flag either, as 904 * the work queue and irq_work logic will do the necessary 905 * synchronization for the wake ups. The only thing 906 * that is necessary is that the wake up happens after 907 * a task has been queued. It's OK for spurious wake ups. 908 */ 909 if (full) 910 rbwork->full_waiters_pending = true; 911 else 912 rbwork->waiters_pending = true; 913 914 return false; 915 } 916 917 struct rb_wait_data { 918 struct rb_irq_work *irq_work; 919 int seq; 920 }; 921 922 /* 923 * The default wait condition for ring_buffer_wait() is to just to exit the 924 * wait loop the first time it is woken up. 925 */ 926 static bool rb_wait_once(void *data) 927 { 928 struct rb_wait_data *rdata = data; 929 struct rb_irq_work *rbwork = rdata->irq_work; 930 931 return atomic_read_acquire(&rbwork->seq) != rdata->seq; 932 } 933 934 /** 935 * ring_buffer_wait - wait for input to the ring buffer 936 * @buffer: buffer to wait on 937 * @cpu: the cpu buffer to wait on 938 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS 939 * @cond: condition function to break out of wait (NULL to run once) 940 * @data: the data to pass to @cond. 941 * 942 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 943 * as data is added to any of the @buffer's cpu buffers. Otherwise 944 * it will wait for data to be added to a specific cpu buffer. 945 */ 946 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, 947 ring_buffer_cond_fn cond, void *data) 948 { 949 struct ring_buffer_per_cpu *cpu_buffer; 950 struct wait_queue_head *waitq; 951 struct rb_irq_work *rbwork; 952 struct rb_wait_data rdata; 953 int ret = 0; 954 955 /* 956 * Depending on what the caller is waiting for, either any 957 * data in any cpu buffer, or a specific buffer, put the 958 * caller on the appropriate wait queue. 959 */ 960 if (cpu == RING_BUFFER_ALL_CPUS) { 961 rbwork = &buffer->irq_work; 962 /* Full only makes sense on per cpu reads */ 963 full = 0; 964 } else { 965 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 966 return -ENODEV; 967 cpu_buffer = buffer->buffers[cpu]; 968 rbwork = &cpu_buffer->irq_work; 969 } 970 971 if (full) 972 waitq = &rbwork->full_waiters; 973 else 974 waitq = &rbwork->waiters; 975 976 /* Set up to exit loop as soon as it is woken */ 977 if (!cond) { 978 cond = rb_wait_once; 979 rdata.irq_work = rbwork; 980 rdata.seq = atomic_read_acquire(&rbwork->seq); 981 data = &rdata; 982 } 983 984 ret = wait_event_interruptible((*waitq), 985 rb_wait_cond(rbwork, buffer, cpu, full, cond, data)); 986 987 return ret; 988 } 989 990 /** 991 * ring_buffer_poll_wait - poll on buffer input 992 * @buffer: buffer to wait on 993 * @cpu: the cpu buffer to wait on 994 * @filp: the file descriptor 995 * @poll_table: The poll descriptor 996 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS 997 * 998 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 999 * as data is added to any of the @buffer's cpu buffers. Otherwise 1000 * it will wait for data to be added to a specific cpu buffer. 1001 * 1002 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers, 1003 * zero otherwise. 
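 *
 * A caller such as the tracing file's poll implementation uses it
 * roughly like this sketch (variable names are illustrative):
 *
 *	__poll_t mask = ring_buffer_poll_wait(buffer, cpu, filp, pt, 0);
 *
 *	if (mask & EPOLLIN)
 *		;	// data is available to read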
1004 */ 1005 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, 1006 struct file *filp, poll_table *poll_table, int full) 1007 { 1008 struct ring_buffer_per_cpu *cpu_buffer; 1009 struct rb_irq_work *rbwork; 1010 1011 if (cpu == RING_BUFFER_ALL_CPUS) { 1012 rbwork = &buffer->irq_work; 1013 full = 0; 1014 } else { 1015 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1016 return EPOLLERR; 1017 1018 cpu_buffer = buffer->buffers[cpu]; 1019 rbwork = &cpu_buffer->irq_work; 1020 } 1021 1022 if (full) { 1023 poll_wait(filp, &rbwork->full_waiters, poll_table); 1024 1025 if (rb_watermark_hit(buffer, cpu, full)) 1026 return EPOLLIN | EPOLLRDNORM; 1027 /* 1028 * Only allow full_waiters_pending update to be seen after 1029 * the shortest_full is set (in rb_watermark_hit). If the 1030 * writer sees the full_waiters_pending flag set, it will 1031 * compare the amount in the ring buffer to shortest_full. 1032 * If the amount in the ring buffer is greater than the 1033 * shortest_full percent, it will call the irq_work handler 1034 * to wake up this list. The irq_handler will reset shortest_full 1035 * back to zero. That's done under the reader_lock, but 1036 * the below smp_mb() makes sure that the update to 1037 * full_waiters_pending doesn't leak up into the above. 1038 */ 1039 smp_mb(); 1040 rbwork->full_waiters_pending = true; 1041 return 0; 1042 } 1043 1044 poll_wait(filp, &rbwork->waiters, poll_table); 1045 rbwork->waiters_pending = true; 1046 1047 /* 1048 * There's a tight race between setting the waiters_pending and 1049 * checking if the ring buffer is empty. Once the waiters_pending bit 1050 * is set, the next event will wake the task up, but we can get stuck 1051 * if there's only a single event in. 1052 * 1053 * FIXME: Ideally, we need a memory barrier on the writer side as well, 1054 * but adding a memory barrier to all events will cause too much of a 1055 * performance hit in the fast path. We only need a memory barrier when 1056 * the buffer goes from empty to having content. But as this race is 1057 * extremely small, and it's not a problem if another event comes in, we 1058 * will fix it later. 
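 *
 * The pairing the above FIXME refers to looks roughly like this
 * (illustrative; the writer-side barrier is the missing piece):
 *
 *	poll side (here)                 writer side
 *	----------------                 -----------
 *	waiters_pending = true           write event into the buffer
 *	smp_mb()                         <missing barrier>
 *	recheck ring_buffer_empty()      if (waiters_pending)
 *	                                         queue the irq_work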
1059 */ 1060 smp_mb(); 1061 1062 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 1063 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 1064 return EPOLLIN | EPOLLRDNORM; 1065 return 0; 1066 } 1067 1068 /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 1069 #define RB_WARN_ON(b, cond) \ 1070 ({ \ 1071 int _____ret = unlikely(cond); \ 1072 if (_____ret) { \ 1073 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ 1074 struct ring_buffer_per_cpu *__b = \ 1075 (void *)b; \ 1076 atomic_inc(&__b->buffer->record_disabled); \ 1077 } else \ 1078 atomic_inc(&b->record_disabled); \ 1079 WARN_ON(1); \ 1080 } \ 1081 _____ret; \ 1082 }) 1083 1084 /* Up this if you want to test the TIME_EXTENTS and normalization */ 1085 #define DEBUG_SHIFT 0 1086 1087 static inline u64 rb_time_stamp(struct trace_buffer *buffer) 1088 { 1089 u64 ts; 1090 1091 /* Skip retpolines :-( */ 1092 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) 1093 ts = trace_clock_local(); 1094 else 1095 ts = buffer->clock(); 1096 1097 /* shift to debug/test normalization and TIME_EXTENTS */ 1098 return ts << DEBUG_SHIFT; 1099 } 1100 1101 u64 ring_buffer_time_stamp(struct trace_buffer *buffer) 1102 { 1103 u64 time; 1104 1105 preempt_disable_notrace(); 1106 time = rb_time_stamp(buffer); 1107 preempt_enable_notrace(); 1108 1109 return time; 1110 } 1111 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 1112 1113 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, 1114 int cpu, u64 *ts) 1115 { 1116 /* Just stupid testing the normalize function and deltas */ 1117 *ts >>= DEBUG_SHIFT; 1118 } 1119 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 1120 1121 /* 1122 * Making the ring buffer lockless makes things tricky. 1123 * Although writes only happen on the CPU that they are on, 1124 * and they only need to worry about interrupts. Reads can 1125 * happen on any CPU. 1126 * 1127 * The reader page is always off the ring buffer, but when the 1128 * reader finishes with a page, it needs to swap its page with 1129 * a new one from the buffer. The reader needs to take from 1130 * the head (writes go to the tail). But if a writer is in overwrite 1131 * mode and wraps, it must push the head page forward. 1132 * 1133 * Here lies the problem. 1134 * 1135 * The reader must be careful to replace only the head page, and 1136 * not another one. As described at the top of the file in the 1137 * ASCII art, the reader sets its old page to point to the next 1138 * page after head. It then sets the page after head to point to 1139 * the old reader page. But if the writer moves the head page 1140 * during this operation, the reader could end up with the tail. 1141 * 1142 * We use cmpxchg to help prevent this race. We also do something 1143 * special with the page before head. We set the LSB to 1. 1144 * 1145 * When the writer must push the page forward, it will clear the 1146 * bit that points to the head page, move the head, and then set 1147 * the bit that points to the new head page. 1148 * 1149 * We also don't want an interrupt coming in and moving the head 1150 * page on another writer. Thus we use the second LSB to catch 1151 * that too. 
Thus: 1152 * 1153 * head->list->prev->next bit 1 bit 0 1154 * ------- ------- 1155 * Normal page 0 0 1156 * Points to head page 0 1 1157 * New head page 1 0 1158 * 1159 * Note we can not trust the prev pointer of the head page, because: 1160 * 1161 * +----+ +-----+ +-----+ 1162 * | |------>| T |---X--->| N | 1163 * | |<------| | | | 1164 * +----+ +-----+ +-----+ 1165 * ^ ^ | 1166 * | +-----+ | | 1167 * +----------| R |----------+ | 1168 * | |<-----------+ 1169 * +-----+ 1170 * 1171 * Key: ---X--> HEAD flag set in pointer 1172 * T Tail page 1173 * R Reader page 1174 * N Next page 1175 * 1176 * (see __rb_reserve_next() to see where this happens) 1177 * 1178 * What the above shows is that the reader just swapped out 1179 * the reader page with a page in the buffer, but before it 1180 * could make the new header point back to the new page added 1181 * it was preempted by a writer. The writer moved forward onto 1182 * the new page added by the reader and is about to move forward 1183 * again. 1184 * 1185 * You can see, it is legitimate for the previous pointer of 1186 * the head (or any page) not to point back to itself. But only 1187 * temporarily. 1188 */ 1189 1190 #define RB_PAGE_NORMAL 0UL 1191 #define RB_PAGE_HEAD 1UL 1192 #define RB_PAGE_UPDATE 2UL 1193 1194 1195 #define RB_FLAG_MASK 3UL 1196 1197 /* PAGE_MOVED is not part of the mask */ 1198 #define RB_PAGE_MOVED 4UL 1199 1200 /* 1201 * rb_list_head - remove any bit 1202 */ 1203 static struct list_head *rb_list_head(struct list_head *list) 1204 { 1205 unsigned long val = (unsigned long)list; 1206 1207 return (struct list_head *)(val & ~RB_FLAG_MASK); 1208 } 1209 1210 /* 1211 * rb_is_head_page - test if the given page is the head page 1212 * 1213 * Because the reader may move the head_page pointer, we can 1214 * not trust what the head page is (it may be pointing to 1215 * the reader page). But if the next page is a header page, 1216 * its flags will be non zero. 1217 */ 1218 static inline int 1219 rb_is_head_page(struct buffer_page *page, struct list_head *list) 1220 { 1221 unsigned long val; 1222 1223 val = (unsigned long)list->next; 1224 1225 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) 1226 return RB_PAGE_MOVED; 1227 1228 return val & RB_FLAG_MASK; 1229 } 1230 1231 /* 1232 * rb_is_reader_page 1233 * 1234 * The unique thing about the reader page, is that, if the 1235 * writer is ever on it, the previous pointer never points 1236 * back to the reader page. 1237 */ 1238 static bool rb_is_reader_page(struct buffer_page *page) 1239 { 1240 struct list_head *list = page->list.prev; 1241 1242 return rb_list_head(list->next) != &page->list; 1243 } 1244 1245 /* 1246 * rb_set_list_to_head - set a list_head to be pointing to head. 1247 */ 1248 static void rb_set_list_to_head(struct list_head *list) 1249 { 1250 unsigned long *ptr; 1251 1252 ptr = (unsigned long *)&list->next; 1253 *ptr |= RB_PAGE_HEAD; 1254 *ptr &= ~RB_PAGE_UPDATE; 1255 } 1256 1257 /* 1258 * rb_head_page_activate - sets up head page 1259 */ 1260 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) 1261 { 1262 struct buffer_page *head; 1263 1264 head = cpu_buffer->head_page; 1265 if (!head) 1266 return; 1267 1268 /* 1269 * Set the previous list pointer to have the HEAD flag. 
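 *
 * After this, the pointer still reaches the head page once the flag
 * bits are masked off; only the low bits change (a sketch):
 *
 *	val = (unsigned long)head->list.prev->next;
 *	(val & RB_FLAG_MASK) == RB_PAGE_HEAD;			// true
 *	rb_list_head(head->list.prev->next) == &head->list;	// true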
1270 */ 1271 rb_set_list_to_head(head->list.prev); 1272 1273 if (cpu_buffer->ring_meta) { 1274 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 1275 meta->head_buffer = (unsigned long)head->page; 1276 } 1277 } 1278 1279 static void rb_list_head_clear(struct list_head *list) 1280 { 1281 unsigned long *ptr = (unsigned long *)&list->next; 1282 1283 *ptr &= ~RB_FLAG_MASK; 1284 } 1285 1286 /* 1287 * rb_head_page_deactivate - clears head page ptr (for free list) 1288 */ 1289 static void 1290 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) 1291 { 1292 struct list_head *hd; 1293 1294 /* Go through the whole list and clear any pointers found. */ 1295 rb_list_head_clear(cpu_buffer->pages); 1296 1297 list_for_each(hd, cpu_buffer->pages) 1298 rb_list_head_clear(hd); 1299 } 1300 1301 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 1302 struct buffer_page *head, 1303 struct buffer_page *prev, 1304 int old_flag, int new_flag) 1305 { 1306 struct list_head *list; 1307 unsigned long val = (unsigned long)&head->list; 1308 unsigned long ret; 1309 1310 list = &prev->list; 1311 1312 val &= ~RB_FLAG_MASK; 1313 1314 ret = cmpxchg((unsigned long *)&list->next, 1315 val | old_flag, val | new_flag); 1316 1317 /* check if the reader took the page */ 1318 if ((ret & ~RB_FLAG_MASK) != val) 1319 return RB_PAGE_MOVED; 1320 1321 return ret & RB_FLAG_MASK; 1322 } 1323 1324 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 1325 struct buffer_page *head, 1326 struct buffer_page *prev, 1327 int old_flag) 1328 { 1329 return rb_head_page_set(cpu_buffer, head, prev, 1330 old_flag, RB_PAGE_UPDATE); 1331 } 1332 1333 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 1334 struct buffer_page *head, 1335 struct buffer_page *prev, 1336 int old_flag) 1337 { 1338 return rb_head_page_set(cpu_buffer, head, prev, 1339 old_flag, RB_PAGE_HEAD); 1340 } 1341 1342 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 1343 struct buffer_page *head, 1344 struct buffer_page *prev, 1345 int old_flag) 1346 { 1347 return rb_head_page_set(cpu_buffer, head, prev, 1348 old_flag, RB_PAGE_NORMAL); 1349 } 1350 1351 static inline void rb_inc_page(struct buffer_page **bpage) 1352 { 1353 struct list_head *p = rb_list_head((*bpage)->list.next); 1354 1355 *bpage = list_entry(p, struct buffer_page, list); 1356 } 1357 1358 static struct buffer_page * 1359 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 1360 { 1361 struct buffer_page *head; 1362 struct buffer_page *page; 1363 struct list_head *list; 1364 int i; 1365 1366 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 1367 return NULL; 1368 1369 /* sanity check */ 1370 list = cpu_buffer->pages; 1371 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 1372 return NULL; 1373 1374 page = head = cpu_buffer->head_page; 1375 /* 1376 * It is possible that the writer moves the header behind 1377 * where we started, and we miss in one loop. 1378 * A second loop should grab the header, but we'll do 1379 * three loops just because I'm paranoid. 
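 *
 * "Grabbing the header" means finding the page whose previous page's
 * ->next pointer carries a flag bit, i.e. the page for which
 *
 *	rb_is_head_page(page, page->list.prev)
 *
 * returns non-zero, which is exactly what the loop below tests.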
1380 */ 1381 for (i = 0; i < 3; i++) { 1382 do { 1383 if (rb_is_head_page(page, page->list.prev)) { 1384 cpu_buffer->head_page = page; 1385 return page; 1386 } 1387 rb_inc_page(&page); 1388 } while (page != head); 1389 } 1390 1391 RB_WARN_ON(cpu_buffer, 1); 1392 1393 return NULL; 1394 } 1395 1396 static bool rb_head_page_replace(struct buffer_page *old, 1397 struct buffer_page *new) 1398 { 1399 unsigned long *ptr = (unsigned long *)&old->list.prev->next; 1400 unsigned long val; 1401 1402 val = *ptr & ~RB_FLAG_MASK; 1403 val |= RB_PAGE_HEAD; 1404 1405 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); 1406 } 1407 1408 /* 1409 * rb_tail_page_update - move the tail page forward 1410 */ 1411 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 1412 struct buffer_page *tail_page, 1413 struct buffer_page *next_page) 1414 { 1415 unsigned long old_entries; 1416 unsigned long old_write; 1417 1418 /* 1419 * The tail page now needs to be moved forward. 1420 * 1421 * We need to reset the tail page, but without messing 1422 * with possible erasing of data brought in by interrupts 1423 * that have moved the tail page and are currently on it. 1424 * 1425 * We add a counter to the write field to denote this. 1426 */ 1427 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 1428 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 1429 1430 /* 1431 * Just make sure we have seen our old_write and synchronize 1432 * with any interrupts that come in. 1433 */ 1434 barrier(); 1435 1436 /* 1437 * If the tail page is still the same as what we think 1438 * it is, then it is up to us to update the tail 1439 * pointer. 1440 */ 1441 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { 1442 /* Zero the write counter */ 1443 unsigned long val = old_write & ~RB_WRITE_MASK; 1444 unsigned long eval = old_entries & ~RB_WRITE_MASK; 1445 1446 /* 1447 * This will only succeed if an interrupt did 1448 * not come in and change it. In which case, we 1449 * do not want to modify it. 1450 * 1451 * We add (void) to let the compiler know that we do not care 1452 * about the return value of these functions. We use the 1453 * cmpxchg to only update if an interrupt did not already 1454 * do it for us. If the cmpxchg fails, we don't care. 1455 */ 1456 (void)local_cmpxchg(&next_page->write, old_write, val); 1457 (void)local_cmpxchg(&next_page->entries, old_entries, eval); 1458 1459 /* 1460 * No need to worry about races with clearing out the commit. 1461 * it only can increment when a commit takes place. But that 1462 * only happens in the outer most nested commit. 
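 *
 * "Outer most" refers to the nesting tracked by cpu_buffer->committing;
 * a rough sketch of that nesting (illustrative only):
 *
 *	local_inc(&cpu_buffer->committing);	// reserve an event
 *	...
 *	local_dec(&cpu_buffer->committing);	// commit; the page's commit
 *						// index only advances when
 *						// the outermost level commits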
1463 */ 1464 local_set(&next_page->page->commit, 0); 1465 1466 /* Either we update tail_page or an interrupt does */ 1467 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) 1468 local_inc(&cpu_buffer->pages_touched); 1469 } 1470 } 1471 1472 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 1473 struct buffer_page *bpage) 1474 { 1475 unsigned long val = (unsigned long)bpage; 1476 1477 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); 1478 } 1479 1480 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, 1481 struct list_head *list) 1482 { 1483 if (RB_WARN_ON(cpu_buffer, 1484 rb_list_head(rb_list_head(list->next)->prev) != list)) 1485 return false; 1486 1487 if (RB_WARN_ON(cpu_buffer, 1488 rb_list_head(rb_list_head(list->prev)->next) != list)) 1489 return false; 1490 1491 return true; 1492 } 1493 1494 /** 1495 * rb_check_pages - integrity check of buffer pages 1496 * @cpu_buffer: CPU buffer with pages to test 1497 * 1498 * As a safety measure we check to make sure the data pages have not 1499 * been corrupted. 1500 */ 1501 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 1502 { 1503 struct list_head *head, *tmp; 1504 unsigned long buffer_cnt; 1505 unsigned long flags; 1506 int nr_loops = 0; 1507 1508 /* 1509 * Walk the linked list underpinning the ring buffer and validate all 1510 * its next and prev links. 1511 * 1512 * The check acquires the reader_lock to avoid concurrent processing 1513 * with code that could be modifying the list. However, the lock cannot 1514 * be held for the entire duration of the walk, as this would make the 1515 * time when interrupts are disabled non-deterministic, dependent on the 1516 * ring buffer size. Therefore, the code releases and re-acquires the 1517 * lock after checking each page. The ring_buffer_per_cpu.cnt variable 1518 * is then used to detect if the list was modified while the lock was 1519 * not held, in which case the check needs to be restarted. 1520 * 1521 * The code attempts to perform the check at most three times before 1522 * giving up. This is acceptable because this is only a self-validation 1523 * to detect problems early on. In practice, the list modification 1524 * operations are fairly spaced, and so this check typically succeeds at 1525 * most on the second try. 1526 */ 1527 again: 1528 if (++nr_loops > 3) 1529 return; 1530 1531 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 1532 head = rb_list_head(cpu_buffer->pages); 1533 if (!rb_check_links(cpu_buffer, head)) 1534 goto out_locked; 1535 buffer_cnt = cpu_buffer->cnt; 1536 tmp = head; 1537 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1538 1539 while (true) { 1540 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 1541 1542 if (buffer_cnt != cpu_buffer->cnt) { 1543 /* The list was updated, try again. */ 1544 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1545 goto again; 1546 } 1547 1548 tmp = rb_list_head(tmp->next); 1549 if (tmp == head) 1550 /* The iteration circled back, all is done. */ 1551 goto out_locked; 1552 1553 if (!rb_check_links(cpu_buffer, tmp)) 1554 goto out_locked; 1555 1556 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1557 } 1558 1559 out_locked: 1560 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1561 } 1562 1563 /* 1564 * Take an address, add the meta data size as well as the array of 1565 * array subbuffer indexes, then align it to a subbuffer size. 1566 * 1567 * This is used to help find the next per cpu subbuffer within a mapped range. 
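 *
 * For example, with a 4K sub-buffer size and nr_subbufs == 9 (eight
 * data pages plus the reader page), the computation below is roughly:
 *
 *	addr += sizeof(struct ring_buffer_meta) + 9 * sizeof(int);
 *	addr  = ALIGN(addr, 4096);
 *
 * so each CPU's sub-buffers always begin on a sub-buffer boundary.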
1568 */ 1569 static unsigned long 1570 rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs) 1571 { 1572 addr += sizeof(struct ring_buffer_meta) + 1573 sizeof(int) * nr_subbufs; 1574 return ALIGN(addr, subbuf_size); 1575 } 1576 1577 /* 1578 * Return the ring_buffer_meta for a given @cpu. 1579 */ 1580 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu) 1581 { 1582 int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE; 1583 unsigned long ptr = buffer->range_addr_start; 1584 struct ring_buffer_meta *meta; 1585 int nr_subbufs; 1586 1587 if (!ptr) 1588 return NULL; 1589 1590 /* When nr_pages passed in is zero, the first meta has already been initialized */ 1591 if (!nr_pages) { 1592 meta = (struct ring_buffer_meta *)ptr; 1593 nr_subbufs = meta->nr_subbufs; 1594 } else { 1595 meta = NULL; 1596 /* Include the reader page */ 1597 nr_subbufs = nr_pages + 1; 1598 } 1599 1600 /* 1601 * The first chunk may not be subbuffer aligned, where as 1602 * the rest of the chunks are. 1603 */ 1604 if (cpu) { 1605 ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs); 1606 ptr += subbuf_size * nr_subbufs; 1607 1608 /* We can use multiplication to find chunks greater than 1 */ 1609 if (cpu > 1) { 1610 unsigned long size; 1611 unsigned long p; 1612 1613 /* Save the beginning of this CPU chunk */ 1614 p = ptr; 1615 ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs); 1616 ptr += subbuf_size * nr_subbufs; 1617 1618 /* Now all chunks after this are the same size */ 1619 size = ptr - p; 1620 ptr += size * (cpu - 2); 1621 } 1622 } 1623 return (void *)ptr; 1624 } 1625 1626 /* Return the start of subbufs given the meta pointer */ 1627 static void *rb_subbufs_from_meta(struct ring_buffer_meta *meta) 1628 { 1629 int subbuf_size = meta->subbuf_size; 1630 unsigned long ptr; 1631 1632 ptr = (unsigned long)meta; 1633 ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs); 1634 1635 return (void *)ptr; 1636 } 1637 1638 /* 1639 * Return a specific sub-buffer for a given @cpu defined by @idx. 1640 */ 1641 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) 1642 { 1643 struct ring_buffer_meta *meta; 1644 unsigned long ptr; 1645 int subbuf_size; 1646 1647 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); 1648 if (!meta) 1649 return NULL; 1650 1651 if (WARN_ON_ONCE(idx >= meta->nr_subbufs)) 1652 return NULL; 1653 1654 subbuf_size = meta->subbuf_size; 1655 1656 /* Map this buffer to the order that's in meta->buffers[] */ 1657 idx = meta->buffers[idx]; 1658 1659 ptr = (unsigned long)rb_subbufs_from_meta(meta); 1660 1661 ptr += subbuf_size * idx; 1662 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) 1663 return NULL; 1664 1665 return (void *)ptr; 1666 } 1667 1668 /* 1669 * See if the existing memory contains valid ring buffer data. 1670 * As the previous kernel must be the same as this kernel, all 1671 * the calculations (size of buffers and number of buffers) 1672 * must be the same. 
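 *
 * The per CPU layout being validated here is roughly (a sketch, actual
 * sizes depend on nr_subbufs and the sub-buffer order):
 *
 *	+---------------------+ <- rb_range_meta() for this CPU
 *	| ring_buffer_meta    |
 *	| buffers[nr_subbufs] |
 *	+---------------------+ <- aligned to the sub-buffer size
 *	| sub-buffer 0        |
 *	| ...                 |
 *	| sub-buffer N        |
 *	+---------------------+ <- next CPU's meta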
1673 */ 1674 static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu, 1675 struct trace_buffer *buffer, int nr_pages) 1676 { 1677 int subbuf_size = PAGE_SIZE; 1678 struct buffer_data_page *subbuf; 1679 unsigned long buffers_start; 1680 unsigned long buffers_end; 1681 int i; 1682 1683 /* Check the meta magic and meta struct size */ 1684 if (meta->magic != RING_BUFFER_META_MAGIC || 1685 meta->struct_size != sizeof(*meta)) { 1686 pr_info("Ring buffer boot meta[%d] mismatch of magic or struct size\n", cpu); 1687 return false; 1688 } 1689 1690 /* The subbuffer's size and number of subbuffers must match */ 1691 if (meta->subbuf_size != subbuf_size || 1692 meta->nr_subbufs != nr_pages + 1) { 1693 pr_info("Ring buffer boot meta [%d] mismatch of subbuf_size/nr_pages\n", cpu); 1694 return false; 1695 } 1696 1697 buffers_start = meta->first_buffer; 1698 buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs); 1699 1700 /* Is the head and commit buffers within the range of buffers? */ 1701 if (meta->head_buffer < buffers_start || 1702 meta->head_buffer >= buffers_end) { 1703 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu); 1704 return false; 1705 } 1706 1707 if (meta->commit_buffer < buffers_start || 1708 meta->commit_buffer >= buffers_end) { 1709 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu); 1710 return false; 1711 } 1712 1713 subbuf = rb_subbufs_from_meta(meta); 1714 1715 /* Is the meta buffers and the subbufs themselves have correct data? */ 1716 for (i = 0; i < meta->nr_subbufs; i++) { 1717 if (meta->buffers[i] < 0 || 1718 meta->buffers[i] >= meta->nr_subbufs) { 1719 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu); 1720 return false; 1721 } 1722 1723 if ((unsigned)local_read(&subbuf->commit) > subbuf_size) { 1724 pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu); 1725 return false; 1726 } 1727 1728 subbuf = (void *)subbuf + subbuf_size; 1729 } 1730 1731 return true; 1732 } 1733 1734 static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf); 1735 1736 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu, 1737 unsigned long long *timestamp, u64 *delta_ptr) 1738 { 1739 struct ring_buffer_event *event; 1740 u64 ts, delta; 1741 int events = 0; 1742 int e; 1743 1744 *delta_ptr = 0; 1745 *timestamp = 0; 1746 1747 ts = dpage->time_stamp; 1748 1749 for (e = 0; e < tail; e += rb_event_length(event)) { 1750 1751 event = (struct ring_buffer_event *)(dpage->data + e); 1752 1753 switch (event->type_len) { 1754 1755 case RINGBUF_TYPE_TIME_EXTEND: 1756 delta = rb_event_time_stamp(event); 1757 ts += delta; 1758 break; 1759 1760 case RINGBUF_TYPE_TIME_STAMP: 1761 delta = rb_event_time_stamp(event); 1762 delta = rb_fix_abs_ts(delta, ts); 1763 if (delta < ts) { 1764 *delta_ptr = delta; 1765 *timestamp = ts; 1766 return -1; 1767 } 1768 ts = delta; 1769 break; 1770 1771 case RINGBUF_TYPE_PADDING: 1772 if (event->time_delta == 1) 1773 break; 1774 fallthrough; 1775 case RINGBUF_TYPE_DATA: 1776 events++; 1777 ts += event->time_delta; 1778 break; 1779 1780 default: 1781 return -1; 1782 } 1783 } 1784 *timestamp = ts; 1785 return events; 1786 } 1787 1788 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu) 1789 { 1790 unsigned long long ts; 1791 u64 delta; 1792 int tail; 1793 1794 tail = local_read(&dpage->commit); 1795 return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta); 1796 } 1797 1798 /* If the meta data has been validated, now validate the events */ 
1799 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) 1800 { 1801 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 1802 struct buffer_page *head_page; 1803 unsigned long entry_bytes = 0; 1804 unsigned long entries = 0; 1805 int ret; 1806 int i; 1807 1808 if (!meta || !meta->head_buffer) 1809 return; 1810 1811 /* Do the reader page first */ 1812 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); 1813 if (ret < 0) { 1814 pr_info("Ring buffer reader page is invalid\n"); 1815 goto invalid; 1816 } 1817 entries += ret; 1818 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); 1819 local_set(&cpu_buffer->reader_page->entries, ret); 1820 1821 head_page = cpu_buffer->head_page; 1822 1823 /* If both the head and commit are on the reader_page then we are done. */ 1824 if (head_page == cpu_buffer->reader_page && 1825 head_page == cpu_buffer->commit_page) 1826 goto done; 1827 1828 /* Iterate until finding the commit page */ 1829 for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) { 1830 1831 /* Reader page has already been done */ 1832 if (head_page == cpu_buffer->reader_page) 1833 continue; 1834 1835 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); 1836 if (ret < 0) { 1837 pr_info("Ring buffer meta [%d] invalid buffer page\n", 1838 cpu_buffer->cpu); 1839 goto invalid; 1840 } 1841 entries += ret; 1842 entry_bytes += local_read(&head_page->page->commit); 1843 local_set(&cpu_buffer->head_page->entries, ret); 1844 1845 if (head_page == cpu_buffer->commit_page) 1846 break; 1847 } 1848 1849 if (head_page != cpu_buffer->commit_page) { 1850 pr_info("Ring buffer meta [%d] commit page not found\n", 1851 cpu_buffer->cpu); 1852 goto invalid; 1853 } 1854 done: 1855 local_set(&cpu_buffer->entries, entries); 1856 local_set(&cpu_buffer->entries_bytes, entry_bytes); 1857 1858 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); 1859 return; 1860 1861 invalid: 1862 /* The content of the buffers are invalid, reset the meta data */ 1863 meta->head_buffer = 0; 1864 meta->commit_buffer = 0; 1865 1866 /* Reset the reader page */ 1867 local_set(&cpu_buffer->reader_page->entries, 0); 1868 local_set(&cpu_buffer->reader_page->page->commit, 0); 1869 1870 /* Reset all the subbuffers */ 1871 for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) { 1872 local_set(&head_page->entries, 0); 1873 local_set(&head_page->page->commit, 0); 1874 } 1875 } 1876 1877 /* Used to calculate data delta */ 1878 static char rb_data_ptr[] = ""; 1879 1880 #define THIS_TEXT_PTR ((unsigned long)rb_meta_init_text_addr) 1881 #define THIS_DATA_PTR ((unsigned long)rb_data_ptr) 1882 1883 static void rb_meta_init_text_addr(struct ring_buffer_meta *meta) 1884 { 1885 meta->text_addr = THIS_TEXT_PTR; 1886 meta->data_addr = THIS_DATA_PTR; 1887 } 1888 1889 static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages) 1890 { 1891 struct ring_buffer_meta *meta; 1892 unsigned long delta; 1893 void *subbuf; 1894 int cpu; 1895 int i; 1896 1897 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 1898 void *next_meta; 1899 1900 meta = rb_range_meta(buffer, nr_pages, cpu); 1901 1902 if (rb_meta_valid(meta, cpu, buffer, nr_pages)) { 1903 /* Make the mappings match the current address */ 1904 subbuf = rb_subbufs_from_meta(meta); 1905 delta = (unsigned long)subbuf - meta->first_buffer; 1906 meta->first_buffer += delta; 1907 meta->head_buffer += delta; 1908 meta->commit_buffer += delta; 1909 buffer->last_text_delta = THIS_TEXT_PTR - 
meta->text_addr; 1910 buffer->last_data_delta = THIS_DATA_PTR - meta->data_addr; 1911 continue; 1912 } 1913 1914 if (cpu < nr_cpu_ids - 1) 1915 next_meta = rb_range_meta(buffer, nr_pages, cpu + 1); 1916 else 1917 next_meta = (void *)buffer->range_addr_end; 1918 1919 memset(meta, 0, next_meta - (void *)meta); 1920 1921 meta->magic = RING_BUFFER_META_MAGIC; 1922 meta->struct_size = sizeof(*meta); 1923 1924 meta->nr_subbufs = nr_pages + 1; 1925 meta->subbuf_size = PAGE_SIZE; 1926 1927 subbuf = rb_subbufs_from_meta(meta); 1928 1929 meta->first_buffer = (unsigned long)subbuf; 1930 rb_meta_init_text_addr(meta); 1931 1932 /* 1933 * The buffers[] array holds the order of the sub-buffers 1934 * that are after the meta data. The sub-buffers may 1935 * be swapped out when read and inserted into a different 1936 * location of the ring buffer. Although their addresses 1937 * remain the same, the buffers[] array contains the 1938 * index into the sub-buffers holding their actual order. 1939 */ 1940 for (i = 0; i < meta->nr_subbufs; i++) { 1941 meta->buffers[i] = i; 1942 rb_init_page(subbuf); 1943 subbuf += meta->subbuf_size; 1944 } 1945 } 1946 } 1947 1948 static void *rbm_start(struct seq_file *m, loff_t *pos) 1949 { 1950 struct ring_buffer_per_cpu *cpu_buffer = m->private; 1951 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 1952 unsigned long val; 1953 1954 if (!meta) 1955 return NULL; 1956 1957 if (*pos > meta->nr_subbufs) 1958 return NULL; 1959 1960 val = *pos; 1961 val++; 1962 1963 return (void *)val; 1964 } 1965 1966 static void *rbm_next(struct seq_file *m, void *v, loff_t *pos) 1967 { 1968 (*pos)++; 1969 1970 return rbm_start(m, pos); 1971 } 1972 1973 static int rbm_show(struct seq_file *m, void *v) 1974 { 1975 struct ring_buffer_per_cpu *cpu_buffer = m->private; 1976 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 1977 unsigned long val = (unsigned long)v; 1978 1979 if (val == 1) { 1980 seq_printf(m, "head_buffer: %d\n", 1981 rb_meta_subbuf_idx(meta, (void *)meta->head_buffer)); 1982 seq_printf(m, "commit_buffer: %d\n", 1983 rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer)); 1984 seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size); 1985 seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs); 1986 return 0; 1987 } 1988 1989 val -= 2; 1990 seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]); 1991 1992 return 0; 1993 } 1994 1995 static void rbm_stop(struct seq_file *m, void *p) 1996 { 1997 } 1998 1999 static const struct seq_operations rb_meta_seq_ops = { 2000 .start = rbm_start, 2001 .next = rbm_next, 2002 .show = rbm_show, 2003 .stop = rbm_stop, 2004 }; 2005 2006 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu) 2007 { 2008 struct seq_file *m; 2009 int ret; 2010 2011 ret = seq_open(file, &rb_meta_seq_ops); 2012 if (ret) 2013 return ret; 2014 2015 m = file->private_data; 2016 m->private = buffer->buffers[cpu]; 2017 2018 return 0; 2019 } 2020 2021 /* Map the buffer_pages to the previous head and commit pages */ 2022 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, 2023 struct buffer_page *bpage) 2024 { 2025 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 2026 2027 if (meta->head_buffer == (unsigned long)bpage->page) 2028 cpu_buffer->head_page = bpage; 2029 2030 if (meta->commit_buffer == (unsigned long)bpage->page) { 2031 cpu_buffer->commit_page = bpage; 2032 cpu_buffer->tail_page = bpage; 2033 } 2034 } 2035 2036 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 2037 long nr_pages, 
struct list_head *pages) 2038 { 2039 struct trace_buffer *buffer = cpu_buffer->buffer; 2040 struct ring_buffer_meta *meta = NULL; 2041 struct buffer_page *bpage, *tmp; 2042 bool user_thread = current->mm != NULL; 2043 gfp_t mflags; 2044 long i; 2045 2046 /* 2047 * Check if the available memory is there first. 2048 * Note, si_mem_available() only gives us a rough estimate of available 2049 * memory. It may not be accurate. But we don't care, we just want 2050 * to prevent doing any allocation when it is obvious that it is 2051 * not going to succeed. 2052 */ 2053 i = si_mem_available(); 2054 if (i < nr_pages) 2055 return -ENOMEM; 2056 2057 /* 2058 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails 2059 * gracefully without invoking oom-killer and the system is not 2060 * destabilized. 2061 */ 2062 mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL; 2063 2064 /* 2065 * If a user thread allocates too much, and si_mem_available() 2066 * reports there's enough memory, even though there is not. 2067 * Make sure the OOM killer kills this thread. This can happen 2068 * even with RETRY_MAYFAIL because another task may be doing 2069 * an allocation after this task has taken all memory. 2070 * This is the task the OOM killer needs to take out during this 2071 * loop, even if it was triggered by an allocation somewhere else. 2072 */ 2073 if (user_thread) 2074 set_current_oom_origin(); 2075 2076 if (buffer->range_addr_start) 2077 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); 2078 2079 for (i = 0; i < nr_pages; i++) { 2080 struct page *page; 2081 2082 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 2083 mflags, cpu_to_node(cpu_buffer->cpu)); 2084 if (!bpage) 2085 goto free_pages; 2086 2087 rb_check_bpage(cpu_buffer, bpage); 2088 2089 /* 2090 * Append the pages as for mapped buffers we want to keep 2091 * the order 2092 */ 2093 list_add_tail(&bpage->list, pages); 2094 2095 if (meta) { 2096 /* A range was given. Use that for the buffer page */ 2097 bpage->page = rb_range_buffer(cpu_buffer, i + 1); 2098 if (!bpage->page) 2099 goto free_pages; 2100 /* If this is valid from a previous boot */ 2101 if (meta->head_buffer) 2102 rb_meta_buffer_update(cpu_buffer, bpage); 2103 bpage->range = 1; 2104 bpage->id = i + 1; 2105 } else { 2106 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), 2107 mflags | __GFP_COMP | __GFP_ZERO, 2108 cpu_buffer->buffer->subbuf_order); 2109 if (!page) 2110 goto free_pages; 2111 bpage->page = page_address(page); 2112 rb_init_page(bpage->page); 2113 } 2114 bpage->order = cpu_buffer->buffer->subbuf_order; 2115 2116 if (user_thread && fatal_signal_pending(current)) 2117 goto free_pages; 2118 } 2119 if (user_thread) 2120 clear_current_oom_origin(); 2121 2122 return 0; 2123 2124 free_pages: 2125 list_for_each_entry_safe(bpage, tmp, pages, list) { 2126 list_del_init(&bpage->list); 2127 free_buffer_page(bpage); 2128 } 2129 if (user_thread) 2130 clear_current_oom_origin(); 2131 2132 return -ENOMEM; 2133 } 2134 2135 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 2136 unsigned long nr_pages) 2137 { 2138 LIST_HEAD(pages); 2139 2140 WARN_ON(!nr_pages); 2141 2142 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) 2143 return -ENOMEM; 2144 2145 /* 2146 * The ring buffer page list is a circular list that does not 2147 * start and end with a list head. All page list items point to 2148 * other pages. 
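 *
 * For example (illustrative only), with three buffer pages A, B and C
 * the list is simply A -> B -> C -> A, and cpu_buffer->pages below just
 * points at one of those entries rather than at a separate list_head.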
2149 */ 2150 cpu_buffer->pages = pages.next; 2151 list_del(&pages); 2152 2153 cpu_buffer->nr_pages = nr_pages; 2154 2155 rb_check_pages(cpu_buffer); 2156 2157 return 0; 2158 } 2159 2160 static struct ring_buffer_per_cpu * 2161 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) 2162 { 2163 struct ring_buffer_per_cpu *cpu_buffer; 2164 struct ring_buffer_meta *meta; 2165 struct buffer_page *bpage; 2166 struct page *page; 2167 int ret; 2168 2169 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 2170 GFP_KERNEL, cpu_to_node(cpu)); 2171 if (!cpu_buffer) 2172 return NULL; 2173 2174 cpu_buffer->cpu = cpu; 2175 cpu_buffer->buffer = buffer; 2176 raw_spin_lock_init(&cpu_buffer->reader_lock); 2177 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 2178 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 2179 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 2180 init_completion(&cpu_buffer->update_done); 2181 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); 2182 init_waitqueue_head(&cpu_buffer->irq_work.waiters); 2183 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); 2184 mutex_init(&cpu_buffer->mapping_lock); 2185 2186 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 2187 GFP_KERNEL, cpu_to_node(cpu)); 2188 if (!bpage) 2189 goto fail_free_buffer; 2190 2191 rb_check_bpage(cpu_buffer, bpage); 2192 2193 cpu_buffer->reader_page = bpage; 2194 2195 if (buffer->range_addr_start) { 2196 /* 2197 * Range mapped buffers have the same restrictions as memory 2198 * mapped ones do. 2199 */ 2200 cpu_buffer->mapped = 1; 2201 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); 2202 bpage->page = rb_range_buffer(cpu_buffer, 0); 2203 if (!bpage->page) 2204 goto fail_free_reader; 2205 if (cpu_buffer->ring_meta->head_buffer) 2206 rb_meta_buffer_update(cpu_buffer, bpage); 2207 bpage->range = 1; 2208 } else { 2209 page = alloc_pages_node(cpu_to_node(cpu), 2210 GFP_KERNEL | __GFP_COMP | __GFP_ZERO, 2211 cpu_buffer->buffer->subbuf_order); 2212 if (!page) 2213 goto fail_free_reader; 2214 bpage->page = page_address(page); 2215 rb_init_page(bpage->page); 2216 } 2217 2218 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 2219 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2220 2221 ret = rb_allocate_pages(cpu_buffer, nr_pages); 2222 if (ret < 0) 2223 goto fail_free_reader; 2224 2225 rb_meta_validate_events(cpu_buffer); 2226 2227 /* If the boot meta was valid then this has already been updated */ 2228 meta = cpu_buffer->ring_meta; 2229 if (!meta || !meta->head_buffer || 2230 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { 2231 if (meta && meta->head_buffer && 2232 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { 2233 pr_warn("Ring buffer meta buffers not all mapped\n"); 2234 if (!cpu_buffer->head_page) 2235 pr_warn(" Missing head_page\n"); 2236 if (!cpu_buffer->commit_page) 2237 pr_warn(" Missing commit_page\n"); 2238 if (!cpu_buffer->tail_page) 2239 pr_warn(" Missing tail_page\n"); 2240 } 2241 2242 cpu_buffer->head_page 2243 = list_entry(cpu_buffer->pages, struct buffer_page, list); 2244 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 2245 2246 rb_head_page_activate(cpu_buffer); 2247 2248 if (cpu_buffer->ring_meta) 2249 meta->commit_buffer = meta->head_buffer; 2250 } else { 2251 /* The valid meta buffer still needs to activate the head page */ 2252 rb_head_page_activate(cpu_buffer); 2253 } 2254 2255 return 
cpu_buffer; 2256 2257 fail_free_reader: 2258 free_buffer_page(cpu_buffer->reader_page); 2259 2260 fail_free_buffer: 2261 kfree(cpu_buffer); 2262 return NULL; 2263 } 2264 2265 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 2266 { 2267 struct list_head *head = cpu_buffer->pages; 2268 struct buffer_page *bpage, *tmp; 2269 2270 irq_work_sync(&cpu_buffer->irq_work.work); 2271 2272 free_buffer_page(cpu_buffer->reader_page); 2273 2274 if (head) { 2275 rb_head_page_deactivate(cpu_buffer); 2276 2277 list_for_each_entry_safe(bpage, tmp, head, list) { 2278 list_del_init(&bpage->list); 2279 free_buffer_page(bpage); 2280 } 2281 bpage = list_entry(head, struct buffer_page, list); 2282 free_buffer_page(bpage); 2283 } 2284 2285 free_page((unsigned long)cpu_buffer->free_page); 2286 2287 kfree(cpu_buffer); 2288 } 2289 2290 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags, 2291 int order, unsigned long start, 2292 unsigned long end, 2293 struct lock_class_key *key) 2294 { 2295 struct trace_buffer *buffer; 2296 long nr_pages; 2297 int subbuf_size; 2298 int bsize; 2299 int cpu; 2300 int ret; 2301 2302 /* keep it in its own cache line */ 2303 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 2304 GFP_KERNEL); 2305 if (!buffer) 2306 return NULL; 2307 2308 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 2309 goto fail_free_buffer; 2310 2311 buffer->subbuf_order = order; 2312 subbuf_size = (PAGE_SIZE << order); 2313 buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE; 2314 2315 /* Max payload is buffer page size - header (8bytes) */ 2316 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); 2317 2318 buffer->flags = flags; 2319 buffer->clock = trace_clock_local; 2320 buffer->reader_lock_key = key; 2321 2322 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); 2323 init_waitqueue_head(&buffer->irq_work.waiters); 2324 2325 buffer->cpus = nr_cpu_ids; 2326 2327 bsize = sizeof(void *) * nr_cpu_ids; 2328 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 2329 GFP_KERNEL); 2330 if (!buffer->buffers) 2331 goto fail_free_cpumask; 2332 2333 /* If start/end are specified, then that overrides size */ 2334 if (start && end) { 2335 unsigned long ptr; 2336 int n; 2337 2338 size = end - start; 2339 size = size / nr_cpu_ids; 2340 2341 /* 2342 * The number of sub-buffers (nr_pages) is determined by the 2343 * total size allocated minus the meta data size. 2344 * Then that is divided by the number of per CPU buffers 2345 * needed, plus account for the integer array index that 2346 * will be appended to the meta data. 
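 *
 * As a rough, illustrative example (the numbers are assumptions, not
 * requirements): with 4K sub-buffers, a meta struct of a few dozen
 * bytes and about 1M of range per CPU, this works out to roughly
 * (1048576 - 56) / (4096 + 4), i.e. around 255 sub-buffers, and the
 * alignment loop below may then shave one or two off so that every
 * per-CPU slot still fits inside [start, end).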
2347 */ 2348 nr_pages = (size - sizeof(struct ring_buffer_meta)) / 2349 (subbuf_size + sizeof(int)); 2350 /* Need at least two pages plus the reader page */ 2351 if (nr_pages < 3) 2352 goto fail_free_buffers; 2353 2354 again: 2355 /* Make sure that the size fits aligned */ 2356 for (n = 0, ptr = start; n < nr_cpu_ids; n++) { 2357 ptr += sizeof(struct ring_buffer_meta) + 2358 sizeof(int) * nr_pages; 2359 ptr = ALIGN(ptr, subbuf_size); 2360 ptr += subbuf_size * nr_pages; 2361 } 2362 if (ptr > end) { 2363 if (nr_pages <= 3) 2364 goto fail_free_buffers; 2365 nr_pages--; 2366 goto again; 2367 } 2368 2369 /* nr_pages should not count the reader page */ 2370 nr_pages--; 2371 buffer->range_addr_start = start; 2372 buffer->range_addr_end = end; 2373 2374 rb_range_meta_init(buffer, nr_pages); 2375 } else { 2376 2377 /* need at least two pages */ 2378 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); 2379 if (nr_pages < 2) 2380 nr_pages = 2; 2381 } 2382 2383 cpu = raw_smp_processor_id(); 2384 cpumask_set_cpu(cpu, buffer->cpumask); 2385 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 2386 if (!buffer->buffers[cpu]) 2387 goto fail_free_buffers; 2388 2389 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 2390 if (ret < 0) 2391 goto fail_free_buffers; 2392 2393 mutex_init(&buffer->mutex); 2394 2395 return buffer; 2396 2397 fail_free_buffers: 2398 for_each_buffer_cpu(buffer, cpu) { 2399 if (buffer->buffers[cpu]) 2400 rb_free_cpu_buffer(buffer->buffers[cpu]); 2401 } 2402 kfree(buffer->buffers); 2403 2404 fail_free_cpumask: 2405 free_cpumask_var(buffer->cpumask); 2406 2407 fail_free_buffer: 2408 kfree(buffer); 2409 return NULL; 2410 } 2411 2412 /** 2413 * __ring_buffer_alloc - allocate a new ring_buffer 2414 * @size: the size in bytes per cpu that is needed. 2415 * @flags: attributes to set for the ring buffer. 2416 * @key: ring buffer reader_lock_key. 2417 * 2418 * Currently the only flag that is available is the RB_FL_OVERWRITE 2419 * flag. This flag means that the buffer will overwrite old data 2420 * when the buffer wraps. If this flag is not set, the buffer will 2421 * drop data when the tail hits the head. 2422 */ 2423 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 2424 struct lock_class_key *key) 2425 { 2426 /* Default buffer page size - one system page */ 2427 return alloc_buffer(size, flags, 0, 0, 0,key); 2428 2429 } 2430 EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 2431 2432 /** 2433 * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory 2434 * @size: the size in bytes per cpu that is needed. 2435 * @flags: attributes to set for the ring buffer. 2436 * @order: sub-buffer order 2437 * @start: start of allocated range 2438 * @range_size: size of allocated range 2439 * @key: ring buffer reader_lock_key. 2440 * 2441 * Currently the only flag that is available is the RB_FL_OVERWRITE 2442 * flag. This flag means that the buffer will overwrite old data 2443 * when the buffer wraps. If this flag is not set, the buffer will 2444 * drop data when the tail hits the head. 
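 *
 * A minimal usage sketch (the variable names are illustrative, not
 * taken from real callers):
 *
 *	buffer = __ring_buffer_alloc_range(0, RB_FL_OVERWRITE, 0,
 *					   reserved_start, reserved_size,
 *					   &my_key);
 *
 * Note that @size is effectively ignored here, since @start and
 * @range_size determine how many sub-buffers fit in the range.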
2445 */ 2446 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags, 2447 int order, unsigned long start, 2448 unsigned long range_size, 2449 struct lock_class_key *key) 2450 { 2451 return alloc_buffer(size, flags, order, start, start + range_size, key); 2452 } 2453 2454 /** 2455 * ring_buffer_last_boot_delta - return the delta offset from last boot 2456 * @buffer: The buffer to return the delta from 2457 * @text: Return text delta 2458 * @data: Return data delta 2459 * 2460 * Returns: The true if the delta is non zero 2461 */ 2462 bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text, 2463 long *data) 2464 { 2465 if (!buffer) 2466 return false; 2467 2468 if (!buffer->last_text_delta) 2469 return false; 2470 2471 *text = buffer->last_text_delta; 2472 *data = buffer->last_data_delta; 2473 2474 return true; 2475 } 2476 2477 /** 2478 * ring_buffer_free - free a ring buffer. 2479 * @buffer: the buffer to free. 2480 */ 2481 void 2482 ring_buffer_free(struct trace_buffer *buffer) 2483 { 2484 int cpu; 2485 2486 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 2487 2488 irq_work_sync(&buffer->irq_work.work); 2489 2490 for_each_buffer_cpu(buffer, cpu) 2491 rb_free_cpu_buffer(buffer->buffers[cpu]); 2492 2493 kfree(buffer->buffers); 2494 free_cpumask_var(buffer->cpumask); 2495 2496 kfree(buffer); 2497 } 2498 EXPORT_SYMBOL_GPL(ring_buffer_free); 2499 2500 void ring_buffer_set_clock(struct trace_buffer *buffer, 2501 u64 (*clock)(void)) 2502 { 2503 buffer->clock = clock; 2504 } 2505 2506 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) 2507 { 2508 buffer->time_stamp_abs = abs; 2509 } 2510 2511 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) 2512 { 2513 return buffer->time_stamp_abs; 2514 } 2515 2516 static inline unsigned long rb_page_entries(struct buffer_page *bpage) 2517 { 2518 return local_read(&bpage->entries) & RB_WRITE_MASK; 2519 } 2520 2521 static inline unsigned long rb_page_write(struct buffer_page *bpage) 2522 { 2523 return local_read(&bpage->write) & RB_WRITE_MASK; 2524 } 2525 2526 static bool 2527 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) 2528 { 2529 struct list_head *tail_page, *to_remove, *next_page; 2530 struct buffer_page *to_remove_page, *tmp_iter_page; 2531 struct buffer_page *last_page, *first_page; 2532 unsigned long nr_removed; 2533 unsigned long head_bit; 2534 int page_entries; 2535 2536 head_bit = 0; 2537 2538 raw_spin_lock_irq(&cpu_buffer->reader_lock); 2539 atomic_inc(&cpu_buffer->record_disabled); 2540 /* 2541 * We don't race with the readers since we have acquired the reader 2542 * lock. We also don't race with writers after disabling recording. 2543 * This makes it easy to figure out the first and the last page to be 2544 * removed from the list. We unlink all the pages in between including 2545 * the first and last pages. This is done in a busy loop so that we 2546 * lose the least number of traces. 2547 * The pages are freed after we restart recording and unlock readers. 
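 *
 * For example (illustrative only): removing two pages from the ring
 * A -> B -> C -> D -> A with the tail on A unlinks B and C in one step
 * (A -> D), and B and C are freed only after recording is re-enabled
 * and the reader lock is released.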
2548 */ 2549 tail_page = &cpu_buffer->tail_page->list; 2550 2551 /* 2552 * tail page might be on reader page, we remove the next page 2553 * from the ring buffer 2554 */ 2555 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 2556 tail_page = rb_list_head(tail_page->next); 2557 to_remove = tail_page; 2558 2559 /* start of pages to remove */ 2560 first_page = list_entry(rb_list_head(to_remove->next), 2561 struct buffer_page, list); 2562 2563 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 2564 to_remove = rb_list_head(to_remove)->next; 2565 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 2566 } 2567 /* Read iterators need to reset themselves when some pages removed */ 2568 cpu_buffer->pages_removed += nr_removed; 2569 2570 next_page = rb_list_head(to_remove)->next; 2571 2572 /* 2573 * Now we remove all pages between tail_page and next_page. 2574 * Make sure that we have head_bit value preserved for the 2575 * next page 2576 */ 2577 tail_page->next = (struct list_head *)((unsigned long)next_page | 2578 head_bit); 2579 next_page = rb_list_head(next_page); 2580 next_page->prev = tail_page; 2581 2582 /* make sure pages points to a valid page in the ring buffer */ 2583 cpu_buffer->pages = next_page; 2584 cpu_buffer->cnt++; 2585 2586 /* update head page */ 2587 if (head_bit) 2588 cpu_buffer->head_page = list_entry(next_page, 2589 struct buffer_page, list); 2590 2591 /* pages are removed, resume tracing and then free the pages */ 2592 atomic_dec(&cpu_buffer->record_disabled); 2593 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 2594 2595 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 2596 2597 /* last buffer page to remove */ 2598 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 2599 list); 2600 tmp_iter_page = first_page; 2601 2602 do { 2603 cond_resched(); 2604 2605 to_remove_page = tmp_iter_page; 2606 rb_inc_page(&tmp_iter_page); 2607 2608 /* update the counters */ 2609 page_entries = rb_page_entries(to_remove_page); 2610 if (page_entries) { 2611 /* 2612 * If something was added to this page, it was full 2613 * since it is not the tail page. So we deduct the 2614 * bytes consumed in ring buffer from here. 2615 * Increment overrun to account for the lost events. 2616 */ 2617 local_add(page_entries, &cpu_buffer->overrun); 2618 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); 2619 local_inc(&cpu_buffer->pages_lost); 2620 } 2621 2622 /* 2623 * We have already removed references to this list item, just 2624 * free up the buffer_page and its page 2625 */ 2626 free_buffer_page(to_remove_page); 2627 nr_removed--; 2628 2629 } while (to_remove_page != last_page); 2630 2631 RB_WARN_ON(cpu_buffer, nr_removed); 2632 2633 return nr_removed == 0; 2634 } 2635 2636 static bool 2637 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2638 { 2639 struct list_head *pages = &cpu_buffer->new_pages; 2640 unsigned long flags; 2641 bool success; 2642 int retries; 2643 2644 /* Can be called at early boot up, where interrupts must not been enabled */ 2645 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2646 /* 2647 * We are holding the reader lock, so the reader page won't be swapped 2648 * in the ring buffer. Now we are racing with the writer trying to 2649 * move head page and the tail page. 2650 * We are going to adapt the reader page update process where: 2651 * 1. We first splice the start and end of list of new pages between 2652 * the head page and its previous page. 2653 * 2. 
We cmpxchg the prev_page->next to point from head page to the 2654 * start of new pages list. 2655 * 3. Finally, we update the head->prev to the end of new list. 2656 * 2657 * We will try this process 10 times, to make sure that we don't keep 2658 * spinning. 2659 */ 2660 retries = 10; 2661 success = false; 2662 while (retries--) { 2663 struct list_head *head_page, *prev_page; 2664 struct list_head *last_page, *first_page; 2665 struct list_head *head_page_with_bit; 2666 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); 2667 2668 if (!hpage) 2669 break; 2670 head_page = &hpage->list; 2671 prev_page = head_page->prev; 2672 2673 first_page = pages->next; 2674 last_page = pages->prev; 2675 2676 head_page_with_bit = (struct list_head *) 2677 ((unsigned long)head_page | RB_PAGE_HEAD); 2678 2679 last_page->next = head_page_with_bit; 2680 first_page->prev = prev_page; 2681 2682 /* caution: head_page_with_bit gets updated on cmpxchg failure */ 2683 if (try_cmpxchg(&prev_page->next, 2684 &head_page_with_bit, first_page)) { 2685 /* 2686 * yay, we replaced the page pointer to our new list, 2687 * now, we just have to update to head page's prev 2688 * pointer to point to end of list 2689 */ 2690 head_page->prev = last_page; 2691 cpu_buffer->cnt++; 2692 success = true; 2693 break; 2694 } 2695 } 2696 2697 if (success) 2698 INIT_LIST_HEAD(pages); 2699 /* 2700 * If we weren't successful in adding in new pages, warn and stop 2701 * tracing 2702 */ 2703 RB_WARN_ON(cpu_buffer, !success); 2704 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2705 2706 /* free pages if they weren't inserted */ 2707 if (!success) { 2708 struct buffer_page *bpage, *tmp; 2709 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2710 list) { 2711 list_del_init(&bpage->list); 2712 free_buffer_page(bpage); 2713 } 2714 } 2715 return success; 2716 } 2717 2718 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2719 { 2720 bool success; 2721 2722 if (cpu_buffer->nr_pages_to_update > 0) 2723 success = rb_insert_pages(cpu_buffer); 2724 else 2725 success = rb_remove_pages(cpu_buffer, 2726 -cpu_buffer->nr_pages_to_update); 2727 2728 if (success) 2729 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2730 } 2731 2732 static void update_pages_handler(struct work_struct *work) 2733 { 2734 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2735 struct ring_buffer_per_cpu, update_pages_work); 2736 rb_update_pages(cpu_buffer); 2737 complete(&cpu_buffer->update_done); 2738 } 2739 2740 /** 2741 * ring_buffer_resize - resize the ring buffer 2742 * @buffer: the buffer to resize. 2743 * @size: the new size. 2744 * @cpu_id: the cpu buffer to resize 2745 * 2746 * Minimum size is 2 * buffer->subbuf_size. 2747 * 2748 * Returns 0 on success and < 0 on failure. 
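 *
 * For example (illustrative only), growing every per-CPU buffer to
 * roughly one megabyte could be done with:
 *
 *	ring_buffer_resize(buffer, 1024 * 1024, RING_BUFFER_ALL_CPUS);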
2749 */ 2750 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2751 int cpu_id) 2752 { 2753 struct ring_buffer_per_cpu *cpu_buffer; 2754 unsigned long nr_pages; 2755 int cpu, err; 2756 2757 /* 2758 * Always succeed at resizing a non-existent buffer: 2759 */ 2760 if (!buffer) 2761 return 0; 2762 2763 /* Make sure the requested buffer exists */ 2764 if (cpu_id != RING_BUFFER_ALL_CPUS && 2765 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2766 return 0; 2767 2768 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); 2769 2770 /* we need a minimum of two pages */ 2771 if (nr_pages < 2) 2772 nr_pages = 2; 2773 2774 /* prevent another thread from changing buffer sizes */ 2775 mutex_lock(&buffer->mutex); 2776 atomic_inc(&buffer->resizing); 2777 2778 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2779 /* 2780 * Don't succeed if resizing is disabled, as a reader might be 2781 * manipulating the ring buffer and is expecting a sane state while 2782 * this is true. 2783 */ 2784 for_each_buffer_cpu(buffer, cpu) { 2785 cpu_buffer = buffer->buffers[cpu]; 2786 if (atomic_read(&cpu_buffer->resize_disabled)) { 2787 err = -EBUSY; 2788 goto out_err_unlock; 2789 } 2790 } 2791 2792 /* calculate the pages to update */ 2793 for_each_buffer_cpu(buffer, cpu) { 2794 cpu_buffer = buffer->buffers[cpu]; 2795 2796 cpu_buffer->nr_pages_to_update = nr_pages - 2797 cpu_buffer->nr_pages; 2798 /* 2799 * nothing more to do for removing pages or no update 2800 */ 2801 if (cpu_buffer->nr_pages_to_update <= 0) 2802 continue; 2803 /* 2804 * to add pages, make sure all new pages can be 2805 * allocated without receiving ENOMEM 2806 */ 2807 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2808 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2809 &cpu_buffer->new_pages)) { 2810 /* not enough memory for new pages */ 2811 err = -ENOMEM; 2812 goto out_err; 2813 } 2814 2815 cond_resched(); 2816 } 2817 2818 cpus_read_lock(); 2819 /* 2820 * Fire off all the required work handlers 2821 * We can't schedule on offline CPUs, but it's not necessary 2822 * since we can change their buffer sizes without any race. 2823 */ 2824 for_each_buffer_cpu(buffer, cpu) { 2825 cpu_buffer = buffer->buffers[cpu]; 2826 if (!cpu_buffer->nr_pages_to_update) 2827 continue; 2828 2829 /* Can't run something on an offline CPU. */ 2830 if (!cpu_online(cpu)) { 2831 rb_update_pages(cpu_buffer); 2832 cpu_buffer->nr_pages_to_update = 0; 2833 } else { 2834 /* Run directly if possible. */ 2835 migrate_disable(); 2836 if (cpu != smp_processor_id()) { 2837 migrate_enable(); 2838 schedule_work_on(cpu, 2839 &cpu_buffer->update_pages_work); 2840 } else { 2841 update_pages_handler(&cpu_buffer->update_pages_work); 2842 migrate_enable(); 2843 } 2844 } 2845 } 2846 2847 /* wait for all the updates to complete */ 2848 for_each_buffer_cpu(buffer, cpu) { 2849 cpu_buffer = buffer->buffers[cpu]; 2850 if (!cpu_buffer->nr_pages_to_update) 2851 continue; 2852 2853 if (cpu_online(cpu)) 2854 wait_for_completion(&cpu_buffer->update_done); 2855 cpu_buffer->nr_pages_to_update = 0; 2856 } 2857 2858 cpus_read_unlock(); 2859 } else { 2860 cpu_buffer = buffer->buffers[cpu_id]; 2861 2862 if (nr_pages == cpu_buffer->nr_pages) 2863 goto out; 2864 2865 /* 2866 * Don't succeed if resizing is disabled, as a reader might be 2867 * manipulating the ring buffer and is expecting a sane state while 2868 * this is true. 
2869 */ 2870 if (atomic_read(&cpu_buffer->resize_disabled)) { 2871 err = -EBUSY; 2872 goto out_err_unlock; 2873 } 2874 2875 cpu_buffer->nr_pages_to_update = nr_pages - 2876 cpu_buffer->nr_pages; 2877 2878 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2879 if (cpu_buffer->nr_pages_to_update > 0 && 2880 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2881 &cpu_buffer->new_pages)) { 2882 err = -ENOMEM; 2883 goto out_err; 2884 } 2885 2886 cpus_read_lock(); 2887 2888 /* Can't run something on an offline CPU. */ 2889 if (!cpu_online(cpu_id)) 2890 rb_update_pages(cpu_buffer); 2891 else { 2892 /* Run directly if possible. */ 2893 migrate_disable(); 2894 if (cpu_id == smp_processor_id()) { 2895 rb_update_pages(cpu_buffer); 2896 migrate_enable(); 2897 } else { 2898 migrate_enable(); 2899 schedule_work_on(cpu_id, 2900 &cpu_buffer->update_pages_work); 2901 wait_for_completion(&cpu_buffer->update_done); 2902 } 2903 } 2904 2905 cpu_buffer->nr_pages_to_update = 0; 2906 cpus_read_unlock(); 2907 } 2908 2909 out: 2910 /* 2911 * The ring buffer resize can happen with the ring buffer 2912 * enabled, so that the update disturbs the tracing as little 2913 * as possible. But if the buffer is disabled, we do not need 2914 * to worry about that, and we can take the time to verify 2915 * that the buffer is not corrupt. 2916 */ 2917 if (atomic_read(&buffer->record_disabled)) { 2918 atomic_inc(&buffer->record_disabled); 2919 /* 2920 * Even though the buffer was disabled, we must make sure 2921 * that it is truly disabled before calling rb_check_pages. 2922 * There could have been a race between checking 2923 * record_disable and incrementing it. 2924 */ 2925 synchronize_rcu(); 2926 for_each_buffer_cpu(buffer, cpu) { 2927 cpu_buffer = buffer->buffers[cpu]; 2928 rb_check_pages(cpu_buffer); 2929 } 2930 atomic_dec(&buffer->record_disabled); 2931 } 2932 2933 atomic_dec(&buffer->resizing); 2934 mutex_unlock(&buffer->mutex); 2935 return 0; 2936 2937 out_err: 2938 for_each_buffer_cpu(buffer, cpu) { 2939 struct buffer_page *bpage, *tmp; 2940 2941 cpu_buffer = buffer->buffers[cpu]; 2942 cpu_buffer->nr_pages_to_update = 0; 2943 2944 if (list_empty(&cpu_buffer->new_pages)) 2945 continue; 2946 2947 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2948 list) { 2949 list_del_init(&bpage->list); 2950 free_buffer_page(bpage); 2951 } 2952 } 2953 out_err_unlock: 2954 atomic_dec(&buffer->resizing); 2955 mutex_unlock(&buffer->mutex); 2956 return err; 2957 } 2958 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2959 2960 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2961 { 2962 mutex_lock(&buffer->mutex); 2963 if (val) 2964 buffer->flags |= RB_FL_OVERWRITE; 2965 else 2966 buffer->flags &= ~RB_FL_OVERWRITE; 2967 mutex_unlock(&buffer->mutex); 2968 } 2969 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2970 2971 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2972 { 2973 return bpage->page->data + index; 2974 } 2975 2976 static __always_inline struct ring_buffer_event * 2977 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2978 { 2979 return __rb_page_index(cpu_buffer->reader_page, 2980 cpu_buffer->reader_page->read); 2981 } 2982 2983 static struct ring_buffer_event * 2984 rb_iter_head_event(struct ring_buffer_iter *iter) 2985 { 2986 struct ring_buffer_event *event; 2987 struct buffer_page *iter_head_page = iter->head_page; 2988 unsigned long commit; 2989 unsigned length; 2990 2991 if (iter->head != iter->next_event) 2992 return iter->event; 2993 2994 /* 
2995 * When the writer goes across pages, it issues a cmpxchg which 2996 * is a mb(), which will synchronize with the rmb here. 2997 * (see rb_tail_page_update() and __rb_reserve_next()) 2998 */ 2999 commit = rb_page_commit(iter_head_page); 3000 smp_rmb(); 3001 3002 /* An event needs to be at least 8 bytes in size */ 3003 if (iter->head > commit - 8) 3004 goto reset; 3005 3006 event = __rb_page_index(iter_head_page, iter->head); 3007 length = rb_event_length(event); 3008 3009 /* 3010 * READ_ONCE() doesn't work on functions and we don't want the 3011 * compiler doing any crazy optimizations with length. 3012 */ 3013 barrier(); 3014 3015 if ((iter->head + length) > commit || length > iter->event_size) 3016 /* Writer corrupted the read? */ 3017 goto reset; 3018 3019 memcpy(iter->event, event, length); 3020 /* 3021 * If the page stamp is still the same after this rmb() then the 3022 * event was safely copied without the writer entering the page. 3023 */ 3024 smp_rmb(); 3025 3026 /* Make sure the page didn't change since we read this */ 3027 if (iter->page_stamp != iter_head_page->page->time_stamp || 3028 commit > rb_page_commit(iter_head_page)) 3029 goto reset; 3030 3031 iter->next_event = iter->head + length; 3032 return iter->event; 3033 reset: 3034 /* Reset to the beginning */ 3035 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 3036 iter->head = 0; 3037 iter->next_event = 0; 3038 iter->missed_events = 1; 3039 return NULL; 3040 } 3041 3042 /* Size is determined by what has been committed */ 3043 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 3044 { 3045 return rb_page_commit(bpage) & ~RB_MISSED_MASK; 3046 } 3047 3048 static __always_inline unsigned 3049 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 3050 { 3051 return rb_page_commit(cpu_buffer->commit_page); 3052 } 3053 3054 static __always_inline unsigned 3055 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) 3056 { 3057 unsigned long addr = (unsigned long)event; 3058 3059 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; 3060 3061 return addr - BUF_PAGE_HDR_SIZE; 3062 } 3063 3064 static void rb_inc_iter(struct ring_buffer_iter *iter) 3065 { 3066 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3067 3068 /* 3069 * The iterator could be on the reader page (it starts there). 3070 * But the head could have moved, since the reader was 3071 * found. Check for this case and assign the iterator 3072 * to the head page instead of next. 
3073 */ 3074 if (iter->head_page == cpu_buffer->reader_page) 3075 iter->head_page = rb_set_head_page(cpu_buffer); 3076 else 3077 rb_inc_page(&iter->head_page); 3078 3079 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 3080 iter->head = 0; 3081 iter->next_event = 0; 3082 } 3083 3084 /* Return the index into the sub-buffers for a given sub-buffer */ 3085 static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf) 3086 { 3087 void *subbuf_array; 3088 3089 subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs; 3090 subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size); 3091 return (subbuf - subbuf_array) / meta->subbuf_size; 3092 } 3093 3094 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, 3095 struct buffer_page *next_page) 3096 { 3097 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 3098 unsigned long old_head = (unsigned long)next_page->page; 3099 unsigned long new_head; 3100 3101 rb_inc_page(&next_page); 3102 new_head = (unsigned long)next_page->page; 3103 3104 /* 3105 * Only move it forward once, if something else came in and 3106 * moved it forward, then we don't want to touch it. 3107 */ 3108 (void)cmpxchg(&meta->head_buffer, old_head, new_head); 3109 } 3110 3111 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, 3112 struct buffer_page *reader) 3113 { 3114 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 3115 void *old_reader = cpu_buffer->reader_page->page; 3116 void *new_reader = reader->page; 3117 int id; 3118 3119 id = reader->id; 3120 cpu_buffer->reader_page->id = id; 3121 reader->id = 0; 3122 3123 meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader); 3124 meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader); 3125 3126 /* The head pointer is the one after the reader */ 3127 rb_update_meta_head(cpu_buffer, reader); 3128 } 3129 3130 /* 3131 * rb_handle_head_page - writer hit the head page 3132 * 3133 * Returns: +1 to retry page 3134 * 0 to continue 3135 * -1 on error 3136 */ 3137 static int 3138 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 3139 struct buffer_page *tail_page, 3140 struct buffer_page *next_page) 3141 { 3142 struct buffer_page *new_head; 3143 int entries; 3144 int type; 3145 int ret; 3146 3147 entries = rb_page_entries(next_page); 3148 3149 /* 3150 * The hard part is here. We need to move the head 3151 * forward, and protect against both readers on 3152 * other CPUs and writers coming in via interrupts. 3153 */ 3154 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 3155 RB_PAGE_HEAD); 3156 3157 /* 3158 * type can be one of four: 3159 * NORMAL - an interrupt already moved it for us 3160 * HEAD - we are the first to get here. 3161 * UPDATE - we are the interrupt interrupting 3162 * a current move. 3163 * MOVED - a reader on another CPU moved the next 3164 * pointer to its reader page. Give up 3165 * and try again. 3166 */ 3167 3168 switch (type) { 3169 case RB_PAGE_HEAD: 3170 /* 3171 * We changed the head to UPDATE, thus 3172 * it is our responsibility to update 3173 * the counters. 3174 */ 3175 local_add(entries, &cpu_buffer->overrun); 3176 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); 3177 local_inc(&cpu_buffer->pages_lost); 3178 3179 if (cpu_buffer->ring_meta) 3180 rb_update_meta_head(cpu_buffer, next_page); 3181 /* 3182 * The entries will be zeroed out when we move the 3183 * tail page. 
3184 */ 3185 3186 /* still more to do */ 3187 break; 3188 3189 case RB_PAGE_UPDATE: 3190 /* 3191 * This is an interrupt that interrupt the 3192 * previous update. Still more to do. 3193 */ 3194 break; 3195 case RB_PAGE_NORMAL: 3196 /* 3197 * An interrupt came in before the update 3198 * and processed this for us. 3199 * Nothing left to do. 3200 */ 3201 return 1; 3202 case RB_PAGE_MOVED: 3203 /* 3204 * The reader is on another CPU and just did 3205 * a swap with our next_page. 3206 * Try again. 3207 */ 3208 return 1; 3209 default: 3210 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 3211 return -1; 3212 } 3213 3214 /* 3215 * Now that we are here, the old head pointer is 3216 * set to UPDATE. This will keep the reader from 3217 * swapping the head page with the reader page. 3218 * The reader (on another CPU) will spin till 3219 * we are finished. 3220 * 3221 * We just need to protect against interrupts 3222 * doing the job. We will set the next pointer 3223 * to HEAD. After that, we set the old pointer 3224 * to NORMAL, but only if it was HEAD before. 3225 * otherwise we are an interrupt, and only 3226 * want the outer most commit to reset it. 3227 */ 3228 new_head = next_page; 3229 rb_inc_page(&new_head); 3230 3231 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 3232 RB_PAGE_NORMAL); 3233 3234 /* 3235 * Valid returns are: 3236 * HEAD - an interrupt came in and already set it. 3237 * NORMAL - One of two things: 3238 * 1) We really set it. 3239 * 2) A bunch of interrupts came in and moved 3240 * the page forward again. 3241 */ 3242 switch (ret) { 3243 case RB_PAGE_HEAD: 3244 case RB_PAGE_NORMAL: 3245 /* OK */ 3246 break; 3247 default: 3248 RB_WARN_ON(cpu_buffer, 1); 3249 return -1; 3250 } 3251 3252 /* 3253 * It is possible that an interrupt came in, 3254 * set the head up, then more interrupts came in 3255 * and moved it again. When we get back here, 3256 * the page would have been set to NORMAL but we 3257 * just set it back to HEAD. 3258 * 3259 * How do you detect this? Well, if that happened 3260 * the tail page would have moved. 3261 */ 3262 if (ret == RB_PAGE_NORMAL) { 3263 struct buffer_page *buffer_tail_page; 3264 3265 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 3266 /* 3267 * If the tail had moved passed next, then we need 3268 * to reset the pointer. 3269 */ 3270 if (buffer_tail_page != tail_page && 3271 buffer_tail_page != next_page) 3272 rb_head_page_set_normal(cpu_buffer, new_head, 3273 next_page, 3274 RB_PAGE_HEAD); 3275 } 3276 3277 /* 3278 * If this was the outer most commit (the one that 3279 * changed the original pointer from HEAD to UPDATE), 3280 * then it is up to us to reset it to NORMAL. 3281 */ 3282 if (type == RB_PAGE_HEAD) { 3283 ret = rb_head_page_set_normal(cpu_buffer, next_page, 3284 tail_page, 3285 RB_PAGE_UPDATE); 3286 if (RB_WARN_ON(cpu_buffer, 3287 ret != RB_PAGE_UPDATE)) 3288 return -1; 3289 } 3290 3291 return 0; 3292 } 3293 3294 static inline void 3295 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 3296 unsigned long tail, struct rb_event_info *info) 3297 { 3298 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); 3299 struct buffer_page *tail_page = info->tail_page; 3300 struct ring_buffer_event *event; 3301 unsigned long length = info->length; 3302 3303 /* 3304 * Only the event that crossed the page boundary 3305 * must fill the old tail_page with padding. 3306 */ 3307 if (tail >= bsize) { 3308 /* 3309 * If the page was filled, then we still need 3310 * to update the real_end. 
Reset it to zero 3311 * and the reader will ignore it. 3312 */ 3313 if (tail == bsize) 3314 tail_page->real_end = 0; 3315 3316 local_sub(length, &tail_page->write); 3317 return; 3318 } 3319 3320 event = __rb_page_index(tail_page, tail); 3321 3322 /* 3323 * Save the original length to the meta data. 3324 * This will be used by the reader to add lost event 3325 * counter. 3326 */ 3327 tail_page->real_end = tail; 3328 3329 /* 3330 * If this event is bigger than the minimum size, then 3331 * we need to be careful that we don't subtract the 3332 * write counter enough to allow another writer to slip 3333 * in on this page. 3334 * We put in a discarded commit instead, to make sure 3335 * that this space is not used again, and this space will 3336 * not be accounted into 'entries_bytes'. 3337 * 3338 * If we are less than the minimum size, we don't need to 3339 * worry about it. 3340 */ 3341 if (tail > (bsize - RB_EVNT_MIN_SIZE)) { 3342 /* No room for any events */ 3343 3344 /* Mark the rest of the page with padding */ 3345 rb_event_set_padding(event); 3346 3347 /* Make sure the padding is visible before the write update */ 3348 smp_wmb(); 3349 3350 /* Set the write back to the previous setting */ 3351 local_sub(length, &tail_page->write); 3352 return; 3353 } 3354 3355 /* Put in a discarded event */ 3356 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE; 3357 event->type_len = RINGBUF_TYPE_PADDING; 3358 /* time delta must be non zero */ 3359 event->time_delta = 1; 3360 3361 /* account for padding bytes */ 3362 local_add(bsize - tail, &cpu_buffer->entries_bytes); 3363 3364 /* Make sure the padding is visible before the tail_page->write update */ 3365 smp_wmb(); 3366 3367 /* Set write to end of buffer */ 3368 length = (tail + length) - bsize; 3369 local_sub(length, &tail_page->write); 3370 } 3371 3372 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 3373 3374 /* 3375 * This is the slow path, force gcc not to inline it. 3376 */ 3377 static noinline struct ring_buffer_event * 3378 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 3379 unsigned long tail, struct rb_event_info *info) 3380 { 3381 struct buffer_page *tail_page = info->tail_page; 3382 struct buffer_page *commit_page = cpu_buffer->commit_page; 3383 struct trace_buffer *buffer = cpu_buffer->buffer; 3384 struct buffer_page *next_page; 3385 int ret; 3386 3387 next_page = tail_page; 3388 3389 rb_inc_page(&next_page); 3390 3391 /* 3392 * If for some reason, we had an interrupt storm that made 3393 * it all the way around the buffer, bail, and warn 3394 * about it. 3395 */ 3396 if (unlikely(next_page == commit_page)) { 3397 local_inc(&cpu_buffer->commit_overrun); 3398 goto out_reset; 3399 } 3400 3401 /* 3402 * This is where the fun begins! 3403 * 3404 * We are fighting against races between a reader that 3405 * could be on another CPU trying to swap its reader 3406 * page with the buffer head. 3407 * 3408 * We are also fighting against interrupts coming in and 3409 * moving the head or tail on us as well. 3410 * 3411 * If the next page is the head page then we have filled 3412 * the buffer, unless the commit page is still on the 3413 * reader page. 3414 */ 3415 if (rb_is_head_page(next_page, &tail_page->list)) { 3416 3417 /* 3418 * If the commit is not on the reader page, then 3419 * move the header page. 3420 */ 3421 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 3422 /* 3423 * If we are not in overwrite mode, 3424 * this is easy, just stop here. 
3425 */ 3426 if (!(buffer->flags & RB_FL_OVERWRITE)) { 3427 local_inc(&cpu_buffer->dropped_events); 3428 goto out_reset; 3429 } 3430 3431 ret = rb_handle_head_page(cpu_buffer, 3432 tail_page, 3433 next_page); 3434 if (ret < 0) 3435 goto out_reset; 3436 if (ret) 3437 goto out_again; 3438 } else { 3439 /* 3440 * We need to be careful here too. The 3441 * commit page could still be on the reader 3442 * page. We could have a small buffer, and 3443 * have filled up the buffer with events 3444 * from interrupts and such, and wrapped. 3445 * 3446 * Note, if the tail page is also on the 3447 * reader_page, we let it move out. 3448 */ 3449 if (unlikely((cpu_buffer->commit_page != 3450 cpu_buffer->tail_page) && 3451 (cpu_buffer->commit_page == 3452 cpu_buffer->reader_page))) { 3453 local_inc(&cpu_buffer->commit_overrun); 3454 goto out_reset; 3455 } 3456 } 3457 } 3458 3459 rb_tail_page_update(cpu_buffer, tail_page, next_page); 3460 3461 out_again: 3462 3463 rb_reset_tail(cpu_buffer, tail, info); 3464 3465 /* Commit what we have for now. */ 3466 rb_end_commit(cpu_buffer); 3467 /* rb_end_commit() decs committing */ 3468 local_inc(&cpu_buffer->committing); 3469 3470 /* fail and let the caller try again */ 3471 return ERR_PTR(-EAGAIN); 3472 3473 out_reset: 3474 /* reset write */ 3475 rb_reset_tail(cpu_buffer, tail, info); 3476 3477 return NULL; 3478 } 3479 3480 /* Slow path */ 3481 static struct ring_buffer_event * 3482 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, 3483 struct ring_buffer_event *event, u64 delta, bool abs) 3484 { 3485 if (abs) 3486 event->type_len = RINGBUF_TYPE_TIME_STAMP; 3487 else 3488 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 3489 3490 /* Not the first event on the page, or not delta? */ 3491 if (abs || rb_event_index(cpu_buffer, event)) { 3492 event->time_delta = delta & TS_MASK; 3493 event->array[0] = delta >> TS_SHIFT; 3494 } else { 3495 /* nope, just zero it */ 3496 event->time_delta = 0; 3497 event->array[0] = 0; 3498 } 3499 3500 return skip_time_extend(event); 3501 } 3502 3503 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 3504 static inline bool sched_clock_stable(void) 3505 { 3506 return true; 3507 } 3508 #endif 3509 3510 static void 3511 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 3512 struct rb_event_info *info) 3513 { 3514 u64 write_stamp; 3515 3516 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 3517 (unsigned long long)info->delta, 3518 (unsigned long long)info->ts, 3519 (unsigned long long)info->before, 3520 (unsigned long long)info->after, 3521 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), 3522 sched_clock_stable() ? "" : 3523 "If you just came from a suspend/resume,\n" 3524 "please switch to the trace global clock:\n" 3525 " echo global > /sys/kernel/tracing/trace_clock\n" 3526 "or add trace_clock=global to the kernel command line\n"); 3527 } 3528 3529 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 3530 struct ring_buffer_event **event, 3531 struct rb_event_info *info, 3532 u64 *delta, 3533 unsigned int *length) 3534 { 3535 bool abs = info->add_timestamp & 3536 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 3537 3538 if (unlikely(info->delta > (1ULL << 59))) { 3539 /* 3540 * Some timers can use more than 59 bits, and when a timestamp 3541 * is added to the buffer, it will lose those bits. 
3542 */ 3543 if (abs && (info->ts & TS_MSB)) { 3544 info->delta &= ABS_TS_MASK; 3545 3546 /* did the clock go backwards */ 3547 } else if (info->before == info->after && info->before > info->ts) { 3548 /* not interrupted */ 3549 static int once; 3550 3551 /* 3552 * This is possible with a recalibrating of the TSC. 3553 * Do not produce a call stack, but just report it. 3554 */ 3555 if (!once) { 3556 once++; 3557 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 3558 info->before, info->ts); 3559 } 3560 } else 3561 rb_check_timestamp(cpu_buffer, info); 3562 if (!abs) 3563 info->delta = 0; 3564 } 3565 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); 3566 *length -= RB_LEN_TIME_EXTEND; 3567 *delta = 0; 3568 } 3569 3570 /** 3571 * rb_update_event - update event type and data 3572 * @cpu_buffer: The per cpu buffer of the @event 3573 * @event: the event to update 3574 * @info: The info to update the @event with (contains length and delta) 3575 * 3576 * Update the type and data fields of the @event. The length 3577 * is the actual size that is written to the ring buffer, 3578 * and with this, we can determine what to place into the 3579 * data field. 3580 */ 3581 static void 3582 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 3583 struct ring_buffer_event *event, 3584 struct rb_event_info *info) 3585 { 3586 unsigned length = info->length; 3587 u64 delta = info->delta; 3588 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 3589 3590 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 3591 cpu_buffer->event_stamp[nest] = info->ts; 3592 3593 /* 3594 * If we need to add a timestamp, then we 3595 * add it to the start of the reserved space. 3596 */ 3597 if (unlikely(info->add_timestamp)) 3598 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 3599 3600 event->time_delta = delta; 3601 length -= RB_EVNT_HDR_SIZE; 3602 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 3603 event->type_len = 0; 3604 event->array[0] = length; 3605 } else 3606 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 3607 } 3608 3609 static unsigned rb_calculate_event_length(unsigned length) 3610 { 3611 struct ring_buffer_event event; /* Used only for sizeof array */ 3612 3613 /* zero length can cause confusions */ 3614 if (!length) 3615 length++; 3616 3617 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 3618 length += sizeof(event.array[0]); 3619 3620 length += RB_EVNT_HDR_SIZE; 3621 length = ALIGN(length, RB_ARCH_ALIGNMENT); 3622 3623 /* 3624 * In case the time delta is larger than the 27 bits for it 3625 * in the header, we need to add a timestamp. If another 3626 * event comes in when trying to discard this one to increase 3627 * the length, then the timestamp will be added in the allocated 3628 * space of this event. If length is bigger than the size needed 3629 * for the TIME_EXTEND, then padding has to be used. The events 3630 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 3631 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 3632 * As length is a multiple of 4, we only need to worry if it 3633 * is 12 (RB_LEN_TIME_EXTEND + 4). 
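 *
 * For example (illustrative only, assuming 4 byte alignment): a 6 byte
 * payload becomes 6 + 4 (header) = 10, rounds up to 12, and is then
 * bumped to 16 below, leaving room for both a TIME_EXTEND and a
 * minimum-size padding event if this event is later discarded.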
 */
	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
		length += RB_ALIGNMENT;

	return length;
}

static inline bool
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long addr;

	new_index = rb_event_index(cpu_buffer, event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);

	bpage = READ_ONCE(cpu_buffer->tail_page);

	/*
	 * Make sure the tail_page is still the same and
	 * the next write location is the end of this event
	 */
	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);

		/*
		 * Force the before_stamp to be different from the write_stamp,
		 * to make sure that the next event adds an absolute
		 * value and does not rely on the saved write stamp, which
		 * is now going to be bogus.
		 *
		 * By setting the before_stamp to zero, the next event
		 * is not going to use the write_stamp and will instead
		 * create an absolute timestamp. This means there's no
		 * reason to update the write_stamp!
		 */
		rb_time_set(&cpu_buffer->before_stamp, 0);

		/*
		 * If an event were to come in now, it would see that the
		 * write_stamp and the before_stamp are different, and assume
		 * that this event just added itself before updating
		 * the write stamp. The interrupting event will fix the
		 * write stamp for us, and use an absolute timestamp.
		 */

		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;

		/* caution: old_index gets updated on cmpxchg failure */
		if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return true;
		}
	}

	/* could not discard */
	return false;
}

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static __always_inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		/*
		 * No need for a memory barrier here, as the update
		 * of the tail_page did it for this page.
3738 */ 3739 local_set(&cpu_buffer->commit_page->page->commit, 3740 rb_page_write(cpu_buffer->commit_page)); 3741 rb_inc_page(&cpu_buffer->commit_page); 3742 if (cpu_buffer->ring_meta) { 3743 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 3744 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; 3745 } 3746 /* add barrier to keep gcc from optimizing too much */ 3747 barrier(); 3748 } 3749 while (rb_commit_index(cpu_buffer) != 3750 rb_page_write(cpu_buffer->commit_page)) { 3751 3752 /* Make sure the readers see the content of what is committed. */ 3753 smp_wmb(); 3754 local_set(&cpu_buffer->commit_page->page->commit, 3755 rb_page_write(cpu_buffer->commit_page)); 3756 RB_WARN_ON(cpu_buffer, 3757 local_read(&cpu_buffer->commit_page->page->commit) & 3758 ~RB_WRITE_MASK); 3759 barrier(); 3760 } 3761 3762 /* again, keep gcc from optimizing */ 3763 barrier(); 3764 3765 /* 3766 * If an interrupt came in just after the first while loop 3767 * and pushed the tail page forward, we will be left with 3768 * a dangling commit that will never go forward. 3769 */ 3770 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3771 goto again; 3772 } 3773 3774 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3775 { 3776 unsigned long commits; 3777 3778 if (RB_WARN_ON(cpu_buffer, 3779 !local_read(&cpu_buffer->committing))) 3780 return; 3781 3782 again: 3783 commits = local_read(&cpu_buffer->commits); 3784 /* synchronize with interrupts */ 3785 barrier(); 3786 if (local_read(&cpu_buffer->committing) == 1) 3787 rb_set_commit_to_write(cpu_buffer); 3788 3789 local_dec(&cpu_buffer->committing); 3790 3791 /* synchronize with interrupts */ 3792 barrier(); 3793 3794 /* 3795 * Need to account for interrupts coming in between the 3796 * updating of the commit page and the clearing of the 3797 * committing counter. 
3798 */ 3799 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3800 !local_read(&cpu_buffer->committing)) { 3801 local_inc(&cpu_buffer->committing); 3802 goto again; 3803 } 3804 } 3805 3806 static inline void rb_event_discard(struct ring_buffer_event *event) 3807 { 3808 if (extended_time(event)) 3809 event = skip_time_extend(event); 3810 3811 /* array[0] holds the actual length for the discarded event */ 3812 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3813 event->type_len = RINGBUF_TYPE_PADDING; 3814 /* time delta must be non zero */ 3815 if (!event->time_delta) 3816 event->time_delta = 1; 3817 } 3818 3819 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3820 { 3821 local_inc(&cpu_buffer->entries); 3822 rb_end_commit(cpu_buffer); 3823 } 3824 3825 static __always_inline void 3826 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3827 { 3828 if (buffer->irq_work.waiters_pending) { 3829 buffer->irq_work.waiters_pending = false; 3830 /* irq_work_queue() supplies it's own memory barriers */ 3831 irq_work_queue(&buffer->irq_work.work); 3832 } 3833 3834 if (cpu_buffer->irq_work.waiters_pending) { 3835 cpu_buffer->irq_work.waiters_pending = false; 3836 /* irq_work_queue() supplies it's own memory barriers */ 3837 irq_work_queue(&cpu_buffer->irq_work.work); 3838 } 3839 3840 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3841 return; 3842 3843 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3844 return; 3845 3846 if (!cpu_buffer->irq_work.full_waiters_pending) 3847 return; 3848 3849 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3850 3851 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3852 return; 3853 3854 cpu_buffer->irq_work.wakeup_full = true; 3855 cpu_buffer->irq_work.full_waiters_pending = false; 3856 /* irq_work_queue() supplies it's own memory barriers */ 3857 irq_work_queue(&cpu_buffer->irq_work.work); 3858 } 3859 3860 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3861 # define do_ring_buffer_record_recursion() \ 3862 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3863 #else 3864 # define do_ring_buffer_record_recursion() do { } while (0) 3865 #endif 3866 3867 /* 3868 * The lock and unlock are done within a preempt disable section. 3869 * The current_context per_cpu variable can only be modified 3870 * by the current task between lock and unlock. But it can 3871 * be modified more than once via an interrupt. To pass this 3872 * information from the lock to the unlock without having to 3873 * access the 'in_interrupt()' functions again (which do show 3874 * a bit of overhead in something as critical as function tracing, 3875 * we use a bitmask trick. 3876 * 3877 * bit 1 = NMI context 3878 * bit 2 = IRQ context 3879 * bit 3 = SoftIRQ context 3880 * bit 4 = normal context. 3881 * 3882 * This works because this is the order of contexts that can 3883 * preempt other contexts. A SoftIRQ never preempts an IRQ 3884 * context. 3885 * 3886 * When the context is determined, the corresponding bit is 3887 * checked and set (if it was set, then a recursion of that context 3888 * happened). 3889 * 3890 * On unlock, we need to clear this bit. To do so, just subtract 3891 * 1 from the current_context and AND it to itself. 
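 *
 * In C, with val standing in for cpu_buffer->current_context and nest for
 * cpu_buffer->nest, the unlock step is effectively (see
 * trace_recursive_unlock() below):
 *
 *	val &= val - (1 << nest);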
3892 * 3893 * (binary) 3894 * 101 - 1 = 100 3895 * 101 & 100 = 100 (clearing bit zero) 3896 * 3897 * 1010 - 1 = 1001 3898 * 1010 & 1001 = 1000 (clearing bit 1) 3899 * 3900 * The least significant bit can be cleared this way, and it 3901 * just so happens that it is the same bit corresponding to 3902 * the current context. 3903 * 3904 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3905 * is set when a recursion is detected at the current context, and if 3906 * the TRANSITION bit is already set, it will fail the recursion. 3907 * This is needed because there's a lag between the changing of 3908 * interrupt context and updating the preempt count. In this case, 3909 * a false positive will be found. To handle this, one extra recursion 3910 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3911 * bit is already set, then it is considered a recursion and the function 3912 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3913 * 3914 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3915 * to be cleared. Even if it wasn't the context that set it. That is, 3916 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3917 * is called before preempt_count() is updated, since the check will 3918 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3919 * NMI then comes in, it will set the NMI bit, but when the NMI code 3920 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3921 * and leave the NMI bit set. But this is fine, because the interrupt 3922 * code that set the TRANSITION bit will then clear the NMI bit when it 3923 * calls trace_recursive_unlock(). If another NMI comes in, it will 3924 * set the TRANSITION bit and continue. 3925 * 3926 * Note: The TRANSITION bit only handles a single transition between context. 3927 */ 3928 3929 static __always_inline bool 3930 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3931 { 3932 unsigned int val = cpu_buffer->current_context; 3933 int bit = interrupt_context_level(); 3934 3935 bit = RB_CTX_NORMAL - bit; 3936 3937 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3938 /* 3939 * It is possible that this was called by transitioning 3940 * between interrupt context, and preempt_count() has not 3941 * been updated yet. In this case, use the TRANSITION bit. 3942 */ 3943 bit = RB_CTX_TRANSITION; 3944 if (val & (1 << (bit + cpu_buffer->nest))) { 3945 do_ring_buffer_record_recursion(); 3946 return true; 3947 } 3948 } 3949 3950 val |= (1 << (bit + cpu_buffer->nest)); 3951 cpu_buffer->current_context = val; 3952 3953 return false; 3954 } 3955 3956 static __always_inline void 3957 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3958 { 3959 cpu_buffer->current_context &= 3960 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3961 } 3962 3963 /* The recursive locking above uses 5 bits */ 3964 #define NESTED_BITS 5 3965 3966 /** 3967 * ring_buffer_nest_start - Allow to trace while nested 3968 * @buffer: The ring buffer to modify 3969 * 3970 * The ring buffer has a safety mechanism to prevent recursion. 3971 * But there may be a case where a trace needs to be done while 3972 * tracing something else. In this case, calling this function 3973 * will allow this function to nest within a currently active 3974 * ring_buffer_lock_reserve(). 
3975 * 3976 * Call this function before calling another ring_buffer_lock_reserve() and 3977 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 3978 */ 3979 void ring_buffer_nest_start(struct trace_buffer *buffer) 3980 { 3981 struct ring_buffer_per_cpu *cpu_buffer; 3982 int cpu; 3983 3984 /* Enabled by ring_buffer_nest_end() */ 3985 preempt_disable_notrace(); 3986 cpu = raw_smp_processor_id(); 3987 cpu_buffer = buffer->buffers[cpu]; 3988 /* This is the shift value for the above recursive locking */ 3989 cpu_buffer->nest += NESTED_BITS; 3990 } 3991 3992 /** 3993 * ring_buffer_nest_end - Allow to trace while nested 3994 * @buffer: The ring buffer to modify 3995 * 3996 * Must be called after ring_buffer_nest_start() and after the 3997 * ring_buffer_unlock_commit(). 3998 */ 3999 void ring_buffer_nest_end(struct trace_buffer *buffer) 4000 { 4001 struct ring_buffer_per_cpu *cpu_buffer; 4002 int cpu; 4003 4004 /* disabled by ring_buffer_nest_start() */ 4005 cpu = raw_smp_processor_id(); 4006 cpu_buffer = buffer->buffers[cpu]; 4007 /* This is the shift value for the above recursive locking */ 4008 cpu_buffer->nest -= NESTED_BITS; 4009 preempt_enable_notrace(); 4010 } 4011 4012 /** 4013 * ring_buffer_unlock_commit - commit a reserved 4014 * @buffer: The buffer to commit to 4015 * 4016 * This commits the data to the ring buffer, and releases any locks held. 4017 * 4018 * Must be paired with ring_buffer_lock_reserve. 4019 */ 4020 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 4021 { 4022 struct ring_buffer_per_cpu *cpu_buffer; 4023 int cpu = raw_smp_processor_id(); 4024 4025 cpu_buffer = buffer->buffers[cpu]; 4026 4027 rb_commit(cpu_buffer); 4028 4029 rb_wakeups(buffer, cpu_buffer); 4030 4031 trace_recursive_unlock(cpu_buffer); 4032 4033 preempt_enable_notrace(); 4034 4035 return 0; 4036 } 4037 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 4038 4039 /* Special value to validate all deltas on a page. 
*/ 4040 #define CHECK_FULL_PAGE 1L 4041 4042 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 4043 4044 static const char *show_irq_str(int bits) 4045 { 4046 const char *type[] = { 4047 ".", // 0 4048 "s", // 1 4049 "h", // 2 4050 "Hs", // 3 4051 "n", // 4 4052 "Ns", // 5 4053 "Nh", // 6 4054 "NHs", // 7 4055 }; 4056 4057 return type[bits]; 4058 } 4059 4060 /* Assume this is a trace event */ 4061 static const char *show_flags(struct ring_buffer_event *event) 4062 { 4063 struct trace_entry *entry; 4064 int bits = 0; 4065 4066 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) 4067 return "X"; 4068 4069 entry = ring_buffer_event_data(event); 4070 4071 if (entry->flags & TRACE_FLAG_SOFTIRQ) 4072 bits |= 1; 4073 4074 if (entry->flags & TRACE_FLAG_HARDIRQ) 4075 bits |= 2; 4076 4077 if (entry->flags & TRACE_FLAG_NMI) 4078 bits |= 4; 4079 4080 return show_irq_str(bits); 4081 } 4082 4083 static const char *show_irq(struct ring_buffer_event *event) 4084 { 4085 struct trace_entry *entry; 4086 4087 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) 4088 return ""; 4089 4090 entry = ring_buffer_event_data(event); 4091 if (entry->flags & TRACE_FLAG_IRQS_OFF) 4092 return "d"; 4093 return ""; 4094 } 4095 4096 static const char *show_interrupt_level(void) 4097 { 4098 unsigned long pc = preempt_count(); 4099 unsigned char level = 0; 4100 4101 if (pc & SOFTIRQ_OFFSET) 4102 level |= 1; 4103 4104 if (pc & HARDIRQ_MASK) 4105 level |= 2; 4106 4107 if (pc & NMI_MASK) 4108 level |= 4; 4109 4110 return show_irq_str(level); 4111 } 4112 4113 static void dump_buffer_page(struct buffer_data_page *bpage, 4114 struct rb_event_info *info, 4115 unsigned long tail) 4116 { 4117 struct ring_buffer_event *event; 4118 u64 ts, delta; 4119 int e; 4120 4121 ts = bpage->time_stamp; 4122 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 4123 4124 for (e = 0; e < tail; e += rb_event_length(event)) { 4125 4126 event = (struct ring_buffer_event *)(bpage->data + e); 4127 4128 switch (event->type_len) { 4129 4130 case RINGBUF_TYPE_TIME_EXTEND: 4131 delta = rb_event_time_stamp(event); 4132 ts += delta; 4133 pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n", 4134 e, ts, delta); 4135 break; 4136 4137 case RINGBUF_TYPE_TIME_STAMP: 4138 delta = rb_event_time_stamp(event); 4139 ts = rb_fix_abs_ts(delta, ts); 4140 pr_warn(" 0x%x: [%lld] absolute:%lld TIME STAMP\n", 4141 e, ts, delta); 4142 break; 4143 4144 case RINGBUF_TYPE_PADDING: 4145 ts += event->time_delta; 4146 pr_warn(" 0x%x: [%lld] delta:%d PADDING\n", 4147 e, ts, event->time_delta); 4148 break; 4149 4150 case RINGBUF_TYPE_DATA: 4151 ts += event->time_delta; 4152 pr_warn(" 0x%x: [%lld] delta:%d %s%s\n", 4153 e, ts, event->time_delta, 4154 show_flags(event), show_irq(event)); 4155 break; 4156 4157 default: 4158 break; 4159 } 4160 } 4161 pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e); 4162 } 4163 4164 static DEFINE_PER_CPU(atomic_t, checking); 4165 static atomic_t ts_dump; 4166 4167 #define buffer_warn_return(fmt, ...) 
\ 4168 do { \ 4169 /* If another report is happening, ignore this one */ \ 4170 if (atomic_inc_return(&ts_dump) != 1) { \ 4171 atomic_dec(&ts_dump); \ 4172 goto out; \ 4173 } \ 4174 atomic_inc(&cpu_buffer->record_disabled); \ 4175 pr_warn(fmt, ##__VA_ARGS__); \ 4176 dump_buffer_page(bpage, info, tail); \ 4177 atomic_dec(&ts_dump); \ 4178 /* There's some cases in boot up that this can happen */ \ 4179 if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING)) \ 4180 /* Do not re-enable checking */ \ 4181 return; \ 4182 } while (0) 4183 4184 /* 4185 * Check if the current event time stamp matches the deltas on 4186 * the buffer page. 4187 */ 4188 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 4189 struct rb_event_info *info, 4190 unsigned long tail) 4191 { 4192 struct buffer_data_page *bpage; 4193 u64 ts, delta; 4194 bool full = false; 4195 int ret; 4196 4197 bpage = info->tail_page->page; 4198 4199 if (tail == CHECK_FULL_PAGE) { 4200 full = true; 4201 tail = local_read(&bpage->commit); 4202 } else if (info->add_timestamp & 4203 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 4204 /* Ignore events with absolute time stamps */ 4205 return; 4206 } 4207 4208 /* 4209 * Do not check the first event (skip possible extends too). 4210 * Also do not check if previous events have not been committed. 4211 */ 4212 if (tail <= 8 || tail > local_read(&bpage->commit)) 4213 return; 4214 4215 /* 4216 * If this interrupted another event, 4217 */ 4218 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 4219 goto out; 4220 4221 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); 4222 if (ret < 0) { 4223 if (delta < ts) { 4224 buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n", 4225 cpu_buffer->cpu, ts, delta); 4226 goto out; 4227 } 4228 } 4229 if ((full && ts > info->ts) || 4230 (!full && ts + info->delta != info->ts)) { 4231 buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n", 4232 cpu_buffer->cpu, 4233 ts + info->delta, info->ts, info->delta, 4234 info->before, info->after, 4235 full ? " (full)" : "", show_interrupt_level()); 4236 } 4237 out: 4238 atomic_dec(this_cpu_ptr(&checking)); 4239 } 4240 #else 4241 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 4242 struct rb_event_info *info, 4243 unsigned long tail) 4244 { 4245 } 4246 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 4247 4248 static struct ring_buffer_event * 4249 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 4250 struct rb_event_info *info) 4251 { 4252 struct ring_buffer_event *event; 4253 struct buffer_page *tail_page; 4254 unsigned long tail, write, w; 4255 4256 /* Don't let the compiler play games with cpu_buffer->tail_page */ 4257 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 4258 4259 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 4260 barrier(); 4261 rb_time_read(&cpu_buffer->before_stamp, &info->before); 4262 rb_time_read(&cpu_buffer->write_stamp, &info->after); 4263 barrier(); 4264 info->ts = rb_time_stamp(cpu_buffer->buffer); 4265 4266 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 4267 info->delta = info->ts; 4268 } else { 4269 /* 4270 * If interrupting an event time update, we may need an 4271 * absolute timestamp. 4272 * Don't bother if this is the start of a new page (w == 0). 
4273 */ 4274 if (!w) { 4275 /* Use the sub-buffer timestamp */ 4276 info->delta = 0; 4277 } else if (unlikely(info->before != info->after)) { 4278 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 4279 info->length += RB_LEN_TIME_EXTEND; 4280 } else { 4281 info->delta = info->ts - info->after; 4282 if (unlikely(test_time_stamp(info->delta))) { 4283 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 4284 info->length += RB_LEN_TIME_EXTEND; 4285 } 4286 } 4287 } 4288 4289 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 4290 4291 /*C*/ write = local_add_return(info->length, &tail_page->write); 4292 4293 /* set write to only the index of the write */ 4294 write &= RB_WRITE_MASK; 4295 4296 tail = write - info->length; 4297 4298 /* See if we shot pass the end of this buffer page */ 4299 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { 4300 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 4301 return rb_move_tail(cpu_buffer, tail, info); 4302 } 4303 4304 if (likely(tail == w)) { 4305 /* Nothing interrupted us between A and C */ 4306 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 4307 /* 4308 * If something came in between C and D, the write stamp 4309 * may now not be in sync. But that's fine as the before_stamp 4310 * will be different and then next event will just be forced 4311 * to use an absolute timestamp. 4312 */ 4313 if (likely(!(info->add_timestamp & 4314 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 4315 /* This did not interrupt any time update */ 4316 info->delta = info->ts - info->after; 4317 else 4318 /* Just use full timestamp for interrupting event */ 4319 info->delta = info->ts; 4320 check_buffer(cpu_buffer, info, tail); 4321 } else { 4322 u64 ts; 4323 /* SLOW PATH - Interrupted between A and C */ 4324 4325 /* Save the old before_stamp */ 4326 rb_time_read(&cpu_buffer->before_stamp, &info->before); 4327 4328 /* 4329 * Read a new timestamp and update the before_stamp to make 4330 * the next event after this one force using an absolute 4331 * timestamp. This is in case an interrupt were to come in 4332 * between E and F. 4333 */ 4334 ts = rb_time_stamp(cpu_buffer->buffer); 4335 rb_time_set(&cpu_buffer->before_stamp, ts); 4336 4337 barrier(); 4338 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); 4339 barrier(); 4340 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 4341 info->after == info->before && info->after < ts) { 4342 /* 4343 * Nothing came after this event between C and F, it is 4344 * safe to use info->after for the delta as it 4345 * matched info->before and is still valid. 4346 */ 4347 info->delta = ts - info->after; 4348 } else { 4349 /* 4350 * Interrupted between C and F: 4351 * Lost the previous events time stamp. Just set the 4352 * delta to zero, and this will be the same time as 4353 * the event this event interrupted. And the events that 4354 * came after this will still be correct (as they would 4355 * have built their delta on the previous event. 4356 */ 4357 info->delta = 0; 4358 } 4359 info->ts = ts; 4360 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 4361 } 4362 4363 /* 4364 * If this is the first commit on the page, then it has the same 4365 * timestamp as the page itself. 
4366 */ 4367 if (unlikely(!tail && !(info->add_timestamp & 4368 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 4369 info->delta = 0; 4370 4371 /* We reserved something on the buffer */ 4372 4373 event = __rb_page_index(tail_page, tail); 4374 rb_update_event(cpu_buffer, event, info); 4375 4376 local_inc(&tail_page->entries); 4377 4378 /* 4379 * If this is the first commit on the page, then update 4380 * its timestamp. 4381 */ 4382 if (unlikely(!tail)) 4383 tail_page->page->time_stamp = info->ts; 4384 4385 /* account for these added bytes */ 4386 local_add(info->length, &cpu_buffer->entries_bytes); 4387 4388 return event; 4389 } 4390 4391 static __always_inline struct ring_buffer_event * 4392 rb_reserve_next_event(struct trace_buffer *buffer, 4393 struct ring_buffer_per_cpu *cpu_buffer, 4394 unsigned long length) 4395 { 4396 struct ring_buffer_event *event; 4397 struct rb_event_info info; 4398 int nr_loops = 0; 4399 int add_ts_default; 4400 4401 /* ring buffer does cmpxchg, make sure it is safe in NMI context */ 4402 if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && 4403 (unlikely(in_nmi()))) { 4404 return NULL; 4405 } 4406 4407 rb_start_commit(cpu_buffer); 4408 /* The commit page can not change after this */ 4409 4410 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4411 /* 4412 * Due to the ability to swap a cpu buffer from a buffer 4413 * it is possible it was swapped before we committed. 4414 * (committing stops a swap). We check for it here and 4415 * if it happened, we have to fail the write. 4416 */ 4417 barrier(); 4418 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 4419 local_dec(&cpu_buffer->committing); 4420 local_dec(&cpu_buffer->commits); 4421 return NULL; 4422 } 4423 #endif 4424 4425 info.length = rb_calculate_event_length(length); 4426 4427 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 4428 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 4429 info.length += RB_LEN_TIME_EXTEND; 4430 if (info.length > cpu_buffer->buffer->max_data_size) 4431 goto out_fail; 4432 } else { 4433 add_ts_default = RB_ADD_STAMP_NONE; 4434 } 4435 4436 again: 4437 info.add_timestamp = add_ts_default; 4438 info.delta = 0; 4439 4440 /* 4441 * We allow for interrupts to reenter here and do a trace. 4442 * If one does, it will cause this original code to loop 4443 * back here. Even with heavy interrupts happening, this 4444 * should only happen a few times in a row. If this happens 4445 * 1000 times in a row, there must be either an interrupt 4446 * storm or we have something buggy. 4447 * Bail! 4448 */ 4449 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 4450 goto out_fail; 4451 4452 event = __rb_reserve_next(cpu_buffer, &info); 4453 4454 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 4455 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 4456 info.length -= RB_LEN_TIME_EXTEND; 4457 goto again; 4458 } 4459 4460 if (likely(event)) 4461 return event; 4462 out_fail: 4463 rb_end_commit(cpu_buffer); 4464 return NULL; 4465 } 4466 4467 /** 4468 * ring_buffer_lock_reserve - reserve a part of the buffer 4469 * @buffer: the ring buffer to reserve from 4470 * @length: the length of the data to reserve (excluding event header) 4471 * 4472 * Returns a reserved event on the ring buffer to copy directly to. 4473 * The user of this interface will need to get the body to write into 4474 * and can use the ring_buffer_event_data() interface. 4475 * 4476 * The length is the length of the data needed, not the event length 4477 * which also includes the event header. 
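 *
 * A typical write sequence looks roughly like this (a sketch; the
 * "struct my_event" payload is hypothetical):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_event));
 *	if (event) {
 *		struct my_event *entry = ring_buffer_event_data(event);
 *
 *		entry->field = value;
 *		ring_buffer_unlock_commit(buffer);
 *	}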
4478 * 4479 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 4480 * If NULL is returned, then nothing has been allocated or locked. 4481 */ 4482 struct ring_buffer_event * 4483 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 4484 { 4485 struct ring_buffer_per_cpu *cpu_buffer; 4486 struct ring_buffer_event *event; 4487 int cpu; 4488 4489 /* If we are tracing schedule, we don't want to recurse */ 4490 preempt_disable_notrace(); 4491 4492 if (unlikely(atomic_read(&buffer->record_disabled))) 4493 goto out; 4494 4495 cpu = raw_smp_processor_id(); 4496 4497 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 4498 goto out; 4499 4500 cpu_buffer = buffer->buffers[cpu]; 4501 4502 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 4503 goto out; 4504 4505 if (unlikely(length > buffer->max_data_size)) 4506 goto out; 4507 4508 if (unlikely(trace_recursive_lock(cpu_buffer))) 4509 goto out; 4510 4511 event = rb_reserve_next_event(buffer, cpu_buffer, length); 4512 if (!event) 4513 goto out_unlock; 4514 4515 return event; 4516 4517 out_unlock: 4518 trace_recursive_unlock(cpu_buffer); 4519 out: 4520 preempt_enable_notrace(); 4521 return NULL; 4522 } 4523 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 4524 4525 /* 4526 * Decrement the entries to the page that an event is on. 4527 * The event does not even need to exist, only the pointer 4528 * to the page it is on. This may only be called before the commit 4529 * takes place. 4530 */ 4531 static inline void 4532 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 4533 struct ring_buffer_event *event) 4534 { 4535 unsigned long addr = (unsigned long)event; 4536 struct buffer_page *bpage = cpu_buffer->commit_page; 4537 struct buffer_page *start; 4538 4539 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); 4540 4541 /* Do the likely case first */ 4542 if (likely(bpage->page == (void *)addr)) { 4543 local_dec(&bpage->entries); 4544 return; 4545 } 4546 4547 /* 4548 * Because the commit page may be on the reader page we 4549 * start with the next page and check the end loop there. 4550 */ 4551 rb_inc_page(&bpage); 4552 start = bpage; 4553 do { 4554 if (bpage->page == (void *)addr) { 4555 local_dec(&bpage->entries); 4556 return; 4557 } 4558 rb_inc_page(&bpage); 4559 } while (bpage != start); 4560 4561 /* commit not part of this buffer?? */ 4562 RB_WARN_ON(cpu_buffer, 1); 4563 } 4564 4565 /** 4566 * ring_buffer_discard_commit - discard an event that has not been committed 4567 * @buffer: the ring buffer 4568 * @event: non committed event to discard 4569 * 4570 * Sometimes an event that is in the ring buffer needs to be ignored. 4571 * This function lets the user discard an event in the ring buffer 4572 * and then that event will not be read later. 4573 * 4574 * This function only works if it is called before the item has been 4575 * committed. It will try to free the event from the ring buffer 4576 * if another event has not been added behind it. 4577 * 4578 * If another event has been added behind it, it will set the event 4579 * up as discarded, and perform the commit. 4580 * 4581 * If this function is called, do not call ring_buffer_unlock_commit on 4582 * the event. 
4583 */ 4584 void ring_buffer_discard_commit(struct trace_buffer *buffer, 4585 struct ring_buffer_event *event) 4586 { 4587 struct ring_buffer_per_cpu *cpu_buffer; 4588 int cpu; 4589 4590 /* The event is discarded regardless */ 4591 rb_event_discard(event); 4592 4593 cpu = smp_processor_id(); 4594 cpu_buffer = buffer->buffers[cpu]; 4595 4596 /* 4597 * This must only be called if the event has not been 4598 * committed yet. Thus we can assume that preemption 4599 * is still disabled. 4600 */ 4601 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 4602 4603 rb_decrement_entry(cpu_buffer, event); 4604 if (rb_try_to_discard(cpu_buffer, event)) 4605 goto out; 4606 4607 out: 4608 rb_end_commit(cpu_buffer); 4609 4610 trace_recursive_unlock(cpu_buffer); 4611 4612 preempt_enable_notrace(); 4613 4614 } 4615 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 4616 4617 /** 4618 * ring_buffer_write - write data to the buffer without reserving 4619 * @buffer: The ring buffer to write to. 4620 * @length: The length of the data being written (excluding the event header) 4621 * @data: The data to write to the buffer. 4622 * 4623 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 4624 * one function. If you already have the data to write to the buffer, it 4625 * may be easier to simply call this function. 4626 * 4627 * Note, like ring_buffer_lock_reserve, the length is the length of the data 4628 * and not the length of the event which would hold the header. 4629 */ 4630 int ring_buffer_write(struct trace_buffer *buffer, 4631 unsigned long length, 4632 void *data) 4633 { 4634 struct ring_buffer_per_cpu *cpu_buffer; 4635 struct ring_buffer_event *event; 4636 void *body; 4637 int ret = -EBUSY; 4638 int cpu; 4639 4640 preempt_disable_notrace(); 4641 4642 if (atomic_read(&buffer->record_disabled)) 4643 goto out; 4644 4645 cpu = raw_smp_processor_id(); 4646 4647 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4648 goto out; 4649 4650 cpu_buffer = buffer->buffers[cpu]; 4651 4652 if (atomic_read(&cpu_buffer->record_disabled)) 4653 goto out; 4654 4655 if (length > buffer->max_data_size) 4656 goto out; 4657 4658 if (unlikely(trace_recursive_lock(cpu_buffer))) 4659 goto out; 4660 4661 event = rb_reserve_next_event(buffer, cpu_buffer, length); 4662 if (!event) 4663 goto out_unlock; 4664 4665 body = rb_event_data(event); 4666 4667 memcpy(body, data, length); 4668 4669 rb_commit(cpu_buffer); 4670 4671 rb_wakeups(buffer, cpu_buffer); 4672 4673 ret = 0; 4674 4675 out_unlock: 4676 trace_recursive_unlock(cpu_buffer); 4677 4678 out: 4679 preempt_enable_notrace(); 4680 4681 return ret; 4682 } 4683 EXPORT_SYMBOL_GPL(ring_buffer_write); 4684 4685 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 4686 { 4687 struct buffer_page *reader = cpu_buffer->reader_page; 4688 struct buffer_page *head = rb_set_head_page(cpu_buffer); 4689 struct buffer_page *commit = cpu_buffer->commit_page; 4690 4691 /* In case of error, head will be NULL */ 4692 if (unlikely(!head)) 4693 return true; 4694 4695 /* Reader should exhaust content in reader page */ 4696 if (reader->read != rb_page_size(reader)) 4697 return false; 4698 4699 /* 4700 * If writers are committing on the reader page, knowing all 4701 * committed content has been read, the ring buffer is empty. 4702 */ 4703 if (commit == reader) 4704 return true; 4705 4706 /* 4707 * If writers are committing on a page other than reader page 4708 * and head page, there should always be content to read. 
4709 */ 4710 if (commit != head) 4711 return false; 4712 4713 /* 4714 * Writers are committing on the head page, we just need 4715 * to care about there're committed data, and the reader will 4716 * swap reader page with head page when it is to read data. 4717 */ 4718 return rb_page_commit(commit) == 0; 4719 } 4720 4721 /** 4722 * ring_buffer_record_disable - stop all writes into the buffer 4723 * @buffer: The ring buffer to stop writes to. 4724 * 4725 * This prevents all writes to the buffer. Any attempt to write 4726 * to the buffer after this will fail and return NULL. 4727 * 4728 * The caller should call synchronize_rcu() after this. 4729 */ 4730 void ring_buffer_record_disable(struct trace_buffer *buffer) 4731 { 4732 atomic_inc(&buffer->record_disabled); 4733 } 4734 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 4735 4736 /** 4737 * ring_buffer_record_enable - enable writes to the buffer 4738 * @buffer: The ring buffer to enable writes 4739 * 4740 * Note, multiple disables will need the same number of enables 4741 * to truly enable the writing (much like preempt_disable). 4742 */ 4743 void ring_buffer_record_enable(struct trace_buffer *buffer) 4744 { 4745 atomic_dec(&buffer->record_disabled); 4746 } 4747 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 4748 4749 /** 4750 * ring_buffer_record_off - stop all writes into the buffer 4751 * @buffer: The ring buffer to stop writes to. 4752 * 4753 * This prevents all writes to the buffer. Any attempt to write 4754 * to the buffer after this will fail and return NULL. 4755 * 4756 * This is different than ring_buffer_record_disable() as 4757 * it works like an on/off switch, where as the disable() version 4758 * must be paired with a enable(). 4759 */ 4760 void ring_buffer_record_off(struct trace_buffer *buffer) 4761 { 4762 unsigned int rd; 4763 unsigned int new_rd; 4764 4765 rd = atomic_read(&buffer->record_disabled); 4766 do { 4767 new_rd = rd | RB_BUFFER_OFF; 4768 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4769 } 4770 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 4771 4772 /** 4773 * ring_buffer_record_on - restart writes into the buffer 4774 * @buffer: The ring buffer to start writes to. 4775 * 4776 * This enables all writes to the buffer that was disabled by 4777 * ring_buffer_record_off(). 4778 * 4779 * This is different than ring_buffer_record_enable() as 4780 * it works like an on/off switch, where as the enable() version 4781 * must be paired with a disable(). 4782 */ 4783 void ring_buffer_record_on(struct trace_buffer *buffer) 4784 { 4785 unsigned int rd; 4786 unsigned int new_rd; 4787 4788 rd = atomic_read(&buffer->record_disabled); 4789 do { 4790 new_rd = rd & ~RB_BUFFER_OFF; 4791 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4792 } 4793 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 4794 4795 /** 4796 * ring_buffer_record_is_on - return true if the ring buffer can write 4797 * @buffer: The ring buffer to see if write is enabled 4798 * 4799 * Returns true if the ring buffer is in a state that it accepts writes. 4800 */ 4801 bool ring_buffer_record_is_on(struct trace_buffer *buffer) 4802 { 4803 return !atomic_read(&buffer->record_disabled); 4804 } 4805 4806 /** 4807 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 4808 * @buffer: The ring buffer to see if write is set enabled 4809 * 4810 * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 4811 * Note that this does NOT mean it is in a writable state. 
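 *
 * For example (an illustrative sequence, not taken from any caller):
 *
 *	ring_buffer_record_disable(buffer);
 *		ring_buffer_record_is_on(buffer)     == false
 *		ring_buffer_record_is_set_on(buffer) == true
 *	ring_buffer_record_enable(buffer);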
4812 * 4813 * It may return true when the ring buffer has been disabled by 4814 * ring_buffer_record_disable(), as that is a temporary disabling of 4815 * the ring buffer. 4816 */ 4817 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 4818 { 4819 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 4820 } 4821 4822 /** 4823 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 4824 * @buffer: The ring buffer to stop writes to. 4825 * @cpu: The CPU buffer to stop 4826 * 4827 * This prevents all writes to the buffer. Any attempt to write 4828 * to the buffer after this will fail and return NULL. 4829 * 4830 * The caller should call synchronize_rcu() after this. 4831 */ 4832 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 4833 { 4834 struct ring_buffer_per_cpu *cpu_buffer; 4835 4836 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4837 return; 4838 4839 cpu_buffer = buffer->buffers[cpu]; 4840 atomic_inc(&cpu_buffer->record_disabled); 4841 } 4842 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 4843 4844 /** 4845 * ring_buffer_record_enable_cpu - enable writes to the buffer 4846 * @buffer: The ring buffer to enable writes 4847 * @cpu: The CPU to enable. 4848 * 4849 * Note, multiple disables will need the same number of enables 4850 * to truly enable the writing (much like preempt_disable). 4851 */ 4852 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4853 { 4854 struct ring_buffer_per_cpu *cpu_buffer; 4855 4856 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4857 return; 4858 4859 cpu_buffer = buffer->buffers[cpu]; 4860 atomic_dec(&cpu_buffer->record_disabled); 4861 } 4862 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4863 4864 /* 4865 * The total entries in the ring buffer is the running counter 4866 * of entries entered into the ring buffer, minus the sum of 4867 * the entries read from the ring buffer and the number of 4868 * entries that were overwritten. 4869 */ 4870 static inline unsigned long 4871 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4872 { 4873 return local_read(&cpu_buffer->entries) - 4874 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4875 } 4876 4877 /** 4878 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4879 * @buffer: The ring buffer 4880 * @cpu: The per CPU buffer to read from. 4881 */ 4882 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4883 { 4884 unsigned long flags; 4885 struct ring_buffer_per_cpu *cpu_buffer; 4886 struct buffer_page *bpage; 4887 u64 ret = 0; 4888 4889 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4890 return 0; 4891 4892 cpu_buffer = buffer->buffers[cpu]; 4893 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4894 /* 4895 * if the tail is on reader_page, oldest time stamp is on the reader 4896 * page 4897 */ 4898 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4899 bpage = cpu_buffer->reader_page; 4900 else 4901 bpage = rb_set_head_page(cpu_buffer); 4902 if (bpage) 4903 ret = bpage->page->time_stamp; 4904 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4905 4906 return ret; 4907 } 4908 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4909 4910 /** 4911 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer 4912 * @buffer: The ring buffer 4913 * @cpu: The per CPU buffer to read from. 
4914 */ 4915 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4916 { 4917 struct ring_buffer_per_cpu *cpu_buffer; 4918 unsigned long ret; 4919 4920 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4921 return 0; 4922 4923 cpu_buffer = buffer->buffers[cpu]; 4924 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4925 4926 return ret; 4927 } 4928 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4929 4930 /** 4931 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4932 * @buffer: The ring buffer 4933 * @cpu: The per CPU buffer to get the entries from. 4934 */ 4935 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4936 { 4937 struct ring_buffer_per_cpu *cpu_buffer; 4938 4939 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4940 return 0; 4941 4942 cpu_buffer = buffer->buffers[cpu]; 4943 4944 return rb_num_of_entries(cpu_buffer); 4945 } 4946 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4947 4948 /** 4949 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4950 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 4951 * @buffer: The ring buffer 4952 * @cpu: The per CPU buffer to get the number of overruns from 4953 */ 4954 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4955 { 4956 struct ring_buffer_per_cpu *cpu_buffer; 4957 unsigned long ret; 4958 4959 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4960 return 0; 4961 4962 cpu_buffer = buffer->buffers[cpu]; 4963 ret = local_read(&cpu_buffer->overrun); 4964 4965 return ret; 4966 } 4967 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4968 4969 /** 4970 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4971 * commits failing due to the buffer wrapping around while there are uncommitted 4972 * events, such as during an interrupt storm. 4973 * @buffer: The ring buffer 4974 * @cpu: The per CPU buffer to get the number of overruns from 4975 */ 4976 unsigned long 4977 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4978 { 4979 struct ring_buffer_per_cpu *cpu_buffer; 4980 unsigned long ret; 4981 4982 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4983 return 0; 4984 4985 cpu_buffer = buffer->buffers[cpu]; 4986 ret = local_read(&cpu_buffer->commit_overrun); 4987 4988 return ret; 4989 } 4990 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4991 4992 /** 4993 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4994 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 
4995 * @buffer: The ring buffer 4996 * @cpu: The per CPU buffer to get the number of overruns from 4997 */ 4998 unsigned long 4999 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 5000 { 5001 struct ring_buffer_per_cpu *cpu_buffer; 5002 unsigned long ret; 5003 5004 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5005 return 0; 5006 5007 cpu_buffer = buffer->buffers[cpu]; 5008 ret = local_read(&cpu_buffer->dropped_events); 5009 5010 return ret; 5011 } 5012 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 5013 5014 /** 5015 * ring_buffer_read_events_cpu - get the number of events successfully read 5016 * @buffer: The ring buffer 5017 * @cpu: The per CPU buffer to get the number of events read 5018 */ 5019 unsigned long 5020 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 5021 { 5022 struct ring_buffer_per_cpu *cpu_buffer; 5023 5024 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5025 return 0; 5026 5027 cpu_buffer = buffer->buffers[cpu]; 5028 return cpu_buffer->read; 5029 } 5030 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 5031 5032 /** 5033 * ring_buffer_entries - get the number of entries in a buffer 5034 * @buffer: The ring buffer 5035 * 5036 * Returns the total number of entries in the ring buffer 5037 * (all CPU entries) 5038 */ 5039 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 5040 { 5041 struct ring_buffer_per_cpu *cpu_buffer; 5042 unsigned long entries = 0; 5043 int cpu; 5044 5045 /* if you care about this being correct, lock the buffer */ 5046 for_each_buffer_cpu(buffer, cpu) { 5047 cpu_buffer = buffer->buffers[cpu]; 5048 entries += rb_num_of_entries(cpu_buffer); 5049 } 5050 5051 return entries; 5052 } 5053 EXPORT_SYMBOL_GPL(ring_buffer_entries); 5054 5055 /** 5056 * ring_buffer_overruns - get the number of overruns in buffer 5057 * @buffer: The ring buffer 5058 * 5059 * Returns the total number of overruns in the ring buffer 5060 * (all CPU entries) 5061 */ 5062 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 5063 { 5064 struct ring_buffer_per_cpu *cpu_buffer; 5065 unsigned long overruns = 0; 5066 int cpu; 5067 5068 /* if you care about this being correct, lock the buffer */ 5069 for_each_buffer_cpu(buffer, cpu) { 5070 cpu_buffer = buffer->buffers[cpu]; 5071 overruns += local_read(&cpu_buffer->overrun); 5072 } 5073 5074 return overruns; 5075 } 5076 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 5077 5078 static void rb_iter_reset(struct ring_buffer_iter *iter) 5079 { 5080 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5081 5082 /* Iterator usage is expected to have record disabled */ 5083 iter->head_page = cpu_buffer->reader_page; 5084 iter->head = cpu_buffer->reader_page->read; 5085 iter->next_event = iter->head; 5086 5087 iter->cache_reader_page = iter->head_page; 5088 iter->cache_read = cpu_buffer->read; 5089 iter->cache_pages_removed = cpu_buffer->pages_removed; 5090 5091 if (iter->head) { 5092 iter->read_stamp = cpu_buffer->read_stamp; 5093 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 5094 } else { 5095 iter->read_stamp = iter->head_page->page->time_stamp; 5096 iter->page_stamp = iter->read_stamp; 5097 } 5098 } 5099 5100 /** 5101 * ring_buffer_iter_reset - reset an iterator 5102 * @iter: The iterator to reset 5103 * 5104 * Resets the iterator, so that it will start from the beginning 5105 * again. 
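 *
 * A minimal sketch (the iterator is assumed to come from
 * ring_buffer_read_prepare() followed by ring_buffer_read_start()):
 *
 *	ring_buffer_iter_reset(iter);
 *	event = ring_buffer_iter_peek(iter, &ts);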
5106 */ 5107 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 5108 { 5109 struct ring_buffer_per_cpu *cpu_buffer; 5110 unsigned long flags; 5111 5112 if (!iter) 5113 return; 5114 5115 cpu_buffer = iter->cpu_buffer; 5116 5117 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5118 rb_iter_reset(iter); 5119 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5120 } 5121 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 5122 5123 /** 5124 * ring_buffer_iter_empty - check if an iterator has no more to read 5125 * @iter: The iterator to check 5126 */ 5127 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 5128 { 5129 struct ring_buffer_per_cpu *cpu_buffer; 5130 struct buffer_page *reader; 5131 struct buffer_page *head_page; 5132 struct buffer_page *commit_page; 5133 struct buffer_page *curr_commit_page; 5134 unsigned commit; 5135 u64 curr_commit_ts; 5136 u64 commit_ts; 5137 5138 cpu_buffer = iter->cpu_buffer; 5139 reader = cpu_buffer->reader_page; 5140 head_page = cpu_buffer->head_page; 5141 commit_page = READ_ONCE(cpu_buffer->commit_page); 5142 commit_ts = commit_page->page->time_stamp; 5143 5144 /* 5145 * When the writer goes across pages, it issues a cmpxchg which 5146 * is a mb(), which will synchronize with the rmb here. 5147 * (see rb_tail_page_update()) 5148 */ 5149 smp_rmb(); 5150 commit = rb_page_commit(commit_page); 5151 /* We want to make sure that the commit page doesn't change */ 5152 smp_rmb(); 5153 5154 /* Make sure commit page didn't change */ 5155 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 5156 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 5157 5158 /* If the commit page changed, then there's more data */ 5159 if (curr_commit_page != commit_page || 5160 curr_commit_ts != commit_ts) 5161 return 0; 5162 5163 /* Still racy, as it may return a false positive, but that's OK */ 5164 return ((iter->head_page == commit_page && iter->head >= commit) || 5165 (iter->head_page == reader && commit_page == head_page && 5166 head_page->read == commit && 5167 iter->head == rb_page_size(cpu_buffer->reader_page))); 5168 } 5169 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 5170 5171 static void 5172 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 5173 struct ring_buffer_event *event) 5174 { 5175 u64 delta; 5176 5177 switch (event->type_len) { 5178 case RINGBUF_TYPE_PADDING: 5179 return; 5180 5181 case RINGBUF_TYPE_TIME_EXTEND: 5182 delta = rb_event_time_stamp(event); 5183 cpu_buffer->read_stamp += delta; 5184 return; 5185 5186 case RINGBUF_TYPE_TIME_STAMP: 5187 delta = rb_event_time_stamp(event); 5188 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 5189 cpu_buffer->read_stamp = delta; 5190 return; 5191 5192 case RINGBUF_TYPE_DATA: 5193 cpu_buffer->read_stamp += event->time_delta; 5194 return; 5195 5196 default: 5197 RB_WARN_ON(cpu_buffer, 1); 5198 } 5199 } 5200 5201 static void 5202 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 5203 struct ring_buffer_event *event) 5204 { 5205 u64 delta; 5206 5207 switch (event->type_len) { 5208 case RINGBUF_TYPE_PADDING: 5209 return; 5210 5211 case RINGBUF_TYPE_TIME_EXTEND: 5212 delta = rb_event_time_stamp(event); 5213 iter->read_stamp += delta; 5214 return; 5215 5216 case RINGBUF_TYPE_TIME_STAMP: 5217 delta = rb_event_time_stamp(event); 5218 delta = rb_fix_abs_ts(delta, iter->read_stamp); 5219 iter->read_stamp = delta; 5220 return; 5221 5222 case RINGBUF_TYPE_DATA: 5223 iter->read_stamp += event->time_delta; 5224 return; 5225 5226 default: 5227 RB_WARN_ON(iter->cpu_buffer, 1); 5228 
} 5229 } 5230 5231 static struct buffer_page * 5232 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 5233 { 5234 struct buffer_page *reader = NULL; 5235 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); 5236 unsigned long overwrite; 5237 unsigned long flags; 5238 int nr_loops = 0; 5239 bool ret; 5240 5241 local_irq_save(flags); 5242 arch_spin_lock(&cpu_buffer->lock); 5243 5244 again: 5245 /* 5246 * This should normally only loop twice. But because the 5247 * start of the reader inserts an empty page, it causes 5248 * a case where we will loop three times. There should be no 5249 * reason to loop four times (that I know of). 5250 */ 5251 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 5252 reader = NULL; 5253 goto out; 5254 } 5255 5256 reader = cpu_buffer->reader_page; 5257 5258 /* If there's more to read, return this page */ 5259 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 5260 goto out; 5261 5262 /* Never should we have an index greater than the size */ 5263 if (RB_WARN_ON(cpu_buffer, 5264 cpu_buffer->reader_page->read > rb_page_size(reader))) 5265 goto out; 5266 5267 /* check if we caught up to the tail */ 5268 reader = NULL; 5269 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 5270 goto out; 5271 5272 /* Don't bother swapping if the ring buffer is empty */ 5273 if (rb_num_of_entries(cpu_buffer) == 0) 5274 goto out; 5275 5276 /* 5277 * Reset the reader page to size zero. 5278 */ 5279 local_set(&cpu_buffer->reader_page->write, 0); 5280 local_set(&cpu_buffer->reader_page->entries, 0); 5281 local_set(&cpu_buffer->reader_page->page->commit, 0); 5282 cpu_buffer->reader_page->real_end = 0; 5283 5284 spin: 5285 /* 5286 * Splice the empty reader page into the list around the head. 5287 */ 5288 reader = rb_set_head_page(cpu_buffer); 5289 if (!reader) 5290 goto out; 5291 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 5292 cpu_buffer->reader_page->list.prev = reader->list.prev; 5293 5294 /* 5295 * cpu_buffer->pages just needs to point to the buffer, it 5296 * has no specific buffer page to point to. Lets move it out 5297 * of our way so we don't accidentally swap it. 5298 */ 5299 cpu_buffer->pages = reader->list.prev; 5300 5301 /* The reader page will be pointing to the new head */ 5302 rb_set_list_to_head(&cpu_buffer->reader_page->list); 5303 5304 /* 5305 * We want to make sure we read the overruns after we set up our 5306 * pointers to the next object. The writer side does a 5307 * cmpxchg to cross pages which acts as the mb on the writer 5308 * side. Note, the reader will constantly fail the swap 5309 * while the writer is updating the pointers, so this 5310 * guarantees that the overwrite recorded here is the one we 5311 * want to compare with the last_overrun. 5312 */ 5313 smp_mb(); 5314 overwrite = local_read(&(cpu_buffer->overrun)); 5315 5316 /* 5317 * Here's the tricky part. 5318 * 5319 * We need to move the pointer past the header page. 5320 * But we can only do that if a writer is not currently 5321 * moving it. The page before the header page has the 5322 * flag bit '1' set if it is pointing to the page we want. 5323 * but if the writer is in the process of moving it 5324 * than it will be '2' or already moved '0'. 5325 */ 5326 5327 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 5328 5329 /* 5330 * If we did not convert it, then we must try again. 5331 */ 5332 if (!ret) 5333 goto spin; 5334 5335 if (cpu_buffer->ring_meta) 5336 rb_update_meta_reader(cpu_buffer, reader); 5337 5338 /* 5339 * Yay! 
We succeeded in replacing the page. 5340 * 5341 * Now make the new head point back to the reader page. 5342 */ 5343 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 5344 rb_inc_page(&cpu_buffer->head_page); 5345 5346 cpu_buffer->cnt++; 5347 local_inc(&cpu_buffer->pages_read); 5348 5349 /* Finally update the reader page to the new head */ 5350 cpu_buffer->reader_page = reader; 5351 cpu_buffer->reader_page->read = 0; 5352 5353 if (overwrite != cpu_buffer->last_overrun) { 5354 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 5355 cpu_buffer->last_overrun = overwrite; 5356 } 5357 5358 goto again; 5359 5360 out: 5361 /* Update the read_stamp on the first event */ 5362 if (reader && reader->read == 0) 5363 cpu_buffer->read_stamp = reader->page->time_stamp; 5364 5365 arch_spin_unlock(&cpu_buffer->lock); 5366 local_irq_restore(flags); 5367 5368 /* 5369 * The writer has preempt disable, wait for it. But not forever 5370 * Although, 1 second is pretty much "forever" 5371 */ 5372 #define USECS_WAIT 1000000 5373 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 5374 /* If the write is past the end of page, a writer is still updating it */ 5375 if (likely(!reader || rb_page_write(reader) <= bsize)) 5376 break; 5377 5378 udelay(1); 5379 5380 /* Get the latest version of the reader write value */ 5381 smp_rmb(); 5382 } 5383 5384 /* The writer is not moving forward? Something is wrong */ 5385 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 5386 reader = NULL; 5387 5388 /* 5389 * Make sure we see any padding after the write update 5390 * (see rb_reset_tail()). 5391 * 5392 * In addition, a writer may be writing on the reader page 5393 * if the page has not been fully filled, so the read barrier 5394 * is also needed to make sure we see the content of what is 5395 * committed by the writer (see rb_set_commit_to_write()). 5396 */ 5397 smp_rmb(); 5398 5399 5400 return reader; 5401 } 5402 5403 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 5404 { 5405 struct ring_buffer_event *event; 5406 struct buffer_page *reader; 5407 unsigned length; 5408 5409 reader = rb_get_reader_page(cpu_buffer); 5410 5411 /* This function should not be called when buffer is empty */ 5412 if (RB_WARN_ON(cpu_buffer, !reader)) 5413 return; 5414 5415 event = rb_reader_event(cpu_buffer); 5416 5417 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 5418 cpu_buffer->read++; 5419 5420 rb_update_read_stamp(cpu_buffer, event); 5421 5422 length = rb_event_length(event); 5423 cpu_buffer->reader_page->read += length; 5424 cpu_buffer->read_bytes += length; 5425 } 5426 5427 static void rb_advance_iter(struct ring_buffer_iter *iter) 5428 { 5429 struct ring_buffer_per_cpu *cpu_buffer; 5430 5431 cpu_buffer = iter->cpu_buffer; 5432 5433 /* If head == next_event then we need to jump to the next event */ 5434 if (iter->head == iter->next_event) { 5435 /* If the event gets overwritten again, there's nothing to do */ 5436 if (rb_iter_head_event(iter) == NULL) 5437 return; 5438 } 5439 5440 iter->head = iter->next_event; 5441 5442 /* 5443 * Check if we are at the end of the buffer. 
5444 */ 5445 if (iter->next_event >= rb_page_size(iter->head_page)) { 5446 /* discarded commits can make the page empty */ 5447 if (iter->head_page == cpu_buffer->commit_page) 5448 return; 5449 rb_inc_iter(iter); 5450 return; 5451 } 5452 5453 rb_update_iter_read_stamp(iter, iter->event); 5454 } 5455 5456 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 5457 { 5458 return cpu_buffer->lost_events; 5459 } 5460 5461 static struct ring_buffer_event * 5462 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 5463 unsigned long *lost_events) 5464 { 5465 struct ring_buffer_event *event; 5466 struct buffer_page *reader; 5467 int nr_loops = 0; 5468 5469 if (ts) 5470 *ts = 0; 5471 again: 5472 /* 5473 * We repeat when a time extend is encountered. 5474 * Since the time extend is always attached to a data event, 5475 * we should never loop more than once. 5476 * (We never hit the following condition more than twice). 5477 */ 5478 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 5479 return NULL; 5480 5481 reader = rb_get_reader_page(cpu_buffer); 5482 if (!reader) 5483 return NULL; 5484 5485 event = rb_reader_event(cpu_buffer); 5486 5487 switch (event->type_len) { 5488 case RINGBUF_TYPE_PADDING: 5489 if (rb_null_event(event)) 5490 RB_WARN_ON(cpu_buffer, 1); 5491 /* 5492 * Because the writer could be discarding every 5493 * event it creates (which would probably be bad) 5494 * if we were to go back to "again" then we may never 5495 * catch up, and will trigger the warn on, or lock 5496 * the box. Return the padding, and we will release 5497 * the current locks, and try again. 5498 */ 5499 return event; 5500 5501 case RINGBUF_TYPE_TIME_EXTEND: 5502 /* Internal data, OK to advance */ 5503 rb_advance_reader(cpu_buffer); 5504 goto again; 5505 5506 case RINGBUF_TYPE_TIME_STAMP: 5507 if (ts) { 5508 *ts = rb_event_time_stamp(event); 5509 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 5510 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 5511 cpu_buffer->cpu, ts); 5512 } 5513 /* Internal data, OK to advance */ 5514 rb_advance_reader(cpu_buffer); 5515 goto again; 5516 5517 case RINGBUF_TYPE_DATA: 5518 if (ts && !(*ts)) { 5519 *ts = cpu_buffer->read_stamp + event->time_delta; 5520 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 5521 cpu_buffer->cpu, ts); 5522 } 5523 if (lost_events) 5524 *lost_events = rb_lost_events(cpu_buffer); 5525 return event; 5526 5527 default: 5528 RB_WARN_ON(cpu_buffer, 1); 5529 } 5530 5531 return NULL; 5532 } 5533 EXPORT_SYMBOL_GPL(ring_buffer_peek); 5534 5535 static struct ring_buffer_event * 5536 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 5537 { 5538 struct trace_buffer *buffer; 5539 struct ring_buffer_per_cpu *cpu_buffer; 5540 struct ring_buffer_event *event; 5541 int nr_loops = 0; 5542 5543 if (ts) 5544 *ts = 0; 5545 5546 cpu_buffer = iter->cpu_buffer; 5547 buffer = cpu_buffer->buffer; 5548 5549 /* 5550 * Check if someone performed a consuming read to the buffer 5551 * or removed some pages from the buffer. In these cases, 5552 * iterator was invalidated and we need to reset it. 5553 */ 5554 if (unlikely(iter->cache_read != cpu_buffer->read || 5555 iter->cache_reader_page != cpu_buffer->reader_page || 5556 iter->cache_pages_removed != cpu_buffer->pages_removed)) 5557 rb_iter_reset(iter); 5558 5559 again: 5560 if (ring_buffer_iter_empty(iter)) 5561 return NULL; 5562 5563 /* 5564 * As the writer can mess with what the iterator is trying 5565 * to read, just give up if we fail to get an event after 5566 * three tries. 
The iterator is not as reliable when reading 5567 * the ring buffer with an active write as the consumer is. 5568 * Do not warn if the three failures is reached. 5569 */ 5570 if (++nr_loops > 3) 5571 return NULL; 5572 5573 if (rb_per_cpu_empty(cpu_buffer)) 5574 return NULL; 5575 5576 if (iter->head >= rb_page_size(iter->head_page)) { 5577 rb_inc_iter(iter); 5578 goto again; 5579 } 5580 5581 event = rb_iter_head_event(iter); 5582 if (!event) 5583 goto again; 5584 5585 switch (event->type_len) { 5586 case RINGBUF_TYPE_PADDING: 5587 if (rb_null_event(event)) { 5588 rb_inc_iter(iter); 5589 goto again; 5590 } 5591 rb_advance_iter(iter); 5592 return event; 5593 5594 case RINGBUF_TYPE_TIME_EXTEND: 5595 /* Internal data, OK to advance */ 5596 rb_advance_iter(iter); 5597 goto again; 5598 5599 case RINGBUF_TYPE_TIME_STAMP: 5600 if (ts) { 5601 *ts = rb_event_time_stamp(event); 5602 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); 5603 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 5604 cpu_buffer->cpu, ts); 5605 } 5606 /* Internal data, OK to advance */ 5607 rb_advance_iter(iter); 5608 goto again; 5609 5610 case RINGBUF_TYPE_DATA: 5611 if (ts && !(*ts)) { 5612 *ts = iter->read_stamp + event->time_delta; 5613 ring_buffer_normalize_time_stamp(buffer, 5614 cpu_buffer->cpu, ts); 5615 } 5616 return event; 5617 5618 default: 5619 RB_WARN_ON(cpu_buffer, 1); 5620 } 5621 5622 return NULL; 5623 } 5624 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 5625 5626 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) 5627 { 5628 if (likely(!in_nmi())) { 5629 raw_spin_lock(&cpu_buffer->reader_lock); 5630 return true; 5631 } 5632 5633 /* 5634 * If an NMI die dumps out the content of the ring buffer 5635 * trylock must be used to prevent a deadlock if the NMI 5636 * preempted a task that holds the ring buffer locks. If 5637 * we get the lock then all is fine, if not, then continue 5638 * to do the read, but this can corrupt the ring buffer, 5639 * so it must be permanently disabled from future writes. 5640 * Reading from NMI is a oneshot deal. 5641 */ 5642 if (raw_spin_trylock(&cpu_buffer->reader_lock)) 5643 return true; 5644 5645 /* Continue without locking, but disable the ring buffer */ 5646 atomic_inc(&cpu_buffer->record_disabled); 5647 return false; 5648 } 5649 5650 static inline void 5651 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) 5652 { 5653 if (likely(locked)) 5654 raw_spin_unlock(&cpu_buffer->reader_lock); 5655 } 5656 5657 /** 5658 * ring_buffer_peek - peek at the next event to be read 5659 * @buffer: The ring buffer to read 5660 * @cpu: The cpu to peak at 5661 * @ts: The timestamp counter of this event. 5662 * @lost_events: a variable to store if events were lost (may be NULL) 5663 * 5664 * This will return the event that will be read next, but does 5665 * not consume the data. 
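 *
 * A sketch of a peek-then-decide caller ("deadline" and the policy are
 * hypothetical; actual consumption goes through ring_buffer_consume()):
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event && ts <= deadline)
 *		event = ring_buffer_consume(buffer, cpu, &ts, &lost);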
 */
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	bool dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_dropped - report if there are dropped events
 * @iter: The ring buffer iterator
 *
 * Returns true if there were dropped events since the last peek.
 */
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
{
	bool ret = iter->missed_events != 0;

	iter->missed_events = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
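 *
 * A minimal sketch of draining one CPU buffer with this interface
 * (handle_event() is a hypothetical callback, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		handle_event(ring_buffer_event_data(event), ts);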
5744 */ 5745 struct ring_buffer_event * 5746 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 5747 unsigned long *lost_events) 5748 { 5749 struct ring_buffer_per_cpu *cpu_buffer; 5750 struct ring_buffer_event *event = NULL; 5751 unsigned long flags; 5752 bool dolock; 5753 5754 again: 5755 /* might be called in atomic */ 5756 preempt_disable(); 5757 5758 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5759 goto out; 5760 5761 cpu_buffer = buffer->buffers[cpu]; 5762 local_irq_save(flags); 5763 dolock = rb_reader_lock(cpu_buffer); 5764 5765 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 5766 if (event) { 5767 cpu_buffer->lost_events = 0; 5768 rb_advance_reader(cpu_buffer); 5769 } 5770 5771 rb_reader_unlock(cpu_buffer, dolock); 5772 local_irq_restore(flags); 5773 5774 out: 5775 preempt_enable(); 5776 5777 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5778 goto again; 5779 5780 return event; 5781 } 5782 EXPORT_SYMBOL_GPL(ring_buffer_consume); 5783 5784 /** 5785 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 5786 * @buffer: The ring buffer to read from 5787 * @cpu: The cpu buffer to iterate over 5788 * @flags: gfp flags to use for memory allocation 5789 * 5790 * This performs the initial preparations necessary to iterate 5791 * through the buffer. Memory is allocated, buffer resizing 5792 * is disabled, and the iterator pointer is returned to the caller. 5793 * 5794 * After a sequence of ring_buffer_read_prepare calls, the user is 5795 * expected to make at least one call to ring_buffer_read_prepare_sync. 5796 * Afterwards, ring_buffer_read_start is invoked to get things going 5797 * for real. 5798 * 5799 * This overall must be paired with ring_buffer_read_finish. 5800 */ 5801 struct ring_buffer_iter * 5802 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 5803 { 5804 struct ring_buffer_per_cpu *cpu_buffer; 5805 struct ring_buffer_iter *iter; 5806 5807 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5808 return NULL; 5809 5810 iter = kzalloc(sizeof(*iter), flags); 5811 if (!iter) 5812 return NULL; 5813 5814 /* Holds the entire event: data and meta data */ 5815 iter->event_size = buffer->subbuf_size; 5816 iter->event = kmalloc(iter->event_size, flags); 5817 if (!iter->event) { 5818 kfree(iter); 5819 return NULL; 5820 } 5821 5822 cpu_buffer = buffer->buffers[cpu]; 5823 5824 iter->cpu_buffer = cpu_buffer; 5825 5826 atomic_inc(&cpu_buffer->resize_disabled); 5827 5828 return iter; 5829 } 5830 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 5831 5832 /** 5833 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 5834 * 5835 * All previously invoked ring_buffer_read_prepare calls to prepare 5836 * iterators will be synchronized. Afterwards, read_buffer_read_start 5837 * calls on those iterators are allowed. 5838 */ 5839 void 5840 ring_buffer_read_prepare_sync(void) 5841 { 5842 synchronize_rcu(); 5843 } 5844 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 5845 5846 /** 5847 * ring_buffer_read_start - start a non consuming read of the buffer 5848 * @iter: The iterator returned by ring_buffer_read_prepare 5849 * 5850 * This finalizes the startup of an iteration through the buffer. 5851 * The iterator comes from a call to ring_buffer_read_prepare and 5852 * an intervening ring_buffer_read_prepare_sync must have been 5853 * performed. 5854 * 5855 * Must be paired with ring_buffer_read_finish. 
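 *
 * A minimal sketch of the full non-consuming sequence for a single CPU
 * (declarations and error handling omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);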
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables resizing of the buffer, and frees the iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Use this opportunity to check the integrity of the ring buffer. */
	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->resize_disabled);
	kfree(iter->event);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_iter_advance - advance the iterator to the next location
 * @iter: The ring buffer iterator
 *
 * Move the location of the iterator such that the next read will
 * be the next location of the iterator.
 */
void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	rb_advance_iter(iter);

	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

/**
 * ring_buffer_max_event_size - return the max data size of an event
 * @buffer: The ring buffer.
 *
 * Returns the maximum size an event can be.
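 *
 * Writers can use this as an upper bound when sizing payloads; a length
 * passed to ring_buffer_lock_reserve() that is larger than this value
 * can be expected to fail.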
5935 */ 5936 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer) 5937 { 5938 /* If abs timestamp is requested, events have a timestamp too */ 5939 if (ring_buffer_time_stamp_abs(buffer)) 5940 return buffer->max_data_size - RB_LEN_TIME_EXTEND; 5941 return buffer->max_data_size; 5942 } 5943 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size); 5944 5945 static void rb_clear_buffer_page(struct buffer_page *page) 5946 { 5947 local_set(&page->write, 0); 5948 local_set(&page->entries, 0); 5949 rb_init_page(page->page); 5950 page->read = 0; 5951 } 5952 5953 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) 5954 { 5955 struct trace_buffer_meta *meta = cpu_buffer->meta_page; 5956 5957 if (!meta) 5958 return; 5959 5960 meta->reader.read = cpu_buffer->reader_page->read; 5961 meta->reader.id = cpu_buffer->reader_page->id; 5962 meta->reader.lost_events = cpu_buffer->lost_events; 5963 5964 meta->entries = local_read(&cpu_buffer->entries); 5965 meta->overrun = local_read(&cpu_buffer->overrun); 5966 meta->read = cpu_buffer->read; 5967 5968 /* Some archs do not have data cache coherency between kernel and user-space */ 5969 flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page)); 5970 } 5971 5972 static void 5973 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5974 { 5975 struct buffer_page *page; 5976 5977 rb_head_page_deactivate(cpu_buffer); 5978 5979 cpu_buffer->head_page 5980 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5981 rb_clear_buffer_page(cpu_buffer->head_page); 5982 list_for_each_entry(page, cpu_buffer->pages, list) { 5983 rb_clear_buffer_page(page); 5984 } 5985 5986 cpu_buffer->tail_page = cpu_buffer->head_page; 5987 cpu_buffer->commit_page = cpu_buffer->head_page; 5988 5989 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5990 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5991 rb_clear_buffer_page(cpu_buffer->reader_page); 5992 5993 local_set(&cpu_buffer->entries_bytes, 0); 5994 local_set(&cpu_buffer->overrun, 0); 5995 local_set(&cpu_buffer->commit_overrun, 0); 5996 local_set(&cpu_buffer->dropped_events, 0); 5997 local_set(&cpu_buffer->entries, 0); 5998 local_set(&cpu_buffer->committing, 0); 5999 local_set(&cpu_buffer->commits, 0); 6000 local_set(&cpu_buffer->pages_touched, 0); 6001 local_set(&cpu_buffer->pages_lost, 0); 6002 local_set(&cpu_buffer->pages_read, 0); 6003 cpu_buffer->last_pages_touch = 0; 6004 cpu_buffer->shortest_full = 0; 6005 cpu_buffer->read = 0; 6006 cpu_buffer->read_bytes = 0; 6007 6008 rb_time_set(&cpu_buffer->write_stamp, 0); 6009 rb_time_set(&cpu_buffer->before_stamp, 0); 6010 6011 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 6012 6013 cpu_buffer->lost_events = 0; 6014 cpu_buffer->last_overrun = 0; 6015 6016 rb_head_page_activate(cpu_buffer); 6017 cpu_buffer->pages_removed = 0; 6018 6019 if (cpu_buffer->mapped) { 6020 rb_update_meta_page(cpu_buffer); 6021 if (cpu_buffer->ring_meta) { 6022 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; 6023 meta->commit_buffer = meta->head_buffer; 6024 } 6025 } 6026 } 6027 6028 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 6029 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 6030 { 6031 unsigned long flags; 6032 6033 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 6034 6035 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 6036 goto out; 6037 6038 arch_spin_lock(&cpu_buffer->lock); 6039 6040 rb_reset_cpu(cpu_buffer); 6041 6042 arch_spin_unlock(&cpu_buffer->lock); 6043 6044 out: 6045 
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 6046 } 6047 6048 /** 6049 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 6050 * @buffer: The ring buffer to reset a per cpu buffer of 6051 * @cpu: The CPU buffer to be reset 6052 */ 6053 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 6054 { 6055 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 6056 struct ring_buffer_meta *meta; 6057 6058 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6059 return; 6060 6061 /* prevent another thread from changing buffer sizes */ 6062 mutex_lock(&buffer->mutex); 6063 6064 atomic_inc(&cpu_buffer->resize_disabled); 6065 atomic_inc(&cpu_buffer->record_disabled); 6066 6067 /* Make sure all commits have finished */ 6068 synchronize_rcu(); 6069 6070 reset_disabled_cpu_buffer(cpu_buffer); 6071 6072 atomic_dec(&cpu_buffer->record_disabled); 6073 atomic_dec(&cpu_buffer->resize_disabled); 6074 6075 /* Make sure persistent meta now uses this buffer's addresses */ 6076 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); 6077 if (meta) 6078 rb_meta_init_text_addr(meta); 6079 6080 mutex_unlock(&buffer->mutex); 6081 } 6082 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 6083 6084 /* Flag to ensure proper resetting of atomic variables */ 6085 #define RESET_BIT (1 << 30) 6086 6087 /** 6088 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 6089 * @buffer: The ring buffer to reset a per cpu buffer of 6090 */ 6091 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 6092 { 6093 struct ring_buffer_per_cpu *cpu_buffer; 6094 struct ring_buffer_meta *meta; 6095 int cpu; 6096 6097 /* prevent another thread from changing buffer sizes */ 6098 mutex_lock(&buffer->mutex); 6099 6100 for_each_online_buffer_cpu(buffer, cpu) { 6101 cpu_buffer = buffer->buffers[cpu]; 6102 6103 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); 6104 atomic_inc(&cpu_buffer->record_disabled); 6105 } 6106 6107 /* Make sure all commits have finished */ 6108 synchronize_rcu(); 6109 6110 for_each_buffer_cpu(buffer, cpu) { 6111 cpu_buffer = buffer->buffers[cpu]; 6112 6113 /* 6114 * If a CPU came online during the synchronize_rcu(), then 6115 * ignore it. 
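	 * Such a CPU never had RESET_BIT added to its resize_disabled
	 * counter in the first loop, which is what the check below
	 * keys off of.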
6116 */ 6117 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) 6118 continue; 6119 6120 reset_disabled_cpu_buffer(cpu_buffer); 6121 6122 /* Make sure persistent meta now uses this buffer's addresses */ 6123 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); 6124 if (meta) 6125 rb_meta_init_text_addr(meta); 6126 6127 atomic_dec(&cpu_buffer->record_disabled); 6128 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); 6129 } 6130 6131 mutex_unlock(&buffer->mutex); 6132 } 6133 6134 /** 6135 * ring_buffer_reset - reset a ring buffer 6136 * @buffer: The ring buffer to reset all cpu buffers 6137 */ 6138 void ring_buffer_reset(struct trace_buffer *buffer) 6139 { 6140 struct ring_buffer_per_cpu *cpu_buffer; 6141 int cpu; 6142 6143 /* prevent another thread from changing buffer sizes */ 6144 mutex_lock(&buffer->mutex); 6145 6146 for_each_buffer_cpu(buffer, cpu) { 6147 cpu_buffer = buffer->buffers[cpu]; 6148 6149 atomic_inc(&cpu_buffer->resize_disabled); 6150 atomic_inc(&cpu_buffer->record_disabled); 6151 } 6152 6153 /* Make sure all commits have finished */ 6154 synchronize_rcu(); 6155 6156 for_each_buffer_cpu(buffer, cpu) { 6157 cpu_buffer = buffer->buffers[cpu]; 6158 6159 reset_disabled_cpu_buffer(cpu_buffer); 6160 6161 atomic_dec(&cpu_buffer->record_disabled); 6162 atomic_dec(&cpu_buffer->resize_disabled); 6163 } 6164 6165 mutex_unlock(&buffer->mutex); 6166 } 6167 EXPORT_SYMBOL_GPL(ring_buffer_reset); 6168 6169 /** 6170 * ring_buffer_empty - is the ring buffer empty? 6171 * @buffer: The ring buffer to test 6172 */ 6173 bool ring_buffer_empty(struct trace_buffer *buffer) 6174 { 6175 struct ring_buffer_per_cpu *cpu_buffer; 6176 unsigned long flags; 6177 bool dolock; 6178 bool ret; 6179 int cpu; 6180 6181 /* yes this is racy, but if you don't like the race, lock the buffer */ 6182 for_each_buffer_cpu(buffer, cpu) { 6183 cpu_buffer = buffer->buffers[cpu]; 6184 local_irq_save(flags); 6185 dolock = rb_reader_lock(cpu_buffer); 6186 ret = rb_per_cpu_empty(cpu_buffer); 6187 rb_reader_unlock(cpu_buffer, dolock); 6188 local_irq_restore(flags); 6189 6190 if (!ret) 6191 return false; 6192 } 6193 6194 return true; 6195 } 6196 EXPORT_SYMBOL_GPL(ring_buffer_empty); 6197 6198 /** 6199 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 6200 * @buffer: The ring buffer 6201 * @cpu: The CPU buffer to test 6202 */ 6203 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 6204 { 6205 struct ring_buffer_per_cpu *cpu_buffer; 6206 unsigned long flags; 6207 bool dolock; 6208 bool ret; 6209 6210 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6211 return true; 6212 6213 cpu_buffer = buffer->buffers[cpu]; 6214 local_irq_save(flags); 6215 dolock = rb_reader_lock(cpu_buffer); 6216 ret = rb_per_cpu_empty(cpu_buffer); 6217 rb_reader_unlock(cpu_buffer, dolock); 6218 local_irq_restore(flags); 6219 6220 return ret; 6221 } 6222 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 6223 6224 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 6225 /** 6226 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 6227 * @buffer_a: One buffer to swap with 6228 * @buffer_b: The other buffer to swap with 6229 * @cpu: the CPU of the buffers to swap 6230 * 6231 * This function is useful for tracers that want to take a "snapshot" 6232 * of a CPU buffer and has another back up buffer lying around. 6233 * it is expected that the tracer handles the cpu buffer not being 6234 * used at the moment. 
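 *
 * A minimal "snapshot" sketch, assuming main_buffer and snap_buffer
 * were allocated with the same number of pages and the same sub-buffer
 * order (both names are illustrative, not part of this API):
 *
 *	err = ring_buffer_swap_cpu(main_buffer, snap_buffer, cpu);
 *	if (!err)
 *		read_snapshot(snap_buffer, cpu);
 *
 * where read_snapshot() stands in for whatever consuming read the
 * tracer performs on the now-idle buffer.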
6235 */ 6236 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 6237 struct trace_buffer *buffer_b, int cpu) 6238 { 6239 struct ring_buffer_per_cpu *cpu_buffer_a; 6240 struct ring_buffer_per_cpu *cpu_buffer_b; 6241 int ret = -EINVAL; 6242 6243 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 6244 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 6245 goto out; 6246 6247 cpu_buffer_a = buffer_a->buffers[cpu]; 6248 cpu_buffer_b = buffer_b->buffers[cpu]; 6249 6250 /* It's up to the callers to not try to swap mapped buffers */ 6251 if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) { 6252 ret = -EBUSY; 6253 goto out; 6254 } 6255 6256 /* At least make sure the two buffers are somewhat the same */ 6257 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 6258 goto out; 6259 6260 if (buffer_a->subbuf_order != buffer_b->subbuf_order) 6261 goto out; 6262 6263 ret = -EAGAIN; 6264 6265 if (atomic_read(&buffer_a->record_disabled)) 6266 goto out; 6267 6268 if (atomic_read(&buffer_b->record_disabled)) 6269 goto out; 6270 6271 if (atomic_read(&cpu_buffer_a->record_disabled)) 6272 goto out; 6273 6274 if (atomic_read(&cpu_buffer_b->record_disabled)) 6275 goto out; 6276 6277 /* 6278 * We can't do a synchronize_rcu here because this 6279 * function can be called in atomic context. 6280 * Normally this will be called from the same CPU as cpu. 6281 * If not it's up to the caller to protect this. 6282 */ 6283 atomic_inc(&cpu_buffer_a->record_disabled); 6284 atomic_inc(&cpu_buffer_b->record_disabled); 6285 6286 ret = -EBUSY; 6287 if (local_read(&cpu_buffer_a->committing)) 6288 goto out_dec; 6289 if (local_read(&cpu_buffer_b->committing)) 6290 goto out_dec; 6291 6292 /* 6293 * When resize is in progress, we cannot swap it because 6294 * it will mess the state of the cpu buffer. 6295 */ 6296 if (atomic_read(&buffer_a->resizing)) 6297 goto out_dec; 6298 if (atomic_read(&buffer_b->resizing)) 6299 goto out_dec; 6300 6301 buffer_a->buffers[cpu] = cpu_buffer_b; 6302 buffer_b->buffers[cpu] = cpu_buffer_a; 6303 6304 cpu_buffer_b->buffer = buffer_a; 6305 cpu_buffer_a->buffer = buffer_b; 6306 6307 ret = 0; 6308 6309 out_dec: 6310 atomic_dec(&cpu_buffer_a->record_disabled); 6311 atomic_dec(&cpu_buffer_b->record_disabled); 6312 out: 6313 return ret; 6314 } 6315 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 6316 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 6317 6318 /** 6319 * ring_buffer_alloc_read_page - allocate a page to read from buffer 6320 * @buffer: the buffer to allocate for. 6321 * @cpu: the cpu buffer to allocate. 6322 * 6323 * This function is used in conjunction with ring_buffer_read_page. 6324 * When reading a full page from the ring buffer, these functions 6325 * can be used to speed up the process. The calling function should 6326 * allocate a few pages first with this function. Then when it 6327 * needs to get pages from the ring buffer, it passes the result 6328 * of this function into ring_buffer_read_page, which will swap 6329 * the page that was allocated, with the read page of the buffer. 
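 *
 * Pages obtained here must eventually be handed back with
 * ring_buffer_free_read_page(), which may park one page per CPU buffer
 * in cpu_buffer->free_page for reuse instead of freeing it outright.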
6330 * 6331 * Returns: 6332 * The page allocated, or ERR_PTR 6333 */ 6334 struct buffer_data_read_page * 6335 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 6336 { 6337 struct ring_buffer_per_cpu *cpu_buffer; 6338 struct buffer_data_read_page *bpage = NULL; 6339 unsigned long flags; 6340 struct page *page; 6341 6342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6343 return ERR_PTR(-ENODEV); 6344 6345 bpage = kzalloc(sizeof(*bpage), GFP_KERNEL); 6346 if (!bpage) 6347 return ERR_PTR(-ENOMEM); 6348 6349 bpage->order = buffer->subbuf_order; 6350 cpu_buffer = buffer->buffers[cpu]; 6351 local_irq_save(flags); 6352 arch_spin_lock(&cpu_buffer->lock); 6353 6354 if (cpu_buffer->free_page) { 6355 bpage->data = cpu_buffer->free_page; 6356 cpu_buffer->free_page = NULL; 6357 } 6358 6359 arch_spin_unlock(&cpu_buffer->lock); 6360 local_irq_restore(flags); 6361 6362 if (bpage->data) 6363 goto out; 6364 6365 page = alloc_pages_node(cpu_to_node(cpu), 6366 GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO, 6367 cpu_buffer->buffer->subbuf_order); 6368 if (!page) { 6369 kfree(bpage); 6370 return ERR_PTR(-ENOMEM); 6371 } 6372 6373 bpage->data = page_address(page); 6374 6375 out: 6376 rb_init_page(bpage->data); 6377 6378 return bpage; 6379 } 6380 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 6381 6382 /** 6383 * ring_buffer_free_read_page - free an allocated read page 6384 * @buffer: the buffer the page was allocate for 6385 * @cpu: the cpu buffer the page came from 6386 * @data_page: the page to free 6387 * 6388 * Free a page allocated from ring_buffer_alloc_read_page. 6389 */ 6390 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, 6391 struct buffer_data_read_page *data_page) 6392 { 6393 struct ring_buffer_per_cpu *cpu_buffer; 6394 struct buffer_data_page *bpage = data_page->data; 6395 struct page *page = virt_to_page(bpage); 6396 unsigned long flags; 6397 6398 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) 6399 return; 6400 6401 cpu_buffer = buffer->buffers[cpu]; 6402 6403 /* 6404 * If the page is still in use someplace else, or order of the page 6405 * is different from the subbuffer order of the buffer - 6406 * we can't reuse it 6407 */ 6408 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) 6409 goto out; 6410 6411 local_irq_save(flags); 6412 arch_spin_lock(&cpu_buffer->lock); 6413 6414 if (!cpu_buffer->free_page) { 6415 cpu_buffer->free_page = bpage; 6416 bpage = NULL; 6417 } 6418 6419 arch_spin_unlock(&cpu_buffer->lock); 6420 local_irq_restore(flags); 6421 6422 out: 6423 free_pages((unsigned long)bpage, data_page->order); 6424 kfree(data_page); 6425 } 6426 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 6427 6428 /** 6429 * ring_buffer_read_page - extract a page from the ring buffer 6430 * @buffer: buffer to extract from 6431 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 6432 * @len: amount to extract 6433 * @cpu: the cpu of the buffer to extract 6434 * @full: should the extraction only happen when the page is full. 6435 * 6436 * This function will pull out a page from the ring buffer and consume it. 6437 * @data_page must be the address of the variable that was returned 6438 * from ring_buffer_alloc_read_page. This is because the page might be used 6439 * to swap with a page in the ring buffer. 
6440 * 6441 * for example: 6442 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 6443 * if (IS_ERR(rpage)) 6444 * return PTR_ERR(rpage); 6445 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0); 6446 * if (ret >= 0) 6447 * process_page(ring_buffer_read_page_data(rpage), ret); 6448 * ring_buffer_free_read_page(buffer, cpu, rpage); 6449 * 6450 * When @full is set, the function will not return true unless 6451 * the writer is off the reader page. 6452 * 6453 * Note: it is up to the calling functions to handle sleeps and wakeups. 6454 * The ring buffer can be used anywhere in the kernel and can not 6455 * blindly call wake_up. The layer that uses the ring buffer must be 6456 * responsible for that. 6457 * 6458 * Returns: 6459 * >=0 if data has been transferred, returns the offset of consumed data. 6460 * <0 if no data has been transferred. 6461 */ 6462 int ring_buffer_read_page(struct trace_buffer *buffer, 6463 struct buffer_data_read_page *data_page, 6464 size_t len, int cpu, int full) 6465 { 6466 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 6467 struct ring_buffer_event *event; 6468 struct buffer_data_page *bpage; 6469 struct buffer_page *reader; 6470 unsigned long missed_events; 6471 unsigned long flags; 6472 unsigned int commit; 6473 unsigned int read; 6474 u64 save_timestamp; 6475 int ret = -1; 6476 6477 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6478 goto out; 6479 6480 /* 6481 * If len is not big enough to hold the page header, then 6482 * we can not copy anything. 6483 */ 6484 if (len <= BUF_PAGE_HDR_SIZE) 6485 goto out; 6486 6487 len -= BUF_PAGE_HDR_SIZE; 6488 6489 if (!data_page || !data_page->data) 6490 goto out; 6491 if (data_page->order != buffer->subbuf_order) 6492 goto out; 6493 6494 bpage = data_page->data; 6495 if (!bpage) 6496 goto out; 6497 6498 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 6499 6500 reader = rb_get_reader_page(cpu_buffer); 6501 if (!reader) 6502 goto out_unlock; 6503 6504 event = rb_reader_event(cpu_buffer); 6505 6506 read = reader->read; 6507 commit = rb_page_size(reader); 6508 6509 /* Check if any events were dropped */ 6510 missed_events = cpu_buffer->lost_events; 6511 6512 /* 6513 * If this page has been partially read or 6514 * if len is not big enough to read the rest of the page or 6515 * a writer is still on the page, then 6516 * we must copy the data from the page to the buffer. 6517 * Otherwise, we can simply swap the page with the one passed in. 6518 */ 6519 if (read || (len < (commit - read)) || 6520 cpu_buffer->reader_page == cpu_buffer->commit_page || 6521 cpu_buffer->mapped) { 6522 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 6523 unsigned int rpos = read; 6524 unsigned int pos = 0; 6525 unsigned int size; 6526 6527 /* 6528 * If a full page is expected, this can still be returned 6529 * if there's been a previous partial read and the 6530 * rest of the page can be read and the commit page is off 6531 * the reader page. 
6532 */ 6533 if (full && 6534 (!read || (len < (commit - read)) || 6535 cpu_buffer->reader_page == cpu_buffer->commit_page)) 6536 goto out_unlock; 6537 6538 if (len > (commit - read)) 6539 len = (commit - read); 6540 6541 /* Always keep the time extend and data together */ 6542 size = rb_event_ts_length(event); 6543 6544 if (len < size) 6545 goto out_unlock; 6546 6547 /* save the current timestamp, since the user will need it */ 6548 save_timestamp = cpu_buffer->read_stamp; 6549 6550 /* Need to copy one event at a time */ 6551 do { 6552 /* We need the size of one event, because 6553 * rb_advance_reader only advances by one event, 6554 * whereas rb_event_ts_length may include the size of 6555 * one or two events. 6556 * We have already ensured there's enough space if this 6557 * is a time extend. */ 6558 size = rb_event_length(event); 6559 memcpy(bpage->data + pos, rpage->data + rpos, size); 6560 6561 len -= size; 6562 6563 rb_advance_reader(cpu_buffer); 6564 rpos = reader->read; 6565 pos += size; 6566 6567 if (rpos >= commit) 6568 break; 6569 6570 event = rb_reader_event(cpu_buffer); 6571 /* Always keep the time extend and data together */ 6572 size = rb_event_ts_length(event); 6573 } while (len >= size); 6574 6575 /* update bpage */ 6576 local_set(&bpage->commit, pos); 6577 bpage->time_stamp = save_timestamp; 6578 6579 /* we copied everything to the beginning */ 6580 read = 0; 6581 } else { 6582 /* update the entry counter */ 6583 cpu_buffer->read += rb_page_entries(reader); 6584 cpu_buffer->read_bytes += rb_page_size(reader); 6585 6586 /* swap the pages */ 6587 rb_init_page(bpage); 6588 bpage = reader->page; 6589 reader->page = data_page->data; 6590 local_set(&reader->write, 0); 6591 local_set(&reader->entries, 0); 6592 reader->read = 0; 6593 data_page->data = bpage; 6594 6595 /* 6596 * Use the real_end for the data size, 6597 * This gives us a chance to store the lost events 6598 * on the page. 6599 */ 6600 if (reader->real_end) 6601 local_set(&bpage->commit, reader->real_end); 6602 } 6603 ret = read; 6604 6605 cpu_buffer->lost_events = 0; 6606 6607 commit = local_read(&bpage->commit); 6608 /* 6609 * Set a flag in the commit field if we lost events 6610 */ 6611 if (missed_events) { 6612 /* If there is room at the end of the page to save the 6613 * missed events, then record it there. 6614 */ 6615 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { 6616 memcpy(&bpage->data[commit], &missed_events, 6617 sizeof(missed_events)); 6618 local_add(RB_MISSED_STORED, &bpage->commit); 6619 commit += sizeof(missed_events); 6620 } 6621 local_add(RB_MISSED_EVENTS, &bpage->commit); 6622 } 6623 6624 /* 6625 * This page may be off to user land. Zero it out here. 6626 */ 6627 if (commit < buffer->subbuf_size) 6628 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); 6629 6630 out_unlock: 6631 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 6632 6633 out: 6634 return ret; 6635 } 6636 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 6637 6638 /** 6639 * ring_buffer_read_page_data - get pointer to the data in the page. 6640 * @page: the page to get the data from 6641 * 6642 * Returns pointer to the actual data in this page. 6643 */ 6644 void *ring_buffer_read_page_data(struct buffer_data_read_page *page) 6645 { 6646 return page->data; 6647 } 6648 EXPORT_SYMBOL_GPL(ring_buffer_read_page_data); 6649 6650 /** 6651 * ring_buffer_subbuf_size_get - get size of the sub buffer. 6652 * @buffer: the buffer to get the sub buffer size from 6653 * 6654 * Returns size of the sub buffer, in bytes. 
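 *
 * Note that this is the full sub-buffer size, i.e. the data area plus
 * the BUF_PAGE_HDR_SIZE header, which works out to
 * (1 << subbuf_order) * PAGE_SIZE.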
 */
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
{
	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
}
EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);

/**
 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
 * @buffer: The ring_buffer to get the system sub page order from
 *
 * By default, one ring buffer sub page equals one system page. This parameter
 * is configurable, per ring buffer. The size of the ring buffer sub page can be
 * extended, but must be a power-of-two number of system pages.
 *
 * Returns the order of the buffer sub page size, in system pages:
 * 0 means the sub buffer size is 1 system page and so forth.
 * In case of an error < 0 is returned.
 */
int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
{
	if (!buffer)
		return -EINVAL;

	return buffer->subbuf_order;
}
EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);

/**
 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
 * @buffer: The ring_buffer to set the new page size.
 * @order: Order of the system pages in one sub buffer page
 *
 * By default, one ring buffer page equals one system page. This API can be
 * used to set a new size of the ring buffer page. The size must be a
 * power-of-two multiple of the system page size, which is why the input
 * parameter @order is the order of the system pages that are allocated for
 * one ring buffer page:
 *  0 - 1 system page
 *  1 - 2 system pages
 *  2 - 4 system pages
 *  ...
 *
 * Returns 0 on success or < 0 in case of an error.
 */
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage, *tmp;
	int old_order, old_size;
	int nr_pages;
	int psize;
	int err;
	int cpu;

	if (!buffer || order < 0)
		return -EINVAL;

	if (buffer->subbuf_order == order)
		return 0;

	psize = (1 << order) * PAGE_SIZE;
	if (psize <= BUF_PAGE_HDR_SIZE)
		return -EINVAL;

	/* Size of a subbuf cannot be greater than the write counter */
	if (psize > RB_WRITE_MASK + 1)
		return -EINVAL;

	old_order = buffer->subbuf_order;
	old_size = buffer->subbuf_size;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);
	atomic_inc(&buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buffer->subbuf_order = order;
	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;

	/* Make sure all new buffers are allocated, before deleting the old ones */
	for_each_buffer_cpu(buffer, cpu) {

		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			continue;

		cpu_buffer = buffer->buffers[cpu];

		if (cpu_buffer->mapped) {
			err = -EBUSY;
			goto error;
		}

		/* Update the number of pages to match the new size */
		nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
		nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);

		/* we need a minimum of two pages */
		if (nr_pages < 2)
			nr_pages = 2;

		cpu_buffer->nr_pages_to_update = nr_pages;

		/* Include the reader page */
		nr_pages++;

		/* Allocate the new size buffer */
		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (__rb_allocate_pages(cpu_buffer, nr_pages,
&cpu_buffer->new_pages)) { 6766 /* not enough memory for new pages */ 6767 err = -ENOMEM; 6768 goto error; 6769 } 6770 } 6771 6772 for_each_buffer_cpu(buffer, cpu) { 6773 struct buffer_data_page *old_free_data_page; 6774 struct list_head old_pages; 6775 unsigned long flags; 6776 6777 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6778 continue; 6779 6780 cpu_buffer = buffer->buffers[cpu]; 6781 6782 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 6783 6784 /* Clear the head bit to make the link list normal to read */ 6785 rb_head_page_deactivate(cpu_buffer); 6786 6787 /* 6788 * Collect buffers from the cpu_buffer pages list and the 6789 * reader_page on old_pages, so they can be freed later when not 6790 * under a spinlock. The pages list is a linked list with no 6791 * head, adding old_pages turns it into a regular list with 6792 * old_pages being the head. 6793 */ 6794 list_add(&old_pages, cpu_buffer->pages); 6795 list_add(&cpu_buffer->reader_page->list, &old_pages); 6796 6797 /* One page was allocated for the reader page */ 6798 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, 6799 struct buffer_page, list); 6800 list_del_init(&cpu_buffer->reader_page->list); 6801 6802 /* Install the new pages, remove the head from the list */ 6803 cpu_buffer->pages = cpu_buffer->new_pages.next; 6804 list_del_init(&cpu_buffer->new_pages); 6805 cpu_buffer->cnt++; 6806 6807 cpu_buffer->head_page 6808 = list_entry(cpu_buffer->pages, struct buffer_page, list); 6809 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 6810 6811 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; 6812 cpu_buffer->nr_pages_to_update = 0; 6813 6814 old_free_data_page = cpu_buffer->free_page; 6815 cpu_buffer->free_page = NULL; 6816 6817 rb_head_page_activate(cpu_buffer); 6818 6819 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 6820 6821 /* Free old sub buffers */ 6822 list_for_each_entry_safe(bpage, tmp, &old_pages, list) { 6823 list_del_init(&bpage->list); 6824 free_buffer_page(bpage); 6825 } 6826 free_pages((unsigned long)old_free_data_page, old_order); 6827 6828 rb_check_pages(cpu_buffer); 6829 } 6830 6831 atomic_dec(&buffer->record_disabled); 6832 mutex_unlock(&buffer->mutex); 6833 6834 return 0; 6835 6836 error: 6837 buffer->subbuf_order = old_order; 6838 buffer->subbuf_size = old_size; 6839 6840 atomic_dec(&buffer->record_disabled); 6841 mutex_unlock(&buffer->mutex); 6842 6843 for_each_buffer_cpu(buffer, cpu) { 6844 cpu_buffer = buffer->buffers[cpu]; 6845 6846 if (!cpu_buffer->nr_pages_to_update) 6847 continue; 6848 6849 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { 6850 list_del_init(&bpage->list); 6851 free_buffer_page(bpage); 6852 } 6853 } 6854 6855 return err; 6856 } 6857 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set); 6858 6859 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) 6860 { 6861 struct page *page; 6862 6863 if (cpu_buffer->meta_page) 6864 return 0; 6865 6866 page = alloc_page(GFP_USER | __GFP_ZERO); 6867 if (!page) 6868 return -ENOMEM; 6869 6870 cpu_buffer->meta_page = page_to_virt(page); 6871 6872 return 0; 6873 } 6874 6875 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) 6876 { 6877 unsigned long addr = (unsigned long)cpu_buffer->meta_page; 6878 6879 free_page(addr); 6880 cpu_buffer->meta_page = NULL; 6881 } 6882 6883 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, 6884 unsigned long *subbuf_ids) 6885 { 6886 struct trace_buffer_meta *meta = 
cpu_buffer->meta_page; 6887 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; 6888 struct buffer_page *first_subbuf, *subbuf; 6889 int id = 0; 6890 6891 subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page; 6892 cpu_buffer->reader_page->id = id++; 6893 6894 first_subbuf = subbuf = rb_set_head_page(cpu_buffer); 6895 do { 6896 if (WARN_ON(id >= nr_subbufs)) 6897 break; 6898 6899 subbuf_ids[id] = (unsigned long)subbuf->page; 6900 subbuf->id = id; 6901 6902 rb_inc_page(&subbuf); 6903 id++; 6904 } while (subbuf != first_subbuf); 6905 6906 /* install subbuf ID to kern VA translation */ 6907 cpu_buffer->subbuf_ids = subbuf_ids; 6908 6909 meta->meta_struct_len = sizeof(*meta); 6910 meta->nr_subbufs = nr_subbufs; 6911 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; 6912 meta->meta_page_size = meta->subbuf_size; 6913 6914 rb_update_meta_page(cpu_buffer); 6915 } 6916 6917 static struct ring_buffer_per_cpu * 6918 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu) 6919 { 6920 struct ring_buffer_per_cpu *cpu_buffer; 6921 6922 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6923 return ERR_PTR(-EINVAL); 6924 6925 cpu_buffer = buffer->buffers[cpu]; 6926 6927 mutex_lock(&cpu_buffer->mapping_lock); 6928 6929 if (!cpu_buffer->user_mapped) { 6930 mutex_unlock(&cpu_buffer->mapping_lock); 6931 return ERR_PTR(-ENODEV); 6932 } 6933 6934 return cpu_buffer; 6935 } 6936 6937 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) 6938 { 6939 mutex_unlock(&cpu_buffer->mapping_lock); 6940 } 6941 6942 /* 6943 * Fast-path for rb_buffer_(un)map(). Called whenever the meta-page doesn't need 6944 * to be set-up or torn-down. 6945 */ 6946 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, 6947 bool inc) 6948 { 6949 unsigned long flags; 6950 6951 lockdep_assert_held(&cpu_buffer->mapping_lock); 6952 6953 /* mapped is always greater or equal to user_mapped */ 6954 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) 6955 return -EINVAL; 6956 6957 if (inc && cpu_buffer->mapped == UINT_MAX) 6958 return -EBUSY; 6959 6960 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) 6961 return -EINVAL; 6962 6963 mutex_lock(&cpu_buffer->buffer->mutex); 6964 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 6965 6966 if (inc) { 6967 cpu_buffer->user_mapped++; 6968 cpu_buffer->mapped++; 6969 } else { 6970 cpu_buffer->user_mapped--; 6971 cpu_buffer->mapped--; 6972 } 6973 6974 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 6975 mutex_unlock(&cpu_buffer->buffer->mutex); 6976 6977 return 0; 6978 } 6979 6980 /* 6981 * +--------------+ pgoff == 0 6982 * | meta page | 6983 * +--------------+ pgoff == 1 6984 * | subbuffer 0 | 6985 * | | 6986 * +--------------+ pgoff == (1 + (1 << subbuf_order)) 6987 * | subbuffer 1 | 6988 * | | 6989 * ... 
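 *
 * (The picture above is drawn for subbuf_order == 0. For larger orders
 * the meta-page is padded with zero-pages out to a full sub-buffer, so
 * sub-buffer N starts at pgoff == (N + 1) << subbuf_order.)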
6990 */ 6991 #ifdef CONFIG_MMU 6992 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, 6993 struct vm_area_struct *vma) 6994 { 6995 unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff; 6996 unsigned int subbuf_pages, subbuf_order; 6997 struct page **pages; 6998 int p = 0, s = 0; 6999 int err; 7000 7001 /* Refuse MP_PRIVATE or writable mappings */ 7002 if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC || 7003 !(vma->vm_flags & VM_MAYSHARE)) 7004 return -EPERM; 7005 7006 subbuf_order = cpu_buffer->buffer->subbuf_order; 7007 subbuf_pages = 1 << subbuf_order; 7008 7009 if (subbuf_order && pgoff % subbuf_pages) 7010 return -EINVAL; 7011 7012 /* 7013 * Make sure the mapping cannot become writable later. Also tell the VM 7014 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND). 7015 */ 7016 vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP, 7017 VM_MAYWRITE); 7018 7019 lockdep_assert_held(&cpu_buffer->mapping_lock); 7020 7021 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ 7022 nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */ 7023 7024 nr_vma_pages = vma_pages(vma); 7025 if (!nr_vma_pages || nr_vma_pages > nr_pages) 7026 return -EINVAL; 7027 7028 nr_pages = nr_vma_pages; 7029 7030 pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); 7031 if (!pages) 7032 return -ENOMEM; 7033 7034 if (!pgoff) { 7035 unsigned long meta_page_padding; 7036 7037 pages[p++] = virt_to_page(cpu_buffer->meta_page); 7038 7039 /* 7040 * Pad with the zero-page to align the meta-page with the 7041 * sub-buffers. 7042 */ 7043 meta_page_padding = subbuf_pages - 1; 7044 while (meta_page_padding-- && p < nr_pages) { 7045 unsigned long __maybe_unused zero_addr = 7046 vma->vm_start + (PAGE_SIZE * p); 7047 7048 pages[p++] = ZERO_PAGE(zero_addr); 7049 } 7050 } else { 7051 /* Skip the meta-page */ 7052 pgoff -= subbuf_pages; 7053 7054 s += pgoff / subbuf_pages; 7055 } 7056 7057 while (p < nr_pages) { 7058 struct page *page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); 7059 int off = 0; 7060 7061 if (WARN_ON_ONCE(s >= nr_subbufs)) { 7062 err = -EINVAL; 7063 goto out; 7064 } 7065 7066 for (; off < (1 << (subbuf_order)); off++, page++) { 7067 if (p >= nr_pages) 7068 break; 7069 7070 pages[p++] = page; 7071 } 7072 s++; 7073 } 7074 7075 err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages); 7076 7077 out: 7078 kfree(pages); 7079 7080 return err; 7081 } 7082 #else 7083 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, 7084 struct vm_area_struct *vma) 7085 { 7086 return -EOPNOTSUPP; 7087 } 7088 #endif 7089 7090 int ring_buffer_map(struct trace_buffer *buffer, int cpu, 7091 struct vm_area_struct *vma) 7092 { 7093 struct ring_buffer_per_cpu *cpu_buffer; 7094 unsigned long flags, *subbuf_ids; 7095 int err = 0; 7096 7097 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 7098 return -EINVAL; 7099 7100 cpu_buffer = buffer->buffers[cpu]; 7101 7102 mutex_lock(&cpu_buffer->mapping_lock); 7103 7104 if (cpu_buffer->user_mapped) { 7105 err = __rb_map_vma(cpu_buffer, vma); 7106 if (!err) 7107 err = __rb_inc_dec_mapped(cpu_buffer, true); 7108 mutex_unlock(&cpu_buffer->mapping_lock); 7109 return err; 7110 } 7111 7112 /* prevent another thread from changing buffer/sub-buffer sizes */ 7113 mutex_lock(&buffer->mutex); 7114 7115 err = rb_alloc_meta_page(cpu_buffer); 7116 if (err) 7117 goto unlock; 7118 7119 /* subbuf_ids include the reader while nr_pages does not */ 7120 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), 
GFP_KERNEL); 7121 if (!subbuf_ids) { 7122 rb_free_meta_page(cpu_buffer); 7123 err = -ENOMEM; 7124 goto unlock; 7125 } 7126 7127 atomic_inc(&cpu_buffer->resize_disabled); 7128 7129 /* 7130 * Lock all readers to block any subbuf swap until the subbuf IDs are 7131 * assigned. 7132 */ 7133 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 7134 rb_setup_ids_meta_page(cpu_buffer, subbuf_ids); 7135 7136 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 7137 7138 err = __rb_map_vma(cpu_buffer, vma); 7139 if (!err) { 7140 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 7141 /* This is the first time it is mapped by user */ 7142 cpu_buffer->mapped++; 7143 cpu_buffer->user_mapped = 1; 7144 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 7145 } else { 7146 kfree(cpu_buffer->subbuf_ids); 7147 cpu_buffer->subbuf_ids = NULL; 7148 rb_free_meta_page(cpu_buffer); 7149 } 7150 7151 unlock: 7152 mutex_unlock(&buffer->mutex); 7153 mutex_unlock(&cpu_buffer->mapping_lock); 7154 7155 return err; 7156 } 7157 7158 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu) 7159 { 7160 struct ring_buffer_per_cpu *cpu_buffer; 7161 unsigned long flags; 7162 int err = 0; 7163 7164 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 7165 return -EINVAL; 7166 7167 cpu_buffer = buffer->buffers[cpu]; 7168 7169 mutex_lock(&cpu_buffer->mapping_lock); 7170 7171 if (!cpu_buffer->user_mapped) { 7172 err = -ENODEV; 7173 goto out; 7174 } else if (cpu_buffer->user_mapped > 1) { 7175 __rb_inc_dec_mapped(cpu_buffer, false); 7176 goto out; 7177 } 7178 7179 mutex_lock(&buffer->mutex); 7180 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 7181 7182 /* This is the last user space mapping */ 7183 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) 7184 cpu_buffer->mapped--; 7185 cpu_buffer->user_mapped = 0; 7186 7187 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 7188 7189 kfree(cpu_buffer->subbuf_ids); 7190 cpu_buffer->subbuf_ids = NULL; 7191 rb_free_meta_page(cpu_buffer); 7192 atomic_dec(&cpu_buffer->resize_disabled); 7193 7194 mutex_unlock(&buffer->mutex); 7195 7196 out: 7197 mutex_unlock(&cpu_buffer->mapping_lock); 7198 7199 return err; 7200 } 7201 7202 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) 7203 { 7204 struct ring_buffer_per_cpu *cpu_buffer; 7205 struct buffer_page *reader; 7206 unsigned long missed_events; 7207 unsigned long reader_size; 7208 unsigned long flags; 7209 7210 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); 7211 if (IS_ERR(cpu_buffer)) 7212 return (int)PTR_ERR(cpu_buffer); 7213 7214 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 7215 7216 consume: 7217 if (rb_per_cpu_empty(cpu_buffer)) 7218 goto out; 7219 7220 reader_size = rb_page_size(cpu_buffer->reader_page); 7221 7222 /* 7223 * There are data to be read on the current reader page, we can 7224 * return to the caller. But before that, we assume the latter will read 7225 * everything. Let's update the kernel reader accordingly. 
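	 * That is, advance the kernel's reader position to the end of the
	 * current reader page, since user-space is assumed to consume it
	 * entirely.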
7226 */ 7227 if (cpu_buffer->reader_page->read < reader_size) { 7228 while (cpu_buffer->reader_page->read < reader_size) 7229 rb_advance_reader(cpu_buffer); 7230 goto out; 7231 } 7232 7233 reader = rb_get_reader_page(cpu_buffer); 7234 if (WARN_ON(!reader)) 7235 goto out; 7236 7237 /* Check if any events were dropped */ 7238 missed_events = cpu_buffer->lost_events; 7239 7240 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { 7241 if (missed_events) { 7242 struct buffer_data_page *bpage = reader->page; 7243 unsigned int commit; 7244 /* 7245 * Use the real_end for the data size, 7246 * This gives us a chance to store the lost events 7247 * on the page. 7248 */ 7249 if (reader->real_end) 7250 local_set(&bpage->commit, reader->real_end); 7251 /* 7252 * If there is room at the end of the page to save the 7253 * missed events, then record it there. 7254 */ 7255 commit = rb_page_size(reader); 7256 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { 7257 memcpy(&bpage->data[commit], &missed_events, 7258 sizeof(missed_events)); 7259 local_add(RB_MISSED_STORED, &bpage->commit); 7260 } 7261 local_add(RB_MISSED_EVENTS, &bpage->commit); 7262 } 7263 } else { 7264 /* 7265 * There really shouldn't be any missed events if the commit 7266 * is on the reader page. 7267 */ 7268 WARN_ON_ONCE(missed_events); 7269 } 7270 7271 cpu_buffer->lost_events = 0; 7272 7273 goto consume; 7274 7275 out: 7276 /* Some archs do not have data cache coherency between kernel and user-space */ 7277 flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page)); 7278 7279 rb_update_meta_page(cpu_buffer); 7280 7281 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 7282 rb_put_mapped_buffer(cpu_buffer); 7283 7284 return 0; 7285 } 7286 7287 /* 7288 * We only allocate new buffers, never free them if the CPU goes down. 7289 * If we were to free the buffer, then the user would lose any trace that was in 7290 * the buffer. 7291 */ 7292 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) 7293 { 7294 struct trace_buffer *buffer; 7295 long nr_pages_same; 7296 int cpu_i; 7297 unsigned long nr_pages; 7298 7299 buffer = container_of(node, struct trace_buffer, node); 7300 if (cpumask_test_cpu(cpu, buffer->cpumask)) 7301 return 0; 7302 7303 nr_pages = 0; 7304 nr_pages_same = 1; 7305 /* check if all cpu sizes are same */ 7306 for_each_buffer_cpu(buffer, cpu_i) { 7307 /* fill in the size from first enabled cpu */ 7308 if (nr_pages == 0) 7309 nr_pages = buffer->buffers[cpu_i]->nr_pages; 7310 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 7311 nr_pages_same = 0; 7312 break; 7313 } 7314 } 7315 /* allocate minimum pages, user can later expand it */ 7316 if (!nr_pages_same) 7317 nr_pages = 2; 7318 buffer->buffers[cpu] = 7319 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 7320 if (!buffer->buffers[cpu]) { 7321 WARN(1, "failed to allocate ring buffer on CPU %u\n", 7322 cpu); 7323 return -ENOMEM; 7324 } 7325 smp_wmb(); 7326 cpumask_set_cpu(cpu, buffer->cpumask); 7327 return 0; 7328 } 7329 7330 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST 7331 /* 7332 * This is a basic integrity check of the ring buffer. 7333 * Late in the boot cycle this test will run when configured in. 7334 * It will kick off a thread per CPU that will go into a loop 7335 * writing to the per cpu ring buffer various sizes of data. 7336 * Some of the data will be large items, some small. 7337 * 7338 * Another thread is created that goes into a spin, sending out 7339 * IPIs to the other CPUs to also write into the ring buffer. 
7340 * this is to test the nesting ability of the buffer. 7341 * 7342 * Basic stats are recorded and reported. If something in the 7343 * ring buffer should happen that's not expected, a big warning 7344 * is displayed and all ring buffers are disabled. 7345 */ 7346 static struct task_struct *rb_threads[NR_CPUS] __initdata; 7347 7348 struct rb_test_data { 7349 struct trace_buffer *buffer; 7350 unsigned long events; 7351 unsigned long bytes_written; 7352 unsigned long bytes_alloc; 7353 unsigned long bytes_dropped; 7354 unsigned long events_nested; 7355 unsigned long bytes_written_nested; 7356 unsigned long bytes_alloc_nested; 7357 unsigned long bytes_dropped_nested; 7358 int min_size_nested; 7359 int max_size_nested; 7360 int max_size; 7361 int min_size; 7362 int cpu; 7363 int cnt; 7364 }; 7365 7366 static struct rb_test_data rb_data[NR_CPUS] __initdata; 7367 7368 /* 1 meg per cpu */ 7369 #define RB_TEST_BUFFER_SIZE 1048576 7370 7371 static char rb_string[] __initdata = 7372 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 7373 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 7374 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 7375 7376 static bool rb_test_started __initdata; 7377 7378 struct rb_item { 7379 int size; 7380 char str[]; 7381 }; 7382 7383 static __init int rb_write_something(struct rb_test_data *data, bool nested) 7384 { 7385 struct ring_buffer_event *event; 7386 struct rb_item *item; 7387 bool started; 7388 int event_len; 7389 int size; 7390 int len; 7391 int cnt; 7392 7393 /* Have nested writes different that what is written */ 7394 cnt = data->cnt + (nested ? 27 : 0); 7395 7396 /* Multiply cnt by ~e, to make some unique increment */ 7397 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); 7398 7399 len = size + sizeof(struct rb_item); 7400 7401 started = rb_test_started; 7402 /* read rb_test_started before checking buffer enabled */ 7403 smp_rmb(); 7404 7405 event = ring_buffer_lock_reserve(data->buffer, len); 7406 if (!event) { 7407 /* Ignore dropped events before test starts. 
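		 * (that is, while the rb_test_started value sampled above
		 * was still false)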
		 */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer);

	return 0;
}

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
						     cpu, "rbtester/%u");
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the threads won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events.
We care about events dropped after 7538 * the threads see that the buffer is active. 7539 */ 7540 smp_wmb(); 7541 rb_test_started = true; 7542 7543 set_current_state(TASK_INTERRUPTIBLE); 7544 /* Just run for 10 seconds */; 7545 schedule_timeout(10 * HZ); 7546 7547 kthread_stop(rb_hammer); 7548 7549 out_free: 7550 for_each_online_cpu(cpu) { 7551 if (!rb_threads[cpu]) 7552 break; 7553 kthread_stop(rb_threads[cpu]); 7554 } 7555 if (ret) { 7556 ring_buffer_free(buffer); 7557 return ret; 7558 } 7559 7560 /* Report! */ 7561 pr_info("finished\n"); 7562 for_each_online_cpu(cpu) { 7563 struct ring_buffer_event *event; 7564 struct rb_test_data *data = &rb_data[cpu]; 7565 struct rb_item *item; 7566 unsigned long total_events; 7567 unsigned long total_dropped; 7568 unsigned long total_written; 7569 unsigned long total_alloc; 7570 unsigned long total_read = 0; 7571 unsigned long total_size = 0; 7572 unsigned long total_len = 0; 7573 unsigned long total_lost = 0; 7574 unsigned long lost; 7575 int big_event_size; 7576 int small_event_size; 7577 7578 ret = -1; 7579 7580 total_events = data->events + data->events_nested; 7581 total_written = data->bytes_written + data->bytes_written_nested; 7582 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 7583 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 7584 7585 big_event_size = data->max_size + data->max_size_nested; 7586 small_event_size = data->min_size + data->min_size_nested; 7587 7588 pr_info("CPU %d:\n", cpu); 7589 pr_info(" events: %ld\n", total_events); 7590 pr_info(" dropped bytes: %ld\n", total_dropped); 7591 pr_info(" alloced bytes: %ld\n", total_alloc); 7592 pr_info(" written bytes: %ld\n", total_written); 7593 pr_info(" biggest event: %d\n", big_event_size); 7594 pr_info(" smallest event: %d\n", small_event_size); 7595 7596 if (RB_WARN_ON(buffer, total_dropped)) 7597 break; 7598 7599 ret = 0; 7600 7601 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 7602 total_lost += lost; 7603 item = ring_buffer_event_data(event); 7604 total_len += ring_buffer_event_length(event); 7605 total_size += item->size + sizeof(struct rb_item); 7606 if (memcmp(&item->str[0], rb_string, item->size) != 0) { 7607 pr_info("FAILED!\n"); 7608 pr_info("buffer had: %.*s\n", item->size, item->str); 7609 pr_info("expected: %.*s\n", item->size, rb_string); 7610 RB_WARN_ON(buffer, 1); 7611 ret = -1; 7612 break; 7613 } 7614 total_read++; 7615 } 7616 if (ret) 7617 break; 7618 7619 ret = -1; 7620 7621 pr_info(" read events: %ld\n", total_read); 7622 pr_info(" lost events: %ld\n", total_lost); 7623 pr_info(" total events: %ld\n", total_lost + total_read); 7624 pr_info(" recorded len bytes: %ld\n", total_len); 7625 pr_info(" recorded size bytes: %ld\n", total_size); 7626 if (total_lost) { 7627 pr_info(" With dropped events, record len and size may not match\n" 7628 " alloced and written from above\n"); 7629 } else { 7630 if (RB_WARN_ON(buffer, total_len != total_alloc || 7631 total_size != total_written)) 7632 break; 7633 } 7634 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 7635 break; 7636 7637 ret = 0; 7638 } 7639 if (!ret) 7640 pr_info("Ring buffer PASSED!\n"); 7641 7642 ring_buffer_free(buffer); 7643 return 0; 7644 } 7645 7646 late_initcall(test_ringbuffer); 7647 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 7648