/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. If ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it
	 * at all.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
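/*
 * For reference, the matching userspace reader side of the protocol above
 * looks roughly like this (illustrative sketch only, not the authoritative
 * ABI documentation; see the comment above struct perf_event_mmap_page in
 * include/uapi/linux/perf_event.h):
 *
 *	u64 head = up->data_head;
 *	rmb();					// C, matches B
 *	... copy records in [data_tail, head) out of the data area ...
 *	mb();					// D, matches A
 *	up->data_tail = head;
 *
 * where "up" is the mmap()ed struct perf_event_mmap_page and rmb()/mb()
 * are the userspace equivalents of smp_rmb()/smp_mb().
 */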
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}

static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the
		 * @tail load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
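/*
 * Typical in-kernel producer sequence around the wrappers below
 * (illustrative sketch only; error handling and the record layout, which
 * depends on the event's sample_type, are elided):
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = { ... };	// type/size of the record
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;				// no buffer or no space; bail
 *	perf_output_put(&handle, header);
 *	... perf_output_put()/perf_output_copy() the record payload ...
 *	perf_output_end(&handle);		// publish data_head, maybe wake up
 *
 * perf_output_begin() takes the RCU read lock and a nesting reference on
 * the buffer (so it works from NMI context); perf_output_end() drops both,
 * therefore begin/end must always be paired.
 */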
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
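/*
 * Rough shape of how a PMU driver is expected to pair the call above with
 * perf_aux_output_end() below (illustrative sketch only; "mypmu" and its
 * callbacks are made up, and real drivers also keep a per-cpu handle,
 * program the hardware and deal with snapshot/overwrite mode):
 *
 *	static void mypmu_start(struct perf_event *event, int flags)
 *	{
 *		void *buf = perf_aux_output_begin(&handle, event);
 *		if (!buf)
 *			return;		// no AUX buffer or no space
 *		// point the hardware at the AUX area, limited to
 *		// handle.size bytes
 *	}
 *
 *	static void mypmu_stop(struct perf_event *event, int flags)
 *	{
 *		// stop the hardware and work out how much it wrote
 *		perf_aux_output_end(&handle, bytes_written, truncated);
 *	}
 *
 * perf_aux_output_begin() takes an AUX reference and pins the ring buffer;
 * perf_aux_output_end() drops both, so the two must always be paired.
 */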
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	bool wakeup = truncated;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		wakeup = true;
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}

	if (wakeup) {
		if (truncated)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
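/*
 * The pages allocated above are handed to the driver's setup_aux() callback
 * via rb_alloc_aux() below. A driver that wants to know the physically
 * contiguous chunk sizes can recover them from the first page of each chunk
 * (illustrative sketch; the variable names are made up):
 *
 *	for (i = 0; i < nr_pages; i += 1 << order) {
 *		struct page *page = virt_to_page(pages[i]);
 *
 *		order = PagePrivate(page) ? page_private(page) : 0;
 *		// pages[i] starts a contiguous chunk of (1 << order) pages
 *	}
 */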
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunk of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
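/*
 * Summary of the mmap() page layout served by perf_mmap_to_page() below,
 * which the perf_mmap() fault handling in kernel/events/core.c uses to
 * translate a file page offset into the backing page (derived from
 * rb_alloc()/rb_alloc_aux() above):
 *
 *	pgoff 0			the struct perf_event_mmap_page user page
 *	pgoff 1 ..		data area pages
 *	pgoff aux_pgoff ..	AUX area pages, when rb_alloc_aux() succeeded
 */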
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff >= rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}