/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data: if ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
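/*
 * Illustrative sketch, not part of this file: one way the mmap()
 * consumer could honour its half of the protocol above. Everything
 * except data_head/data_tail and the C/D barriers is an assumption
 * made up for the example (base, page_size, mask, process_record).
 *
 *	struct perf_event_mmap_page *up = base;	// the mmap()ed user page
 *	unsigned char *data = base + page_size;	// start of the data area
 *	u64 tail = up->data_tail;
 *	u64 head = ACCESS_ONCE(up->data_head);	// LOAD ->data_head
 *	smp_rmb();				// C, matches B
 *	while (tail != head)			// LOAD $data
 *		tail += process_record(data + (tail & mask));
 *	smp_mb();				// D, matches A
 *	up->data_tail = tail;			// STORE ->data_tail
 */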
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the
		 * @tail load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	/*
	 * Compute the page and in-page offset where this record starts;
	 * nr_pages is a power of two, so the mask wraps around the buffer.
	 */
	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
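/*
 * Worked example with assumed numbers (PAGE_SIZE = 4096, 8 data
 * pages): perf_data_size() is 32768 bytes, so an unspecified
 * watermark defaults to 16384. perf_output_begin() then advances
 * rb->wakeup in 16384-byte steps as rb->head passes it, and
 * perf_output_put_handle() wakes the consumer whenever it observes
 * that rb->wakeup has moved -- i.e. roughly twice per trip around
 * the buffer.
 */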
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' rather than '>=' accounts for the user page at pgoff 0. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
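/*
 * Illustrative layout sketch with an assumed nr_pages = 4: the single
 * vmalloc_user() area allocated by rb_alloc() below looks like
 *
 *	pgoff 0:	user_page (control page: data_head, data_tail, ...)
 *	pgoff 1..4:	data, one virtually contiguous region
 *
 * which is why perf_mmap_to_page() above can turn any pgoff into an
 * address with plain pointer arithmetic before handing it to
 * vmalloc_to_page().
 */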
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' rather than '<' accounts for the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	/* ilog2(0) is undefined; only set the order for a non-zero nr_pages. */
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
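/*
 * Worked example of the vmalloc bookkeeping above, with assumed
 * numbers (PAGE_SHIFT = 12, nr_pages = 8): rb->nr_pages becomes 1 and
 * rb->page_order becomes 3, so the data area is treated as a single
 * "page" of 1 << (12 + 3) = 32768 bytes. perf_output_begin() then
 * gets page_shift = 15, always selects data_pages[0] and masks
 * offsets with 32768 - 1, while data_page_nr() still reports the 8
 * backing pages that rb_free_work() must unmark before vfree().
 */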