Lines matching "per-stream"
1 // SPDX-License-Identifier: GPL-2.0-only
15 * Simple per-CPU NMI-safe bump allocation mechanism, backed by the NMI-safe
17 * stash it in a local per-CPU variable, and bump allocate from the page
18 * whenever items need to be printed to a stream. Each page holds a global
26 * maintaining a reference count per-page is critical for correct lifetime
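
To make the comment above concrete, here is a minimal user-space sketch of the same idea, assuming a page whose header holds a refcount and a bump offset, and where every carved-out record pins the page until it is consumed. The names below (page_hdr, page_new(), page_get(), page_put(), page_alloc()) are illustrative stand-ins, and C11 atomics plus malloc() stand in for the kernel's refcount_t, try_alloc_pages() and free_pages_nolock().

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* Illustrative stand-in for the kernel's stream page: a refcount and a bump
 * offset at the head of the page, records bump-allocated from buf[]. */
struct page_hdr {
	atomic_int ref;		/* one ref for the owning CPU slot + one per live record */
	int consumed;		/* bump offset into buf[] */
	char buf[];
};

static struct page_hdr *page_new(void)
{
	struct page_hdr *p = malloc(PAGE_SZ);

	if (!p)
		return NULL;
	atomic_init(&p->ref, 1);	/* reference held by the allocating CPU slot */
	p->consumed = 0;
	return p;
}

static void page_get(struct page_hdr *p)
{
	atomic_fetch_add(&p->ref, 1);
}

static void page_put(struct page_hdr *p)
{
	/* Dropping the last reference frees the page. */
	if (atomic_fetch_sub(&p->ref, 1) == 1)
		free(p);
}

/* Carve len bytes out of the page, or return NULL when it is exhausted.
 * Each successful carve takes an extra page reference, dropped via
 * page_put() once the record has been consumed. */
static void *page_alloc(struct page_hdr *p, int len)
{
	int room = PAGE_SZ - offsetof(struct page_hdr, buf);

	if (p->consumed + len > room)
		return NULL;
	page_get(p);
	p->consumed += len;
	return &p->buf[p->consumed - len];
}

The per-record reference is the point the comment stresses: records handed out from one page may be consumed at different times by different programs, so the page can only go back to the allocator once the last record drops its pin.
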
39 #define BPF_STREAM_PAGE_SZ (PAGE_SIZE - offsetofend(struct bpf_stream_page, consumed))
66 refcount_inc(&stream_page->ref);
71 if (refcount_dec_and_test(&stream_page->ref))
77 refcount_set(&stream_page->ref, 1);
78 stream_page->consumed = 0;
102 int consumed = stream_page->consumed;
104 int rem = max(0, total - consumed - min);
114 init_llist_node(&elem->node);
115 elem->total_len = len;
116 elem->consumed_len = 0;
128 u32 consumed = stream_page->consumed;
130 stream_page->consumed += round_up(offsetof(struct bpf_stream_elem, str[len]), 8);
131 return (struct bpf_stream_elem *)&stream_page->buf[consumed];
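
The size charged to the page on the line above is the element header plus len bytes of string, rounded up to 8 so the next record's header stays naturally aligned. Below is a tiny standalone illustration of that arithmetic; the struct, the ROUND_UP macro and the offsetof(elem, str) + len form are stand-ins for the kernel's offsetof(struct bpf_stream_elem, str[len]) expression.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the element header followed by its inline string. */
struct elem {
	void *node;
	int total_len;
	int consumed_len;
	char str[];
};

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int len = 13;	/* e.g. a 13-byte message */
	size_t need = ROUND_UP(offsetof(struct elem, str) + len, 8);

	/* On LP64: 16-byte header + 13 bytes of string = 29, rounded up to 32. */
	printf("a %d-byte message consumes %zu bytes of the page\n", len, need);
	return 0;
}
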
168 * Length denotes the amount of data to be written as part of stream element,
192 return -ENOMEM;
194 memcpy(elem->str, str, len);
195 llist_add(&elem->node, log);
200 static int bpf_stream_consume_capacity(struct bpf_stream *stream, int len)
202 if (atomic_read(&stream->capacity) >= BPF_STREAM_MAX_CAPACITY)
203 return -ENOSPC;
204 if (atomic_add_return(len, &stream->capacity) >= BPF_STREAM_MAX_CAPACITY) {
205 atomic_sub(len, &stream->capacity);
206 return -ENOSPC;
211 static void bpf_stream_release_capacity(struct bpf_stream *stream, struct bpf_stream_elem *elem)
213 int len = elem->total_len;
215 atomic_sub(len, &stream->capacity);
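
The capacity accounting in bpf_stream_consume_capacity() is optimistic: a plain read rejects a stream that is already full, the length is then added unconditionally, and the addition is rolled back if the new total overshoots BPF_STREAM_MAX_CAPACITY. Here is a user-space rendering of the same pattern with C11 atomics; the function names and the limit value are placeholders, not the kernel's.

#include <errno.h>
#include <stdatomic.h>

#define MAX_CAPACITY	(100 * 1024)	/* placeholder limit, not the kernel's value */

static atomic_int capacity;

/* Reserve len bytes of stream capacity, or fail with -ENOSPC. */
static int consume_capacity(int len)
{
	/* Fast path: the stream is already known to be full. */
	if (atomic_load(&capacity) >= MAX_CAPACITY)
		return -ENOSPC;
	/* Optimistically charge the bytes, then roll back if we raced past
	 * the limit. atomic_fetch_add() returns the old value, so old + len
	 * is the new total, matching atomic_add_return() in the excerpt. */
	if (atomic_fetch_add(&capacity, len) + len >= MAX_CAPACITY) {
		atomic_fetch_sub(&capacity, len);
		return -ENOSPC;
	}
	return 0;
}

/* Undo the reservation once the element has been read and freed. */
static void release_capacity(int len)
{
	atomic_fetch_sub(&capacity, len);
}

The transient overshoot between the add and the rollback is bounded by one in-flight length per writer, which is why the cheap early check does not need to be exact.
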
218 static int bpf_stream_push_str(struct bpf_stream *stream, const char *str, int len)
220 int ret = bpf_stream_consume_capacity(stream, len);
222 return ret ?: __bpf_stream_push_str(&stream->log, str, len);
229 return &aux->stream[stream_id - 1];
248 static struct llist_node *bpf_stream_backlog_peek(struct bpf_stream *stream)
250 return stream->backlog_head;
253 static struct llist_node *bpf_stream_backlog_pop(struct bpf_stream *stream)
257 node = stream->backlog_head;
258 if (stream->backlog_head == stream->backlog_tail)
259 stream->backlog_head = stream->backlog_tail = NULL;
261 stream->backlog_head = node->next;
265 static void bpf_stream_backlog_fill(struct bpf_stream *stream)
269 if (llist_empty(&stream->log))
271 tail = llist_del_all(&stream->log);
276 if (!stream->backlog_head) {
277 stream->backlog_head = head;
278 stream->backlog_tail = tail;
280 stream->backlog_tail->next = head;
281 stream->backlog_tail = tail;
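
The backlog filled above is an ordinary head/tail singly linked list private to the reader (it is only manipulated under stream->lock), topped up from the lock-free log whenever it runs dry. llist_del_all() hands entries back newest-first, so the lines elided between the del_all and the splice presumably reverse the chain before appending it; only the splice itself appears in this excerpt. A self-contained sketch of the peek/pop/splice bookkeeping, with a toy node type in place of llist_node:

#include <stddef.h>

struct node {
	struct node *next;
};

struct backlog {
	struct node *head, *tail;
};

static struct node *backlog_peek(struct backlog *b)
{
	return b->head;
}

static struct node *backlog_pop(struct backlog *b)
{
	struct node *n = b->head;

	if (!n)
		return NULL;
	if (b->head == b->tail)
		b->head = b->tail = NULL;
	else
		b->head = n->next;
	return n;
}

/* Splice a ready-made chain [head, tail] onto the end of the backlog,
 * mirroring the tail end of bpf_stream_backlog_fill(). */
static void backlog_splice(struct backlog *b, struct node *head, struct node *tail)
{
	if (!b->head) {
		b->head = head;
		b->tail = tail;
	} else {
		b->tail->next = head;
		b->tail = tail;
	}
}

Keeping both head and tail lets the refill append a whole chain in O(1) instead of walking the existing backlog.
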
289 int rem = elem->total_len - elem->consumed_len;
292 elem->consumed_len += used;
293 *len -= used;
295 return elem->consumed_len == elem->total_len;
298 static int bpf_stream_read(struct bpf_stream *stream, void __user *buf, int len)
304 mutex_lock(&stream->lock);
307 int pos = len - rem_len;
310 node = bpf_stream_backlog_peek(stream);
312 bpf_stream_backlog_fill(stream);
313 node = bpf_stream_backlog_peek(stream);
319 cons_len = elem->consumed_len;
322 ret = copy_to_user(buf + pos, elem->str + cons_len,
323 elem->consumed_len - cons_len);
326 ret = -EFAULT;
327 elem->consumed_len = cons_len;
333 bpf_stream_backlog_pop(stream);
334 bpf_stream_release_capacity(stream, elem);
338 mutex_unlock(&stream->lock);
339 return ret ? ret : len - rem_len;
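
bpf_stream_read() drains the backlog under stream->lock, copying as much of each element as still fits in the caller's buffer. On a failed copy_to_user() the saved consumed_len is restored, so a partially copied element is presented again unchanged on the next read, and only fully drained elements are popped and have their capacity released. Below is a stripped-down user-space model of that loop, with memcpy() standing in for copy_to_user() and toy types in place of the kernel's; the -EFAULT rollback is elided since memcpy() cannot fail.

#include <string.h>

/* Toy element: chain pointer, lengths, and a pointer to its payload. */
struct relem {
	struct relem *next;
	int total_len;
	int consumed_len;
	const char *str;
};

/* Copy up to *len bytes out of elem; nonzero return means it is drained. */
static int elem_read(struct relem *elem, char *dst, int *len)
{
	int rem = elem->total_len - elem->consumed_len;
	int used = *len < rem ? *len : rem;

	memcpy(dst, elem->str + elem->consumed_len, used);
	elem->consumed_len += used;
	*len -= used;
	return elem->consumed_len == elem->total_len;
}

/* Drain elements into buf until it is full or the chain is exhausted;
 * returns the number of bytes produced. */
static int stream_read(struct relem **head, char *buf, int len)
{
	int rem_len = len;

	while (rem_len && *head) {
		struct relem *elem = *head;
		int pos = len - rem_len;

		if (elem_read(elem, buf + pos, &rem_len))
			*head = elem->next;	/* fully consumed: pop it */
	}
	return len - rem_len;
}
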
344 struct bpf_stream *stream;
346 stream = bpf_stream_get(stream_id, prog->aux);
347 if (!stream)
348 return -ENOENT;
349 return bpf_stream_read(stream, buf, len);
366 struct bpf_stream *stream;
370 stream = bpf_stream_get(stream_id, aux);
371 if (!stream)
372 return -ENOENT;
376 return -EINVAL;
385 ret = bpf_stream_push_str(stream, data.buf, ret);
399 for (i = 0; i < ARRAY_SIZE(prog->aux->stream); i++) {
400 atomic_set(&prog->aux->stream[i].capacity, 0);
401 init_llist_head(&prog->aux->stream[i].log);
402 mutex_init(&prog->aux->stream[i].lock);
403 prog->aux->stream[i].backlog_head = NULL;
404 prog->aux->stream[i].backlog_tail = NULL;
413 for (i = 0; i < ARRAY_SIZE(prog->aux->stream); i++) {
414 list = llist_del_all(&prog->aux->stream[i].log);
416 bpf_stream_free_list(prog->aux->stream[i].backlog_head);
422 init_llist_head(&ss->log);
423 ss->len = 0;
430 node = llist_del_all(&ss->log);
441 return -EBUSY;
444 ret = vsnprintf(buf->buf, ARRAY_SIZE(buf->buf), fmt, args);
446 ss->len += ret;
448 ret = __bpf_stream_push_str(&ss->log, buf->buf, ret);
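
Staged prints format into a borrowed scratch buffer first (the -EBUSY above is the case where no scratch buffer could be taken), record the formatted length in ss->len so capacity can be reserved in one go at commit time, and then push the bytes, excluding the terminating NUL, onto the stage's private list. A plain C sketch of that format-then-push shape follows; the buffer size, the staged_len counter and the stage_push() sink are placeholders of my own, not kernel helpers.

#include <stdarg.h>
#include <stdio.h>

#define SCRATCH_SZ 512	/* placeholder scratch buffer size */

static int staged_len;

/* Hypothetical sink standing in for pushing onto the stage's list. */
static int stage_push(const char *str, int len)
{
	return (int)fwrite(str, 1, len, stdout) == len ? 0 : -1;
}

static int stage_printk(const char *fmt, ...)
{
	char scratch[SCRATCH_SZ];
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(scratch, sizeof(scratch), fmt, args);
	va_end(args);
	if (ret < 0)
		return ret;
	/* vsnprintf() reports the would-be length on truncation; clamp it so
	 * the capacity accounted later matches the bytes actually pushed. */
	if (ret >= (int)sizeof(scratch))
		ret = sizeof(scratch) - 1;
	staged_len += ret;		/* reserved in one go at commit time */
	return stage_push(scratch, ret);	/* NUL byte excluded */
}
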
457 struct bpf_stream *stream;
460 stream = bpf_stream_get(stream_id, prog->aux);
461 if (!stream)
462 return -EINVAL;
464 ret = bpf_stream_consume_capacity(stream, ss->len);
468 list = llist_del_all(&ss->log);
477 llist_add_batch(head, tail, &stream->log);
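
Committing a stage reserves capacity for ss->len once, detaches the staged entries with llist_del_all(), and splices the whole chain onto the stream's log with a single llist_add_batch() rather than re-adding entries one at a time. At its core that batch add is one compare-and-swap on the list head; below is a user-space sketch of the primitive over a toy Treiber-style stack, not the kernel's llist implementation.

#include <stdatomic.h>
#include <stddef.h>

struct lnode {
	struct lnode *next;
};

/* Lock-free LIFO head, analogous in spirit to llist_head. */
static _Atomic(struct lnode *) log_head;

/* Push the pre-linked chain [batch_head .. batch_tail] in one CAS,
 * mirroring what a batch add does on the commit path. */
static void push_batch(struct lnode *batch_head, struct lnode *batch_tail)
{
	struct lnode *old = atomic_load(&log_head);

	do {
		batch_tail->next = old;
	} while (!atomic_compare_exchange_weak(&log_head, &old, batch_head));
}

A weak CAS in a retry loop is the usual idiom here; a spurious failure simply re-links the tail and tries again.
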
500 ctxp->err = bpf_stream_stage_printk(ctxp->ss, "%pS\n %s @ %s:%d\n",
502 return !ctxp->err;
505 ctxp->err = bpf_stream_stage_printk(ctxp->ss, "%pS\n", (void *)(long)ip);
506 return !ctxp->err;
515 raw_smp_processor_id(), __kuid_val(current_real_cred()->euid),
516 current->pid, current->comm);