Searched refs: head_page (Results 1 – 9 of 9, sorted by relevance)
/linux/kernel/trace/ring_buffer.c
    526   struct buffer_page *head_page;  /* read from head */       member
    603   struct buffer_page *head_page;                             member
    1302  head = cpu_buffer->head_page;                              in rb_head_page_activate()
    1411  if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))        in rb_set_head_page()
    1419  page = head = cpu_buffer->head_page;                       in rb_set_head_page()
    1429  cpu_buffer->head_page = page;                              in rb_set_head_page()
    1911  struct buffer_page *head_page, *orig_head;                 in rb_meta_validate_events() (local)
    1931  orig_head = head_page = cpu_buffer->head_page;             in rb_meta_validate_events()
    1932  ts = head_page->page->time_stamp;                          in rb_meta_validate_events()
    1938  if (head_page == cpu_buffer->tail_page)                    in rb_meta_validate_events()
    [all …]
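The ring_buffer.c hits show a per-CPU buffer that caches a head_page pointer and, in rb_set_head_page(), walks the circular page list from that cache until it finds the page currently marked as head. Below is a minimal userspace sketch of that pattern; the struct names and the explicit is_head flag are illustrative stand-ins (the kernel encodes the HEAD marker in list-pointer flag bits), not the real definitions.

```c
/* Simplified, userspace-only sketch of a per-CPU ring buffer that keeps a
 * cached head_page pointer, loosely modelled on the snippets above.  The
 * real kernel hides the HEAD marker in the low bits of list pointers;
 * an explicit flag stands in for that here (an assumption for clarity). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buffer_page {
    struct buffer_page *next;   /* circular list of pages */
    bool is_head;               /* stand-in for the kernel's HEAD flag */
    int id;
};

struct cpu_buffer {
    struct buffer_page *head_page;  /* cached "read from head" pointer */
    struct buffer_page *tail_page;
};

/* Like rb_set_head_page(): start from the cached pointer and walk the
 * circular list until the page currently flagged as head is found. */
static struct buffer_page *set_head_page(struct cpu_buffer *cpu_buffer)
{
    struct buffer_page *page, *head;

    if (!cpu_buffer->head_page)
        return NULL;

    page = head = cpu_buffer->head_page;
    do {
        if (page->is_head) {
            cpu_buffer->head_page = page;   /* refresh the cache */
            return page;
        }
        page = page->next;
    } while (page != head);

    return NULL;    /* no page flagged as head: corrupted list */
}

int main(void)
{
    struct buffer_page p[3] = {
        { .next = &p[1], .id = 0 },
        { .next = &p[2], .id = 1, .is_head = true },
        { .next = &p[0], .id = 2 },
    };
    struct cpu_buffer cb = { .head_page = &p[0], .tail_page = &p[2] };

    struct buffer_page *h = set_head_page(&cb);
    printf("head page id: %d\n", h ? h->id : -1);
    return 0;
}
```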
/linux/kernel/trace/trace.h
    879   extern void *head_page(struct trace_array_cpu *data);
/linux/drivers/infiniband/core/umem_odp.c
    452   struct page *head_page = compound_head(page);              in ib_umem_odp_unmap_dma_pages() (local)
    462   set_page_dirty(head_page);                                 in ib_umem_odp_unmap_dma_pages()
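In umem_odp.c the teardown path resolves each page to its compound head with compound_head() and marks that head dirty, since tail pages do not carry their own dirty state. A toy, userspace-only sketch of the same idea, with made-up fake_page/compound_head_of helpers standing in for the kernel structures:

```c
/* Sketch of the "mark the head page dirty" pattern from
 * ib_umem_odp_unmap_dma_pages(): tail pages of a compound page carry no
 * dirty state of their own, so the bit is set on the head.  The struct
 * and helpers below are illustrative stand-ins, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
    struct fake_page *compound_head_ptr;  /* NULL if this is the head */
    bool dirty;
};

static struct fake_page *compound_head_of(struct fake_page *page)
{
    return page->compound_head_ptr ? page->compound_head_ptr : page;
}

static void set_page_dirty_sketch(struct fake_page *page)
{
    page->dirty = true;
}

int main(void)
{
    struct fake_page head = { 0 };
    struct fake_page tail = { .compound_head_ptr = &head };

    /* The write went through the tail page; record that on the head. */
    struct fake_page *head_page = compound_head_of(&tail);
    set_page_dirty_sketch(head_page);

    printf("head dirty: %d\n", head.dirty);
    return 0;
}
```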
/linux/Documentation/trace/ring-buffer-design.rst
    43    head_page
    113   The head_page, tail_page and commit_page are all initialized to point
    123   it will swap its page with the head_page. The old reader page will
    124   become part of the ring buffer and the head_page will be removed.
    421   of the head_page pointer with the swapping of pages with the reader.
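The design-document hits describe the reader swapping its private page with the head_page: the reader's spare page takes the head's place in the ring and the old head leaves the ring for reading. A simplified sketch of that splice (singly linked, no lockless flag handling, names are hypothetical):

```c
/* Minimal sketch of the reader-page swap described above: the reader owns a
 * spare page outside the ring; to consume data it splices that spare in
 * where the head page sits and pulls the old head out.  The list handling
 * is deliberately simplified compared with the real ring buffer. */
#include <stdio.h>

struct page_node {
    struct page_node *next;
    int id;
};

/* 'prev_of_head' is the ring page whose ->next currently points at head.
 * Returns the old head, which becomes the reader's private page. */
static struct page_node *swap_reader_page(struct page_node *prev_of_head,
                                          struct page_node *reader)
{
    struct page_node *old_head = prev_of_head->next;

    reader->next = old_head->next;   /* reader takes head's place... */
    prev_of_head->next = reader;     /* ...inside the ring */
    old_head->next = NULL;           /* old head leaves the ring */
    return old_head;
}

int main(void)
{
    struct page_node p0 = { .id = 0 }, p1 = { .id = 1 }, p2 = { .id = 2 };
    struct page_node reader = { .id = 99 };

    /* ring: p0 -> p1 -> p2 -> p0, with p1 as the head page */
    p0.next = &p1; p1.next = &p2; p2.next = &p0;

    struct page_node *got = swap_reader_page(&p0, &reader);
    printf("reader now holds page %d; ring contains %d -> %d -> %d\n",
           got->id, p0.id, p0.next->id, p0.next->next->id);
    return 0;
}
```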
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
    2043  struct mlx5e_frag_page *head_page = frag_page;             in mlx5e_skb_from_cqe_mpwrq_nonlinear() (local)
    2129  for (pfp = head_page; pfp < frag_page; pfp++)              in mlx5e_skb_from_cqe_mpwrq_nonlinear()
    2167  xdp_update_skb_frags_info(skb, frag_page - head_page,      in mlx5e_skb_from_cqe_mpwrq_nonlinear()
    2172  pagep = head_page;                                         in mlx5e_skb_from_cqe_mpwrq_nonlinear()
    2198  addr = page_pool_get_dma_addr_netmem(head_page->netmem);   in mlx5e_skb_from_cqe_mpwrq_nonlinear()
    2199  mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,    in mlx5e_skb_from_cqe_mpwrq_nonlinear()
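The mlx5 hits rely on plain pointer arithmetic over an array of fragment descriptors: head_page remembers the first descriptor, the cursor frag_page advances past it, and frag_page - head_page later yields the number of fragments consumed. A small sketch of that bookkeeping, with a hypothetical frag_desc type in place of mlx5e_frag_page:

```c
#include <stdio.h>

struct frag_desc {
    int len;    /* stand-in payload size per fragment */
};

int main(void)
{
    struct frag_desc frags[8] = { {256}, {512}, {1024}, {128} };
    struct frag_desc *frag_page = frags;        /* advancing cursor */
    struct frag_desc *head_page = frag_page;    /* first fragment of this packet */
    struct frag_desc *pfp;
    int total = 0;

    /* Consume four fragments: each one just advances the cursor. */
    for (int i = 0; i < 4; i++)
        frag_page++;

    /* Walk [head_page, frag_page), the same shape as the loop at line 2129. */
    for (pfp = head_page; pfp < frag_page; pfp++)
        total += pfp->len;

    /* The fragment count falls out of the pointer difference (line 2167). */
    printf("frags used: %td, bytes: %d\n", frag_page - head_page, total);
    return 0;
}
```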
/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
    1819  struct page *page, *head_page;                             in sg_fd_to_skb() (local)
    1880  head_page = virt_to_head_page(sg_vaddr);                   in sg_fd_to_skb()
    1885  (page_address(page) - page_address(head_page));            in sg_fd_to_skb()
    1896  skb_add_rx_frag(skb, i - 1, head_page, page_offset,        in sg_fd_to_skb()
/linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
    208   struct page *page, *head_page;                             in dpaa2_eth_build_frag_skb() (local)
    251   head_page = virt_to_head_page(sg_vaddr);                   in dpaa2_eth_build_frag_skb()
    260   (page_address(page) - page_address(head_page));            in dpaa2_eth_build_frag_skb()
    262   skb_add_rx_frag(skb, i - 1, head_page, page_offset,        in dpaa2_eth_build_frag_skb()
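Both the dpaa and dpaa2 snippets above compute a fragment's byte offset relative to the compound head page: the offset inside the fragment's own page plus the distance between that page and the head page, which is what the skb_add_rx_frag() call then uses. A userspace sketch of that arithmetic (a plain aligned buffer stands in for the compound page; only the offset math is taken from the snippets):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    /* A "compound page" of 4 contiguous sub-pages, modelled as one buffer. */
    unsigned char *head_page = aligned_alloc(PAGE_SIZE, 4 * PAGE_SIZE);
    if (!head_page)
        return 1;

    /* Some payload address inside the third sub-page. */
    unsigned char *sg_vaddr = head_page + 2 * PAGE_SIZE + 300;

    /* Base of the sub-page containing the payload. */
    unsigned char *page =
        (unsigned char *)((uintptr_t)sg_vaddr & ~(PAGE_SIZE - 1));

    /* Offset within the sub-page, plus that sub-page's distance from the
     * head: mirrors the "(page_address(page) - page_address(head_page))"
     * term in the snippets above. */
    size_t page_offset = (size_t)(sg_vaddr - page) + (size_t)(page - head_page);

    printf("offset from head page: %zu\n", page_offset);   /* 2*4096 + 300 */
    free(head_page);
    return 0;
}
```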
/linux/mm/page_alloc.c
    1103  static int free_tail_page_prepare(struct page *head_page, struct page *page)   in free_tail_page_prepare() (argument)
    1105  struct folio *folio = (struct folio *)head_page;           in free_tail_page_prepare()
    1118  switch (page - head_page) {                                in free_tail_page_prepare()
    1184  if (unlikely(compound_head(page) != head_page)) {          in free_tail_page_prepare()
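free_tail_page_prepare() takes the head page and one of its tails, switches on the tail's index (page - head_page) to check index-specific metadata, and finally verifies that the tail still points back at the head via compound_head(). A condensed sketch of that shape, with a fake_page struct standing in for struct page and none of the real checks:

```c
#include <stdio.h>

struct fake_page {
    struct fake_page *compound_head;   /* tails point back at the head */
};

/* Returns 0 when the tail page looks sane, 1 otherwise -- loosely the shape
 * of free_tail_page_prepare(), without any of the real struct page checks. */
static int check_tail_page(struct fake_page *head_page, struct fake_page *page)
{
    switch (page - head_page) {
    case 1:
        /* first tail: the kernel validates compound metadata stored here */
        break;
    case 2:
        /* second tail: more compound metadata lives in this slot */
        break;
    default:
        break;
    }

    if (page->compound_head != head_page) {
        fprintf(stderr, "corrupted mapping in tail page\n");
        return 1;
    }
    return 0;
}

int main(void)
{
    struct fake_page pages[4];

    pages[0].compound_head = &pages[0];
    for (int i = 1; i < 4; i++)
        pages[i].compound_head = &pages[0];

    printf("tail 2 ok: %s\n",
           check_tail_page(&pages[0], &pages[2]) ? "no" : "yes");
    return 0;
}
```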
/linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c
    3599  int i, head_page, start_idx, source_offset;                in __bnxt_copy_ring() (local)
    3602  head_page = head / rmem->page_size;                        in __bnxt_copy_ring()
    3607  start_idx = head_page % MAX_CTX_PAGES;                     in __bnxt_copy_ring()
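The bnxt hit is pure index arithmetic: the byte position head is turned into a page number by dividing by the ring's page size, and that page number is reduced modulo MAX_CTX_PAGES to find where to start inside the current block of pages. A tiny sketch of the same calculation; the constants and the source_offset derivation are assumptions for illustration:

```c
#include <stdio.h>

#define MAX_CTX_PAGES 16               /* hypothetical pages per block */

int main(void)
{
    unsigned int page_size = 4096;     /* stand-in for rmem->page_size */
    unsigned int head = 150000;        /* byte offset of the ring head */

    unsigned int head_page = head / page_size;            /* which page */
    unsigned int start_idx = head_page % MAX_CTX_PAGES;   /* index within its block */
    unsigned int source_offset = head % page_size;        /* plausible derivation;
                                                              not shown in the snippet */

    printf("page %u, index in block %u, offset %u\n",
           head_page, start_idx, source_offset);
    return 0;
}
```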