Cross-reference of the identifier page_order across the Linux source tree (line numbers and enclosing functions as reported by the indexer):

/linux/drivers/char/agp/

  generic.c
      854  int page_order;  in agp_generic_create_gatt_table() local
      867  page_order = num_entries = 0;  in agp_generic_create_gatt_table()
      873  page_order =  in agp_generic_create_gatt_table()
      874  A_SIZE_8(temp)->page_order;  in agp_generic_create_gatt_table()
      879  page_order = A_SIZE_16(temp)->page_order;  in agp_generic_create_gatt_table()
      883  page_order = A_SIZE_32(temp)->page_order;  in agp_generic_create_gatt_table()
      890  page_order = num_entries = 0;  in agp_generic_create_gatt_table()
      894  table = alloc_gatt_pages(page_order);  in agp_generic_create_gatt_table()
      920  page_order = ((struct aper_size_info_fixed *) temp)->page_order;  in agp_generic_create_gatt_table()
      922  table = alloc_gatt_pages(page_order);  in agp_generic_create_gatt_table()
      [all …]
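In most of these entries, page_order is the base-2 logarithm of the number of contiguous base pages behind an allocation, so an order-N buffer is 1 << N pages and PAGE_SIZE << N bytes; agp_generic_create_gatt_table() derives that order from the current aperture size before calling alloc_gatt_pages(). A minimal userspace sketch of the arithmetic, assuming 4 KiB pages and an invented order (neither value comes from the driver):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed 4 KiB base pages, for illustration only */

int main(void)
{
    unsigned int page_order = 4; /* hypothetical aperture page order */

    /* An order-N allocation covers 2^N contiguous base pages. */
    unsigned long nr_pages = 1UL << page_order;
    unsigned long nr_bytes = PAGE_SIZE << page_order;

    printf("order %u -> %lu pages, %lu bytes\n",
           page_order, nr_pages, nr_bytes);
    return 0;
}
```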
|
  uninorth-agp.c
      374  int page_order;  in uninorth_create_gatt_table() local
      387  size = page_order = num_entries = 0;  in uninorth_create_gatt_table()
      391  page_order = A_SIZE_32(temp)->page_order;  in uninorth_create_gatt_table()
      394  table = (char *) __get_free_pages(GFP_KERNEL, page_order);  in uninorth_create_gatt_table()
      407  uninorth_priv.pages_arr = kmalloc_array(1 << page_order,  in uninorth_create_gatt_table()
      413  table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);  in uninorth_create_gatt_table()
      425  bridge->gatt_table = vmap(uninorth_priv.pages_arr, (1 << page_order), 0, PAGE_KERNEL_NCG);  in uninorth_create_gatt_table()
      445  free_pages((unsigned long)table, page_order);  in uninorth_create_gatt_table()
      451  int page_order;  in uninorth_free_gatt_table() local
      457  page_order = A_SIZE_32(temp)->page_order;  in uninorth_free_gatt_table()
      [all …]
|
  agp.h
       67  int page_order;  member
       74  int page_order;  member
       81  int page_order;  member
       94  int page_order;  member
|
  alpha-agp.c
      170  aper_size->page_order = __ffs(aper_size->num_entries / 1024);  in alpha_core_agp_setup()
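alpha_core_agp_setup() derives the order from the aperture entry count with __ffs(). A hedged userspace approximation that stands __builtin_ctzl() in for the kernel's __ffs(); the entry count below is invented, and __ffs() is only defined for a non-zero argument:

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned long ffs_bit(unsigned long word)
{
    return (unsigned long)__builtin_ctzl(word);
}

int main(void)
{
    unsigned long num_entries = 128 * 1024; /* hypothetical aperture entry count */
    unsigned long page_order = ffs_bit(num_entries / 1024);

    printf("num_entries=%lu -> page_order=%lu\n", num_entries, page_order);
    return 0;
}
```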
|
  ali-agp.c
      133  page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;  in m1541_cache_flush()
|
/linux/kernel/events/

  internal.h
       18  int page_order;  /* allocation order */  member
      108  static inline int page_order(struct perf_buffer *rb)  in page_order() function
      110  return rb->page_order;  in page_order()
      115  static inline int page_order(struct perf_buffer *rb)  in page_order() function
      123  return rb->nr_pages << page_order(rb);  in data_page_nr()
      128  return rb->nr_pages << (PAGE_SHIFT + page_order(rb));  in perf_data_size()
      156  handle->size = PAGE_SIZE << page_order(rb); \
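In the perf ring buffer, rb->nr_pages counts data pages that are each of order rb->page_order, so the base-page count and the byte size both fall out of shifts, as in data_page_nr() and perf_data_size() above. A small runnable sketch of that arithmetic (the counts are made up and PAGE_SHIFT is assumed to be 12):

```c
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB base pages */

int main(void)
{
    int nr_pages = 8;   /* hypothetical rb->nr_pages */
    int page_order = 2; /* hypothetical rb->page_order */

    /* Mirrors data_page_nr(): total number of base pages. */
    unsigned long data_page_nr = (unsigned long)nr_pages << page_order;

    /* Mirrors perf_data_size(): total data area in bytes. */
    unsigned long data_size = (unsigned long)nr_pages
                              << (PAGE_SHIFT + page_order);

    printf("%d order-%d pages = %lu base pages, %lu bytes\n",
           nr_pages, page_order, data_page_nr, data_size);
    return 0;
}
```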
|
  ring_buffer.c
      241  page_shift = PAGE_SHIFT + page_order(rb);  in __perf_output_begin()
      943  rb->page_order = ilog2(nr_pages);  in rb_alloc()
|
/linux/drivers/md/bcache/

  bset.c
      270  return PAGE_SIZE << b->page_order;  in btree_keys_bytes()
      308  free_pages((unsigned long) t->data, b->page_order);  in bch_btree_keys_free()
      316  unsigned int page_order,  in bch_btree_keys_alloc() argument
      323  b->page_order = page_order;  in bch_btree_keys_alloc()
      325  t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);  in bch_btree_keys_alloc()
      865  PAGE_SIZE << b->page_order);  in bch_bset_insert()
      1184  unsigned int page_order)  in bch_bset_sort_state_init() argument
      1188  state->page_order = page_order;  in bch_bset_sort_state_init()
      1189  state->crit_factor = int_sqrt(1 << page_order);  in bch_bset_sort_state_init()
      1191  return mempool_init_page_pool(&state->pool, 1, page_order);  in bch_bset_sort_state_init()
      [all …]
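bch_bset_sort_state_init() sizes its page mempool by page_order and scales a sort heuristic by int_sqrt(1 << page_order), while btree_keys_bytes() converts the same order to a byte count. A rough userspace sketch of those two computations, with int_sqrt() reimplemented here purely for illustration and an invented order:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed 4 KiB pages */

/* Simple floor integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long int_sqrt(unsigned long x)
{
    unsigned long r = 0;

    while ((r + 1) * (r + 1) <= x)
        r++;
    return r;
}

int main(void)
{
    unsigned int page_order = 3; /* hypothetical btree page order */

    unsigned long bset_bytes = PAGE_SIZE << page_order;     /* as in btree_keys_bytes() */
    unsigned long crit_factor = int_sqrt(1UL << page_order);

    printf("order %u: %lu bytes per bset, crit_factor %lu\n",
           page_order, bset_bytes, crit_factor);
    return 0;
}
```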
|
/linux/drivers/hv/

  mshv_regions.c
       51  unsigned int page_order;  in mshv_region_process_chunk() local
       59  page_order = folio_order(page_folio(page));  in mshv_region_process_chunk()
       61  if (page_order && page_order != HPAGE_PMD_ORDER)  in mshv_region_process_chunk()
       64  stride = 1 << page_order;  in mshv_region_process_chunk()
       75  if (page_order != folio_order(page_folio(page)))  in mshv_region_process_chunk()
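mshv_region_process_chunk() reads the folio order of each page, rejects anything that is neither order 0 nor HPAGE_PMD_ORDER, and advances through the region in strides of 1 << page_order. A hedged sketch of that walk; the region length, the HPAGE_PMD_ORDER value, and the pretend all-huge layout are all assumptions, not taken from the driver:

```c
#include <stdio.h>

#define HPAGE_PMD_ORDER 9 /* assumed: 2 MiB folios on 4 KiB base pages */

int main(void)
{
    unsigned int region_pages = 1536;           /* hypothetical region size in pages */
    unsigned int folio_order = HPAGE_PMD_ORDER; /* pretend every folio is PMD-sized */

    for (unsigned int i = 0; i < region_pages; ) {
        /* Only order-0 pages and PMD-sized folios are accepted. */
        if (folio_order && folio_order != HPAGE_PMD_ORDER) {
            printf("unsupported folio order %u at page %u\n", folio_order, i);
            return 1;
        }

        unsigned int stride = 1U << folio_order;
        printf("process pages [%u, %u)\n", i, i + stride);
        i += stride;
    }
    return 0;
}
```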
|
/linux/include/linux/

  vmalloc.h
       63  unsigned int page_order;  member
      268  return find_vm_area(addr)->page_order > 0;  in is_vm_area_hugepages()
|
  blk-mq.h
      1061  unsigned short page_order;  member
|
/linux/drivers/net/ethernet/aquantia/atlantic/

  aq_ring.c
       55  unsigned int order = rx_ring->page_order;  in aq_alloc_rxpages()
       86  unsigned int order = self->page_order;  in aq_get_rxpages()
      192  self->page_order = AQ_CFG_XDP_PAGEORDER;  in aq_ring_rx_alloc()
      196  self->page_order = fls(self->frame_max / PAGE_SIZE +  in aq_ring_rx_alloc()
      198  if (aq_nic_cfg->rxpageorder > self->page_order)  in aq_ring_rx_alloc()
      199  self->page_order = aq_nic_cfg->rxpageorder;  in aq_ring_rx_alloc()
|
  aq_ring.h
      146  u16 page_order;  member
|
/linux/drivers/scsi/

  sg.c
      116  int page_order;  member
      1234  length = 1 << (PAGE_SHIFT + rsv_schp->page_order);  in sg_vma_fault()
      1280  length = 1 << (PAGE_SHIFT + rsv_schp->page_order);  in sg_mmap()
      1801  md->page_order = req_schp->page_order;  in sg_start_req()
      1920  schp->page_order = order;  in sg_build_indirect()
      1954  __free_pages(schp->pages[k], schp->page_order);  in sg_remove_scat()
      1975  num = 1 << (PAGE_SHIFT + schp->page_order);  in sg_read_oxfer()
      2026  num = 1 << (PAGE_SHIFT + rsv_schp->page_order);  in sg_link_reserve()
      2034  req_schp->page_order = rsv_schp->page_order;  in sg_link_reserve()
      2056  req_schp->page_order = 0;  in sg_unlink_reserve()
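sg_build_indirect() records the order it allocated with, and sg_mmap(), sg_vma_fault() and sg_read_oxfer() all recompute the per-element length as 1 << (PAGE_SHIFT + page_order). A sketch of the segment-count arithmetic such a layout implies; the buffer length and order are invented, and this is not a reconstruction of sg_build_indirect() itself:

```c
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB base pages */

int main(void)
{
    unsigned int page_order = 2;       /* hypothetical schp->page_order */
    unsigned long buflen = 150 * 1024; /* hypothetical request size in bytes */

    /* Bytes held by one scatter element of the chosen order. */
    unsigned long elem_len = 1UL << (PAGE_SHIFT + page_order);

    /* Elements needed to cover buflen, rounding up. */
    unsigned long nr_elems = (buflen + elem_len - 1) / elem_len;

    printf("%lu bytes with order-%u elements (%lu bytes each) -> %lu elements\n",
           buflen, page_order, elem_len, nr_elems);
    return 0;
}
```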
|
/linux/arch/loongarch/kvm/

  main.c
      383  kvm_loongarch_ops->page_order = order;  in kvm_loongarch_env_init()
      425  free_pages(addr, kvm_loongarch_ops->page_order);  in kvm_loongarch_env_exit()
|
/linux/drivers/net/ethernet/microchip/lan966x/

  lan966x_xdp.c
       82  xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,  in lan966x_xdp_run()
|
  lan966x_fdma.c
       79  .order = rx->page_order,  in lan966x_fdma_rx_alloc_page_pool()
      826  lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;  in lan966x_fdma_reload()
      938  lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;  in lan966x_fdma_init()
      948  lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;  in lan966x_fdma_init()
|
  lan966x_main.h
      209  u8 page_order;  member
|
/linux/arch/powerpc/platforms/pseries/

  setup.c
      950  int page_order = IOMMU_PAGE_SHIFT_4K;  in pSeries_cmo_feature_init() local
      982  page_order = simple_strtol(value, NULL, 10);  in pSeries_cmo_feature_init()
      995  CMO_PageSize = 1 << page_order;  in pSeries_cmo_feature_init()
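pSeries_cmo_feature_init() parses page_order from a firmware property and computes CMO_PageSize = 1 << page_order, so here the value acts as a full byte shift (defaulting to IOMMU_PAGE_SHIFT_4K) rather than an order of PAGE_SIZE pages as in most other entries. A userspace approximation with strtol() standing in for simple_strtol(); the property string is invented:

```c
#include <stdio.h>
#include <stdlib.h>

#define IOMMU_PAGE_SHIFT_4K 12 /* default shift, as in the listing */

int main(void)
{
    const char *value = "16";             /* hypothetical firmware property value */
    int page_order = IOMMU_PAGE_SHIFT_4K; /* fall back to 4 KiB pages */

    if (value)
        page_order = (int)strtol(value, NULL, 10);

    unsigned long cmo_page_size = 1UL << page_order;
    printf("CMO page order %d -> %lu-byte pages\n", page_order, cmo_page_size);
    return 0;
}
```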
|
/linux/arch/loongarch/include/asm/

  kvm_host.h
       88  unsigned long page_order;  member
|
/linux/kernel/dma/

  direct.c
      313  unsigned int page_order = get_order(size);  in dma_direct_free() local
      331  if (!dma_release_from_global_coherent(page_order, cpu_addr))  in dma_direct_free()
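dma_direct_free() recovers the allocation order from the mapping size with get_order(). A hedged userspace reimplementation of that round-up mapping, i.e. the smallest N with (PAGE_SIZE << N) >= size (the sample sizes are arbitrary and size 0 is not handled):

```c
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages */

/* Userspace stand-in for the kernel's get_order(). */
static unsigned int get_order(unsigned long size)
{
    unsigned int order = 0;

    size = (size - 1) >> PAGE_SHIFT;
    while (size) {
        order++;
        size >>= 1;
    }
    return order;
}

int main(void)
{
    unsigned long sizes[] = { 4096, 4097, 65536, 100000 };

    for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("get_order(%lu) = %u\n", sizes[i], get_order(sizes[i]));
    return 0;
}
```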
|
/linux/mm/

  vmalloc.c
      3101  return vm->page_order;  in vm_area_page_order()
      3115  vm->page_order = order;  in set_vm_area_page_order()
      3348  unsigned int page_order = vm_area_page_order(area);  in vm_reset_perms() local
      3356  for (i = 0; i < area->nr_pages; i += 1U << page_order) {  in vm_reset_perms()
      3362  page_size = PAGE_SIZE << page_order;  in vm_reset_perms()
      3824  unsigned int page_order;  in __vmalloc_area_node() local
      3853  page_order = vm_area_page_order(area);  in __vmalloc_area_node()
      3864  vmalloc_gfp_adjust(gfp_mask, page_order), node,  in __vmalloc_area_node()
      3865  page_order, nr_small_pages, area->pages);  in __vmalloc_area_node()
      3888  if (!fatal_signal_pending(current) && page_order == 0)  in __vmalloc_area_node()
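vm_reset_perms() steps through area->pages in increments of 1U << page_order and treats each step as one chunk of PAGE_SIZE << page_order bytes. A small sketch of that iteration pattern; the page count and order are assumptions, not vmalloc internals:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed 4 KiB pages */

int main(void)
{
    unsigned int page_order = 2; /* hypothetical vm->page_order */
    unsigned int nr_pages = 16;  /* hypothetical area->nr_pages */

    /* Walk the page array one order-sized chunk at a time. */
    for (unsigned int i = 0; i < nr_pages; i += 1U << page_order) {
        unsigned long chunk_size = PAGE_SIZE << page_order;

        printf("chunk starting at page index %u covers %lu bytes\n",
               i, chunk_size);
    }
    return 0;
}
```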
|
/linux/block/

  blk-map.c
      181  nr_pages = 1U << map_data->page_order;  in bio_copy_user_iov()
|
/linux/drivers/net/ethernet/sun/

  cassini.c
      448  __free_pages(page->buffer, cp->page_order);  in cas_page_free()
      474  page->buffer = alloc_pages(flags, cp->page_order);  in cas_page_alloc()
      3354  cp->page_order = 0;  in cas_check_invariants()
      3363  cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;  in cas_check_invariants()
      3369  cp->page_size = (PAGE_SIZE << cp->page_order);  in cas_check_invariants()
|
  cassini.h
      2812  int page_order;  member
|