
Searched +full:page +full:- +full:size (Results 1 – 25 of 1082) sorted by relevance


/linux/arch/arm/mm/
dma-mapping.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
28 #include <asm/page.h>
33 #include <asm/dma-iommu.h>
36 #include <asm/xen/xen-ops.h>
43 size_t size; member
53 size_t size; member
[all …]
/linux/kernel/dma/
direct.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Christoph Hellwig.
10 #include <linux/dma-map-ops.h>
21 * override the variable below for dma-direct to work properly.
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page()
41 phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT; in dma_direct_get_required_mask()
44 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; in dma_direct_get_required_mask()
50 dev->coherent_dma_mask, in dma_direct_optimal_gfp_mask()
51 dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
69 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
[all …]
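
The required-mask computation above takes the highest physical page address and widens it to an all-ones mask covering that address. A minimal userspace sketch of the same arithmetic, with illustrative values (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* 1-based index of the highest set bit, like the kernel's fls64() */
    static int fls64_demo(uint64_t x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
        uint64_t max_pfn = 0x100000;          /* 4 GiB of RAM, 4 KiB pages */
        uint64_t phys = (max_pfn - 1) << 12;  /* PAGE_SHIFT == 12 */
        uint64_t mask = (1ULL << (fls64_demo(phys) - 1)) * 2 - 1;

        printf("required mask: %#llx\n", (unsigned long long)mask); /* 0xffffffff */
        return 0;
    }
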
ops_helpers.c
1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-map-ops.h>
7 #include <linux/iommu-dma.h>
9 static struct page *dma_common_vaddr_to_page(void *cpu_addr) in dma_common_vaddr_to_page()
17 * Create scatter-list for the already allocated DMA buffer.
20 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_get_sgtable() argument
23 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_get_sgtable() local
28 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_common_get_sgtable()
33 * Create userspace mapping for the DMA-coherent memory.
36 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_mmap() argument
[all …]
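
dma_common_get_sgtable() above is the generic backend behind the exported dma_get_sgtable() helper: it wraps a coherent allocation in a single-entry scatterlist. A hedged driver-side sketch (export_coherent_buf() is hypothetical; the buffer is assumed to come from a prior dma_alloc_coherent()):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int export_coherent_buf(struct device *dev, void *vaddr,
                                   dma_addr_t dma_handle, size_t size)
    {
        struct sg_table sgt;
        int ret;

        /* builds a one-entry sg table describing the whole buffer */
        ret = dma_get_sgtable(dev, &sgt, vaddr, dma_handle, size);
        if (ret)
            return ret;

        /* ... hand sgt.sgl to an importer (e.g. dma-buf) here ... */

        sg_free_table(&sgt);
        return 0;
    }
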
pool.c
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/dma-map-ops.h>
9 #include <linux/dma-direct.h>
23 /* Size can be defined by the coherent_pool command line */
46 static void dma_atomic_pool_size_add(gfp_t gfp, size_t size) in dma_atomic_pool_size_add() argument
49 pool_size_dma += size; in dma_atomic_pool_size_add()
51 pool_size_dma32 += size; in dma_atomic_pool_size_add()
53 pool_size_kernel += size; in dma_atomic_pool_size_add()
58 unsigned long size; in cma_in_zone() local
66 size = cma_get_size(cma); in cma_in_zone()
[all …]
contiguous.c
1 // SPDX-License-Identifier: GPL-2.0+
4 * Copyright (c) 2010-2011 by Samsung Electronics.
17 * Various devices on embedded systems have no scatter-getter and/or
30 * inaccessible to page system even if device drivers don't use it.
40 #include <asm/page.h>
45 #include <linux/dma-map-ops.h>
58 * Default global CMA area size can be defined in kernel's .config.
61 * The size can be set in bytes or as a percentage of the total memory
64 * Users, who want to set the size of global CMA area for their system
69 static phys_addr_t size_cmdline __initdata = -1;
[all …]
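
The global area is sized at boot (for example with the cma=nn[MG] command-line parameter) and drivers never address it directly: a large dma_alloc_coherent() is simply satisfied from the contiguous region when one exists. A minimal sketch, assuming a device that needs one 4 MiB physically contiguous buffer (my_drv_alloc() is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static int my_drv_alloc(struct device *dev)
    {
        dma_addr_t dma;
        void *buf;

        /* may be carved out of the CMA area on CMA-enabled kernels */
        buf = dma_alloc_coherent(dev, SZ_4M, &dma, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        /* ... use buf / dma ... */

        dma_free_coherent(dev, SZ_4M, buf, dma);
        return 0;
    }
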
/linux/include/net/page_pool/
helpers.h
1 /* SPDX-License-Identifier: GPL-2.0
11 * The page_pool allocator is optimized for recycling page or page fragment used
15 * which allocate memory with or without page splitting depending on the
16 * requested memory size.
19 * always smaller than half a page, it can use one of the more specific API
22 * 1. page_pool_alloc_pages(): allocate memory without page splitting when
23 * driver knows that the memory it need is always bigger than half of the page
24 * allocated from page pool. There is no cache line dirtying for 'struct page'
25 * when a page is recycled back to the page pool.
27 * 2. page_pool_alloc_frag(): allocate memory with page splitting when driver
[all …]
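
A hedged sketch of the two allocation styles the comment describes, whole pages versus fragments (parameter values are illustrative, error paths are trimmed, and some kernel versions additionally require a frag-enabling flag in page_pool_params.flags):

    #include <net/page_pool/helpers.h>

    static void page_pool_demo(struct device *dev)
    {
        struct page_pool_params pp = {
            .order     = 0,
            .pool_size = 256,
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
        };
        struct page_pool *pool = page_pool_create(&pp);
        unsigned int offset;
        struct page *page, *frag;

        if (IS_ERR(pool))
            return;

        /* 1. buffers known to be bigger than half a page: whole pages */
        page = page_pool_alloc_pages(pool, GFP_KERNEL);

        /* 2. buffers always smaller than half a page: fragments */
        frag = page_pool_alloc_frag(pool, &offset, 1024, GFP_KERNEL);

        /* release paths simplified; a frag page is recycled once all
         * of its fragments have been put back */
        if (page)
            page_pool_put_full_page(pool, page, false);
        if (frag)
            page_pool_put_full_page(pool, frag, false);
        page_pool_destroy(pool);
    }
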
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
41 const struct nvkm_vmm_page *page) in nvkm_vmm_pt_new() argument
43 const u32 pten = 1 << desc->bits; in nvkm_vmm_pt_new()
47 if (desc->type > PGT) { in nvkm_vmm_pt_new()
48 if (desc->type == SPT) { in nvkm_vmm_pt_new()
49 const struct nvkm_vmm_desc *pair = page[-1].desc; in nvkm_vmm_pt_new()
50 lpte = pten >> (desc->bits - pair->bits); in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
59 pgt->sparse = sparse; in nvkm_vmm_pt_new()
61 if (desc->type == PGD) { in nvkm_vmm_pt_new()
[all …]
uvmm.c
42 return nvkm_vmm_ref(nvkm_uvmm(object)->vmm); in nvkm_uvmm_search()
51 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_pfnclr()
52 int ret = -ENOSYS; in nvkm_uvmm_mthd_pfnclr()
53 u64 addr, size; in nvkm_uvmm_mthd_pfnclr() local
55 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in nvkm_uvmm_mthd_pfnclr()
56 addr = args->v0.addr; in nvkm_uvmm_mthd_pfnclr()
57 size = args->v0.size; in nvkm_uvmm_mthd_pfnclr()
61 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw) in nvkm_uvmm_mthd_pfnclr()
62 return -EINVAL; in nvkm_uvmm_mthd_pfnclr()
64 if (size) { in nvkm_uvmm_mthd_pfnclr()
[all …]
/linux/sound/pci/emu10k1/
memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \ argument
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \ argument
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30 /* get aligned page from offset address */
32 /* get offset address from aligned page */
[all …]
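
The macros above pack a PTB entry by shifting the page-aligned DMA address and OR-ing in the Emu page index, which fits in the low bits precisely because the address is 4096-byte aligned. A standalone sketch with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t addr = 0x1234000; /* page-aligned DMA address, low 12 bits clear */
        uint32_t page = 5;         /* Emu page index, 4096-byte pages */
        int address_mode = 0;      /* emu->address_mode is 0 or 1 per chip */

        uint32_t entry = (addr << address_mode) | page;
        printf("PTB entry: %#x\n", entry);  /* 0x1234005 */
        return 0;
    }
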
/linux/mm/
zsmalloc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
12 * Released under the terms of 3-clause BSD License
18 * struct page(s) to form a zspage.
20 * Usage of struct page fields:
21 * page->private
192 int size; global() member
376 zs_zpool_malloc(void * pool,size_t size,gfp_t gfp,unsigned long * handle) zs_zpool_malloc() argument
441 is_first_page(struct page * page) is_first_page() argument
468 get_first_obj_offset(struct page * page) get_first_obj_offset() argument
474 set_first_obj_offset(struct page * page,unsigned int offset) set_first_obj_offset() argument
507 get_size_class_index(int size) get_size_class_index() argument
734 get_zspage(struct page * page) get_zspage() argument
742 get_next_page(struct page * page) get_next_page() argument
758 obj_to_location(unsigned long obj,struct page ** page,unsigned int * obj_idx) obj_to_location() argument
765 obj_to_page(unsigned long obj,struct page ** page) obj_to_page() argument
775 location_to_obj(struct page * page,unsigned int obj_idx) location_to_obj() argument
790 obj_allocated(struct page * page,void * obj,unsigned long * phandle) obj_allocated() argument
810 reset_page(struct page * page) reset_page() argument
843 struct page *page, *next; __free_zspage() local
892 struct page *page = get_first_page(zspage); init_zspage() local
936 struct page *page; create_page_chain() local
983 struct page *page; alloc_zspage() local
1045 __zs_map_object(struct mapping_area * area,struct page * pages[2],int off,int size) __zs_map_object() argument
1073 __zs_unmap_object(struct mapping_area * area,struct page * pages[2],int off,int size) __zs_unmap_object() argument
1152 zs_lookup_class_index(struct zs_pool * pool,unsigned int size) zs_lookup_class_index() argument
1187 struct page *page; zs_map_object() local
1248 struct page *page; zs_unmap_object() local
1351 zs_malloc(struct zs_pool * pool,size_t size,gfp_t gfp) zs_malloc() argument
1478 int s_size, d_size, size; zs_object_copy() local
1546 find_alloced_obj(struct size_class * class,struct page * page,int * obj_idx) find_alloced_obj() argument
1664 struct page *curr_page, *page; lock_zspage() local
1733 struct page *page; replace_sub_page() local
1753 zs_page_isolate(struct page * page,isolate_mode_t mode) zs_page_isolate() argument
1764 zs_page_migrate(struct page * newpage,struct page * page,enum migrate_mode mode) zs_page_migrate() argument
1844 zs_page_putback(struct page * page) zs_page_putback() argument
1908 struct page *page = get_first_page(zspage); SetZsPageMovable() local
2149 int size; zs_create_pool() local
[all …]
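
zsmalloc hands out opaque handles rather than pointers, and objects must be mapped before use; this indirection is what lets the allocator pack objects across page boundaries and migrate pages. A hedged consumer-side sketch in the style of zram (failure checks simplified; recent kernels encode zs_malloc() errors with IS_ERR_VALUE()):

    #include <linux/zsmalloc.h>
    #include <linux/string.h>

    static int zsmalloc_demo(const char *data, size_t len)
    {
        struct zs_pool *pool = zs_create_pool("demo");
        unsigned long handle;
        void *dst;

        if (!pool)
            return -ENOMEM;

        handle = zs_malloc(pool, len, GFP_KERNEL);
        if (!handle) {
            zs_destroy_pool(pool);
            return -ENOMEM;
        }

        /* mappings are short-lived: map, copy, unmap */
        dst = zs_map_object(pool, handle, ZS_MM_WO);
        memcpy(dst, data, len);
        zs_unmap_object(pool, handle);

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
    }
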
dmapool.c
1 // SPDX-License-Identifier: GPL-2.0-only
9 * This allocator returns small blocks of a given size which are DMA-able by
10 * the given device. It uses the dma_alloc_coherent page allocator to get
11 * new pages, then splits them up into blocks of the required size.
15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
16 * allocated pages. Each page in the page_list is split into blocks of at
17 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
19 * keep a count of how many are currently allocated from each page.
23 #include <linux/dma-mapping.h>
56 unsigned int size; member
[all …]
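
From the consumer side the pool trades one coherent page for many small, same-sized blocks. A minimal sketch, assuming a driver that needs 64-byte, 64-byte-aligned hardware descriptors (the name and sizes are illustrative):

    #include <linux/dmapool.h>

    static int dmapool_demo(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        /* name, device, block size, alignment, boundary (0 = none) */
        pool = dma_pool_create("demo-desc", dev, 64, 64, 0);
        if (!pool)
            return -ENOMEM;

        desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (!desc) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        /* ... program the device with the bus address 'dma' ... */

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);
        return 0;
    }
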
zbud.c
1 // SPDX-License-Identifier: GPL-2.0-only
12 * page.
19 * single memory page called a "zbud page". The first buddy is "left
20 * justified" at the beginning of the zbud page, and the last buddy is "right
21 * justified" at the end of the zbud page. The benefit is that if either
24 * within the zbud page
127 size_to_chunks(size_t size) size_to_chunks() argument
136 init_zbud_page(struct page * page) init_zbud_page() argument
246 zbud_alloc(struct zbud_pool * pool,size_t size,gfp_t gfp,unsigned long * handle) zbud_alloc() argument
252 struct page *page; zbud_alloc() local
393 zbud_zpool_malloc(void * pool,size_t size,gfp_t gfp,unsigned long * handle) zbud_zpool_malloc() argument
[all …]
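
The pairing constraint follows directly from the layout: a left and a right buddy can share a page only if their chunk-rounded sizes fit together. A back-of-envelope sketch of that check (the chunk size is illustrative, and the real allocator also reserves room for its own header):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SZ  4096
    #define CHUNK_SZ 64                 /* illustrative */

    static int to_chunks(int size)      /* round up to whole chunks */
    {
        return (size + CHUNK_SZ - 1) / CHUNK_SZ;
    }

    static bool pair_fits(int left, int right)
    {
        return (to_chunks(left) + to_chunks(right)) * CHUNK_SZ <= PAGE_SZ;
    }

    int main(void)
    {
        printf("%d\n", pair_fits(1700, 2300)); /* 1: 27 + 36 chunks fit */
        printf("%d\n", pair_fits(2100, 2100)); /* 0: 33 + 33 chunks do not */
        return 0;
    }
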
/linux/drivers/vdpa/vdpa_user/
iova_domain.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * MMU-based software IOTLB.
5 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
30 return -ENOMEM; in vduse_iotlb_add_range()
32 map_file->file = get_file(file); in vduse_iotlb_add_range()
33 map_file->offset = offset; in vduse_iotlb_add_range()
35 ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last, in vduse_iotlb_add_range()
38 fput(map_file->file); in vduse_iotlb_add_range()
51 while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) { in vduse_iotlb_del_range()
52 map_file = (struct vdpa_map_file *)map->opaque; in vduse_iotlb_del_range()
[all …]
/linux/drivers/misc/
vmw_balloon.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
46 …"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performanc…
54 /* Magic number for the balloon mount-point */
80 #define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
83 * 64-bit targets are only supported in 64-bit
118 * enum vmballoon_cmd_type - backdoor commands.
140 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
141 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
142 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
[all …]
/linux/arch/powerpc/include/asm/nohash/32/
mmu-8xx.h
1 /* SPDX-License-Identifier: GPL-2.0 */
10 * During software tablewalk, the registers used perform mask/shift-add
34 * Then we use the APG to say whether accesses are according to Page rules or
39 * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
40 * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
41 * 2 => User => 11 (all accesses performed according as user iaw page definition)
42 * 3 => User+Accessed => 10 (all accesses performed according to swaped page definition) for KUEP
43 * 4-15 => Not Used
47 /* The effective page number register. When read, contains the information
52 #define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
[all …]
/linux/include/linux/
dma-map-ops.h
1 /* SPDX-License-Identifier: GPL-2.0 */
9 #include <linux/dma-mapping.h>
17 void *(*alloc)(struct device *dev, size_t size,
20 void (*free)(struct device *dev, size_t size, void *vaddr,
22 struct page *(*alloc_pages_op)(struct device *dev, size_t size,
25 void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
31 void *cpu_addr, dma_addr_t dma_addr, size_t size,
34 dma_addr_t (*map_page)(struct device *dev, struct page *page,
35 unsigned long offset, size_t size,
38 size_t size, enum dma_data_direction dir,
[all …]
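
struct dma_map_ops is the vtable that an architecture or IOMMU backend fills in; the core DMA API dispatches through whichever ops are installed on the device. A hedged skeleton using only the fields visible in the excerpt above (my_alloc, my_free, and my_map_page are hypothetical, with bodies omitted):

    #include <linux/dma-map-ops.h>

    static void *my_alloc(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t gfp,
                          unsigned long attrs)
    {
        return NULL;  /* ... allocate and fill *dma_handle ... */
    }

    static void my_free(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle, unsigned long attrs)
    {
        /* ... release the buffer ... */
    }

    static dma_addr_t my_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  unsigned long attrs)
    {
        return DMA_MAPPING_ERROR;  /* ... map and return a bus address ... */
    }

    static const struct dma_map_ops my_dma_ops = {
        .alloc    = my_alloc,
        .free     = my_free,
        .map_page = my_map_page,
    };

A backend typically attaches such a table with set_dma_ops(dev, &my_dma_ops).
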
/linux/arch/powerpc/mm/
dma-noncoherent.c
1 // SPDX-License-Identifier: GPL-2.0-only
13 #include <linux/dma-direct.h>
14 #include <linux/dma-map-ops.h>
22 static void __dma_sync(void *vaddr, size_t size, int direction) in __dma_sync() argument
25 unsigned long end = start + size; in __dma_sync()
32 * invalidate only when cache-line aligned otherwise there is in __dma_sync()
35 if ((start | end) & (L1_CACHE_BYTES - 1)) in __dma_sync()
52 * In this case, each page of a buffer must be kmapped/kunmapped
57 * beyond the first page.
59 static inline void __dma_sync_page_highmem(struct page *page, in __dma_sync_page_highmem() argument
[all …]
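
The (start | end) & (L1_CACHE_BYTES - 1) test above works because OR-ing the two addresses exposes any low bit set in either one; if a bit survives the mask, the buffer shares a cache line with neighbouring data and a plain invalidate could destroy it. A standalone illustration (values are made up):

    #include <stdio.h>

    #define L1_CACHE_BYTES 32

    int main(void)
    {
        unsigned long start = 0x1010, end = 0x1080;

        if ((start | end) & (L1_CACHE_BYTES - 1))
            printf("unaligned: must flush, not just invalidate\n");
        else
            printf("cache-line aligned: safe to invalidate\n");
        return 0;
    }
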
/linux/fs/hfsplus/
bitmap.c
1 // SPDX-License-Identifier: GPL-2.0
19 int hfsplus_block_allocate(struct super_block *sb, u32 size, in hfsplus_block_allocate() argument
23 struct page *page; in hfsplus_block_allocate() local
32 return size; in hfsplus_block_allocate()
34 hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); in hfsplus_block_allocate()
35 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate()
36 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate()
37 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate()
38 if (IS_ERR(page)) { in hfsplus_block_allocate()
39 start = size; in hfsplus_block_allocate()
[all …]
/linux/lib/
iov_iter.c
1 // SPDX-License-Identifier: GPL-2.0-only
4 #include <linux/fault-inject-usercopy.h>
78 * fault_in_iov_iter_readable - fault in iov iterator for reading
80 * @size: maximum length
83 * @size. For each iovec, fault in each page that constitutes the iovec.
88 * Always returns 0 for non-userspace iterators.
90 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) in fault_in_iov_iter_readable() argument
93 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
94 n -= fault_in_readable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_readable()
95 return size - n; in fault_in_iov_iter_readable()
[all …]
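
Callers use fault_in_iov_iter_readable() to prefault user pages before taking locks under which page faults are forbidden; because it returns the number of bytes it could not fault in, a return value equal to size means nothing is accessible. A hedged sketch of the usual pattern (copy_chunk() is hypothetical):

    #include <linux/uio.h>

    static ssize_t copy_chunk(struct iov_iter *i, size_t bytes); /* hypothetical */

    static ssize_t write_loop(struct iov_iter *i, size_t bytes)
    {
        /* returns bytes NOT faulted in, so "== bytes" is total failure */
        if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
            return -EFAULT;

        /* pages are probably resident now; a short copy can still occur
         * if they are reclaimed meanwhile, so real callers loop */
        return copy_chunk(i, bytes);
    }
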
/linux/sound/pci/trident/
trident_memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * Trident 4DWave-NX memory page allocation (TLB area)
19 /* page arguments of these two macros are Trident page (4096 bytes), not like
22 #define __set_tlb_bus(trident,page,addr) \ argument
23 (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
24 #define __tlb_to_addr(trident,page) \ argument
25 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
28 /* page size == SNDRV_TRIDENT_PAGE_SIZE */
29 #define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
31 /* fill TLB entrie(s) corresponding to page with ptr */
[all …]
/linux/kernel/module/
decompress.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
19 struct page **new_pages; in module_extend_max_pages()
21 new_pages = kvmalloc_array(info->max_pages + extent, in module_extend_max_pages()
22 sizeof(info->pages), GFP_KERNEL); in module_extend_max_pages()
24 return -ENOMEM; in module_extend_max_pages()
26 memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages)); in module_extend_max_pages()
27 kvfree(info->pages); in module_extend_max_pages()
28 info->pages = new_pages; in module_extend_max_pages()
29 info->max_pages += extent; in module_extend_max_pages()
34 static struct page *module_get_next_page(struct load_info *info) in module_get_next_page()
[all …]
/linux/mm/kmsan/
shadow.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2017-2022 Google LLC
22 #define shadow_page_for(page) ((page)->kmsan_shadow) argument
24 #define origin_page_for(page) ((page)->kmsan_origin) argument
26 static void *shadow_ptr_for(struct page *page) in shadow_ptr_for() argument
28 return page_address(shadow_page_for(page)); in shadow_ptr_for()
31 static void *origin_ptr_for(struct page *page) in origin_ptr_for() argument
33 return page_address(origin_page_for(page)); in origin_ptr_for()
36 static bool page_has_metadata(struct page *page) in page_has_metadata() argument
38 return shadow_page_for(page) && origin_page_for(page); in page_has_metadata()
[all …]
/linux/drivers/gpu/drm/imagination/
pvr_mmu.c
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
17 #include <linux/dma-mapping.h>
23 #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
26 * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
27 * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
28 * ensures that the selected host page size corresponds to a valid device page
29 * size and sets up values needed by the MMU code below.
56 # error Unsupported device page size PVR_DEVICE_PAGE_SIZE
61 (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
64 PVR_MMU_SYNC_LEVEL_NONE = -1,
[all …]
/linux/tools/testing/selftests/mm/
cow.c
1 // SPDX-License-Identifier: GPL-2.0-only
44 static int sz2ord(size_t size) in sz2ord() argument
46 return __builtin_ctzll(size / pagesize); in sz2ord()
69 ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb); in detect_thp_sizes()
100 static bool range_is_swapped(void *addr, size_t size) in range_is_swapped() argument
102 for (; size; addr += pagesize, size -= pagesize) in range_is_swapped()
134 child_memcmp_fn(char * mem,size_t size,struct comm_pipes * comm_pipes) child_memcmp_fn() argument
152 child_vmsplice_memcmp_fn(char * mem,size_t size,struct comm_pipes * comm_pipes) child_vmsplice_memcmp_fn() argument
201 do_test_cow_in_parent(char * mem,size_t size,bool do_mprotect,child_fn fn) do_test_cow_in_parent() argument
255 test_cow_in_parent(char * mem,size_t size) test_cow_in_parent() argument
260 test_cow_in_parent_mprotect(char * mem,size_t size) test_cow_in_parent_mprotect() argument
265 test_vmsplice_in_child(char * mem,size_t size) test_vmsplice_in_child() argument
270 test_vmsplice_in_child_mprotect(char * mem,size_t size) test_vmsplice_in_child_mprotect() argument
275 do_test_vmsplice_in_parent(char * mem,size_t size,bool before_fork) do_test_vmsplice_in_parent() argument
370 test_vmsplice_before_fork(char * mem,size_t size) test_vmsplice_before_fork() argument
375 test_vmsplice_after_fork(char * mem,size_t size) test_vmsplice_after_fork() argument
381 do_test_iouring(char * mem,size_t size,bool use_fork) do_test_iouring() argument
532 test_iouring_ro(char * mem,size_t size) test_iouring_ro() argument
537 test_iouring_fork(char * mem,size_t size) test_iouring_fork() argument
551 do_test_ro_pin(char * mem,size_t size,enum ro_pin_test test,bool fast) do_test_ro_pin() argument
681 test_ro_pin_on_shared(char * mem,size_t size) test_ro_pin_on_shared() argument
686 test_ro_fast_pin_on_shared(char * mem,size_t size) test_ro_fast_pin_on_shared() argument
691 test_ro_pin_on_ro_previously_shared(char * mem,size_t size) test_ro_pin_on_ro_previously_shared() argument
696 test_ro_fast_pin_on_ro_previously_shared(char * mem,size_t size) test_ro_fast_pin_on_ro_previously_shared() argument
701 test_ro_pin_on_ro_exclusive(char * mem,size_t size) test_ro_pin_on_ro_exclusive() argument
706 test_ro_fast_pin_on_ro_exclusive(char * mem,size_t size) test_ro_fast_pin_on_ro_exclusive() argument
774 size_t size, mmap_size, mremap_size; do_run_with_thp() local
914 run_with_thp(test_fn fn,const char * desc,size_t size) run_with_thp() argument
921 run_with_thp_swap(test_fn fn,const char * desc,size_t size) run_with_thp_swap() argument
928 run_with_pte_mapped_thp(test_fn fn,const char * desc,size_t size) run_with_pte_mapped_thp() argument
935 run_with_pte_mapped_thp_swap(test_fn fn,const char * desc,size_t size) run_with_pte_mapped_thp_swap() argument
942 run_with_single_pte_of_thp(test_fn fn,const char * desc,size_t size) run_with_single_pte_of_thp() argument
949 run_with_single_pte_of_thp_swap(test_fn fn,const char * desc,size_t size) run_with_single_pte_of_thp_swap() argument
956 run_with_partial_mremap_thp(test_fn fn,const char * desc,size_t size) run_with_partial_mremap_thp() argument
963 run_with_partial_shared_thp(test_fn fn,const char * desc,size_t size) run_with_partial_shared_thp() argument
1139 size_t size = thpsizes[i]; run_anon_test_case() local
1192 do_test_anon_thp_collapse(char * mem,size_t size,enum anon_thp_collapse_test test) do_test_anon_thp_collapse() argument
1325 test_anon_thp_collapse_unshared(char * mem,size_t size) test_anon_thp_collapse_unshared() argument
1330 test_anon_thp_collapse_fully_shared(char * mem,size_t size) test_anon_thp_collapse_fully_shared() argument
1335 test_anon_thp_collapse_lower_shared(char * mem,size_t size) test_anon_thp_collapse_lower_shared() argument
1340 test_anon_thp_collapse_upper_shared(char * mem,size_t size) test_anon_thp_collapse_upper_shared() argument
1409 test_cow(char * mem,const char * smem,size_t size) test_cow() argument
1425 test_ro_pin(char * mem,const char * smem,size_t size) test_ro_pin() argument
1430 test_ro_fast_pin(char * mem,const char * smem,size_t size) test_ro_fast_pin() argument
[all …]
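
sz2ord() above converts an allocation size to a buddy order by counting the trailing zero bits of size/pagesize. A quick worked example: a 2 MiB THP on 4 KiB base pages spans 512 = 2^9 pages, so the order is 9 (the selftest reads pagesize from a global; here it is a parameter):

    #include <stddef.h>
    #include <stdio.h>

    static int sz2ord(size_t size, size_t pagesize)
    {
        return __builtin_ctzll(size / pagesize);
    }

    int main(void)
    {
        printf("%d\n", sz2ord(2UL << 20, 4096)); /* 9 */
        return 0;
    }
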
/linux/drivers/nvmem/
rave-sp-eeprom.c
1 // SPDX-License-Identifier: GPL-2.0+
10 #include <linux/mfd/rave-sp.h>
12 #include <linux/nvmem-provider.h>
18 * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
29 * enum rave_sp_eeprom_header_size - EEPROM command header sizes
31 * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K)
32 * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K)
43 * struct rave_sp_eeprom_page - RAVE SP EEPROM page
59 * struct rave_sp_eeprom - RAVE SP EEPROM device
64 * @header_size: Size of EEPROM command header for this device
[all …]
