/linux/drivers/gpu/drm/tegra/uapi.c
  // SPDX-License-Identifier: GPL-2.0-only
  in tegra_drm_mapping_release():
      struct tegra_drm_mapping *mapping = …;
      host1x_bo_unpin(mapping->map);
      host1x_bo_put(mapping->bo);
      kfree(mapping);
  in tegra_drm_mapping_put():
      void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
      kref_put(&mapping->ref, tegra_drm_mapping_release);
  in tegra_drm_channel_context_close():
      struct tegra_drm_mapping *mapping;
      if (context->memory_context)
          host1x_memory_context_put(context->memory_context);
  …

/linux/drivers/gpu/drm/tegra/submit.c
  // SPDX-License-Identifier: GPL-2.0-only
  #include <linux/dma-fence-array.h>
  #include <linux/dma-mapping.h>
  dev_err_ratelimited(context->client->base.dev, \
      … current->comm, ##__VA_ARGS__)
  in gather_bo_get():      kref_get(&bo->ref);
  in gather_bo_release():  dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma, …);
  in gather_bo_put():      kref_put(&bo->ref, gather_bo_release);
  in gather_bo_pin():      return ERR_PTR(-ENOMEM);
                           kref_init(&map->ref);
  …

/linux/include/linux/pagemap.h
  /* SPDX-License-Identifier: GPL-2.0 */
  unsigned long invalidate_mapping_pages(struct address_space *mapping, …);
  in invalidate_remote_inode():
      if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
          invalidate_mapping_pages(inode->i_mapping, 0, -1);
  int invalidate_inode_pages2(struct address_space *mapping);
  int invalidate_inode_pages2_range(struct address_space *mapping, …);
  int filemap_invalidate_pages(struct address_space *mapping, …);
  int filemap_fdatawait_keep_errors(struct address_space *mapping);
  int filemap_fdatawait_range_keep_errors(struct address_space *mapping, …);
  …

/linux/drivers/net/wwan/iosm/iosm_ipc_pcie.h
  /* SPDX-License-Identifier: GPL-2.0-only
   * Copyright (C) 2020-21 Intel Corporation. */
  enum ipc_pcie_sleep_state - enum of the different sleep-state transitions
  struct iosm_pcie - IPC_PCIE struct; @imem: pointer to imem data struct
  struct ipc_skb_cb - struct definition of the socket buffer which is mapped to …;
      @mapping: stores the physical or IOVA-mapped address of the skb's virtual address
      dma_addr_t mapping;
  enum ipc_ul_usr_op - control operation to execute the right action on …
  ipc_pcie_addr_map - maps the kernel's virtual address to either IOVA …
  …

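The @mapping field above stashes a DMA handle in the skb's control block so the buffer can be unmapped later. A generic sketch of that pattern follows; the struct and function names are invented for illustration, and only dma_map_single()/dma_mapping_error() are real kernel APIs:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Illustrative control-block layout stored in skb->cb (name is made up). */
    struct example_skb_cb {
        dma_addr_t mapping;    /* physical or IOVA address of skb->data */
    };

    static int example_map_skb(struct device *dev, struct sk_buff *skb)
    {
        struct example_skb_cb *cb = (struct example_skb_cb *)skb->cb;

        /* Map the skb payload for device access and remember the handle so a
         * completion path can call dma_unmap_single() with the same values. */
        cb->mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, cb->mapping))
            return -ENOMEM;

        return 0;
    }
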
/linux/Documentation/filesystems/iomap/design.rst
  .. SPDX-License-Identifier: GPL-2.0
  … of mapping function calls into the filesystem across a larger amount of data.
  1. Obtain a space mapping via ``->iomap_begin``
  2. For each sub-unit of work...
     1. Revalidate the mapping and go back to (1) above, if necessary.
  4. Release the mapping via ``->iomap_end``, if necessary
  … device pre-shutdown hook from returning before other threads have …
  * **filesystem mapping lock**: This synchronization primitive is …
  …

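The design.rst excerpt above describes iomap's two-call pattern: ``->iomap_begin`` hands back one space mapping that iomap then iterates over, and ``->iomap_end`` lets the filesystem finish or undo whatever was set up. Below is a minimal, hypothetical iomap_ops sketch, assuming the iomap_begin/iomap_end signatures declared in include/linux/iomap.h; a real filesystem would fill the mapping from its own extent metadata rather than the placeholders used here:

    #include <linux/iomap.h>

    static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                                   unsigned int flags, struct iomap *iomap,
                                   struct iomap *srcmap)
    {
        /* Look up (or, for writes, allocate) the extent covering 'pos'. */
        iomap->type   = IOMAP_MAPPED;          /* or IOMAP_HOLE / IOMAP_UNWRITTEN */
        iomap->offset = pos;                   /* file offset the mapping starts at */
        iomap->length = length;                /* may cover more than was asked for */
        iomap->addr   = 0;                     /* disk address in bytes (placeholder) */
        iomap->bdev   = inode->i_sb->s_bdev;
        return 0;
    }

    static int example_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                                 ssize_t written, unsigned int flags,
                                 struct iomap *iomap)
    {
        /* E.g. trim speculative preallocation beyond 'written' bytes. */
        return 0;
    }

    static const struct iomap_ops example_iomap_ops = {
        .iomap_begin = example_iomap_begin,
        .iomap_end   = example_iomap_end,
    };
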
/linux/drivers/net/xen-netback/hash.c
  in xenvif_add_hash():
      memcpy(new->tag, tag, len);
      new->len = len;
      new->val = val;
      spin_lock_irqsave(&vif->hash.cache.lock, flags);
      list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                              lockdep_is_held(&vif->hash.cache.lock)) {
          if (entry->len == len &&
              memcmp(entry->tag, tag, len) == 0)
              …
          if (!oldest || entry->seq < oldest->seq)
              …
      new->seq = atomic_inc_return(&vif->hash.cache.seq);
  …

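The xenvif_add_hash() excerpt mixes a spinlock-protected writer with an RCU-protected list, passing lockdep_is_held() as the extra argument to list_for_each_entry_rcu() so lockdep accepts the walk without rcu_read_lock(). A self-contained sketch of that pattern with invented names (the list/RCU/lockdep APIs used are standard kernel ones; the original code uses the irqsave lock variants, simplified here):

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct example_hash_entry {
        struct list_head link;
        unsigned int len;
        u8 tag[40];
    };

    static LIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_lock);

    static void example_add(const u8 *tag, unsigned int len)
    {
        struct example_hash_entry *new, *entry;

        if (len > sizeof(new->tag))
            return;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
            return;
        memcpy(new->tag, tag, len);
        new->len = len;

        spin_lock(&example_lock);
        /* Writer-side walk: protected by the spinlock, not by rcu_read_lock(),
         * hence the lockdep_is_held() condition. */
        list_for_each_entry_rcu(entry, &example_list, link,
                                lockdep_is_held(&example_lock)) {
            if (entry->len == len && memcmp(entry->tag, tag, len) == 0) {
                /* already present; drop the duplicate */
                spin_unlock(&example_lock);
                kfree(new);
                return;
            }
        }
        list_add_rcu(&new->link, &example_list);
        spin_unlock(&example_lock);
    }
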
/linux/Documentation/admin-guide/device-mapper/dm-zoned.rst
  dm-zoned
  The dm-zoned device mapper target exposes a zoned block device (ZBC and …
  … pattern constraints. In effect, it implements a drive-managed zoned …
  … host-managed zoned block devices and can mitigate the potential device-side performance
  degradation due to excessive random writes on host-aware zoned block devices.
  http://www.t13.org/Documents/UploadedDocuments/docs2015/di537r05-Zoned_Device_ATA_Command_Set_ZAC.p…
  The dm-zoned implementation is simple and minimizes system overhead (CPU …
  … host-managed disk with 256 MB zones, dm-zoned memory usage per disk …
  dm-zoned target devices are formatted and checked using the dmzadm …
  …

/linux/Documentation/trace/ring-buffer-map.rst
  .. SPDX-License-Identifier: GPL-2.0
  Tracefs ring-buffer memory mapping
  The tracefs ring-buffer memory map provides an efficient method to stream data, as no memory
  copy is necessary. The application mapping the ring-buffer then becomes a consumer for that
  ring-buffer, in a similar fashion to trace_pipe.
  Memory mapping setup
  The mapping works with an mmap() of the trace_pipe_raw interface.
  The first system page of the mapping contains ring-buffer statistics and description. It is
  referred to as the meta-page. One of the most important fields of the meta-page is the reader.
  It contains the sub-buffer ID which can …
  …

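Following the description above, here is a minimal userspace sketch that maps the meta-page of one per-CPU trace_pipe_raw file and reads the current reader sub-buffer ID. It assumes the UAPI header <linux/trace_mmap.h> with its struct trace_buffer_meta (reader.id field) and TRACE_MMAP_IOCTL_GET_READER ioctl are available on the running kernel, and that tracefs is mounted at the usual path; consult that header for the authoritative layout:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/trace_mmap.h>

    int main(void)
    {
        /* Assumed tracefs mount point; adjust for your system. */
        const char *path = "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw";
        int fd = open(path, O_RDONLY | O_NONBLOCK);
        if (fd < 0)
            return 1;

        long page = sysconf(_SC_PAGESIZE);
        /* The first system page of the mapping is the meta-page. */
        struct trace_buffer_meta *meta =
            mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
        if (meta == MAP_FAILED)
            return 1;

        /* Ask the kernel to move the reader, then look at the sub-buffer ID
         * it now points to. */
        ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);
        printf("reader sub-buffer id: %u\n", meta->reader.id);

        munmap(meta, page);
        close(fd);
        return 0;
    }
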
/linux/drivers/net/ethernet/qlogic/qede/qede_fp.c
  // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
  /* Copyright (c) 2015-2017 QLogic Corporation
   * Copyright (c) 2019-2020 Marvell International Ltd. */
  in qede_alloc_rx_buffer():
      dma_addr_t mapping;
      struct page *data;
      /* In case lazy-allocation is allowed, postpone allocation until the … */
      if (allow_lazy && likely(rxq->filled_buffers > 12)) {
          rxq->filled_buffers--;
          …
      data = alloc_pages(GFP_ATOMIC, 0);
      if (unlikely(!data))
          …
  …

/linux/arch/arm/mm/flush.c
  // SPDX-License-Identifier: GPL-2.0-only
  /* Copyright (C) 1995-2002 Russell King */
  in flush_pfn_alias():        : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
  in flush_icache_alias():     unsigned long offset = vaddr & (PAGE_SIZE - 1);
  in flush_cache_range():      if (vma->vm_flags & VM_EXEC)
  in flush_cache_pages():      if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
  in __flush_ptrace_access():  /* VIPT non-aliasing D-cache */
  in flush_ptrace_access():    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                               if (vma->vm_flags & VM_EXEC)
  /* Copy user data from/to a page which is mapped into a different … */
  …

/linux/mm/filemap.c
  // SPDX-License-Identifier: GPL-2.0-only
  /* Copyright (C) 1994-1999 Linus Torvalds */
  #include <linux/error-injection.h>
  #include <linux/backing-dev.h>
  /* … finished 'unifying' the page and buffer cache and SMP-threaded the
   * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
   * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
   *
   * (lock ordering)
   *  ->i_mmap_rwsem        (truncate_pagecache)
   *    ->private_lock      (__free_pte->block_dirty_folio)
   *      ->swap_lock       (exclusive_swap_page, others)
   */
  …

/linux/fs/nfs/write.c
  // SPDX-License-Identifier: GPL-2.0-only
  /* Write file data over NFS. */
  #include <linux/backing-dev.h>
  void (*complete)(void *data);
  void *data;
  in nfs_commitdata_alloc():    INIT_LIST_HEAD(&p->pages);
  in nfs_writehdr_alloc():      p->rw_mode = FMODE_WRITE;
  in nfs_io_completion_init():  void (*complete)(void *), void *data)
                                ioc->complete = complete;
                                ioc->data = data;
  …

/linux/fs/ecryptfs/mmap.c
  // SPDX-License-Identifier: GPL-2.0-or-later
  /* … decryption of the file data as it passes between the lower …
   * Copyright (C) 1997-2003 Erez Zadok
   * Copyright (C) 2001-2003 Stony Brook University
   * Copyright (C) 2004-2007 International Business Machines Corp. */
  #include <linux/page-flags.h>
  /* This is where we encrypt the data and pass the encrypted data to
   * the lower filesystem. In OpenPGP-compatible mode, we operate on … */
  static int ecryptfs_writepages(struct address_space *mapping, …)
      while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
  …

/linux/net/core/ieee8021q_helpers.c
  // SPDX-License-Identifier: GPL-2.0
  /* … IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic class mapping" and
   * Table I-1 "Traffic type to traffic class mapping". */
  ieee8021q_tt_to_tc - Map IEEE 802.1Q Traffic Type to Traffic Class
      … on the number of queues configured on the NIC. The mapping is based on the
      example provided by IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic
      class mapping" and Table I-1 "Traffic type to traffic class mapping".
      return -EINVAL;
      return -EINVAL;
  ietf_dscp_to_ieee8021q_tt - Map IETF DSCP to IEEE 802.1Q Traffic Type …
  …

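To make the mapping concrete, here is a self-contained illustration (not the kernel's table): the eight 802.1Q traffic types mapped onto traffic classes for a NIC exposing all eight classes. The enum names and the background-below-best-effort ordering are one reading of IEEE 802.1Q-2022 Table I-1; net/core/ieee8021q_helpers.c holds the authoritative tables, including the reduced mappings used when fewer queues are configured:

    #include <stdio.h>

    /* Illustrative traffic-type names: background, best effort, excellent effort,
     * critical applications, video, voice, internetwork control, network control. */
    enum traffic_type { TT_BK, TT_BE, TT_EE, TT_CA, TT_VI, TT_VO, TT_IC, TT_NC };

    static int tt_to_tc(enum traffic_type tt, unsigned int num_tcs)
    {
        if (num_tcs != 8)
            return -1;   /* reduced-queue mappings omitted in this sketch */

        switch (tt) {
        case TT_BK: return 0;   /* background sits below best effort */
        case TT_BE: return 1;
        case TT_EE: return 2;
        case TT_CA: return 3;
        case TT_VI: return 4;
        case TT_VO: return 5;
        case TT_IC: return 6;
        case TT_NC: return 7;
        }
        return -1;               /* unknown traffic type */
    }

    int main(void)
    {
        printf("BK -> TC %d, VO -> TC %d\n",
               tt_to_tc(TT_BK, 8), tt_to_tc(TT_VO, 8));
        return 0;
    }
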
/linux/net/netfilter/nft_set_pipapo.h
  // SPDX-License-Identifier: GPL-2.0-only
  /* Count of concatenated fields depends on count of 32-bit nftables registers */
  #define NFT_PIPAPO_GROUPS_PER_BYTE(f) (BITS_PER_BYTE / (f)->bb)
  /* … crossing page boundaries on most architectures (x86-64 and MIPS huge pages, …
   * … keeps performance nice in case kvmalloc() gives us non-contiguous areas. */
  #define NFT_PIPAPO_LT_SIZE_LOW NFT_PIPAPO_LT_SIZE_THRESHOLD - \ …
  … (round_up((f)->group …
  pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f, unsigned long *dst, const u8 *data)
  pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f, unsigned long *dst, const u8 *data)
  …

/linux/fs/xfs/xfs_aops.c
  // SPDX-License-Identifier: GPL-2.0
  /* Copyright (c) 2000-2005 Silicon Graphics, Inc.
   * Copyright (c) 2016-2025 Christoph Hellwig. */
  /* Fast and loose check if this write could update the on-disk inode size. */
  in xfs_ioend_is_append():
      return ioend->io_offset + ioend->io_size >
             XFS_I(ioend->io_inode)->i_disk_size;
  /* Update on-disk file size now that data has been written to disk. */
  in xfs_setfilesize():
      struct xfs_mount *mp = ip->i_mount;
      error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
      ip->i_disk_size = isize;
  …

/linux/fs/xfs/xfs_reflink.c
  // SPDX-License-Identifier: GPL-2.0+
  /* … through the use of a copy-on-write mechanism. At a high level, that …
   * … block, write the data to the new block, and if that succeeds we map the …
   * … of disk blocks to dirty-but-not-yet-mapped file blocks as long as …
   * … create a delalloc mapping, which is a regular in-core extent, but without …
   * … a flag that this is a delalloc mapping, and a worst-case estimate of how
   * many blocks might be required to put the mapping into the BMBT.) delalloc …
   *
   * D: --RRRRRRSSSRRRRRRRR--- (data fork)
   * C: ------DDDDDDD--------- (CoW fork)
   *
   * … allocating blocks and replacing the delalloc mapping with real ones. */
  …

/linux/drivers/spi/spi-mem.c
  // SPDX-License-Identifier: GPL-2.0+
  #include <linux/spi/spi-mem.h>
  /* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a …
   * @sgt: a pointer to a non-initialized sg_table that will be filled by this …
   * Some controllers might want to do DMA on the data buffer embedded in @op.
   * This helper prepares everything for you and provides a ready-to-use …
   * op->data.buf.{in,out} is DMA-able before calling this function. */
  in spi_controller_dma_map_mem_op_data():
      if (!op->data.nbytes)
          return -EINVAL;
      if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
  …

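A hypothetical controller-side sketch of how an exec_op() handler might use the helper above to obtain a DMA-ready sg_table for op->data. The driver name and the omitted hardware programming are assumptions; the map/unmap helpers are the ones exported by drivers/spi/spi-mem.c:

    #include <linux/spi/spi-mem.h>

    static int example_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
    {
        struct spi_controller *ctlr = mem->spi->controller;
        struct sg_table sgt;
        int ret;

        /* Fills 'sgt' with DMA addresses for op->data.buf.{in,out}; the caller
         * must make sure that buffer is DMA-able beforehand. */
        ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
        if (ret)
            return ret;

        /* … program the controller with sgt.sgl / sgt.nents and run the
         * operation (hardware-specific, omitted here) … */

        spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
        return 0;
    }
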
/linux/arch/mips/include/asm/vdso.h
  /* SPDX-License-Identifier: GPL-2.0-or-later */
  /* struct mips_vdso_image - Details of a VDSO image.
   * @data: Pointer to VDSO image data (page-aligned).
   * @size: Size of the VDSO image data (page-aligned).
   * @mapping: Special mapping structure.
   *
   * This structure contains details of a VDSO image, including the image data …
   * part of the VDSO build process, aside from the mapping page array, which is … */
      void *data;
      struct vm_special_mapping mapping;
  /* The following structures are auto-generated as part of the build for each … */

/linux/include/linux/pinctrl/machine.h
  /* SPDX-License-Identifier: GPL-2.0-only */
  /* Copyright (C) 2011 ST-Ericsson SA
   * Written on behalf of Linaro for ST-Ericsson */
  #include <linux/pinctrl/pinctrl-state.h>
  /* struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP */
  /* struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_* */
  /* struct pinctrl_map - boards/machines shall provide this map for devices
   * @dev_name: the name of the device using this specific mapping, the name …
   * @type: the type of mapping table entry
   * @ctrl_dev_name: the name of the device controlling this specific mapping, … */
  …

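A board-code sketch of the map described above (device and pin controller names are made up): one MUX_GROUP entry handing a group and function to a device, registered with pinctrl_register_mappings(). The field names follow the kernel-doc excerpted above:

    #include <linux/pinctrl/machine.h>

    static const struct pinctrl_map board_pinctrl_map[] __initconst = {
        {
            .dev_name = "foo-i2c.0",           /* device using the mapping (invented) */
            .name = PINCTRL_STATE_DEFAULT,     /* state this entry belongs to */
            .type = PIN_MAP_TYPE_MUX_GROUP,
            .ctrl_dev_name = "pinctrl-foo",    /* device controlling the mapping (invented) */
            .data.mux = {
                .group = "i2c0_grp",
                .function = "i2c0",
            },
        },
    };

    static int __init board_pinctrl_init(void)
    {
        return pinctrl_register_mappings(board_pinctrl_map,
                                         ARRAY_SIZE(board_pinctrl_map));
    }
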
/linux/fs/netfs/buffered_write.c
  // SPDX-License-Identifier: GPL-2.0-only
  /* Network filesystem high-level buffered write support. */
  in netfs_grab_folio_for_write():
      static struct folio *netfs_grab_folio_for_write(struct address_space *mapping, …)
      if (mapping_large_folio_support(mapping))
          …
      return __filemap_get_folio(mapping, index, fgp_flags,
                                 mapping_gfp_mask(mapping));
  /* … data written into the pagecache until we can find out from the server what … */
  in netfs_update_i_size():
      if (ctx->ops->update_i_size) {
          ctx->ops->update_i_size(inode, end);
      spin_lock(&inode->i_lock);
  …

/linux/drivers/iommu/ipmmu-vmsa.c
  // SPDX-License-Identifier: GPL-2.0
  /* IOMMU API for Renesas VMSA-compatible IPMMU
   * Copyright (C) 2014-2020 Renesas Electronics Corporation */
  #include <linux/dma-mapping.h>
  #include <linux/io-pgtable.h>
  #include <asm/dma-iommu.h>
  #define arm_iommu_attach_device(...) -ENODEV
  #define IPMMU_CTX_INVALID -1
  struct dma_iommu_mapping *mapping;
  …

/linux/drivers/platform/x86/intel/tpmi_power_domains.c
  // SPDX-License-Identifier: GPL-2.0-only
  /* Mapping of TPMI power domains CPU mapping */
  #include <asm/intel-family.h>
  /* struct tpmi_cpu_info - Mapping information for a CPU
   * @hnode: Used to add mapping information to hash list
   * Structure to store mapping information for a Linux CPU */
  in tpmi_domain_is_valid():
      return info->pkg_id < topology_max_packages() &&
             info->punit_domain_id < MAX_POWER_DOMAINS;
  in tpmi_get_linux_cpu_number():
      int ret = -EINVAL;
      if (info->punit_domain_id == domain_id && info->pkg_id == package_id) {
  …

/linux/Documentation/devicetree/bindings/display/lvds.yaml
  # SPDX-License-Identifier: GPL-2.0
  ---
  $schema: http://devicetree.org/meta-schemas/core.yaml#
  - $ref: lvds-data-mapping.yaml#
  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
  - Thierry Reding <thierry.reding@gmail.com>
  This binding extends the data mapping defined in lvds-data-mapping.yaml …
  … to accommodate for even more specialized data formats, since a variety of
  data formats and layouts is used to drive LVDS displays.
  data-mirror:
  …

/linux/fs/f2fs/data.c
  // SPDX-License-Identifier: GPL-2.0
  /* fs/f2fs/data.c */
  #include <linux/blk-crypto.h>
  in f2fs_is_cp_guaranteed():
      struct address_space *mapping = folio->mapping;
      inode = mapping->host;
      if (inode->i_ino == F2FS_META_INO(sbi) ||
          inode->i_ino == F2FS_NODE_INO(sbi) ||
          S_ISDIR(inode->i_mode))
          …
      if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
          …
  in __read_io_type():
      struct address_space *mapping = folio->mapping;
  …