/linux/drivers/gpu/drm/tegra/

  uapi.c: GPL-2.0-only. tegra_drm_mapping_release() unpins the host1x mapping (host1x_bo_unpin()), drops the BO reference (host1x_bo_put()) and frees the mapping; tegra_drm_mapping_put() drops the reference with kref_put(&mapping->ref, tegra_drm_mapping_release); tegra_drm_channel_context_close() puts the memory context with host1x_memory_context_put() when one is attached.

  submit.c: GPL-2.0-only. Job-submission helpers built on <linux/dma-fence-array.h> and <linux/dma-mapping.h>. A rate-limited error macro reports failures via dev_err_ratelimited() with current->comm. The gather BO is reference counted: gather_bo_get() takes the kref, gather_bo_put() drops it with gather_bo_release as the release callback, gather_bo_release() frees the gather data (gather_data_words * 4 bytes) with dma_free_attrs(), and gather_bo_pin() allocates a mapping, returning ERR_PTR(-ENOMEM) on failure, and initializes its kref with kref_init().

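  The uapi.c hunk is the usual kref lifetime pattern: a release callback that tears the object down, and a put helper that wraps kref_put(). A minimal sketch of that pattern, using made-up names rather than the Tegra ones:

    #include <linux/container_of.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_mapping {
            struct kref ref;
            /* resources pinned on behalf of this mapping would live here */
    };

    static void my_mapping_release(struct kref *ref)
    {
            struct my_mapping *m = container_of(ref, struct my_mapping, ref);

            /* unpin/put the underlying objects here, then free the wrapper */
            kfree(m);
    }

    static void my_mapping_put(struct my_mapping *m)
    {
            /* calls my_mapping_release() once the last reference is gone */
            kref_put(&m->ref, my_mapping_release);
    }
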
/linux/Documentation/filesystems/iomap/

  design.rst: GPL-2.0. Explains that iomap amortizes the cost of mapping function calls into the filesystem across a larger amount of data. The basic sequence is: (1) obtain a space mapping via ->iomap_begin; (2) for each sub-unit of work, revalidate the mapping and go back to step 1 if necessary; finally (4) release the mapping via ->iomap_end, if necessary. The document also covers synchronization primitives, including the filesystem mapping lock and a mechanism that keeps the device pre-shutdown hook from returning before other threads have finished.

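  The begin/work/end sequence design.rst describes can be sketched directly against the iomap_ops callbacks. This is only an illustration of the calling pattern, not the real iterator in fs/iomap/; my_apply() and my_do_work() are hypothetical names:

    #include <linux/iomap.h>

    /* hypothetical per-range worker, standing in for real read/write logic */
    static int my_do_work(struct inode *inode, loff_t pos, loff_t len,
                          const struct iomap *iomap)
    {
            return 0;
    }

    static int my_apply(struct inode *inode, loff_t pos, loff_t len,
                        unsigned int flags, const struct iomap_ops *ops)
    {
            struct iomap iomap = { };
            struct iomap srcmap = { };
            int ret;

            /* 1. obtain a space mapping for [pos, pos + len) */
            ret = ops->iomap_begin(inode, pos, len, flags, &iomap, &srcmap);
            if (ret)
                    return ret;

            /* 2. do the per-sub-unit work against the mapping; a real caller
             * revalidates the mapping and goes back to step 1 when needed */
            ret = my_do_work(inode, pos, len, &iomap);

            /* 4. release the mapping, if the filesystem provides ->iomap_end */
            if (ops->iomap_end)
                    ops->iomap_end(inode, pos, len, ret > 0 ? ret : 0,
                                   flags, &iomap);

            return ret;
    }
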
/linux/drivers/net/wwan/iosm/

  iosm_ipc_pcie.h: GPL-2.0-only, Copyright (C) 2020-21 Intel Corporation. Declares enum ipc_pcie_sleep_state for the different sleep-state transitions, struct iosm_pcie (including an @imem pointer to the imem data struct), and struct ipc_skb_cb, the socket-buffer control block whose @mapping member (dma_addr_t) stores the physical or IOVA-mapped address of the skb's virtual address. Also declares enum ipc_ul_usr_op for uplink control operations and ipc_pcie_addr_map(), which maps the kernel's virtual address to an IOVA.

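  The @mapping member above is the usual place a driver caches the DMA/IOVA address of an skb's data. A hedged sketch of that idea using the generic DMA API; the control-buffer layout and helper name here are illustrative, not the iosm ones:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct my_skb_cb {
            dma_addr_t mapping;     /* DMA/IOVA address of skb->data */
            size_t len;
    };

    static int my_map_skb(struct device *dev, struct sk_buff *skb)
    {
            struct my_skb_cb *cb = (struct my_skb_cb *)skb->cb;

            /* map the payload for device reads and remember the address */
            cb->mapping = dma_map_single(dev, skb->data, skb->len,
                                         DMA_TO_DEVICE);
            if (dma_mapping_error(dev, cb->mapping))
                    return -ENOMEM;

            cb->len = skb->len;
            return 0;
    }
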
/linux/drivers/net/xen-netback/

  hash.c: xenvif_add_hash() copies the tag, length and value into a new cache entry, then, under vif->hash.cache.lock (taken with spin_lock_irqsave()), walks the RCU-protected cache list with list_for_each_entry_rcu() (passing lockdep_is_held() on that lock): an existing entry with the same length and matching tag is treated as a duplicate, and the entry with the smallest sequence number is remembered as the oldest candidate for eviction. The new entry's sequence number is taken from atomic_inc_return(&vif->hash.cache.seq).

/linux/Documentation/admin-guide/device-mapper/

  dm-zoned.rst: The dm-zoned device-mapper target exposes a zoned block device (ZBC and ZAC) without its write-pattern constraints: in effect it implements a drive-managed zoned model on top of host-managed zoned block devices, and it can mitigate the potential device-side performance degradation caused by excessive random writes on host-aware zoned block devices. (The ZAC specification is referenced via a t13.org document link.) The implementation is simple and minimizes system overhead (CPU and memory); the document quotes dm-zoned memory usage per disk for a host-managed disk with 256 MB zones. dm-zoned target devices are formatted and checked using the dmzadm utility.

  vdo-design.rst: GPL-2.0-only. "Design of dm-vdo": the dm-vdo (virtual data optimizer) target provides inline deduplication, compression, zero-block elimination, and thin provisioning. It has been used in production environments and was made open source in 2017. The design treats deduplication as a two-part problem, recognizing duplicate data and then avoiding storing multiple copies of it, so dm-vdo has two main components: an index for recognizing duplicate data and a data store with a reference-counted block map. Usage is covered in vdo.rst in the same directory.

/linux/drivers/gpu/drm/

  drm_of.c: GPL-2.0-only. OF helpers for DRM, using <linux/media-bus-format.h>: drm_of_crtc_port_mask() finds the mask of a registered CRTC by its port OF node (matching tmp->port == port), drm_of_find_possible_crtcs() finds the possible CRTCs for an encoder port, drm_of_component_match_add() adds a component-helper OF-node match rule, and drm_of_component_probe() is a generic probe function for a component-based master that returns -EINVAL when dev->of_node is absent and otherwise parses the "ports" phandles from it with of_parse_phandle().

/linux/Documentation/trace/

  ring-buffer-map.rst: GPL-2.0. "Tracefs ring-buffer memory mapping": memory-mapping the tracefs ring buffer is an efficient way to stream trace data because no memory copy is necessary; the application that maps the ring buffer becomes a consumer for it, much like trace_pipe. The mapping is set up with mmap() on the trace_pipe_raw interface. The first system page of the mapping contains ring-buffer statistics and a description and is referred to as the meta-page; one of its most important fields is the reader, which contains the sub-buffer ID that the consumer can safely read.

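  A small user-space sketch of the setup described above: open a per-CPU trace_pipe_raw file and mmap() its first page to reach the meta-page. It assumes the UAPI header <linux/trace_mmap.h> and its struct trace_buffer_meta with a reader.id field; check the running kernel's headers for the exact layout and tracefs path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/trace_mmap.h>

    int main(void)
    {
            int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
                          O_RDONLY | O_NONBLOCK);
            if (fd < 0)
                    return 1;

            /* the first system page of the mapping is the meta-page */
            struct trace_buffer_meta *meta =
                    mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
            if (meta == MAP_FAILED)
                    return 1;

            printf("readable sub-buffer id: %u\n", meta->reader.id);

            munmap(meta, getpagesize());
            close(fd);
            return 0;
    }
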
/linux/drivers/net/ethernet/qlogic/qede/

  qede_fp.c: GPL-2.0-only OR BSD-3-Clause, Copyright (c) 2015-2017 QLogic Corporation, (c) 2019-2020 Marvell International Ltd. qede_alloc_rx_buffer() refills an RX queue: when lazy allocation is allowed and the queue still has more than 12 filled buffers, it just decrements rxq->filled_buffers and postpones the allocation; otherwise it allocates a page with alloc_pages(GFP_ATOMIC, 0), bails out if that fails, and DMA-maps the page (dma_addr_t mapping).

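  The allocation path pairs alloc_pages() with a streaming DMA mapping. A hedged sketch of that generic shape, not the qede code itself:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static int my_alloc_rx_page(struct device *dev, struct page **pagep,
                                dma_addr_t *mappingp)
    {
            struct page *page = alloc_pages(GFP_ATOMIC, 0);
            dma_addr_t mapping;

            if (unlikely(!page))
                    return -ENOMEM;

            /* map the whole page for device writes (RX) */
            mapping = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
            if (unlikely(dma_mapping_error(dev, mapping))) {
                    __free_pages(page, 0);
                    return -ENOMEM;
            }

            *pagep = page;
            *mappingp = mapping;
            return 0;
    }
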
/linux/arch/arm/mm/

  flush.c: GPL-2.0-only, Copyright (C) 1995-2002 Russell King. ARM cache-maintenance helpers: flush_pfn_alias() flushes an aliased range covering one page (to .. to + PAGE_SIZE - 1), flush_icache_alias() works on an offset within a page (vaddr & (PAGE_SIZE - 1)), flush_cache_range() and flush_cache_pages() check VM_EXEC (and VIVT ASID-tagged I-caches), and flush_ptrace_access()/__flush_ptrace_access() handle the VIPT non-aliasing D-cache case, checking mm_cpumask() and VM_EXEC, when copying user data from or to a page that is mapped at a different virtual address.

/linux/mm/

  filemap.c: GPL-2.0-only, Copyright (C) 1994-1999 Linus Torvalds. The page-cache core, using <linux/error-injection.h> and <linux/backing-dev.h>. Historical comments record the unification of the page and buffer caches and the SMP-threading of the page cache (21.05.1999, Ingo Molnar) and the SMP-threaded pagemap-LRU work (1999, Andrea Arcangeli). The file documents the page-cache lock ordering, for example ->i_mmap_rwsem (truncate_pagecache), ->private_lock (__free_pte -> block_dirty_folio) and ->swap_lock (exclusive_swap_page, others).

/linux/Documentation/core-api/

  dma-api-howto.rst: "Dynamic DMA mapping Guide": explains the DMA API with example pseudo-code; the concise API description lives in Documentation/core-api/dma-api.rst. It notes that even if a platform supports 64-bit addresses for main memory and PCI BARs, it may use an IOMMU so that devices only need to use 32-bit DMA addresses, and it includes an ASCII diagram showing how a CPU address is translated through the DMA mapping and by the host bridge into the address a device uses.

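  A minimal streaming-mapping sketch in the spirit of the guide, using the generic DMA API (dma_set_mask_and_coherent(), dma_map_single(), dma_unmap_single()); the device and buffer are assumed to be supplied by the caller, and the 32-bit mask is only an example:

    #include <linux/dma-mapping.h>

    static int my_dma_example(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t dma;

            /* tell the DMA API which addresses the device can reach */
            if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    return -EIO;

            /* map the buffer for device reads */
            dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;

            /* ... hand 'dma' to the device and wait for completion ... */

            dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
            return 0;
    }
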
/linux/fs/ecryptfs/

  mmap.c: GPL-2.0-or-later, Copyright (C) 1997-2003 Erez Zadok, (C) 2001-2003 Stony Brook University, (C) 2004-2007 International Business Machines Corp. Handles encryption and decryption of file data as it passes between eCryptfs and the lower filesystem, with a note on behaviour in OpenPGP-compatible mode. ecryptfs_writepages() iterates dirty folios with writeback_iter(mapping, wbc, folio, &error), encrypting the data and passing the encrypted data to the lower filesystem.

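  The loop above is the writeback_iter() pattern. A hedged sketch of a minimal ->writepages built on it; my_write_folio() is a hypothetical per-folio writeout helper, not an eCryptfs function, and the unlock convention should be checked against the iterator's documentation:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* hypothetical per-folio writeout: encrypt/write the contents, then unlock */
    static int my_write_folio(struct folio *folio)
    {
            folio_unlock(folio);
            return 0;
    }

    static int my_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
    {
            struct folio *folio = NULL;
            int error = 0;

            /* writeback_iter() hands back each dirty folio in turn */
            while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                    error = my_write_folio(folio);

            return error;
    }
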
/linux/fs/xfs/

  xfs_aops.c: GPL-2.0, Copyright (c) 2000-2005 Silicon Graphics, Inc., (c) 2016-2018 Christoph Hellwig. xfs_ioend_is_append() is a fast-and-loose check of whether a write could update the on-disk inode size (ioend->io_offset + ioend->io_size > XFS_I(ioend->io_inode)->i_disk_size). xfs_setfilesize() updates the on-disk file size once data has been written to disk: it allocates a transaction with xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp) and then sets ip->i_disk_size = isize.

/linux/drivers/net/ethernet/broadcom/bnxt/

  bnxt_xdp.c: Broadcom NetXtreme-C/E network driver, Copyright (c) 2016-2017 Broadcom Limited. bnxt_xmit_bd() builds a TX buffer descriptor for XDP transmit from a DMA mapping and length: it records the fragment count from sinfo->nr_frags, picks the producer slot from txr->tx_prod, stores the head page of xdp->data in the tx_buf (virt_to_head_page()), and fills the descriptor's tx_bd_len_flags_type with cpu_to_le32(flags).

/linux/net/core/

  ieee8021q_helpers.c: GPL-2.0. Implements the traffic-type to traffic-class mapping given by IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic class mapping" and Table I-1 "Traffic type to traffic class mapping". ieee8021q_tt_to_tc() maps an IEEE 802.1Q traffic type to a traffic class based on the number of queues configured on the NIC, returning -EINVAL for invalid input; a compile-time check compares the 8-queue table (ieee8021q_8queue_tt_tc_map) against IEEE8021Q_TT_MAX - 1.

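  A hedged usage sketch of the helper above. The header path <net/ieee8021q.h>, the IEEE8021Q_TT_VI enumerator and the (traffic type, queue count) parameter order are assumptions inferred from the helper's kernel-doc, so verify them against your tree:

    #include <net/ieee8021q.h>

    static int my_pick_tc(unsigned int num_queues)
    {
            /* map the Video traffic type to a traffic class for this NIC */
            int tc = ieee8021q_tt_to_tc(IEEE8021Q_TT_VI, num_queues);

            if (tc < 0)
                    return tc;      /* e.g. -EINVAL for an unknown traffic type */

            return tc;              /* traffic class usable for queue selection */
    }
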
/linux/include/linux/

  iomap.h: GPL-2.0. Defines the iomap mapping types and flags, for example IOMAP_INLINE (4, data inline in the inode), a flag for zeroing areas that no data is copied to, a flag for unwritten data that requires fdatasync to commit it to persistent storage, a flag telling the caller not to use buffer heads for the mapping, a flag marking a mapping that describes metadata rather than a file data extent, and a flag for a mapping that must never be merged with the mapping before it. IOMAP_NULL_ADDR (-1ULL) marks an invalid address. struct iomap carries, among other fields, addr (disk offset of the mapping, in bytes) and offset (file offset of the mapping, in bytes).

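  A sketch of how a filesystem's ->iomap_begin fills the fields named above, pretending the file is laid out contiguously from disk offset zero; my_iomap_begin()/my_iomap_ops are illustrative names, not a real filesystem's:

    #include <linux/iomap.h>

    static int my_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                              unsigned int flags, struct iomap *iomap,
                              struct iomap *srcmap)
    {
            iomap->addr = pos;              /* disk offset of mapping, bytes */
            iomap->offset = pos;            /* file offset of mapping, bytes */
            iomap->length = length;         /* length of mapping, bytes */
            iomap->type = IOMAP_MAPPED;     /* blocks allocated and written */
            iomap->flags = 0;
            iomap->bdev = inode->i_sb->s_bdev;
            return 0;
    }

    static const struct iomap_ops my_iomap_ops = {
            .iomap_begin = my_iomap_begin,
    };
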
/linux/net/netfilter/

  nft_set_pipapo.h: GPL-2.0-only. Header for the pipapo set backend. The count of concatenated fields depends on the count of 32-bit nftables registers, and NFT_PIPAPO_GROUPS_PER_BYTE(f) is BITS_PER_BYTE / (f)->bb. Lookup-table sizing keeps buckets from crossing page boundaries on most architectures (x86-64 and MIPS huge pages) so performance stays good even when kvmalloc() returns non-contiguous areas; NFT_PIPAPO_LT_SIZE_LOW is derived from NFT_PIPAPO_LT_SIZE_THRESHOLD, and the padded group size rounds (f)->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f) up to a multiple of sizeof(u32). Each n-bit range maps to up to n * 2 rules, and the remaining mapping-table buckets are used for rule indices.

/linux/include/linux/pinctrl/

  machine.h: GPL-2.0-only, Copyright (C) 2011 ST-Ericsson SA, written on behalf of Linaro for ST-Ericsson. Defines the board/machine pin-control mapping tables (including <linux/pinctrl/pinctrl-state.h>): struct pinctrl_map_mux is the mapping-table content for MAP_TYPE_MUX_GROUP entries, struct pinctrl_map_configs is the content for MAP_TYPE_CONFIGS_* entries, and struct pinctrl_map is the map that boards/machines provide for their devices, with @dev_name naming the device using the mapping, @type giving the type of mapping-table entry, and @ctrl_dev_name naming the device controlling the mapping.

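  A hedged board-file sketch with one mapping-table entry that muxes a group for a device's default state; the device, controller, group and function names are made up, and the table is handed to the core with pinctrl_register_mappings():

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/pinctrl/machine.h>

    static const struct pinctrl_map my_board_map[] = {
            {
                    .dev_name = "foo-i2c.0",        /* device using the mapping */
                    .name = PINCTRL_STATE_DEFAULT,  /* the "default" state */
                    .type = PIN_MAP_TYPE_MUX_GROUP,
                    .ctrl_dev_name = "pinctrl-foo", /* controlling pin controller */
                    .data.mux = {
                            .group = "i2c0_grp",
                            .function = "i2c0",
                    },
            },
    };

    static int __init my_board_pinmux_init(void)
    {
            return pinctrl_register_mappings(my_board_map,
                                             ARRAY_SIZE(my_board_map));
    }
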
/linux/fs/netfs/

  buffered_write.c: GPL-2.0-only. Network-filesystem high-level buffered write support. netfs_grab_folio_for_write() fetches a folio for writing via __filemap_get_folio() with mapping_gfp_mask(), allowing large folios when mapping_large_folio_support() says the mapping supports them. A comment notes that data written into the pagecache may have to be held until the server's view of the file is known. netfs_update_i_size() propagates a new file size: it calls the filesystem's ctx->ops->update_i_size() hook when one is provided and updates the fscache cookie with fscache_update_cookie(ctx->cache, NULL, &pos).

/linux/arch/mips/include/asm/

  vdso.h: GPL-2.0-or-later. struct mips_vdso_image holds the details of a VDSO image: @data points to the page-aligned image data, @size is its page-aligned size, and @mapping is the struct vm_special_mapping used to map it. The comment notes that the structure is generated as part of the VDSO build process, aside from the mapping's page array, and that further structures are auto-generated as part of the build for each image.

/linux/drivers/platform/x86/intel/

  tpmi_power_domains.c: GPL-2.0-only. Mapping of TPMI power domains to CPUs, using <asm/intel-family.h>. struct tpmi_cpu_info stores the mapping information for a Linux CPU and is added to a hash list via its @hnode member. tpmi_domain_is_valid() checks that info->pkg_id is below topology_max_packages() and info->punit_domain_id is below MAX_POWER_DOMAINS; tpmi_get_linux_cpu_number() looks up the Linux CPU whose punit domain ID and package ID match, returning -EINVAL when no match is found.

/linux/drivers/iommu/

  ipmmu-vmsa.c: GPL-2.0, Copyright (C) 2014-2020 Renesas Electronics Corporation. IOMMU API driver for the Renesas VMSA-compatible IPMMU, built on <linux/dma-mapping.h> and <linux/io-pgtable.h>. On configurations with the legacy ARM DMA-IOMMU glue it includes <asm/dma-iommu.h>; otherwise arm_iommu_attach_device() is stubbed to return -ENODEV. IPMMU_CTX_INVALID is -1, and the per-device data keeps a struct dma_iommu_mapping *mapping member.

/linux/drivers/gpu/drm/i915/gem/

  i915_gem_shmem.c: MIT, Copyright © 2014-2016 Intel Corporation. shmem_sg_free_table() releases a scatter-gather table backed by shmem pages and clears the mapping's unevictable flag (mapping_clear_unevictable()); shmem_sg_alloc_table() builds one, returning -E2BIG for oversized objects and -ENOMEM when the size exceeds the memory region (resource_size(&mr->region)) or the table cannot be allocated, and marks the mapping unevictable with mapping_set_unevictable().