Home
last modified time | relevance | path

Searched full:unmapped (Results 1 – 25 of 356) sorted by relevance

12345678910>>...15

/linux/include/trace/events/
H A Dhuge_memory.h58 int referenced, int none_or_zero, int status, int unmapped),
60 TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped),
68 __field(int, unmapped)
77 __entry->unmapped = unmapped;
80 TP_printk("mm=%p, scan_pfn=0x%lx, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
86 __entry->unmapped)
/linux/drivers/scsi/lpfc/
H A Dlpfc_disc.h228 * nodes transition from the unmapped to the mapped list.
247 * and put on the unmapped list. For ADISC processing, the node is taken off
248 * the ADISC list and placed on either the mapped or unmapped list (depending
249 * on its previous state). Once on the unmapped list, a PRLI is issued and the
252 * node, the node is taken off the unmapped list. The binding list is checked
254 * assignment is unsuccessful, the node is left on the unmapped list. If
259 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
264 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
281 * unmapped lists.
/linux/mm/damon/
H A Dvaddr.c108 * Find three regions separated by two biggest unmapped regions
114 * separated by the two biggest unmapped regions in the space. Please refer to
203 * is actually mapped to the memory and accessed, monitoring the unmapped
208 * with the noise by simply identifying the unmapped areas as a region that
210 * unmapped areas inside will make the adaptive mechanism quite complex. That
211 * said, too huge unmapped areas inside the monitoring target should be removed
216 * between the three regions are the two biggest unmapped areas in the given
218 * end of the mappings and the two biggest unmapped areas of the address space.
227 * region and the stack will be two biggest unmapped regions. Because these
229 * two biggest unmapped regions will be sufficient to make a trade-off.
[all …]
/linux/drivers/iommu/amd/
H A Dio_pgtable_v2.c292 unsigned long unmapped = 0; in iommu_v2_unmap_pages() local
299 while (unmapped < size) { in iommu_v2_unmap_pages()
302 return unmapped; in iommu_v2_unmap_pages()
307 unmapped += unmap_size; in iommu_v2_unmap_pages()
310 return unmapped; in iommu_v2_unmap_pages()
/linux/include/drm/
H A Ddrm_gpusvm.h112 * @unmapped: Flag indicating if the pages has been unmapped
113 * @partial_unmap: Flag indicating if the pages has been partially unmapped
124 u16 unmapped : 1; member
142 * @flags.unmapped: Flag indicating if the range has been unmapped
143 * @flags.partial_unmap: Flag indicating if the range has been partially unmapped
/linux/mm/
H A Dmigrate_device.c67 unsigned long addr = start, unmapped = 0; in migrate_vma_collect_pmd() local
275 unmapped++; in migrate_vma_collect_pmd()
287 if (unmapped) in migrate_vma_collect_pmd()
384 unsigned long unmapped = 0; in migrate_device_unmap() local
394 unmapped++; in migrate_device_unmap()
432 unmapped++; in migrate_device_unmap()
452 return unmapped; in migrate_device_unmap()
482 * and unmapped, check whether each page is pinned or not. Pages that aren't
569 * At this point pages are locked and unmapped, and thus they have in migrate_vma_setup()
731 * called if the page could not be unmapped. in __migrate_device_pages()
H A Dmseal.c19 * mseal() disallows an input range which contain unmapped ranges (VMA holes).
21 * It disallows unmapped regions from start to end whether they exist at the
31 * Does the [start, end) range contain any unmapped memory?
/linux/drivers/gpu/drm/nouveau/
H A Dnouveau_exec.c35 * mapping. If such memory backed mappings are unmapped the kernel will make
38 * will result in those memory backed mappings being unmapped first.
48 * backed mappings being mapped and unmapped, either within a single or multiple
54 * of the previously unmapped sparse mapping within the same VM_BIND ioctl
56 * range of the previously unmapped sparse mapping within the same VM_BIND
/linux/drivers/media/platform/chips-media/wave5/
H A Dwave5-vdi.c91 dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__); in wave5_vdi_clear_memory()
103 dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__); in wave5_vdi_write_memory()
142 dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__); in wave5_vdi_free_dma_memory()
/linux/include/linux/dma/
H A Dk3-psil.h56 * @flow_start: PKDMA flow range start of mapped channel. Unmapped
58 * @flow_num: PKDMA flow count of mapped channel. Unmapped channels
/linux/Documentation/translations/zh_CN/mm/damon/
H A Ddesign.rst57 <BIG UNMAPPED REGION 1>
61 <BIG UNMAPPED REGION 2>
/linux/drivers/iommu/
H A Dvirtio-iommu.c359 * On success, returns the number of unmapped bytes
364 size_t unmapped = 0; in viommu_del_mappings() local
384 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
391 return unmapped; in viommu_del_mappings()
889 size_t unmapped; in viommu_unmap_pages() local
894 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
895 if (unmapped < size) in viommu_unmap_pages()
900 return unmapped; in viommu_unmap_pages()
906 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap_pages()
910 return ret ? 0 : unmapped; in viommu_unmap_pages()
/linux/drivers/staging/media/ipu3/
H A Dipu3-mmu.c383 size_t unmapped_page, unmapped = 0; in imgu_mmu_unmap() local
402 while (unmapped < size) { in imgu_mmu_unmap()
407 dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n", in imgu_mmu_unmap()
411 unmapped += unmapped_page; in imgu_mmu_unmap()
416 return unmapped; in imgu_mmu_unmap()
/linux/mm/damon/tests/
H A Dvaddr-kunit.h45 * regions should not include the two biggest unmapped areas in the original
48 * Because these two unmapped areas are very huge but obviously never accessed,
53 * unmapped areas. After that, based on the information, it constructs the
61 * and end with 305. The process also has three unmapped areas, 25-200,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
/linux/lib/
H A Ddevres.c70 * Managed ioremap(). Map is automatically unmapped on driver detach.
85 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
100 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
275 * Managed ioport_map(). Map is automatically unmapped on driver
/linux/arch/hexagon/include/asm/
H A Dprocessor.h47 * Apparently the convention is that ld.so will ask for "unmapped" private
52 * you have to kick the "unmapped" base requests higher up.
/linux/drivers/irqchip/
H A Dirq-ti-sci-inta.c90 * unmapped event sources.
91 * Unmapped Events are not part of the Global Event Map and
95 * generating Unmapped Event, we must use the INTA's TI-SCI
127 * For devices sending Unmapped Events we must use the INTA's TI-SCI in ti_sci_inta_get_dev_id()
619 count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL); in ti_sci_inta_get_unmapped_sources()
630 of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) { in ti_sci_inta_get_unmapped_sources()
/linux/tools/testing/selftests/powerpc/signal/
H A Dsigreturn_vdso.c106 printf("Unmapped VDSO\n"); in test_sigreturn_vdso()
119 printf("Signal delivered OK with VDSO unmapped\n"); in test_sigreturn_vdso()
/linux/Documentation/mm/damon/
H A Ddesign.rst96 mapped to the physical memory and accessed. Thus, tracking the unmapped
100 cases. That said, too huge unmapped areas inside the monitoring target should
105 gaps between the three regions are the two biggest unmapped areas in the given
106 address space. The two biggest unmapped areas would be the gap between the
113 <BIG UNMAPPED REGION 1>
117 <BIG UNMAPPED REGION 2>
268 virtual memory could be dynamically mapped and unmapped. Physical memory could
687 - unmapped
688 - Applied to pages that are unmapped.
/linux/Documentation/networking/device_drivers/ethernet/marvell/
H A Docteontx2.rst173 - Error due to operation of unmapped PF.
187 - Error due to unmapped slot.
237 - Receive packet on an unmapped PF.
249 - Error due to unmapped slot.
291 Rx on unmapped PF_FUNC
/linux/drivers/gpu/drm/msm/
H A Dmsm_iommu.c110 ssize_t unmapped; in msm_iommu_pagetable_unmap() local
114 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
115 if (unmapped <= 0) { in msm_iommu_pagetable_unmap()
122 unmapped = PAGE_SIZE; in msm_iommu_pagetable_unmap()
125 iova += unmapped; in msm_iommu_pagetable_unmap()
126 size -= unmapped; in msm_iommu_pagetable_unmap()
/linux/drivers/iommu/iommufd/
H A Dvfio_compat.c209 unsigned long unmapped = 0; in iommufd_vfio_unmap_dma() local
228 rc = iopt_unmap_all(&ioas->iopt, &unmapped); in iommufd_vfio_unmap_dma()
245 &unmapped); in iommufd_vfio_unmap_dma()
247 unmap.size = unmapped; in iommufd_vfio_unmap_dma()
/linux/arch/mips/include/asm/octeon/
H A Dcvmx-address.h108 /* mapped or unmapped virtual address */
120 /* mapped or unmapped virtual address */
128 * physical address accessed through xkphys unmapped virtual
/linux/arch/powerpc/platforms/powernv/
H A Dvas-debug.c36 /* ensure window is not unmapped */ in info_show()
63 /* ensure window is not unmapped */ in hvwc_show()
/linux/rust/kernel/
H A Dpage.rs181 /// The page is unmapped when this call returns. in read_raw()
204 // where the pages are unmapped in reverse order. This is as required by `kunmap_local`. in write_raw()
207 // unmapped first, then there must necessarily be a call to `kmap_local_page` other than the

12345678910>>...15