Home
last modified time | relevance | path

Searched full:unmapped (Results 1 – 25 of 465) sorted by relevance

Pages: 1 2 3 4 5 6 7 8 9 10 >> … 19

/linux/drivers/gpu/drm/msm/
H A Dmsm_gem_shrinker.c188 unsigned idx, unmapped = 0; in msm_gem_shrinker_vmap() local
191 for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { in msm_gem_shrinker_vmap()
192 unmapped += drm_gem_lru_scan(lrus[idx], in msm_gem_shrinker_vmap()
193 vmap_shrink_limit - unmapped, in msm_gem_shrinker_vmap()
198 *(unsigned long *)ptr += unmapped; in msm_gem_shrinker_vmap()
200 if (unmapped > 0) in msm_gem_shrinker_vmap()
201 trace_msm_gem_purge_vmaps(unmapped); in msm_gem_shrinker_vmap()
H A Dmsm_gpu_trace.h143 TP_PROTO(u32 unmapped),
144 TP_ARGS(unmapped),
146 __field(u32, unmapped)
149 __entry->unmapped = unmapped;
151 TP_printk("Purging %u vmaps", __entry->unmapped)
H A Dmsm_iommu.c98 size_t unmapped, pgsize, count; in msm_iommu_pagetable_unmap() local
102 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
103 if (!unmapped) in msm_iommu_pagetable_unmap()
106 iova += unmapped; in msm_iommu_pagetable_unmap()
107 size -= unmapped; in msm_iommu_pagetable_unmap()
/linux/include/trace/events/
H A Dhuge_memory.h59 int referenced, int none_or_zero, int status, int unmapped),
61 TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
70 __field(int, unmapped)
80 __entry->unmapped = unmapped;
83 …TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped
90 __entry->unmapped)
/linux/mm/damon/
H A Dvaddr.c104 * Find three regions separated by two biggest unmapped regions
110 * separated by the two biggest unmapped regions in the space. Please refer to
199 * is actually mapped to the memory and accessed, monitoring the unmapped
204 * with the noise by simply identifying the unmapped areas as a region that
206 * unmapped areas inside will make the adaptive mechanism quite complex. That
207 * said, too huge unmapped areas inside the monitoring target should be removed
212 * between the three regions are the two biggest unmapped areas in the given
214 * end of the mappings and the two biggest unmapped areas of the address space.
223 * region and the stack will be two biggest unmapped regions. Because these
225 * two biggest unmapped regions will be sufficient to make a trade-off.
[all …]
/linux/drivers/scsi/lpfc/
H A Dlpfc_disc.h229 * nodes transition from the unmapped to the mapped list.
248 * and put on the unmapped list. For ADISC processing, the node is taken off
249 * the ADISC list and placed on either the mapped or unmapped list (depending
250 * on its previous state). Once on the unmapped list, a PRLI is issued and the
253 * node, the node is taken off the unmapped list. The binding list is checked
255 * assignment is unsuccessful, the node is left on the unmapped list. If
260 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
265 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
282 * unmapped lists.
/linux/drivers/iommu/amd/
H A Dio_pgtable_v2.c289 unsigned long unmapped = 0; in iommu_v2_unmap_pages() local
296 while (unmapped < size) { in iommu_v2_unmap_pages()
299 return unmapped; in iommu_v2_unmap_pages()
304 unmapped += unmap_size; in iommu_v2_unmap_pages()
307 return unmapped; in iommu_v2_unmap_pages()
H A Dio_pgtable.c411 unsigned long long unmapped; in iommu_v1_unmap_pages() local
418 unmapped = 0; in iommu_v1_unmap_pages()
420 while (unmapped < size) { in iommu_v1_unmap_pages()
429 return unmapped; in iommu_v1_unmap_pages()
433 unmapped += unmap_size; in iommu_v1_unmap_pages()
436 return unmapped; in iommu_v1_unmap_pages()
/linux/drivers/vfio/
H A Dvfio_iommu_type1.c970 size_t unmapped = 0; in unmap_unpin_fast() local
974 unmapped = iommu_unmap_fast(domain->domain, *iova, len, in unmap_unpin_fast()
977 if (!unmapped) { in unmap_unpin_fast()
982 entry->len = unmapped; in unmap_unpin_fast()
985 *iova += unmapped; in unmap_unpin_fast()
994 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { in unmap_unpin_fast()
1000 return unmapped; in unmap_unpin_fast()
1008 size_t unmapped = iommu_unmap(domain->domain, *iova, len); in unmap_unpin_slow() local
1010 if (unmapped) { in unmap_unpin_slow()
1013 unmapped >> PAGE_SHIFT, in unmap_unpin_slow()
[all …]
/linux/mm/
H A Dmigrate_device.c65 unsigned long addr = start, unmapped = 0; in migrate_vma_collect_pmd() local
261 unmapped++; in migrate_vma_collect_pmd()
273 if (unmapped) in migrate_vma_collect_pmd()
368 unsigned long unmapped = 0; in migrate_device_unmap() local
378 unmapped++; in migrate_device_unmap()
416 unmapped++; in migrate_device_unmap()
435 return unmapped; in migrate_device_unmap()
465 * and unmapped, check whether each page is pinned or not. Pages that aren't
550 * At this point pages are locked and unmapped, and thus they have in migrate_vma_setup()
712 * called if the page could not be unmapped. in __migrate_device_pages()
/linux/drivers/gpu/drm/nouveau/
H A Dnouveau_exec.c33 * mapping. If such memory backed mappings are unmapped the kernel will make
36 * will result in those memory backed mappings being unmapped first.
46 * backed mappings being mapped and unmapped, either within a single or multiple
52 * of the previously unmapped sparse mapping within the same VM_BIND ioctl
54 * range of the previously unmapped sparse mapping within the same VM_BIND
/linux/drivers/media/pci/intel/ipu6/
H A Dipu6-mmu.c345 size_t unmapped = 0; in l2_unmap() local
372 unmapped++; in l2_unmap()
376 return unmapped << ISP_PAGE_SHIFT; in l2_unmap()
653 size_t unmapped_page, unmapped = 0; in ipu6_mmu_unmap() local
674 while (unmapped < size) { in ipu6_mmu_unmap()
676 iova, size - unmapped); in ipu6_mmu_unmap()
682 dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n", in ipu6_mmu_unmap()
686 unmapped += unmapped_page; in ipu6_mmu_unmap()
689 return unmapped; in ipu6_mmu_unmap()
/linux/Documentation/devicetree/bindings/interrupt-controller/
H A Dti,sci-inta.yaml37 Unmapped events ---->| | umapidx |-------------------------> Globalevents
81 ti,unmapped-event-sources:
86 Array of phandles to DMA controllers where the unmapped events originate.
/linux/Documentation/networking/device_drivers/ethernet/marvell/
H A Docteontx2.rst172 - Error due to operation of unmapped PF.
186 - Error due to unmapped slot.
236 - Receive packet on an unmapped PF.
248 - Error due to unmapped slot.
290 Rx on unmapped PF_FUNC
/linux/drivers/media/platform/chips-media/wave5/
H A Dwave5-vdi.c91 dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__); in wave5_vdi_clear_memory()
103 dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__); in wave5_vdi_write_memory()
142 dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__); in wave5_vdi_free_dma_memory()
/linux/drivers/iommu/
H A Dvirtio-iommu.c342 * On success, returns the number of unmapped bytes
347 size_t unmapped = 0; in viommu_del_mappings() local
367 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
374 return unmapped; in viommu_del_mappings()
863 size_t unmapped; in viommu_unmap_pages() local
868 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
869 if (unmapped < size) in viommu_unmap_pages()
874 return unmapped; in viommu_unmap_pages()
880 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap_pages()
884 return ret ? 0 : unmapped; in viommu_unmap_pages()
/linux/drivers/staging/media/ipu3/
H A Dipu3-mmu.c383 size_t unmapped_page, unmapped = 0; in imgu_mmu_unmap() local
402 while (unmapped < size) { in imgu_mmu_unmap()
407 dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n", in imgu_mmu_unmap()
411 unmapped += unmapped_page; in imgu_mmu_unmap()
416 return unmapped; in imgu_mmu_unmap()
/linux/mm/damon/tests/
H A Dvaddr-kunit.h45 * regions should not include the two biggest unmapped areas in the original
48 * Because these two unmapped areas are very huge but obviously never accessed,
53 * unmapped areas. After that, based on the information, it constructs the
61 * and end with 305. The process also has three unmapped areas, 25-200,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
/linux/lib/
H A Ddevres.c70 * Managed ioremap(). Map is automatically unmapped on driver detach. in devm_ioremap()
85 * Managed ioremap_uc(). Map is automatically unmapped on driver detach. in devm_ioremap_uc()
100 * Managed ioremap_wc(). Map is automatically unmapped on driver detach. in devm_ioremap_wc()
275 * Managed ioport_map(). Map is automatically unmapped on driver
/linux/include/linux/dma/
H A Dk3-psil.h56 * @flow_start: PKDMA flow range start of mapped channel. Unmapped
58 * @flow_num: PKDMA flow count of mapped channel. Unmapped channels
/linux/Documentation/translations/zh_CN/mm/damon/
H A Ddesign.rst57 <BIG UNMAPPED REGION 1>
61 <BIG UNMAPPED REGION 2>
/linux/drivers/iommu/iommufd/
H A Dioas.c300 unsigned long unmapped = 0; in iommufd_ioas_unmap() local
308 rc = iopt_unmap_all(&ioas->iopt, &unmapped); in iommufd_ioas_unmap()
317 &unmapped); in iommufd_ioas_unmap()
322 cmd->length = unmapped; in iommufd_ioas_unmap()
/linux/arch/hexagon/include/asm/
H A Dprocessor.h47 * Apparently the convention is that ld.so will ask for "unmapped" private
52 * you have to kick the "unmapped" base requests higher up.
/linux/Documentation/arch/x86/x86_64/
H A D5level-paging.rst49 to look for unmapped area by specified address. If it's already
50 occupied, we look for unmapped area in *full* address space, rather than
/linux/drivers/irqchip/
H A Dirq-ti-sci-inta.c90 * unmapped event sources.
91 * Unmapped Events are not part of the Global Event Map and
95 * generating Unmapped Event, we must use the INTA's TI-SCI
127 * For devices sending Unmapped Events we must use the INTA's TI-SCI in ti_sci_inta_get_dev_id()
619 count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL); in ti_sci_inta_get_unmapped_sources()
630 of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) { in ti_sci_inta_get_unmapped_sources()

Pages: 1 2 3 4 5 6 7 8 9 10 >> … 19