Searched refs:migrate (Results 1 – 25 of 78) sorted by relevance

/linux/mm/
migrate_device.c
25 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_skip() local
29 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
30 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
41 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_hole() local
49 (migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) && in migrate_vma_collect_hole()
52 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE | in migrate_vma_collect_hole()
54 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
55 migrate->npages++; in migrate_vma_collect_hole()
56 migrate->cpages++; in migrate_vma_collect_hole()
66 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
[all …]
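
The two collectors above share a simple contract: every page slot in the walked range gets an entry in the parallel src[]/dst[] arrays. migrate_vma_collect_skip() zeroes both entries so later phases ignore the slot, while migrate_vma_collect_hole() flags an unpopulated slot as still migratable, since a fresh page can be allocated for it. Below is a minimal userspace model of that contract; the struct layout and flag value are simplified stand-ins, not the kernel's definitions.

    #include <stdio.h>

    #define MIGRATE_PFN_MIGRATE (1UL << 1)  /* stand-in for the kernel's flag bit */

    /* Simplified stand-in for struct migrate_vma: parallel src/dst arrays
     * indexed by page slot, plus counters for filled and candidate slots. */
    struct migrate_vma_model {
        unsigned long *src;
        unsigned long *dst;
        unsigned long npages;  /* slots filled so far */
        unsigned long cpages;  /* slots that are migration candidates */
    };

    /* Mirrors migrate_vma_collect_skip(): mark each slot "do not migrate"
     * by zeroing both the source and destination entries. */
    static void collect_skip(struct migrate_vma_model *m, unsigned long nslots)
    {
        while (nslots--) {
            m->dst[m->npages] = 0;
            m->src[m->npages++] = 0;
        }
    }

    /* Mirrors the hole case: an unpopulated slot can still be migrated
     * (a new page will be allocated for it), so flag it as a candidate. */
    static void collect_hole(struct migrate_vma_model *m, unsigned long nslots)
    {
        while (nslots--) {
            m->src[m->npages] = MIGRATE_PFN_MIGRATE;
            m->dst[m->npages++] = 0;
            m->cpages++;
        }
    }

    int main(void)
    {
        unsigned long src[8] = { 0 }, dst[8] = { 0 };
        struct migrate_vma_model m = { .src = src, .dst = dst };

        collect_hole(&m, 3);  /* three migratable hole slots */
        collect_skip(&m, 2);  /* two slots that must be left alone */
        printf("npages=%lu cpages=%lu\n", m.npages, m.cpages);  /* 5 and 3 */
        return 0;
    }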
/linux/drivers/gpu/drm/
drm_pagemap.c
8 #include <linux/migrate.h>
19 * to migrate memory back and forth between device memory and system RAM and
28 * population requests and after that migrate all device pages to system ram.
40 * and it can evict the range to system before trying to migrate. Ideally an
41 * implementation would just try to migrate the missing part of the range and
54 * migrate to / from device memory.
167 * @migrate_pfn: Array of migrate page frame numbers
207 * @migrate_pfn: Array of migrate page frame numbers to map
294 * @devmem_allocation: The device memory allocation to migrate to.
299 * @start: Start of the virtual address range to migrate
324 struct migrate_vma migrate = { in drm_pagemap_migrate_to_devmem() local
647 struct migrate_vma migrate = { in __drm_pagemap_migrate_to_ram() local
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
263 static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) in svm_migrate_successful_pages() argument
268 for (i = 0; i < migrate->npages; i++) { in svm_migrate_successful_pages()
269 if (migrate->dst[i] & MIGRATE_PFN_VALID && in svm_migrate_successful_pages()
270 migrate->src[i] & MIGRATE_PFN_MIGRATE) in svm_migrate_successful_pages()
278 struct migrate_vma *migrate, struct dma_fence **mfence, in svm_migrate_copy_to_vram() argument
281 u64 npages = migrate->npages; in svm_migrate_copy_to_vram()
299 for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) { in svm_migrate_copy_to_vram()
302 if (migrate->src[i] & MIGRATE_PFN_MIGRATE) { in svm_migrate_copy_to_vram()
304 migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); in svm_migrate_copy_to_vram()
305 svm_migrate_get_vram_page(prange, migrate->dst[i]); in svm_migrate_copy_to_vram()
[all …]
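
svm_migrate_successful_pages() above counts a slot as migrated only when both sides agree: the source entry was selected (MIGRATE_PFN_MIGRATE) and the destination entry was actually populated (MIGRATE_PFN_VALID). A self-contained model of that per-slot predicate follows; the flag values are illustrative stand-ins, not the kernel's.

    #include <stdio.h>

    #define MIGRATE_PFN_VALID   (1UL << 0)  /* illustrative stand-ins for */
    #define MIGRATE_PFN_MIGRATE (1UL << 1)  /* the kernel's pfn flag bits */

    /* Count slots where the source was selected for migration AND the
     * destination page was successfully set up - the same predicate
     * svm_migrate_successful_pages() applies per slot. */
    static unsigned long successful_pages(const unsigned long *src,
                                          const unsigned long *dst,
                                          unsigned long npages)
    {
        unsigned long i, mpages = 0;

        for (i = 0; i < npages; i++)
            if ((dst[i] & MIGRATE_PFN_VALID) && (src[i] & MIGRATE_PFN_MIGRATE))
                mpages++;
        return mpages;
    }

    int main(void)
    {
        unsigned long src[] = { MIGRATE_PFN_MIGRATE, MIGRATE_PFN_MIGRATE, 0 };
        unsigned long dst[] = { MIGRATE_PFN_VALID, 0, MIGRATE_PFN_VALID };

        /* Only slot 0 has both bits set: expect 1. */
        printf("migrated: %lu\n", successful_pages(src, dst, 3));
        return 0;
    }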
/linux/tools/testing/selftests/mm/
migration.c
66 int migrate(uint64_t *ptr, int n1, int n2) in migrate() function
141 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
176 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
206 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
248 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
273 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
308 ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
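
The selftest's migrate() helper bounces a mapped page between two NUMA nodes while other threads keep touching it. Below is a hedged sketch of the same mechanism using move_pages(2) through libnuma's <numaif.h> declaration; it illustrates the system call, not the selftest's exact code (build with -lnuma; MPOL_MF_MOVE_ALL requires CAP_SYS_NICE, and the run needs a NUMA system with at least nodes 0 and 1).

    #include <numaif.h>  /* move_pages(); link with -lnuma */
    #include <stdint.h>
    #include <time.h>

    /* Bounce the page containing *ptr between nodes n1 and n2 for a few
     * seconds, verifying after each call that the kernel placed the page
     * on the requested node. Returns 0 on success, -1 on failure. */
    static int migrate(uint64_t *ptr, int n1, int n2)
    {
        void *pages[1] = { ptr };
        time_t deadline = time(NULL) + 5;  /* illustrative runtime */

        while (time(NULL) < deadline) {
            int dest = n2, status = -1, tmp;

            if (move_pages(0 /* self */, 1, pages, &dest, &status,
                           MPOL_MF_MOVE_ALL))
                return -1;
            if (status != n2)
                return -1;  /* page is not where we asked */
            tmp = n1; n1 = n2; n2 = tmp;  /* reverse direction */
        }
        return 0;
    }

    int main(void)
    {
        static uint64_t buf[512];

        buf[0] = 1;  /* fault the page in before moving it */
        return migrate(buf, 0, 1) ? 1 : 0;
    }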
rmap.c
154 FIXTURE(migrate) in FIXTURE() argument
159 FIXTURE_SETUP(migrate) in FIXTURE_SETUP() argument
195 FIXTURE_TEARDOWN(migrate) in FIXTURE_TEARDOWN() argument
311 TEST_F(migrate, anon) in TEST_F() argument
328 TEST_F(migrate, shm) in TEST_F() argument
353 TEST_F(migrate, file) in TEST_F() argument
410 TEST_F(migrate, ksm) in TEST_F() argument
/linux/include/linux/
psci.h
29 int (*migrate)(unsigned long cpuid); member
41 u32 migrate; member
migrate.h
186 void migrate_vma_pages(struct migrate_vma *migrate);
187 void migrate_vma_finalize(struct migrate_vma *migrate);
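
migrate_vma_pages() and migrate_vma_finalize() are the tail of the three-phase driver API described in Documentation/mm/hmm.rst: migrate_vma_setup() collects and isolates the source pages, the driver allocates destinations and copies, then pages/finalize publish the result and release the originals. A condensed kernel-style sketch of that flow follows; it is not compilable outside a kernel tree, and alloc_device_pfn()/copy_to_device() are hypothetical placeholders for a driver's allocator and DMA path.

    #include <linux/migrate.h>

    #define EXAMPLE_MAX_PAGES 16

    /* Hypothetical driver hooks, assumed to exist elsewhere: */
    unsigned long alloc_device_pfn(void);
    void copy_to_device(unsigned long src_mpfn, unsigned long dst_mpfn);

    /* Hypothetical helper: migrate one small VA range to device memory. */
    static int example_migrate_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     void *pgmap_owner)
    {
        unsigned long src[EXAMPLE_MAX_PAGES] = { 0 };
        unsigned long dst[EXAMPLE_MAX_PAGES] = { 0 };
        struct migrate_vma migrate = {
            .vma         = vma,
            .src         = src,
            .dst         = dst,
            .start       = start,
            .end         = end,
            .pgmap_owner = pgmap_owner,
            .flags       = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i, npages = (end - start) >> PAGE_SHIFT;
        int ret;

        if (npages > EXAMPLE_MAX_PAGES)
            return -EINVAL;

        ret = migrate_vma_setup(&migrate);  /* phase 1: collect + isolate */
        if (ret)
            return ret;

        for (i = 0; i < npages; i++) {
            if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
                continue;  /* this page could not be isolated */
            /* phase 2: allocate a device page and copy into it */
            migrate.dst[i] = migrate_pfn(alloc_device_pfn());  /* hypothetical */
            copy_to_device(migrate.src[i], migrate.dst[i]);    /* hypothetical */
        }

        migrate_vma_pages(&migrate);     /* phase 3a: install the new pages */
        migrate_vma_finalize(&migrate);  /* phase 3b: unlock + put old pages */
        return 0;
    }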
/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c
83 struct nouveau_dmem_migrate migrate; member
171 if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(sfolio), in nouveau_dmem_copy_folio()
267 nouveau_fence_new(&fence, dmem->migrate.chan); in nouveau_dmem_migrate_to_ram()
517 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan); in nouveau_dmem_evict_chunk()
558 struct nvif_push *push = &drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_copy()
631 struct nvif_push *push = &drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_clear()
691 drm->dmem->migrate.copy_func = nvc0b5_migrate_copy; in nouveau_dmem_migrate_init()
692 drm->dmem->migrate.clear_func = nvc0b5_migrate_clear; in nouveau_dmem_migrate_init()
693 drm->dmem->migrate.chan = drm->ttm.chan; in nouveau_dmem_migrate_init()
753 if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(page_folio(spage)), in nouveau_dmem_migrate_copy_one()
[all …]
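
nouveau_dmem_migrate_init() above wires hardware-specific copy and clear routines into a small ops bundle; the rest of the DMEM code calls through the pointers without knowing which backend filled them in. A self-contained model of that dispatch pattern follows (all names here are illustrative).

    #include <stdio.h>

    /* Illustrative model of nouveau's 'migrate' ops bundle: callers use the
     * function pointers without naming the hardware backend behind them. */
    struct dmem_migrate_ops {
        int (*copy_func)(unsigned long npages);
        int (*clear_func)(unsigned long npages);
    };

    static int model_copy(unsigned long npages)
    {
        printf("copy %lu page(s) via copy engine\n", npages);
        return 0;
    }

    static int model_clear(unsigned long npages)
    {
        printf("clear %lu page(s) via copy engine\n", npages);
        return 0;
    }

    /* Mirror of the _init() step: pick the backend once at probe time. */
    static void migrate_init(struct dmem_migrate_ops *ops)
    {
        ops->copy_func = model_copy;
        ops->clear_func = model_clear;
    }

    int main(void)
    {
        struct dmem_migrate_ops ops;

        migrate_init(&ops);
        ops.copy_func(4);  /* later code never names the backend */
        ops.clear_func(1);
        return 0;
    }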
/linux/tools/testing/selftests/kvm/x86/
xapic_ipi_test.c
359 bool *migrate, int *delay_usecs) in get_cmdline_args() argument
371 *migrate = true; in get_cmdline_args()
397 bool migrate = false; in main() local
403 get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs); in main()
457 if (!migrate) in main()
/linux/arch/powerpc/platforms/pseries/
vas.c
652 bool migrate) in reconfig_open_windows() argument
680 if ((vcaps->nr_close_wins > creds) && !migrate) in reconfig_open_windows()
694 if (migrate) in reconfig_open_windows()
762 bool migrate) in reconfig_close_windows() argument
769 if (migrate) in reconfig_close_windows()
827 if (rc && !migrate) in reconfig_close_windows()
839 if (!migrate && !--excess_creds) in reconfig_close_windows()
/linux/Documentation/gpu/rfc/
gpusvm.rst
21 migrate can actually migrate, with only the faulting page guaranteed
22 to migrate).
110 migrate device layer are a performance bottleneck, having compound
/linux/drivers/gpu/drm/i915/selftests/
i915_perf_selftests.h
20 selftest(migrate, intel_migrate_perf_selftests)
i915_live_selftests.h
30 selftest(migrate, intel_migrate_live_selftests)
/linux/tools/perf/scripts/python/
sched-migration.py
191 def migrate(self, ts_list, new, old_cpu, new_cpu): member in TimeSlice
351 def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): member in SchedEventProxy
353 ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
418 parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
/linux/fs/ocfs2/dlm/
dlmmaster.c
3016 struct dlm_migrate_request migrate; in dlm_do_migrate_request() local
3020 memset(&migrate, 0, sizeof(migrate)); in dlm_do_migrate_request()
3021 migrate.namelen = res->lockname.len; in dlm_do_migrate_request()
3022 memcpy(migrate.name, res->lockname.name, migrate.namelen); in dlm_do_migrate_request()
3023 migrate.new_master = new_master; in dlm_do_migrate_request()
3024 migrate.master = master; in dlm_do_migrate_request()
3044 &migrate, sizeof(migrate), nodenum, in dlm_do_migrate_request()
3049 migrate.namelen, migrate.name, ret, nodenum); in dlm_do_migrate_request()
3093 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; in dlm_migrate_request_handler() local
3102 name = migrate->name; in dlm_migrate_request_handler()
[all …]
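
dlm_do_migrate_request() assembles a fixed-layout message on the stack: zero the whole struct first so padding never leaks to the wire, copy the lock name with its explicit length, then hand the buffer to the messaging layer. A stand-alone model of that pattern follows (the struct layout is illustrative, not ocfs2's).

    #include <stdio.h>
    #include <string.h>

    #define LOCKID_NAME_MAX 32  /* illustrative cap for a fixed-size name field */

    /* Illustrative fixed-layout request, in the spirit of
     * struct dlm_migrate_request. */
    struct migrate_request_model {
        unsigned char namelen;
        unsigned char master;
        unsigned char new_master;
        char name[LOCKID_NAME_MAX];
    };

    /* Build the message the way dlm_do_migrate_request() does: memset the
     * whole struct (no uninitialized padding on the wire), then fill fields. */
    static void build_request(struct migrate_request_model *req,
                              const char *name, size_t namelen,
                              unsigned char master, unsigned char new_master)
    {
        memset(req, 0, sizeof(*req));
        req->namelen = (unsigned char)namelen;
        memcpy(req->name, name, namelen);  /* length-delimited, not NUL-terminated */
        req->master = master;
        req->new_master = new_master;
    }

    int main(void)
    {
        struct migrate_request_model req;

        build_request(&req, "resource0", 9, 2, 5);
        printf("%.*s: master %d -> %d\n", req.namelen, req.name,
               req.master, req.new_master);
        return 0;
    }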
/linux/arch/arm64/kvm/hyp/nvhe/
psci-relay.c
53 is_psci_0_1(migrate, func_id)); in is_psci_0_1_call()
229 if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id)) in psci_0_1_handler()
/linux/fs/ext4/
Makefile
10 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
/linux/kernel/sched/
stats.h
229 static inline void psi_enqueue(struct task_struct *p, bool migrate) {} in psi_enqueue() argument
230 static inline void psi_dequeue(struct task_struct *p, bool migrate) {} in psi_dequeue() argument
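
Those psi_enqueue()/psi_dequeue() lines are the CONFIG_PSI=n stubs: empty static inline functions that let scheduler call sites stay unconditional while the compiler removes the calls entirely. The pattern in self-contained form (CONFIG_PSI here is a plain macro standing in for the Kconfig symbol):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toggle this the way Kconfig would toggle CONFIG_PSI. */
    /* #define CONFIG_PSI 1 */

    struct task_struct { const char *comm; };

    #ifdef CONFIG_PSI
    static inline void psi_enqueue(struct task_struct *p, bool migrate)
    {
        printf("psi: enqueue %s (migrate=%d)\n", p->comm, migrate);
    }
    #else
    /* CONFIG_PSI=n: a no-op stub. The call compiles away entirely, so
     * callers need no #ifdef of their own. */
    static inline void psi_enqueue(struct task_struct *p, bool migrate)
    {
        (void)p;
        (void)migrate;
    }
    #endif

    int main(void)
    {
        struct task_struct t = { .comm = "demo" };

        psi_enqueue(&t, true);  /* valid whichever way CONFIG_PSI is set */
        return 0;
    }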
/linux/drivers/gpu/drm/xe/
xe_svm.c
518 gt = xe_migrate_exec_queue(vr->migrate)->gt; in xe_svm_copy()
566 __fence = xe_migrate_from_vram(vr->migrate, in xe_svm_copy()
576 __fence = xe_migrate_to_vram(vr->migrate, in xe_svm_copy()
607 __fence = xe_migrate_from_vram(vr->migrate, 1, in xe_svm_copy()
615 __fence = xe_migrate_to_vram(vr->migrate, 1, in xe_svm_copy()
950 * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not in xe_svm_range_needs_migrate_to_vram()
1000 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
1025 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1200 * Create GPU bindings for a SVM page fault. Optionally migrate to device in xe_svm_handle_pagefault()
xe_vram.c
301 vram->migrate = xe->tiles[id].migrate; in xe_vram_region_alloc()
/linux/Documentation/trace/
hwlat_detector.rst
81 By default, one hwlat detector's kernel thread will migrate across each CPU
87 - round-robin: migrate across each CPU specified in cpumask [default]
/linux/drivers/gpu/drm/i915/gt/
intel_gt_types.h
237 struct intel_migrate migrate; member
/linux/Documentation/ABI/testing/
sysfs-kernel-mm-numa
17 systems to migrate pages from fast tiers to slow tiers
/linux/include/trace/events/
migrate.h
3 #define TRACE_SYSTEM migrate
/linux/drivers/perf/hisilicon/
hisi_uncore_hha_pmu.c
384 HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
385 HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
