
Searched full:migrate (Results 1 – 25 of 251) sorted by relevance

/linux/mm/
migrate_device.c
9 #include <linux/migrate.h>
24 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_skip() local
28 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
29 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
40 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_hole() local
48 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
49 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
50 migrate->npages++; in migrate_vma_collect_hole()
51 migrate->cpages++; in migrate_vma_collect_hole()
62 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_pmd() local
[all …]
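The hits above are from the migrate_vma_collect_*() walk callbacks, which fill the src[]/dst[] pfn arrays of a struct migrate_vma on behalf of a device driver. For orientation, a minimal sketch of the driver-side sequence those arrays feed into (migrate_vma_setup(), allocate and fill destinations, migrate_vma_pages(), migrate_vma_finalize()); my_alloc_device_page() is a hypothetical allocator assumed to return a locked, referenced device page, and the data copy is only indicated by a comment:

#include <linux/mm.h>
#include <linux/migrate.h>

static int my_migrate_to_device(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        /* sketch assumption: the range spans at most 32 pages */
        unsigned long src[32] = { 0 }, dst[32] = { 0 };
        struct migrate_vma args = {
                .vma   = vma,
                .start = start,
                .end   = end,
                .src   = src,
                .dst   = dst,
                .flags = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        int ret;

        ret = migrate_vma_setup(&args);         /* collect: fills src[], cpages */
        if (ret)
                return ret;

        for (i = 0; i < args.npages; i++) {
                struct page *dpage;

                if (!(args.src[i] & MIGRATE_PFN_MIGRATE)) {
                        args.dst[i] = 0;        /* leave non-migratable entries alone */
                        continue;
                }
                dpage = my_alloc_device_page(); /* hypothetical device allocator */
                args.dst[i] = migrate_pfn(page_to_pfn(dpage));
        }

        /* the driver copies page contents src -> dst here, e.g. via device DMA */

        migrate_vma_pages(&args);               /* install the new pages */
        migrate_vma_finalize(&args);            /* drop references on the old pages */
        return 0;
}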
migrate.c
3 * Memory Migration functionality - linux/mm/migrate.c
16 #include <linux/migrate.h>
50 #include <trace/events/migrate.h>
682 * For memory tiering mode, when migrate between slow and fast in folio_migrate_flags()
763 * @dst: The folio to migrate the data to.
765 * @mode: How to migrate the page.
767 * Common logic to directly migrate a single LRU folio suitable for
888 * @dst: The folio to migrate to.
889 * @src: The folio to migrate from.
890 * @mode: How to migrate the folio.
[all …]
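The kerneldoc lines above describe migrate_folio(), the exported helper that covers the common case of moving a single LRU folio with no private data attached. A minimal sketch of how a filesystem opts in to migration by wiring that helper into its address_space_operations; the "myfs" name is a placeholder and all other ops are omitted:

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations myfs_aops = {
        /* .read_folio, .writepages, ... omitted in this sketch */
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,        /* generic LRU-folio move */
#endif
};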
page_isolation.c
12 #include <linux/migrate.h>
168 * We assume the caller intended to SET migrate type to isolate. in set_migratetype_isolate()
299 * @migratetype: migrate type to set in error recovery.
395 * to migrate it out of the way. in isolate_single_pageblock()
444 * @migratetype: Migrate type to set in error recovery.
455 * future will not be allocated again. If specified range includes migrate types
532 * @migratetype: New migrate type to set on the range
compaction.c
13 #include <linux/migrate.h>
379 * should be skipped for page isolation when the migrate and free page scanner
407 /* Update the migrate PFN */ in __reset_isolation_suitable()
853 * isolate_migratepages_block() - isolate all migrate-able pages within
924 * to failure. We should migrate the pages now and in isolate_migratepages_block()
1081 * It's possible to migrate LRU and non-lru movable pages. in isolate_migratepages_block()
1124 * Only allow to migrate anonymous pages in GFP_NOFS context in isolate_migratepages_block()
1143 * it will be able to migrate without blocking - clean pages in isolate_migratepages_block()
1158 * a ->migrate_folio callback are possible to migrate in isolate_migratepages_block()
1347 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
[all …]
/linux/drivers/gpu/drm/i915/gt/
selftest_migrate.c
36 static int copy(struct intel_migrate *migrate, in copy() argument
37 int (*fn)(struct intel_migrate *migrate, in copy() argument
44 struct drm_i915_private *i915 = migrate->context->engine->i915; in copy()
90 err = fn(migrate, &ww, src, dst, &rq); in copy()
151 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_copy_ccs()
255 static int clear(struct intel_migrate *migrate, in clear() argument
256 int (*fn)(struct intel_migrate *migrate, in clear() argument
263 struct drm_i915_private *i915 = migrate->context->engine->i915; in clear()
301 err = intel_migrate_ccs_copy(migrate, &ww, NULL, in clear()
318 err = fn(migrate, &ww, obj, val, &rq); in clear()
[all …]
/linux/drivers/gpu/drm/
drm_gpusvm.c
12 #include <linux/migrate.h>
56 * Embedded structure containing enough information for GPU SVM to migrate
74 * optionally migrate the range to device memory, and create GPU bindings.
139 * has device memory as its backing, the driver is also expected to migrate any
231 * // Partial unmap, migrate any remaining device memory pages back to RAM
786 * @migrate_devmem: Flag indicating whether to migrate device memory
948 * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages' in drm_gpusvm_range_chunk_size()
1558 * @migrate_pfn: Array of migrate page frame numbers
1598 * @migrate_pfn: Array of migrate page frame numbers to map
1661 * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
[all …]
/linux/drivers/gpu/drm/xe/
xe_migrate_doc.h
10 * DOC: Migrate Layer
12 * The XE migrate layer is used to generate jobs which can copy memory (eviction),
14 * a migrate engine, and uses a special VM for all generated jobs.
35 * A bind job consists of two batches and runs either on the migrate engine
37 * VM of the engine is the migrate VM.
60 * A copy or clear job consists of two batches and runs on the migrate engine.
xe_bo.c
163 return tile->migrate; in mem_type_to_migrate()
780 struct xe_migrate *migrate = NULL; in xe_bo_move() local
890 migrate = bo->tile->migrate; in xe_bo_move()
892 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
894 migrate = mem_type_to_migrate(xe, old_mem_type); in xe_bo_move()
896 migrate = xe->tiles[0].migrate; in xe_bo_move()
898 xe_assert(xe, migrate); in xe_bo_move()
919 fence = xe_migrate_clear(migrate, bo, new_mem, flags); in xe_bo_move()
921 fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem, in xe_bo_move()
1216 struct xe_migrate *migrate; in xe_bo_evict_pinned() local
[all …]
xe_migrate.c
39 * struct xe_migrate - migrate context.
88 * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
91 * Returns the default migrate exec queue of this tile.
93 * Return: The default migrate exec queue
97 return tile->migrate->q; in xe_tile_migrate_exec_queue()
128 * migrate offset in xe_migrate_vram_ofs()
365 * migrate jobs servicing the faults gets stuck behind the job that faulted.
390 * xe_migrate_init() - Initialize a migrate context
393 * Return: Pointer to a migrate context on success. Error pointer on error.
470 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n", in xe_migrate_init()
[all …]
/linux/tools/testing/selftests/bpf/prog_tests/
migrate_reuseport.c
3 * Check if we can migrate child sockets.
8 * 4. update a map to migrate all child sockets
11 * and migrate the requests in the accept queue
15 * and migrate the requests in the accept queue
19 * and migrate the requests in the accept queue
355 /* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests in migrate_dance()
364 /* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */ in migrate_dance()
385 /* Migrate from the last listener to the second one. in migrate_dance()
399 /* Migrate back to the last one from the second one */ in migrate_dance()
504 /* Migrate the requests in the accept queue only. in run_test()
[all …]
/linux/tools/testing/selftests/net/tcp_ao/
restore.c
94 try_server_run("TCP-AO migrate to another socket (server)", port++, in server_fn()
128 test_fail("pre-migrate verify failed"); in test_get_sk_checkpoint()
169 test_ok("%s: post-migrate connection is broken", tst_name); in test_sk_restore()
171 test_fail("%s: post-migrate connection is working", tst_name); in test_sk_restore()
174 test_fail("%s: post-migrate connection is working", tst_name); in test_sk_restore()
176 test_ok("%s: post-migrate connection is alive", tst_name); in test_sk_restore()
205 test_sk_restore("TCP-AO migrate to another socket (client)", port++, in client_fn()
seq-ext.c
195 test_fail("pre-migrate verify failed"); in client_fn()
220 test_fail("post-migrate verify failed"); in client_fn()
222 test_ok("post-migrate connection alive"); in client_fn()
/linux/include/uapi/linux/
mempolicy.h
52 #define MPOL_MF_LAZY (1<<3) /* UNSUPPORTED FLAG: Lazy migrate on fault */
65 #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
66 #define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
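The flags above cover the migrate-on-fault side of mempolicies; the eager, user-requested counterpart is mbind(2) with MPOL_MF_MOVE. A hedged userspace sketch that binds a buffer to NUMA node 0 and asks the kernel to migrate any pages already allocated elsewhere (assumes a NUMA system with node 0 and the libnuma headers installed):

#define _GNU_SOURCE
#include <numaif.h>             /* mbind(), MPOL_* (libnuma headers) */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t len = 4UL << 20;
        void *buf = aligned_alloc(4096, len);
        unsigned long nodemask = 1UL << 0;      /* node 0 only */

        if (!buf)
                return 1;
        /* MPOL_MF_MOVE: migrate already-present pages, not just future faults */
        if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                  MPOL_MF_MOVE))
                perror("mbind");
        free(buf);
        return 0;
}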
/linux/Documentation/gpu/xe/
xe_migrate.rst
4 Migrate Layer
8 :doc: Migrate Layer
/linux/include/linux/
psci.h
29 int (*migrate)(unsigned long cpuid); member
41 u32 migrate; member
compaction.h
74 * that the migrate scanner can have isolated on migrate list, and free in compact_gap()
/linux/tools/testing/selftests/mm/
hmm-tests.c
956 * Migrate anonymous memory to device private memory.
958 TEST_F(hmm, migrate) in TEST_F() argument
989 /* Migrate memory to device. */ in TEST_F()
1002 * Migrate anonymous memory to device private memory and fault some of it back
1037 /* Migrate memory to device. */ in TEST_F()
1050 /* Migrate memory to the device again. */ in TEST_F()
1091 /* Migrate memory to device. */ in TEST_F()
1112 * Migrate anonymous shared memory to device private memory.
1139 /* Migrate memory to device. */ in TEST_F()
1147 * Try to migrate various memory types to device private memory.
[all …]
/linux/Documentation/devicetree/bindings/arm/
psci.yaml
85 migrate:
87 description: Function ID for MIGRATE operation
154 migrate = <0x95c10003>;
/linux/kernel/irq/
cpuhotplug.c
68 pr_debug("IRQ %u: Unable to migrate away\n", d->irq); in migrate_one_irq()
162 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
164 * The current CPU has been marked offline. Migrate IRQs off this CPU.
/linux/tools/testing/selftests/bpf/progs/
test_migrate_reuseport.c
3 * Check if we can migrate child sockets.
97 SEC("sk_reuseport/migrate")
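For reference, a minimal sketch of the kind of SEC("sk_reuseport/migrate") program this selftest loads: when a request is being migrated (md->migrating_sk is set), it selects the target listener out of a REUSEPORT_SOCKARRAY map. The map name, its size, and the fixed key are illustrative, not the selftest's actual layout:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} target_listener SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
        __u32 key = 0;

        if (!md->migrating_sk)          /* normal SYN: keep the kernel's choice */
                return SK_PASS;

        /* hand the migrating request to the listener stored at key 0 */
        if (bpf_sk_select_reuseport(md, &target_listener, &key, 0))
                return SK_DROP;

        return SK_PASS;
}

char _license[] SEC("license") = "GPL";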
/linux/arch/arm/kernel/
psci_smp.c
37 * migrate Migrate the context to a different CPU
/linux/tools/testing/selftests/kvm/
arch_timer.c
17 * even more, an option to migrate the vCPUs across pCPUs (-m), at a
114 "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d", in test_migrate_vcpu()
191 …pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u… in test_print_help()
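The -m option above works by periodically moving each vCPU worker thread to a different physical CPU. A hedged sketch of the underlying mechanism, plain sched_setaffinity() on the calling thread (the selftest applies it to the vCPU thread's tid rather than to itself):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int migrate_self_to_pcpu(int pcpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(pcpu, &mask);

        /* pid 0 means the calling thread; the scheduler moves it on return */
        if (sched_setaffinity(0, sizeof(mask), &mask)) {
                perror("sched_setaffinity");
                return -1;
        }
        return 0;
}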
/linux/tools/perf/scripts/python/
sched-migration.py
191 def migrate(self, ts_list, new, old_cpu, new_cpu): member in TimeSlice
351 def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): member in SchedEventProxy
353 ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
418 parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
/linux/Documentation/networking/
net_failover.rst
136 # Source Hypervisor migrate.sh
162 # Migrate the VM
163 virsh migrate --live --persistent $DOMAIN qemu+ssh://$REMOTE_HOST/system
/linux/Documentation/trace/
hwlat_detector.rst
81 By default, one hwlat detector's kernel thread will migrate across each CPU
87 - round-robin: migrate across each CPU specified in cpumask [default]
