/linux/drivers/scsi/

st.c (all hits in st_ioctl()):
    3550  struct mtop mtc;   (local)
    3552  if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
    3557  i = copy_from_user(&mtc, p, sizeof(struct mtop));
    3563  if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
    3570  (mtc.mt_op != MTSETDRVBUFFER &&
    3571   (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
    3579  if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
    3580      mtc.mt_op == MTEOM) {
    3581  	mtc.mt_count -= 1;
    3584  } else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
    [all …]
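The st_ioctl() hits above are the kernel side of the tape MTIOCTOP ioctl: size-check the request, copy_from_user() a struct mtop, and gate MTSETDRVBUFFER behind CAP_SYS_ADMIN. A minimal userspace sketch of the calling side (the /dev/nst0 path and the filemark count are illustrative, not taken from the listing):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mtio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Illustrative no-rewind tape device node. */
        int fd = open("/dev/nst0", O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Forward-space over two filemarks.  st_ioctl() copies this struct
         * in with copy_from_user() and, for MTFSF/MTFSFM/MTEOM, decrements
         * mt_count by one before acting on it, as the hits above show. */
        struct mtop op = { .mt_op = MTFSF, .mt_count = 2 };
        if (ioctl(fd, MTIOCTOP, &op) < 0) {
            perror("MTIOCTOP");
            close(fd);
            return 1;
        }

        close(fd);
        return 0;
    }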
/linux/arch/powerpc/boot/dts/

digsy_mtc.dts:
    16  model = "intercontrol,digsy-mtc";
    17  compatible = "intercontrol,digsy-mtc";
/linux/tools/perf/util/intel-pt-decoder/

intel-pt-decoder.c:
    in intel_pt_fixup_last_mtc():
       794  static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,   (argument)
       800  	*last_mtc |= mtc & mask;
       801  	if (*last_mtc >= mtc) {
    in intel_pt_calc_cyc_cb():
       814  	uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;   (local)
       844  	mtc = pkt_info->packet.payload;
       847  	intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
       850  	if (mtc > data->last_mtc)
       851  		mtc_delta = mtc - data->last_mtc;
       853  		mtc_delta = mtc + 256 - data->last_mtc;
       855  	data->last_mtc = mtc;
    in intel_pt_calc_mtc_timestamp():
      1939  	uint32_t mtc, mtc_delta;   (local)
    [all …]
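The intel_pt_calc_cyc_cb() hits show how the decoder turns the MTC counter in each packet into a delta; the "+ 256" in the hit above is the wrap-around case, since the value wraps modulo 256. A standalone sketch of just that arithmetic (function and variable names here are illustrative, not the decoder's):

    #include <stdint.h>
    #include <stdio.h>

    /* A smaller new value means the 8-bit counter wrapped: add 256 before
     * subtracting, exactly as the excerpt above does. */
    static uint32_t mtc_delta(uint32_t last_mtc, uint32_t mtc)
    {
        if (mtc > last_mtc)
            return mtc - last_mtc;
        return mtc + 256 - last_mtc;
    }

    int main(void)
    {
        printf("%u\n", mtc_delta(10, 14));   /* 4: no wrap          */
        printf("%u\n", mtc_delta(250, 3));   /* 9: wrapped past 255 */
        return 0;
    }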
/linux/mm/

migrate.c:
    in alloc_migration_target():
      2153  	struct migration_target_control *mtc;   (local)
      2159  	mtc = (struct migration_target_control *)private;
      2160  	gfp_mask = mtc->gfp_mask;
      2161  	nid = mtc->nid;
      2170  				mtc->nmask, gfp_mask,
      2171  				htlb_allow_alloc_fallback(mtc->reason));
      2187  	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
    in do_move_pages_to_node():
      2206  	struct migration_target_control mtc = {   (local)
      2213  		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
memory_hotplug.c:
    in do_migrate_range():
      1856  	struct migration_target_control mtc = {   (local)
      1867  	mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
      1874  	node_clear(mtc.nid, nmask);
      1876  	node_set(mtc.nid, nmask);
      1878  		(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
vmscan.c:
    in alloc_migrate_folio():
       997  	struct migration_target_control *mtc;   (local)
       999  	mtc = (struct migration_target_control *)private;
      1001  	allowed_mask = mtc->nmask;
      1011  	mtc->nmask = NULL;
      1012  	mtc->gfp_mask |= __GFP_THISNODE;
      1013  	dst = alloc_migration_target(src, (unsigned long)mtc);
      1017  	mtc->gfp_mask &= ~__GFP_THISNODE;
      1018  	mtc->nmask = allowed_mask;
      1020  	return alloc_migration_target(src, (unsigned long)mtc);
    in demote_folio_list():
      1034  	struct migration_target_control mtc = {   (local)
    [all …]
memory-failure.c:
    in soft_offline_in_use_page():
      2671  	struct migration_target_control mtc = {   (local)
      2722  		(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
gup.c:
    in migrate_longterm_unpinnable_folios():
      2409  	struct migration_target_control mtc = {   (local)
      2416  		NULL, (unsigned long)&mtc, MIGRATE_SYNC,
mempolicy.c:
    in migrate_to_node():
      1070  	struct migration_target_control mtc = {   (local)
      1100  		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
page_alloc.c:
    in __alloc_contig_migrate_range():
      6272  	struct migration_target_control mtc = {   (local)
      6317  		NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
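The /linux/mm/ hits above all share one shape: a caller fills a struct migration_target_control, hands its address to migrate_pages() through the unsigned long private argument, and alloc_migration_target() casts it back when allocating each destination folio. A hedged kernel-style sketch of the caller half, modeled on the do_move_pages_to_node() and do_migrate_range() hits (the node, gfp mask, and source list are illustrative, and this is a sketch rather than a standalone, compilable unit):

    /* Sketch of the shared call-site pattern; values are illustrative. */
    LIST_HEAD(source);				/* folios queued for migration */
    struct migration_target_control mtc = {
        .nid      = target_nid,			/* illustrative destination node */
        .nmask    = NULL,			/* or a nodemask restricting fallback */
        .gfp_mask = GFP_HIGHUSER_MOVABLE,	/* illustrative allocation mask */
        .reason   = MR_SYSCALL,
    };

    /* alloc_migration_target() recovers &mtc from the private argument
     * (see the migrate.c hits at 2153-2161 above). */
    migrate_pages(&source, alloc_migration_target, NULL,
                  (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);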
/linux/arch/powerpc/platforms/52xx/

Kconfig:
    26  intercontrol,digsy-mtc
/linux/mm/damon/

paddr.c:
    337  	struct migration_target_control mtc = {   (in __damon_pa_migrate_folio_list(), local)
    346  	struct migration_target_control mtc = {   (in __damon_pa_migrate_folio_list())
    366  		(unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,   (in damon_pa_migrate_folio_list())
/linux/tools/perf/Documentation/

perf-intel-pt.txt:
     156  MTC packets are used - refer to the 'mtc' config term. When MTC is used, however,
     252  /sys/bus/event_source/devices/intel_pt/format/mtc:config:9
     396  *mtc*::
     405  /sys/bus/event_source/devices/intel_pt/caps/mtc
     414  Specifies how frequently MTC packets are produced - see mtc
    1354  TSC is not supported and tsc=0 must be specified. That means mtc is useless, so add mtc=0.
    1360  $ sudo perf kvm --guest --host --guestkallsyms $KALLSYMS record --kcore -e intel_pt/tsc=0,mtc=0,cyc=1/k -p
    1430  --per-thread
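For a raw perf_event_open() caller, the format/mtc:config:9 line above means the mtc term occupies bit 9 of perf_event_attr.config. A minimal sketch that only demonstrates that encoding (perf record normally derives this from the format files itself, and a real Intel PT session additionally needs the AUX area mmap, which is omitted here):

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* The dynamic PMU type number lives next to the format/ directory. */
        FILE *f = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");
        int type = -1;
        if (!f) {
            perror("intel_pt type");
            return 1;
        }
        if (fscanf(f, "%d", &type) != 1) {
            fprintf(stderr, "could not parse PMU type\n");
            fclose(f);
            return 1;
        }
        fclose(f);

        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        /* format/mtc:config:9 says the 'mtc' term is bit 9 of config. */
        attr.config = 1ULL << 9;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* Opening alone does not produce a trace; this only shows the
         * config-bit mapping for the mtc term. */
        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            perror("perf_event_open");
        else
            close(fd);
        return 0;
    }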
/linux/Documentation/scsi/

ChangeLog.megaraid:
    197  Jun'ichi Nomura [mailto:jnomura@mtc.biglobe.ne.jp]