| /linux/drivers/comedi/drivers/ni_routing/ni_route_values/ |
| H A D | ni_mseries.c | 57 [B(TRIGGER_LINE(0))] = I(18), 58 [B(TRIGGER_LINE(1))] = I(19), 59 [B(TRIGGER_LINE(2))] = I(20), 60 [B(TRIGGER_LINE(3))] = I(21), 61 [B(TRIGGER_LINE(4))] = I(22), 62 [B(TRIGGER_LINE(5))] = I(23), 63 [B(TRIGGER_LINE(6))] = I(24), 64 [B(TRIGGER_LINE(7))] = I(25), 65 [B(NI_CtrSource(0))] = I(9), 66 [B(NI_CtrSource(1))] = I(4), [all …]
|
| H A D | ni_eseries.c | 54 [B(NI_AI_StartTrigger)] = I(NI_PFI_OUTPUT_AI_START1), 57 [B(NI_AI_ReferenceTrigger)] = I(NI_PFI_OUTPUT_AI_START2), 60 [B(NI_AI_ConvertClock)] = I(NI_PFI_OUTPUT_AI_CONVERT), 63 [B(NI_CtrSource(1))] = I(NI_PFI_OUTPUT_G_SRC1), 66 [B(NI_CtrGate(1))] = I(NI_PFI_OUTPUT_G_GATE1), 69 [B(NI_AO_SampleClock)] = I(NI_PFI_OUTPUT_AO_UPDATE_N), 72 [B(NI_AO_StartTrigger)] = I(NI_PFI_OUTPUT_AO_START1), 75 [B(NI_AI_SampleClock)] = I(NI_PFI_OUTPUT_AI_START_PULSE), 78 [B(NI_CtrSource(0))] = I(NI_PFI_OUTPUT_G_SRC0), 81 [B(NI_CtrGate(0))] = I(NI_PFI_OUTPUT_G_GATE0), [all …]
|
| H A D | ni_660x.c | 49 [B(NI_CtrInternalOutput(7))] = I(1), 52 [B(NI_CtrGate(7))] = I(1), 55 [B(NI_CtrSource(7))] = I(1), 58 [B(NI_CtrInternalOutput(6))] = I(1), 61 [B(NI_CtrGate(6))] = I(1), 64 [B(NI_CtrSource(6))] = I(1), 67 [B(NI_CtrInternalOutput(5))] = I(1), 70 [B(NI_CtrGate(5))] = I(1), 73 [B(NI_CtrSource(5))] = I(1), 76 [B(NI_CtrInternalOutput(4))] = I(1), [all …]
|
| /linux/Documentation/translations/zh_CN/virt/acrn/ |
| H A D | io-request.rst | 16 I/O request handling 19 A User VM's I/O requests are constructed by the hypervisor and dispatched by the ACRN Hypervisor Service Module to the I/O client corresponding to the address 20 range of the I/O request. Details of I/O request handling are described in the following sections. 22 1. I/O request 26 I/O request communication. An I/O request is a 256-byte structure buffer, an "acrn_io_request" struc- 27 ture, filled by an I/O handler of the hypervisor when a trapped I/O access happens in a User VM. In the Service VM, the 29 The buffer is used as an array of 16 I/O request slots, each of which is 256 bytes. This array is indexed by vCPU ID 32 2. I/O clients 35 An I/O client handles a User VM's I/O requests whose accessed GPAs fall within a certain range. Each User VM 36 can be associated with multiple I/O clients. Each User VM has a special client, called the default client, responsible for handling [all …]
|
| /linux/rust/quote/ |
| H A D | ext.rs | 35 fn append_all<I>(&mut self, iter: I) in append_all() 37 I: IntoIterator, in append_all() 38 I::Item: ToTokens; in append_all() 44 fn append_separated<I, U>(&mut self, iter: I, op: U) in append_separated() argument 46 I: IntoIterator, in append_separated() 47 I::Item: ToTokens, in append_separated() 54 fn append_terminated<I, U>(&mut self, iter: I, term: U) in append_terminated() argument 56 I: IntoIterator, in append_terminated() 57 I::Item: ToTokens, in append_terminated() 69 fn append_all<I>(&mut self, iter: I) in append_all() [all …]
|
| /linux/mm/ |
| H A D | vmstat.c | 1204 #define I(x) (x) macro 1205 [I(NR_FREE_PAGES)] = "nr_free_pages", 1206 [I(NR_FREE_PAGES_BLOCKS)] = "nr_free_pages_blocks", 1207 [I(NR_ZONE_INACTIVE_ANON)] = "nr_zone_inactive_anon", 1208 [I(NR_ZONE_ACTIVE_ANON)] = "nr_zone_active_anon", 1209 [I(NR_ZONE_INACTIVE_FILE)] = "nr_zone_inactive_file", 1210 [I(NR_ZONE_ACTIVE_FILE)] = "nr_zone_active_file", 1211 [I(NR_ZONE_UNEVICTABLE)] = "nr_zone_unevictable", 1212 [I(NR_ZONE_WRITE_PENDING)] = "nr_zone_write_pending", 1213 [I(NR_MLOCK)] = "nr_mlock", [all …]
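The #define I(x) (x) hit above is an identity wrapper used only to mark designated-initializer indices, which keeps the vmstat name table aligned with the enum that indexes it. A self-contained sketch of the same pattern, with an illustrative enum and names rather than the kernel's:

    #include <stdio.h>

    /* Illustrative stand-ins; the kernel pairs its zone/node stat enums
     * with the vmstat_text[] name table in the same way. */
    enum demo_stat_item { DEMO_NR_FREE, DEMO_NR_USED, NR_DEMO_STAT_ITEMS };

    #define I(x) (x)
    static const char * const demo_stat_text[NR_DEMO_STAT_ITEMS] = {
            [I(DEMO_NR_FREE)] = "nr_free_pages",
            [I(DEMO_NR_USED)] = "nr_used_pages",
    };
    #undef I

    int main(void)
    {
            for (int i = 0; i < NR_DEMO_STAT_ITEMS; i++)
                    printf("%d: %s\n", i, demo_stat_text[i]);
            return 0;
    }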
|
| /linux/arch/loongarch/include/asm/ |
| H A D | atomic-amo.h | 15 #define ATOMIC_OP(op, I, asm_op) \ argument 21 : "r" (I) \ 25 #define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ argument 33 : "r" (I) \ 36 return result c_op I; \ 39 #define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ argument 47 : "r" (I) \ 53 #define ATOMIC_OPS(op, I, asm_op, c_op) \ argument 54 ATOMIC_OP(op, I, asm_op) \ 55 ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ [all …]
|
| H A D | atomic-llsc.h | 15 #define ATOMIC_OP(op, I, asm_op) \ argument 26 :"r" (I) \ 30 #define ATOMIC_OP_RETURN(op, I, asm_op) \ argument 42 : "r" (I)); \ 47 #define ATOMIC_FETCH_OP(op, I, asm_op) \ argument 59 : "r" (I)); \ 64 #define ATOMIC_OPS(op,I ,asm_op, c_op) \ argument 65 ATOMIC_OP(op, I, asm_op) \ 66 ATOMIC_OP_RETURN(op, I , asm_op) \ 67 ATOMIC_FETCH_OP(op, I, asm_op) [all …]
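Both LoongArch headers above, like the arch/riscv/include/asm/atomic.h entry further down, stamp out whole families of atomic helpers from a few macro templates, with the I parameter naming the operand expression fed to the inline assembly (subtraction reuses the add template with I = -i). A compiler-builtin sketch of the same generation pattern, assuming GCC/Clang __atomic builtins stand in for the per-architecture asm:

    /* Sketch only: __atomic builtins replace the arch-specific asm bodies. */
    #define ATOMIC_OP(op, I, asm_op)                                       \
    static inline void demo_atomic_##op(int i, int *v)                     \
    {                                                                      \
            __atomic_fetch_##asm_op(v, I, __ATOMIC_RELAXED);               \
    }

    #define ATOMIC_OP_RETURN(op, I, asm_op, c_op)                          \
    static inline int demo_atomic_##op##_return(int i, int *v)             \
    {                                                                      \
            /* fetch returns the old value; c_op I yields the new one */   \
            return __atomic_fetch_##asm_op(v, I, __ATOMIC_SEQ_CST) c_op I; \
    }

    #define ATOMIC_OPS(op, I, asm_op, c_op)                                \
            ATOMIC_OP(op, I, asm_op)                                       \
            ATOMIC_OP_RETURN(op, I, asm_op, c_op)

    ATOMIC_OPS(add,  i, add, +)
    ATOMIC_OPS(sub, -i, add, +)   /* sub is add of a negated operand */

    #undef ATOMIC_OPS
    #undef ATOMIC_OP_RETURN
    #undef ATOMIC_OP

The real headers additionally parameterize the barrier (mb) and naming suffix, so relaxed and fully ordered variants come out of the same template.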
|
| /linux/Documentation/virt/acrn/ |
| H A D | io-request.rst | 3 I/O request handling 6 An I/O request of a User VM, which is constructed by the hypervisor, is 7 distributed by the ACRN Hypervisor Service Module to an I/O client 8 corresponding to the address range of the I/O request. Details of I/O request 11 1. I/O request 14 For each User VM, there is a shared 4-KByte memory region used for I/O requests 15 communication between the hypervisor and Service VM. An I/O request is a 17 an I/O handler of the hypervisor when a trapped I/O access happens in a User 20 used as an array of 16 I/O request slots with each I/O request slot being 256 23 2. I/O clients [all …]
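The layout described above (a shared 4-KByte page per User VM, carved into 16 fixed-size request slots indexed by vCPU ID) reduces to a simple array. The sketch below mirrors the description rather than quoting the UAPI header, so the type names are illustrative:

    #include <assert.h>

    #define DEMO_IO_REQUEST_MAX   16    /* one slot per vCPU, indexed by vCPU ID */
    #define DEMO_IO_REQUEST_SIZE  256   /* size of one acrn_io_request, per the text */

    /* Placeholder standing in for struct acrn_io_request. */
    struct demo_io_request {
            unsigned char raw[DEMO_IO_REQUEST_SIZE];
    };

    /* The shared region that the hypervisor and the Service VM agree on. */
    struct demo_io_request_buffer {
            struct demo_io_request req_slot[DEMO_IO_REQUEST_MAX];
    };

    /* 16 slots x 256 bytes fill the 4-KByte shared page exactly. */
    static_assert(sizeof(struct demo_io_request_buffer) == 4096,
                  "slots must fill the shared page");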
|
| /linux/arch/powerpc/perf/ |
| H A D | hv-gpci-requests.h | 51 #include I(REQUEST_BEGIN) 67 #include I(REQUEST_END) 72 #include I(REQUEST_BEGIN) 80 #include I(REQUEST_END) 90 #include I(REQUEST_BEGIN) 95 #include I(REQUEST_END) 101 #include I(REQUEST_BEGIN) 106 #include I(REQUEST_END) 112 #include I(REQUEST_BEGIN) 121 #include I(REQUEST_END) [all …]
|
| /linux/drivers/net/ethernet/qlogic/netxen/ |
| H A D | netxen_nic_hdr.h | 551 #define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ argument 552 (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) 553 #define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ argument 554 (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) 555 #define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ argument 556 (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) 557 #define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ argument 558 (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) 559 #define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ argument 560 (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) [all …]
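Each of the macros above computes a per-port register address by adding a fixed per-register base to a 0x10000 stride multiplied by the port index I; with the NETXEN_NIU_GB_MAC_CONFIG_0 definition shown, port 2 lands at NETXEN_CRB_NIU + 0x30000 + 2*0x10000 = NETXEN_CRB_NIU + 0x50000. A stand-alone sketch of the addressing scheme, where the base value is a placeholder rather than the driver's constant:

    #include <stdio.h>

    #define DEMO_CRB_NIU 0x00600000UL             /* placeholder base, not the driver's */
    #define DEMO_NIU_GB_MAC_CONFIG_0(I) \
            (DEMO_CRB_NIU + 0x30000 + (I) * 0x10000)

    int main(void)
    {
            for (int port = 0; port < 4; port++)
                    printf("port %d: MAC_CONFIG_0 at 0x%lx\n",
                           port, DEMO_NIU_GB_MAC_CONFIG_0(port));
            return 0;
    }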
|
| /linux/arch/xtensa/variants/test_kc705_hifi/include/variant/ |
| H A D | tie-asm.h | 222 AE_S64.I aed0, \ptr, .Lxchal_ofs_+24 223 AE_S64.I aed1, \ptr, .Lxchal_ofs_+32 224 AE_S64.I aed2, \ptr, .Lxchal_ofs_+40 225 AE_S64.I aed3, \ptr, .Lxchal_ofs_+48 226 AE_S64.I aed4, \ptr, .Lxchal_ofs_+56 228 AE_S64.I aed5, \ptr, .Lxchal_ofs_+0 229 AE_S64.I aed6, \ptr, .Lxchal_ofs_+8 230 AE_S64.I aed7, \ptr, .Lxchal_ofs_+16 231 AE_S64.I aed8, \ptr, .Lxchal_ofs_+24 232 AE_S64.I aed9, \ptr, .Lxchal_ofs_+32 [all …]
|
| /linux/Documentation/translations/zh_CN/block/ |
| H A D | blk-mq.rst | 15 Multi-Queue Block IO Queueing Mechanism (blk-mq) 18 The multi-queue block I/O queueing mechanism provides a set of APIs that allow high-speed storage devices to process concurrent 19 I/O requests in multiple queues at the same time and submit them to the block device, achieving an extremely high number of input/output operations per second 28 disks have been the de facto standard since the early days of Linux kernel development. The goal of the block I/O subsystem is to, as far as possible, 37 The original design had only one queue to store block device I/O requests and used only one lock. Because of the cache's 39 (or the same process on different CPUs) perform block device I/O at the same time, this single-queue model also suffers from severe 47 When user space performs I/O to a block device (for example, reading or writing a file), blk-mq steps in: it stores and 48 manages the I/O requests sent to the block device, acting between user space (the filesystem, if present) and the block device driver 62 When these requests are not sent directly to the driver, the block I/O subsystem adds them to software staging queues 72 In addition, the I/O scheduler can also reorder requests to ensure fairness of system resources (for example, to prevent a [all …]
|
| H A D | data-integrity.rst | 25 try to solve this problem by attaching integrity metadata to the I/O. The integrity metadata (in 27 each sector is written to disk in the correct order. Some protection schemes also guarantee that the I/O is written to the correct location on disk. 30 usually only work within their own isolated domains, or at best between adjacent nodes in the I/O path. What is interesting about DIF and 31 the other data integrity extensions is that the protection format is well defined, and every node in the I/O path can 32 verify the integrity of the I/O and reject it outright if corruption is detected. This not only prevents data corruption but also isolates 60 separation. The I/O can only complete when these two distinct buffers match. 69 The data integrity framework in Linux allows protection information to be pinned to the I/O and, on controllers that support the feature, 82 and to some extent the kernel), the integrity metadata is opaque information attached to the I/O. 84 The current implementation allows the block layer to automatically generate protection information for any I/O. The ultimate goal is to move the calculation of integrity meta- 85 data for user data to user space. Metadata generated in the kernel, and other I/O, will still use the automatic generation interface. [all …]
|
| /linux/Documentation/block/ |
| H A D | stat.rst | 29 read I/Os requests number of read I/Os processed 30 read merges requests number of read I/Os merged with in-queue I/O 33 write I/Os requests number of write I/Os processed 34 write merges requests number of write I/Os merged with in-queue I/O 37 in_flight requests number of I/Os currently in flight 40 discard I/Os requests number of discard I/Os processed 41 discard merges requests number of discard I/Os merged with in-queue I/O 44 flush I/Os requests number of flush I/Os processed 48 read I/Os, write I/Os, discard I/Os 51 These values increment when an I/O request completes. [all …]
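Because /sys/block/<dev>/stat is a single line of whitespace-separated counters in the order tabulated above, a user-space reader can pull out fields with fscanf. A minimal sketch; the device name is only an example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long rd_ios, rd_merges, rd_sectors, rd_ticks, wr_ios;
            FILE *f = fopen("/sys/block/sda/stat", "r");   /* example device */

            if (!f)
                    return 1;
            /* First five fields: read I/Os, read merges, read sectors,
             * read ticks, write I/Os. */
            if (fscanf(f, "%llu %llu %llu %llu %llu",
                       &rd_ios, &rd_merges, &rd_sectors, &rd_ticks, &wr_ios) == 5)
                    printf("read I/Os: %llu  write I/Os: %llu\n", rd_ios, wr_ios);
            fclose(f);
            return 0;
    }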
|
| /linux/arch/xtensa/variants/test_mmuhifi_c3/include/variant/ |
| H A D | tie-asm.h | 109 AE_SP24X2S.I aep0, \ptr, 16 110 AE_SP24X2S.I aep1, \ptr, 24 111 AE_SP24X2S.I aep2, \ptr, 32 112 AE_SP24X2S.I aep3, \ptr, 40 113 AE_SP24X2S.I aep4, \ptr, 48 114 AE_SP24X2S.I aep5, \ptr, 56 116 AE_SP24X2S.I aep6, \ptr, 0 117 AE_SP24X2S.I aep7, \ptr, 8 118 AE_SQ56S.I aeq0, \ptr, 16 119 AE_SQ56S.I aeq1, \ptr, 24 [all …]
|
| /linux/arch/riscv/include/asm/ |
| H A D | atomic.h | 53 #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \ argument 60 : "r" (I) \ 65 #define ATOMIC_OPS(op, asm_op, I) \ argument 66 ATOMIC_OP (op, asm_op, I, w, int, ) 68 #define ATOMIC_OPS(op, asm_op, I) \ argument 69 ATOMIC_OP (op, asm_op, I, w, int, ) \ 70 ATOMIC_OP (op, asm_op, I, d, s64, 64) 87 #define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \ in ATOMIC_OPS() argument 96 : "r" (I) \ in ATOMIC_OPS() 107 : "r" (I) \ [all …]
|
| /linux/Documentation/trace/coresight/ |
| H A D | panic.rst | 211 I etm4_enable_hw: ffff800008ae1dd4: 213 I etm4_enable_hw: ffff800008ae1dd8: 215 I etm4_enable_hw: ffff800008ae1ddc: 217 I etm4_enable_hw: ffff800008ae1de0: 219 I etm4_enable_hw: ffff800008ae1de4: 221 I etm4_enable_hw: ffff800008ae1de8: 223 I etm4_enable_hw: ffff800008ae1dec: 225 I etm4_enable_hw: ffff800008ae1df0: 227 I etm4_enable_hw: ffff800008ae1df4: 229 I etm4_enable_hw: ffff800008ae1df8: [all …]
|
| /linux/security/apparmor/include/ |
| H A D | label.h | 171 #define label_for_each(I, L, P) \ argument 172 for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i)) 175 #define label_for_each_cont(I, L, P) \ argument 176 for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i)) 181 #define label_for_each_confined(I, L, P) \ argument 182 for ((I).i = aa_label_next_confined((L), 0); \ 183 ((P) = (L)->vec[(I).i]); \ 184 (I).i = aa_label_next_confined((L), (I).i + 1)) 186 #define label_for_each_in_merge(I, A, B, P) \ argument 187 for ((I).i = (I).j = 0; \ [all …]
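The label_for_each() family above walks a NULL-terminated vector using a small iterator object whose .i member (and .j for merges) tracks the position. A self-contained sketch of the same iteration pattern, with illustrative types standing in for AppArmor's struct aa_label and its profile vector:

    #include <stdio.h>

    struct demo_it { int i, j; };              /* iterator, as the macros expect */

    struct demo_label {
            const char *vec[4];                /* NULL-terminated entry vector */
    };

    #define demo_label_for_each(I, L, P) \
            for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i))

    int main(void)
    {
            struct demo_label label = { .vec = { "profile_a", "profile_b", NULL } };
            struct demo_it it;
            const char *p;

            demo_label_for_each(it, &label, p)
                    printf("%s\n", p);
            return 0;
    }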
|
| /linux/include/linux/platform_data/ |
| H A D | adp8870.h | 85 #define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) argument 90 #define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) argument 95 #define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) argument 100 #define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) argument 105 #define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) argument
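These helpers linearly rescale a requested current into the chip's register code with integer arithmetic, so results truncate: with the ADP8870_BL_CUR_mA definition shown, 20 mA maps to (20 * 127) / 30 = 84 and 30 mA to the full-scale code 127. A quick check, with the macro copied verbatim from the snippet:

    #include <stdio.h>

    /* Copied from the header above: code 127 corresponds to 30 mA full scale. */
    #define ADP8870_BL_CUR_mA(I) ((I * 127) / 30)

    int main(void)
    {
            printf("20 mA -> code %d\n", ADP8870_BL_CUR_mA(20));   /* prints 84 */
            printf("30 mA -> code %d\n", ADP8870_BL_CUR_mA(30));   /* prints 127 */
            return 0;
    }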
|
| /linux/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/ |
| H A D | com.fuc | 146 iowr I[$r1 + 0x300] $r2 150 iowr I[$r1] $r2 156 iowr I[$r1] $r2 166 iord $r1 I[$r0 + 0x200] 178 iowr I[$r0 + 0x100] $r1 192 iord $r4 I[$r4 + 0] 201 iowrs I[$r15] $r5 220 iowrs I[$r15] $r14 245 iord $r3 I[$r2] 253 iowr I[$r2] $r3 [all …]
|
| /linux/rust/pin-init/examples/ |
| H A D | static_init.rs | 26 pub struct StaticInit<T, I> { 28 init: Cell<Option<I>>, 33 unsafe impl<T: Sync, I> Sync for StaticInit<T, I> {} 34 unsafe impl<T: Send, I> Send for StaticInit<T, I> {} 36 impl<T, I: PinInit<T>> StaticInit<T, I> { 37 pub const fn new(init: I) -> Self { in new() 47 impl<T, I: PinInit<T>> ops::Deref for StaticInit<T, I> {
|
| /linux/Documentation/hwmon/ |
| H A D | it87.rst | 10 Addresses scanned: from Super I/O config space (8 I/O ports) 18 Addresses scanned: from Super I/O config space (8 I/O ports) 24 Addresses scanned: from Super I/O config space (8 I/O ports) 32 Addresses scanned: from Super I/O config space (8 I/O ports) 40 Addresses scanned: from Super I/O config space (8 I/O ports) 48 Addresses scanned: from Super I/O config space (8 I/O ports) 56 Addresses scanned: from Super I/O config space (8 I/O ports) 64 Addresses scanned: from Super I/O config space (8 I/O ports) 72 Addresses scanned: from Super I/O config space (8 I/O ports) 80 Addresses scanned: from Super I/O config space (8 I/O ports) [all …]
|
| /linux/rust/pin-init/src/ |
| H A D | lib.rs | 1095 pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, T)>); 1101 unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E> 1103 I: PinInit<T, E>, 1203 pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, T)>); 1208 unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E> 1210 I: Init<T, E>, 1224 unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E> 1226 I: Init<T, E>, 1324 pub fn init_array_from_fn<I, const N: usize, T, E>( in init_array_from_fn() argument 1325 mut make_init: impl FnMut(usize) -> I, in init_array_from_fn() argument [all …]
|
| /linux/Documentation/admin-guide/device-mapper/ |
| H A D | dm-io.rst | 5 Dm-io provides synchronous and asynchronous I/O services. There are three 6 types of I/O services available, and each type has a sync and an async 10 of the I/O. Each io_region indicates a block-device along with the starting 22 The first I/O service type takes a list of memory pages as the data buffer for 23 the I/O, along with an offset into the first page:: 37 The second I/O service type takes an array of bio vectors as the data buffer 38 for the I/O. This service can be handy if the caller has a pre-assembled bio, 48 The third I/O service type takes a pointer to a vmalloc'd memory buffer as the 49 data buffer for the I/O. This service can be handy if the caller needs to do 50 I/O to a large region but doesn't want to allocate a large number of individual [all …]
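As a rough illustration of the third service type described above (a vmalloc'd buffer as the data target), the sketch below issues one synchronous read over a dm_io_region spanning 4 KiB. It assumes the dm-io request/region structures and dm_io() of recent kernels; field names and the dm_io() argument list have shifted between kernel versions, so treat this as a sketch rather than a reference:

    #include <linux/blk_types.h>
    #include <linux/dm-io.h>

    /* Hypothetical helper: read the first 4 KiB of bdev into a vmalloc'd
     * buffer using an already-created dm_io_client. */
    static int demo_read_4k(struct block_device *bdev, void *vmalloc_buf,
                            struct dm_io_client *client)
    {
            struct dm_io_region where = {
                    .bdev   = bdev,
                    .sector = 0,
                    .count  = 8,               /* 8 x 512-byte sectors = 4 KiB */
            };
            struct dm_io_request req = {
                    .bi_opf      = REQ_OP_READ,
                    .mem.type    = DM_IO_VMA,  /* data buffer is a vmalloc'd area */
                    .mem.ptr.vma = vmalloc_buf,
                    .notify.fn   = NULL,       /* no callback => synchronous */
                    .client      = client,
            };
            unsigned long error_bits;

            /* Newer kernels take an additional I/O priority argument here. */
            return dm_io(&req, 1, &where, &error_bits);
    }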
|