Lines Matching full:memory

47      - CPU attached address space (the CPU memory could be a range of things:
58 same memory location.
65 /* Ensure that the device's view of memory matches the CPU's view of memory.
76 writes could be to any CPU mapped memory object with any cacheability mode.
79 only fenced normal stores to normal memory. libibverbs users using other
80 memory types or non-temporal stores are required to use SFENCE in their own
84 #define udma_to_device_barrier() asm volatile("" ::: "memory")
86 #define udma_to_device_barrier() asm volatile("" ::: "memory")
88 #define udma_to_device_barrier() asm volatile("sync" ::: "memory")
90 #define udma_to_device_barrier() asm volatile("sync" ::: "memory")
92 #define udma_to_device_barrier() asm volatile("mf" ::: "memory")
94 #define udma_to_device_barrier() asm volatile("membar #StoreStore" ::: "memory")
96 #define udma_to_device_barrier() asm volatile("dsb st" ::: "memory");
98 #define udma_to_device_barrier() asm volatile("" ::: "memory")
112 #error No architecture specific memory barrier defines found!
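
The comment fragments above (lines 65-80) describe the doorbell pattern udma_to_device_barrier() exists for: make all CPU writes to the DMA buffer visible before the MMIO store that triggers the device. A minimal sketch of that pattern, assuming the macro from this header; 'buf', 'db_reg', 'mmio_write32' and the include path are invented for illustration:

    #include <stdint.h>
    #include "udma_barrier.h"   /* assumed path for the header matched above */

    /* Hypothetical UC (uncached) MMIO register write. */
    static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
        *reg = val;
    }

    void post_dma(uint32_t *buf, volatile uint32_t *db_reg)
    {
        buf[0] = 1;               /* payload the device will DMA-read */
        udma_to_device_barrier(); /* device must see the '1' before the doorbell */
        mmio_write32(db_reg, 1);  /* ring the doorbell; DMA may start now */
    }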
117 from the device - eg by reading a MMIO register or seeing that CPU memory is
123 For instance, this would be used after testing a valid bit in a memory
128 #define udma_from_device_barrier() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
130 #define udma_from_device_barrier() asm volatile("lfence" ::: "memory")
132 #define udma_from_device_barrier() asm volatile("lwsync" ::: "memory")
134 #define udma_from_device_barrier() asm volatile("sync" ::: "memory")
136 #define udma_from_device_barrier() asm volatile("mf" ::: "memory")
138 #define udma_from_device_barrier() asm volatile("membar #LoadLoad" ::: "memory")
140 #define udma_from_device_barrier() asm volatile("dsb ld" ::: "memory");
142 #define udma_from_device_barrier() asm volatile("" ::: "memory")
150 #error No architecture specific memory barrier defines found!
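
Line 123 says this barrier is used after testing a valid bit in a memory-written CQE. A sketch of that polling pattern; 'struct cqe' and its layout are placeholders, only the barrier placement is the point:

    #include <stdint.h>

    struct cqe {
        uint32_t data;   /* DMA-written payload */
        uint32_t valid;  /* device writes this last */
    };

    int poll_cqe(volatile struct cqe *cqe, uint32_t *out)
    {
        if (!cqe->valid)
            return 0;               /* no completion yet */
        udma_from_device_barrier(); /* the valid bit was observed, so reads
                                       below must see the device's earlier
                                       ordered stores */
        *out = cqe->data;
        return 1;
    }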
153 /* Order writes to CPU memory so that a DMA device cannot view writes after
163 anything but normal stores to normal malloc memory. Usage should be:
166 udma_to_device_barrier(); // Get user memory ready for DMA
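
Line 166 is the first line of the usage snippet in the header comment; filled out into compilable form (the 'struct wqe' fields are placeholders), the full pattern reads:

    #include <stdint.h>

    struct wqe { uint64_t addr; uint32_t flags; uint32_t valid; };

    void write_wqe(struct wqe *wqe, uint64_t addr, uint32_t flags)
    {
        udma_to_device_barrier();      /* get user memory ready for DMA */
        wqe->addr = addr;
        wqe->flags = flags;
        udma_ordering_write_barrier(); /* guarantee WQE written in order */
        wqe->valid = 1;                /* device must not observe a set valid
                                          bit with unwritten data */
    }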
174 /* Promptly flush writes to MMIO Write Combining memory.
175 This should be used after a write to WC memory. This is both a barrier
179 This is not required to have any effect on CPU memory.
191 Note that there is no order guarantee for writes to WC memory without
194 This is intended to be used in conjunction with WC memory to generate large
198 #define mmio_flush_writes() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
200 #define mmio_flush_writes() asm volatile("sfence" ::: "memory")
202 #define mmio_flush_writes() asm volatile("sync" ::: "memory")
204 #define mmio_flush_writes() asm volatile("sync" ::: "memory")
206 #define mmio_flush_writes() asm volatile("fwb" ::: "memory")
208 #define mmio_flush_writes() asm volatile("membar #StoreStore" ::: "memory")
210 #define mmio_flush_writes() asm volatile("dsb st" ::: "memory");
212 #define mmio_flush_writes() asm volatile("" ::: "memory")
220 #error No architecture specific memory barrier defines found!
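
The comment these defines implement (lines 174-194) requires that the flush also defeat write combining. A sketch of the two-TLP example from that comment; 'wc_mem' stands for a pointer into a Write Combining MMIO mapping:

    #include <stdint.h>

    void two_tlps(volatile uint32_t *wc_mem)
    {
        wc_mem[0] = 1;
        mmio_flush_writes(); /* push the '1' out as its own MemWr TLP */
        wc_mem[0] = 2;       /* without the barrier the CPU could combine
                                both stores into a single TLP carrying '2' */
    }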
224 writes. This should be used before a write to WC memory.
227 memory types:
235 This is intended to be used in conjunction with WC memory to generate large
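
Lines 224-235 describe mmio_wc_start(): it goes before WC writes to keep them ordered against earlier MMIO writes of other memory types. A sketch of the common WC copy pattern built from both macros; 'wc_buf' and 'wqe' are placeholders, and the plain memcpy() to WC memory is illustrative only (real providers use carefully sized, aligned copies):

    #include <stddef.h>
    #include <string.h>

    void wc_post(volatile void *wc_buf, const void *wqe, size_t len)
    {
        mmio_wc_start();                  /* order vs. earlier MMIO writes */
        memcpy((void *)wc_buf, wqe, len); /* fill the WC mapping */
        mmio_flush_writes();              /* flush promptly as large TLPs */
    }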
243 providers haphazardly open code writes to MMIO memory omitting even
271 * strongly order WC and other memory types. */ in mmio_wc_spinlock()
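
The line 271 match is a comment inside mmio_wc_spinlock(). A plausible reconstruction of that helper, consistent with the matched comment (the exact architecture test is an assumption; check the real header):

    #include <pthread.h>

    static inline void mmio_wc_spinlock(pthread_spinlock_t *lock)
    {
        pthread_spin_lock(lock);
    #if !defined(__i386__) && !defined(__x86_64__)
        /* For x86 the serialization within the spin lock is enough to
         * strongly order WC and other memory types. */
        mmio_wc_start();
    #endif
    }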