atomic.h (dd5b64258f6d85a771ac71e215d8490bf80a2044) → atomic.h (48cae112b516ce625d38f22fdc07a29d509de845)
 /*-
  * Copyright (c) 1998 Doug Rabson
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 23 unchanged lines hidden ---

 #error this file needs sys/cdefs.h as a prerequisite
 #endif

 #ifdef _KERNEL
 #include <machine/md_var.h>
 #include <machine/specialreg.h>
 #endif

-#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
-#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
-#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
+#ifndef __OFFSETOF_MONITORBUF
+/*
+ * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
+ *
+ * The open-coded number is used instead of the symbolic expression to
+ * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
+ * An assertion in i386/vm_machdep.c ensures that the value is correct.
+ */
+#define	__OFFSETOF_MONITORBUF	0x180
+
+static __inline void
+__mbk(void)
+{
+
+	__asm __volatile("lock; addl $0,%%fs:%0"
+	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
+}
+
+static __inline void
+__mbu(void)
+{
+
+	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
+}
+#endif

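Note: the open-coded 0x180 is only safe because it is cross-checked against the real struct pcpu layout at build time. A minimal sketch of such a check, assuming CTASSERT from <sys/systm.h> and __offsetof from <sys/cdefs.h>; the exact spelling used in i386/vm_machdep.c may differ:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/pcpu.h>

	/* The build fails if the open-coded offset drifts from the layout. */
	CTASSERT(__OFFSETOF_MONITORBUF == __offsetof(struct pcpu, pc_monitorbuf));
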
 /*
  * Various simple operations on memory, each of which is atomic in the
  * presence of interrupts and multiple processors.
  *
  * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
  * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
  * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
  * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))

--- 189 unchanged lines hidden ---

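Note: a quick usage sketch of this family of operations (the _int variants parallel the _char forms listed above; these are the atomic(9) entry points this header provides):

	volatile u_int flags = 0;

	atomic_set_int(&flags, 0x04);		/* flags |= 0x04, atomically */
	atomic_add_int(&flags, 1);		/* flags += 1, atomically */
	atomic_clear_int(&flags, 0x04);		/* flags &= ~0x04, atomically */
	atomic_subtract_int(&flags, 1);		/* flags -= 1, atomically */
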
  * introducing false data dependencies.  In userspace, a word at the
  * top of the stack is utilized.
  *
  * For UP kernels, however, the memory of the single processor is
  * always consistent, so we only need to stop the compiler from
  * reordering accesses in a way that violates the semantics of acquire
  * and release.
  */
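Note: the store-load case this comment alludes to is the classic two-flag handshake. x86 allows a store to sit in the store buffer past a later load of a different location, so both threads can read a stale 0 and enter together unless a full barrier separates each thread's store from its load. An illustrative sketch, not kernel code:

	volatile u_int want0, want1;	/* both start at 0 */

	/* Thread 0 */
	want0 = 1;			/* store */
	__storeload_barrier();		/* forbid store-load reordering */
	if (want1 == 0) {
		/* critical section */
	}

	/* Thread 1 runs the mirror image with want0 and want1 swapped. */
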
-#if defined(_KERNEL)
-
-/*
- * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
- *
- * The open-coded number is used instead of the symbolic expression to
- * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
- * An assertion in i386/vm_machdep.c ensures that the value is correct.
- */
-#define	OFFSETOF_MONITORBUF	0x180
-
+
+#if defined(_KERNEL)
 #if defined(SMP)
-static __inline void
-__storeload_barrier(void)
-{
-
-	__asm __volatile("lock; addl $0,%%fs:%0"
-	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
-}
+#define	__storeload_barrier()	__mbk()
 #else /* _KERNEL && UP */
-static __inline void
-__storeload_barrier(void)
-{
-
-	__compiler_membar();
-}
+#define	__storeload_barrier()	__compiler_membar()
 #endif /* SMP */
 #else /* !_KERNEL */
-static __inline void
-__storeload_barrier(void)
-{
-
-	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
-}
+#define	__storeload_barrier()	__mbu()
 #endif /* _KERNEL*/

 #define	ATOMIC_LOAD(TYPE)				\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
 	u_##TYPE res;					\
 							\

--- 480 unchanged lines hidden ---

 	    (u_int)(new))
 #define	atomic_swap_ptr(p, v)				\
 	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
 #define	atomic_readandclear_ptr(p)			\
 	atomic_readandclear_int((volatile u_int *)(p))

 #endif /* !WANT_FUNCTIONS */

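Note: the _ptr forms above can forward to the _int implementations because i386 pointers and u_int are both 32 bits wide. A hypothetical caller (slot and newp are invented names):

	void *volatile slot;	/* hypothetical shared pointer slot */
	void *prevp;

	/* Atomically fetch the current pointer and replace it with zero. */
	prevp = (void *)atomic_readandclear_ptr((volatile uintptr_t *)&slot);
	/* Atomically publish a new pointer value. */
	(void)atomic_swap_ptr((volatile uintptr_t *)&slot, (uintptr_t)newp);
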
+#if defined(_KERNEL)
+#define	mb()	__mbk()
+#define	wmb()	__mbk()
+#define	rmb()	__mbk()
+#else
+#define	mb()	__mbu()
+#define	wmb()	__mbu()
+#define	rmb()	__mbu()
+#endif
+
 #endif /* !_MACHINE_ATOMIC_H_ */
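
Note: the net effect of this change is that in the kernel, mb(), wmb(), rmb(), and __storeload_barrier() all execute their locked addl against the private per-CPU pc_monitorbuf line through %fs rather than against the top of the stack, so the barrier no longer creates a false data dependency on whatever (%esp) happens to hold; userspace keeps the stack-top form, since it has no per-thread scratch line at a known segment offset. A usage sketch of the full barrier, with cpu_spinwait() assumed from <machine/cpu.h>:

	/* Announce our store, then poll a peer's flag; mb() keeps the
	 * store from being deferred past the first load of peer_flag. */
	my_flag = 1;
	mb();
	while (peer_flag == 0)
		cpu_spinwait();
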