1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2019, 2020 Jeffrey Roberson <jeff@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #ifndef _SYS_SMR_TYPES_H_ 29 #define _SYS_SMR_TYPES_H_ 30 31 #include <sys/_smr.h> 32 33 /* 34 * SMR Accessors are meant to provide safe access to SMR protected 35 * pointers and prevent misuse and accidental access. 36 * 37 * Accessors are grouped by type: 38 * entered - Use while in a read section (between smr_enter/smr_exit()) 39 * serialized - Use while holding a lock that serializes writers. Updates 40 * are synchronized with readers via included barriers. 41 * unserialized - Use after the memory is out of scope and not visible to 42 * readers. 
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.  For example, a writer might use:
 *
 * smr_serialized_store(pointer, value, mtx_owned(&writelock));
 *
 * These are only enabled in INVARIANTS kernels.
 */

/* Type restricting pointer access to force smr accessors. */
#define	SMR_POINTER(type)						\
struct {								\
	type	__ptr;		/* Do not access directly */		\
}

/*
 * Read from an SMR protected pointer while in a read section.
 * The acquire load pairs with the release in smr_serialized_store()
 * so the reader observes a fully initialized object.
 */
#define	smr_entered_load(p, smr) ({					\
	SMR_ASSERT(SMR_ENTERED((smr)), "smr_entered_load");		\
	(__typeof((p)->__ptr))atomic_load_acq_ptr(			\
	    (const uintptr_t *)&(p)->__ptr);				\
})

/*
 * Read from an SMR protected pointer while serialized by an
 * external mechanism. 'ex' should contain an assert that the
 * external mechanism is held. i.e. mtx_owned()
 */
#define	smr_serialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_serialized_load");				\
	(__typeof((p)->__ptr))atomic_load_ptr(				\
	    (const uintptr_t *)&(p)->__ptr);				\
})

/*
 * Store 'v' to an SMR protected pointer while serialized by an
 * external mechanism. 'ex' should contain an assert that the
 * external mechanism is held. i.e. mtx_owned()
 *
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define	smr_serialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_serialized_store");				\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_rel_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)

/*
 * Swap 'v' with an SMR protected pointer and return the old value
 * while serialized by an external mechanism. 'ex' should contain
 * an assert that the external mechanism is held. i.e. mtx_owned()
 *
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define	smr_serialized_swap(p, v, ex) ({				\
	SMR_ASSERT(ex, "smr_serialized_swap");				\
	__typeof((p)->__ptr) _v = (v);					\
	/* Release barrier guarantees contents are visible to reader */ \
	atomic_thread_fence_rel();					\
	(__typeof((p)->__ptr))atomic_swap_ptr(				\
	    (uintptr_t *)&(p)->__ptr, (uintptr_t)_v);			\
})

/*
 * Read from an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_unserialized_load");			\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})

/*
 * Store to an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_unserialized_store");			\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)

#ifndef _KERNEL

/*
 * Load an SMR protected pointer when accessing kernel data structures through
 * libkvm.  No synchronization is possible from userspace, so this is a
 * plain (racy) read of a crash dump or live-kernel snapshot.
 */
#define	smr_kvm_load(p) ((p)->__ptr)

#endif /* !_KERNEL */
#endif /* !_SYS_SMR_TYPES_H_ */