/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

struct mmu_rb_handler {
	struct list_head list;
	struct mmu_notifier mn;
	struct rb_root *root;
	spinlock_t lock;		/* protect the RB tree */
	struct mmu_rb_ops *ops;
};

static LIST_HEAD(mmu_rb_handlers);
static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					struct mm_struct *,
					unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);

static struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
{
	struct mmu_rb_handler *handlr;

	if (!ops->invalidate)
		return -EINVAL;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = root;
	handlr->ops = ops;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	spin_lock(&mmu_rb_lock);
	list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
	spin_unlock(&mmu_rb_lock);

	return mmu_notifier_register(&handlr->mn, current->mm);
}
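/*
 * Example client usage -- an illustrative sketch only.  The names
 * "sdma_invalidate", "sdma_rb_ops" and "sdma_cache_root" are hypothetical,
 * and the hook signatures are inferred from the call sites in this file
 * (see mmu_rb.h for the authoritative struct mmu_rb_ops definition):
 *
 *	static int sdma_invalidate(struct rb_root *root,
 *				   struct mmu_rb_node *node)
 *	{
 *		return 1;	// evict the node on any MMU change
 *	}
 *
 *	static struct mmu_rb_ops sdma_rb_ops = {
 *		.invalidate = sdma_invalidate,	// required, else -EINVAL
 *	};
 *
 *	static struct rb_root sdma_cache_root = RB_ROOT;
 *
 *	ret = hfi1_mmu_rb_register(&sdma_cache_root, &sdma_rb_ops);
 *
 * ->filter, ->insert and ->remove are optional; only ->invalidate is
 * checked by hfi1_mmu_rb_register().
 */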
void hfi1_mmu_rb_unregister(struct rb_root *root)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler)
		return;

	/* Unregister first so we don't get any more notifications. */
	if (current->mm)
		mmu_notifier_unregister(&handler->mn, current->mm);

	spin_lock(&mmu_rb_lock);
	list_del_rcu(&handler->list);
	spin_unlock(&mmu_rb_lock);
	synchronize_rcu();

	spin_lock_irqsave(&handler->lock, flags);
	if (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node;
		struct mmu_rb_node *rbnode;

		while ((node = rb_first(root))) {
			rbnode = rb_entry(node, struct mmu_rb_node, node);
			rb_erase(node, root);
			if (handler->ops->remove)
				handler->ops->remove(root, rbnode, NULL);
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	if (!handler)
		return -EINVAL;

	spin_lock_irqsave(&handler->lock, flags);
	hfi1_cdbg(MMU, "Inserting node addr 0x%lx, len %lu", mnode->addr,
		  mnode->len);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, root);

	if (handler->ops->insert) {
		ret = handler->ops->insert(root, mnode);
		if (ret)
			__mmu_int_rb_remove(mnode, root);
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	hfi1_cdbg(MMU, "Searching for addr 0x%lx, len %lu", addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}
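/*
 * Worked example of the interval keying defined above, assuming 4 KiB
 * pages (addresses are illustrative only): a node with addr = 0x1234 and
 * len = 0x10 is indexed by mmu_node_start()/mmu_node_last() as the
 * page-aligned, inclusive range [0x1000, 0x1fff].  A lookup such as
 *
 *	node = hfi1_mmu_rb_search(root, 0x1f00, 0x10);
 *
 * finds that node, because __mmu_rb_search() matches on interval overlap
 * (the query becomes [0x1f00, 0x1f0f]), not on exact addr/len equality,
 * unless the client installed a stricter ->filter hook.
 */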
/* Caller must *not* hold handler lock. */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
			    struct mmu_rb_node *node, struct mm_struct *mm)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	hfi1_cdbg(MMU, "Removing node addr 0x%lx, len %lu", node->addr,
		  node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, handler->root);
	spin_unlock_irqrestore(&handler->lock, flags);

	if (handler->ops->remove)
		handler->ops->remove(handler->root, node, mm);
}

struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
				       unsigned long len)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;

	if (!handler)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}

struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *root,
					unsigned long addr, unsigned long len)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;

	if (!handler)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node)
		__mmu_int_rb_remove(node, handler->root);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}

void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);

	if (!handler || !node)
		return;

	__mmu_rb_remove(handler, node, NULL);
}

static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
{
	struct mmu_rb_handler *handler;

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
		if (handler->root == root)
			goto unlock;
	}
	handler = NULL;
unlock:
	rcu_read_unlock();
	return handler;
}

static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}

static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, mm, start, end);
}

static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root *root = handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		hfi1_cdbg(MMU, "Invalidating node addr 0x%lx, len %lu",
			  node->addr, node->len);
		if (handler->ops->invalidate(root, node)) {
			__mmu_int_rb_remove(node, root);
			if (handler->ops->remove)
				handler->ops->remove(root, node, mm);
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);
}
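/*
 * Summary of the invalidation path implemented above (a description, not
 * additional driver code): when the core mm unmaps a single page at some
 * virtual address "va", the .invalidate_page callback funnels into
 * mmu_notifier_mem_invalidate(mn, mm, va, va + PAGE_SIZE).  Every node
 * overlapping the inclusive interval [va, va + PAGE_SIZE - 1] is offered
 * to the client's ->invalidate() hook; a non-zero return evicts the node
 * from the tree and, if set, notifies ->remove().  The iterator fetches
 * the next node before ->invalidate() runs, so the walk survives the
 * eviction of the current node.
 */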