/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 Intel Corporation.
 */

#ifndef _HFI1_MMU_RB_H
#define _HFI1_MMU_RB_H

#include "hfi.h"

struct mmu_rb_node {
	unsigned long addr;		/* start of the tracked VA range */
	unsigned long len;		/* length of the tracked VA range */
	unsigned long __last;		/* interval-tree bookkeeping */
	struct rb_node node;		/* linkage in the handler's RB tree */
	struct mmu_rb_handler *handler;
	struct list_head list;		/* linkage in lru_list or del_list */
	struct kref refcount;		/* node lifetime; see hfi1_mmu_rb_release() */
};
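
/*
 * Callers typically embed an mmu_rb_node inside a larger, driver-private
 * structure and recover it with container_of() from the ops callbacks.
 * A hypothetical sketch (struct example_node and its members are
 * illustrative, not part of this header):
 *
 *	struct example_node {
 *		struct mmu_rb_node rb;
 *		struct page **pages;
 *		unsigned int npages;
 *	};
 */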

/* filter and evict must not sleep. Only remove is allowed to sleep. */
struct mmu_rb_ops {
	bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len);
	void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
	int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop);
};
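
/*
 * A minimal sketch of the callbacks for the example_node above; all names
 * are hypothetical.  filter() and evict() run under the handler lock and
 * must not sleep; remove() runs from the handler's workqueue and may
 * sleep (e.g. to unpin pages).  As used in this driver, a nonzero return
 * from evict() marks the node reclaimable, and setting *stop ends the
 * eviction pass early:
 *
 *	static bool example_filter(struct mmu_rb_node *node,
 *				   unsigned long addr, unsigned long len)
 *	{
 *		return node->addr == addr && node->len == len;
 *	}
 *
 *	static int example_evict(void *ops_arg, struct mmu_rb_node *mnode,
 *				 void *evict_arg, bool *stop)
 *	{
 *		*stop = false;
 *		return 1;
 *	}
 *
 *	static void example_remove(void *ops_arg, struct mmu_rb_node *mnode)
 *	{
 *		kfree(container_of(mnode, struct example_node, rb));
 *	}
 *
 *	static struct mmu_rb_ops example_ops = {
 *		.filter = example_filter,
 *		.remove = example_remove,
 *		.evict = example_evict,
 *	};
 */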

struct mmu_rb_handler {
	/*
	 * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
	 * they fit together in one cache line.  mn is relatively rarely
	 * accessed, so co-locating the spinlock with it achieves much of
	 * the cacheline contention reduction of giving the spinlock its own
	 * cacheline without the overhead of doing so.
	 */
	struct mmu_notifier mn;
	spinlock_t lock;        /* protect the RB tree */

	/* Begin on a new cacheline boundary here */
	struct rb_root_cached root ____cacheline_aligned_in_smp;
	void *ops_arg;			/* opaque pointer passed to the ops callbacks */
	struct mmu_rb_ops *ops;
	struct list_head lru_list;	/* nodes, least recently used first */
	struct work_struct del_work;	/* deferred removal work */
	struct list_head del_list;	/* nodes queued for del_work */
	struct workqueue_struct *wq;	/* queue that runs del_work */
	void *free_ptr;			/* backing allocation, freed at unregister */
};
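
/*
 * The co-location claim in the comment above could be checked at build
 * time; a hypothetical sketch, valid only on configurations where
 * spinlock_t stays small (no spinlock debugging or lockdep):
 *
 *	static_assert(offsetofend(struct mmu_rb_handler, lock) <=
 *		      L1_CACHE_BYTES);
 */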

int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
void hfi1_mmu_rb_release(struct kref *refcount);

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
					  unsigned long len);
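
/*
 * A hypothetical end-to-end flow, reusing example_node/example_ops from
 * the sketches above (error handling elided; my_priv, my_wq, my_evict_arg,
 * vaddr and size are assumptions, not part of this API).  As used in this
 * driver, hfi1_mmu_rb_get_first() returns a matching node with a reference
 * held, dropped here via kref_put(..., hfi1_mmu_rb_release):
 *
 *	struct mmu_rb_handler *h;
 *	struct example_node *en = kzalloc(sizeof(*en), GFP_KERNEL);
 *	struct mmu_rb_node *found;
 *
 *	hfi1_mmu_rb_register(my_priv, &example_ops, my_wq, &h);
 *
 *	en->rb.addr = vaddr;
 *	en->rb.len = size;
 *	kref_init(&en->rb.refcount);
 *	hfi1_mmu_rb_insert(h, &en->rb);
 *
 *	found = hfi1_mmu_rb_get_first(h, vaddr, size);
 *	if (found)
 *		kref_put(&found->refcount, hfi1_mmu_rb_release);
 *
 *	hfi1_mmu_rb_evict(h, my_evict_arg);
 *	hfi1_mmu_rb_unregister(h);
 */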

#endif /* _HFI1_MMU_RB_H */