// SPDX-License-Identifier: GPL-2.0
/*
 * Generic support for Memory System Cache Maintenance operations.
 *
 * Coherency maintenance drivers register with this simple framework, which
 * iterates over each registered instance to first kick off invalidation and
 * then wait until it is complete.
 *
 * If no implementations are registered yet, cpu_cache_has_invalidate_memregion()
 * returns false. If it runs concurrently with unregistration a race exists,
 * but this is no worse than the case where the operations instance
 * responsible for a given memory region has not yet registered.
 */
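
/*
 * Usage sketch (illustrative only, not part of this file): a coherency
 * driver supplies a struct cache_coherency_ops with at least a wbinv()
 * callback, allocates an instance and registers it. All "foo" names below
 * are hypothetical; foo_start_wbinv() stands in for whatever hardware
 * mechanism actually kicks off the write-back + invalidate.
 *
 *	static int foo_wbinv(struct cache_coherency_ops_inst *cci, void *data)
 *	{
 *		struct cc_inval_params *p = data;
 *
 *		return foo_start_wbinv(p->addr, p->size);
 *	}
 *
 *	static const struct cache_coherency_ops foo_ops = {
 *		.wbinv = foo_wbinv,
 *	};
 *
 *	struct cache_coherency_ops_inst *cci;
 *	int rc;
 *
 *	cci = _cache_coherency_ops_instance_alloc(&foo_ops, sizeof(*cci));
 *	if (!cci)
 *		return -ENOMEM;
 *	rc = cache_coherency_ops_instance_register(cci);
 *
 * On teardown the driver calls cache_coherency_ops_instance_unregister()
 * and then drops its reference with cache_coherency_ops_instance_put().
 */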
#include <linux/cache_coherency.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

static LIST_HEAD(cache_ops_instance_list);
static DECLARE_RWSEM(cache_ops_instance_list_lock);
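
/* kref release callback: frees the instance when the last reference goes. */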
static void __cache_coherency_ops_instance_free(struct kref *kref)
{
	struct cache_coherency_ops_inst *cci =
		container_of(kref, struct cache_coherency_ops_inst, kref);
	kfree(cci);
}
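
/*
 * Drop a reference on an instance obtained from
 * _cache_coherency_ops_instance_alloc(); it is freed once the last reference
 * is dropped.
 */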
void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci)
{
	kref_put(&cci->kref, __cache_coherency_ops_instance_free);
}
EXPORT_SYMBOL_GPL(cache_coherency_ops_instance_put);
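
/* Phase 1: ask one registered instance to kick off write-back + invalidate. */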
static int cache_inval_one(struct cache_coherency_ops_inst *cci, void *data)
{
	if (!cci->ops)
		return -EINVAL;

	return cci->ops->wbinv(cci, data);
}
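
/*
 * Phase 2: wait for one instance to complete. The done() callback is
 * optional; instances without one are treated as already complete.
 */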
static int cache_inval_done_one(struct cache_coherency_ops_inst *cci)
{
	if (!cci->ops)
		return -EINVAL;

	if (!cci->ops->done)
		return 0;

	return cci->ops->done(cci);
}
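
/*
 * Invalidate a physical range across all registered instances: kick off the
 * operation on every instance first, then wait for each to complete, so the
 * per-instance operations can overlap. Returns 0 or the first error seen.
 */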
static int cache_invalidate_memregion(phys_addr_t addr, size_t size)
{
	int ret;
	struct cache_coherency_ops_inst *cci;
	struct cc_inval_params params = {
		.addr = addr,
		.size = size,
	};

	guard(rwsem_read)(&cache_ops_instance_list_lock);
	list_for_each_entry(cci, &cache_ops_instance_list, node) {
		ret = cache_inval_one(cci, &params);
		if (ret)
			return ret;
	}
	list_for_each_entry(cci, &cache_ops_instance_list, node) {
		ret = cache_inval_done_one(cci);
		if (ret)
			return ret;
	}

	return 0;
}
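
/*
 * Allocate and initialise an ops instance. @size lets a caller allocate a
 * larger driver-specific structure embedding struct cache_coherency_ops_inst
 * (necessarily as its first member, given the pointer returned here); the
 * leading underscore suggests drivers normally reach this via a wrapper in
 * <linux/cache_coherency.h>.
 */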
struct cache_coherency_ops_inst *
_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
				    size_t size)
{
	struct cache_coherency_ops_inst *cci;

	if (!ops || !ops->wbinv)
		return NULL;

	cci = kzalloc(size, GFP_KERNEL);
	if (!cci)
		return NULL;

	cci->ops = ops;
	INIT_LIST_HEAD(&cci->node);
	kref_init(&cci->kref);

	return cci;
}
EXPORT_SYMBOL_NS_GPL(_cache_coherency_ops_instance_alloc, "CACHE_COHERENCY");
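
/*
 * Add an instance to the global list so that subsequent
 * cpu_cache_invalidate_memregion() calls include it.
 */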
int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci)
{
	guard(rwsem_write)(&cache_ops_instance_list_lock);
	list_add(&cci->node, &cache_ops_instance_list);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_register, "CACHE_COHERENCY");
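
/*
 * Remove an instance from the global list. The caller still holds its
 * reference and would typically follow this with
 * cache_coherency_ops_instance_put().
 */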
void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci)
{
	guard(rwsem_write)(&cache_ops_instance_list_lock);
	list_del(&cci->node);
}
EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_unregister, "CACHE_COHERENCY");
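
/*
 * Write back and invalidate any caches covering the physical range
 * [start, start + len) via every registered ops instance. Returns 0 on
 * success or the first error reported by an instance.
 */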
int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
	return cache_invalidate_memregion(start, len);
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");

/*
 * Used for optimization / debug purposes only, as instance removal can race
 * with this check.
 *
 * Machines that do not support invalidation, e.g. VMs, will not have any
 * operations instance to register and so this will always return false.
 */
bool cpu_cache_has_invalidate_memregion(void)
{
	guard(rwsem_read)(&cache_ops_instance_list_lock);
	return !list_empty(&cache_ops_instance_list);
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
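
/*
 * Consumer-side sketch (illustrative only, not part of this file): a caller
 * that is about to repurpose a physical memory region might invalidate the
 * caches covering it like so ("res" being a struct resource it owns):
 *
 *	rc = cpu_cache_invalidate_memregion(res->start, resource_size(res));
 *	if (rc)
 *		return rc;
 *
 * cpu_cache_has_invalidate_memregion() can additionally be used, per the
 * comment above, as an optimization / debug hint that at least one
 * operations instance is currently registered.
 */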