xref: /linux/drivers/misc/cxl/base.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  * Copyright 2014 IBM Corp.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/rcupdate.h>
12 #include <asm/errno.h>
13 #include <misc/cxl-base.h>
14 #include <linux/of_platform.h>
15 #include "cxl.h"
16 
/*
 * Call table supplied by the (possibly modular) cxl driver; NULL until
 * register_cxl_calls() publishes it.  Readers dereference it under
 * rcu_read_lock(); writers publish/clear it with RCU primitives.
 */
static struct cxl_calls *cxl_calls;

/*
 * Global context-in-use counter, exported for the cxl driver.
 * Presumably consulted by cxl_ctx_in_use() (declared in misc/cxl-base.h)
 * — not visible from this file, so treat as an assumption to confirm.
 */
atomic_t cxl_use_count = ATOMIC_INIT(0);
EXPORT_SYMBOL(cxl_use_count);
22 
23 #ifdef CONFIG_CXL_MODULE
24 
/*
 * Take a reference on the registered cxl driver module, if any.
 *
 * Returns the call table with its owning module pinned (release with
 * cxl_calls_put()), or NULL if no driver is registered or the module is
 * already being unloaded (try_module_get() failed).
 *
 * The rcu_read_lock() spans both the dereference and try_module_get(),
 * so unregister_cxl_calls()'s synchronize_rcu() cannot complete between
 * the two — the table stays valid while we pin the module.
 */
static inline struct cxl_calls *cxl_calls_get(void)
{
	struct cxl_calls *calls = NULL;

	rcu_read_lock();
	calls = rcu_dereference(cxl_calls);
	if (calls && !try_module_get(calls->owner))
		calls = NULL;
	rcu_read_unlock();

	return calls;
}
37 
38 static inline void cxl_calls_put(struct cxl_calls *calls)
39 {
40 	BUG_ON(calls != cxl_calls);
41 
42 	/* we don't need to rcu this, as we hold a reference to the module */
43 	module_put(cxl_calls->owner);
44 }
45 
46 #else /* !defined CONFIG_CXL_MODULE */
47 
/* Built-in variant: no module refcounting needed, just return the table. */
static inline struct cxl_calls *cxl_calls_get(void)
{
	return cxl_calls;
}
52 
/* Built-in variant: nothing to release. */
static inline void cxl_calls_put(struct cxl_calls *calls) { }
54 
55 #endif /* CONFIG_CXL_MODULE */
56 
57 /* AFU refcount management */
58 struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
59 {
60 	return (get_device(&afu->dev) == NULL) ? NULL : afu;
61 }
62 EXPORT_SYMBOL_GPL(cxl_afu_get);
63 
/* Release an AFU reference taken with cxl_afu_get(). */
void cxl_afu_put(struct cxl_afu *afu)
{
	put_device(&afu->dev);
}
EXPORT_SYMBOL_GPL(cxl_afu_put);
69 
70 void cxl_slbia(struct mm_struct *mm)
71 {
72 	struct cxl_calls *calls;
73 
74 	calls = cxl_calls_get();
75 	if (!calls)
76 		return;
77 
78 	if (cxl_ctx_in_use())
79 	    calls->cxl_slbia(mm);
80 
81 	cxl_calls_put(calls);
82 }
83 
/*
 * Publish the cxl driver's call table.
 *
 * Returns -EBUSY if a table is already registered, 0 on success.
 * rcu_assign_pointer() orders initialization of *calls before the
 * pointer becomes visible, so RCU readers never see a partial table.
 */
int register_cxl_calls(struct cxl_calls *calls)
{
	if (cxl_calls)
		return -EBUSY;

	rcu_assign_pointer(cxl_calls, calls);
	return 0;
}
EXPORT_SYMBOL_GPL(register_cxl_calls);
93 
/*
 * Withdraw the previously registered call table.
 *
 * Clears the global pointer, then waits for all in-flight RCU readers
 * (cxl_calls_get() critical sections) to drain before returning, so
 * the caller may safely free or unload @calls afterwards.
 */
void unregister_cxl_calls(struct cxl_calls *calls)
{
	BUG_ON(cxl_calls->owner != calls->owner);
	RCU_INIT_POINTER(cxl_calls, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_cxl_calls);
101 
/*
 * Thin exported wrapper around of_update_property(), updating @new_prop
 * on device tree node @dn.  Returns of_update_property()'s result.
 */
int cxl_update_properties(struct device_node *dn,
			  struct property *new_prop)
{
	return of_update_property(dn, new_prop);
}
EXPORT_SYMBOL_GPL(cxl_update_properties);
108 
109 /*
110  * API calls into the driver that may be called from the PHB code and must be
111  * built in.
112  */
113 bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
114 {
115 	bool ret;
116 	struct cxl_calls *calls;
117 
118 	calls = cxl_calls_get();
119 	if (!calls)
120 		return false;
121 
122 	ret = calls->cxl_pci_associate_default_context(dev, afu);
123 
124 	cxl_calls_put(calls);
125 
126 	return ret;
127 }
128 EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context);
129 
130 void cxl_pci_disable_device(struct pci_dev *dev)
131 {
132 	struct cxl_calls *calls;
133 
134 	calls = cxl_calls_get();
135 	if (!calls)
136 		return;
137 
138 	calls->cxl_pci_disable_device(dev);
139 
140 	cxl_calls_put(calls);
141 }
142 EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
143 
144 int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
145 {
146 	int ret;
147 	struct cxl_calls *calls;
148 
149 	calls = cxl_calls_get();
150 	if (!calls)
151 		return -EBUSY;
152 
153 	ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
154 
155 	cxl_calls_put(calls);
156 
157 	return ret;
158 }
159 EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
160 
161 int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
162 {
163 	int ret;
164 	struct cxl_calls *calls;
165 
166 	calls = cxl_calls_get();
167 	if (!calls)
168 		return false;
169 
170 	ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type);
171 
172 	cxl_calls_put(calls);
173 
174 	return ret;
175 }
176 EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs);
177 
178 void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
179 {
180 	struct cxl_calls *calls;
181 
182 	calls = cxl_calls_get();
183 	if (!calls)
184 		return;
185 
186 	calls->cxl_cx4_teardown_msi_irqs(pdev);
187 
188 	cxl_calls_put(calls);
189 }
190 EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs);
191 
/*
 * Initcall: scan the device tree for "ibm,coherent-platform-facility"
 * nodes and create a platform device for each, so the platform bus can
 * bind a cxl driver to them.  Skipped entirely in hypervisor (bare
 * metal) mode — there, cxl devices are presumably discovered by other
 * means (not visible from this file).
 *
 * Always returns 0; a failed of_platform_device_create() just means
 * that node is not counted.
 */
static int __init cxl_base_init(void)
{
	struct device_node *np;
	struct platform_device *dev;
	int count = 0;

	/*
	 * Scan for compatible devices in guest only
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;

	for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
		dev = of_platform_device_create(np, NULL, NULL);
		if (dev)
			count++;
	}
	pr_devel("Found %d cxl device(s)\n", count);
	return 0;
}
device_initcall(cxl_base_init);
213