xref: /linux/arch/powerpc/platforms/powernv/opal-async.c (revision e9331ee9b164d58b4dd0abc882ba7e23d2f404b3)
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/opal.h>

#define N_ASYNC_COMPLETIONS	64

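/*
 * Token state is tracked with two bitmaps: a bit in opal_async_token_map is
 * set while a token is handed out by __opal_async_get_token(), and a bit in
 * opal_async_complete_map means no response is outstanding for that token
 * (all bits start set).  opal_async_responses[] holds the OPAL completion
 * message for each token until opal_async_wait_response() copies it out.
 */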
static DECLARE_BITMAP(opal_async_complete_map, N_ASYNC_COMPLETIONS) = {~0UL};
static DECLARE_BITMAP(opal_async_token_map, N_ASYNC_COMPLETIONS);
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static struct opal_msg *opal_async_responses;
static unsigned int opal_max_async_tokens;

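/*
 * Grab a token without sleeping: find a token whose completion bit is still
 * set and which is not already in the token map, claim it and clear its
 * completion bit.  Returns the token number, or -EBUSY if none is free.
 */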
int __opal_async_get_token(void)
{
	unsigned long flags;
	int token;

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
	if (token >= opal_max_async_tokens) {
		token = -EBUSY;
		goto out;
	}

	if (__test_and_set_bit(token, opal_async_token_map)) {
		token = -EBUSY;
		goto out;
	}

	__clear_bit(token, opal_async_complete_map);

out:
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}

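/*
 * Sleep on the semaphore until a token should be free, then claim one.
 * Returns a token number, -ERESTARTSYS if the wait was interrupted, or
 * -EBUSY propagated from __opal_async_get_token().
 */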
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);

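/*
 * Hand a token back: mark it complete again and drop it from the token map.
 * Does not touch the semaphore; returns -EINVAL for an out-of-range token.
 */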
int __opal_async_release_token(int token)
{
	unsigned long flags;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	__set_bit(token, opal_async_complete_map);
	__clear_bit(token, opal_async_token_map);
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return 0;
}

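/*
 * Release a token and bump the semaphore so a sleeper in
 * opal_async_get_token_interruptible() can proceed.
 */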
int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (ret)
		return ret;

	up(&opal_async_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);

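/*
 * Wait (uninterruptibly) until the completion message for @token arrives,
 * then copy it into @msg.  The token stays allocated; the caller must still
 * hand it back with opal_async_release_token().
 */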
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/* Wake up the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, test_bit(token, opal_async_complete_map));
	memcpy(msg, &opal_async_responses[token], sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);

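/*
 * Notifier callback for OPAL_MSG_ASYNC_COMP messages: stash the response for
 * the token carried in params[0], mark that token complete and wake any
 * waiters.
 */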
static int opal_async_comp_event(struct notifier_block *nb,
		unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	token = be64_to_cpu(comp_msg->params[0]);
	memcpy(&opal_async_responses[token], comp_msg, sizeof(*comp_msg));
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	__set_bit(token, opal_async_complete_map);
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	wake_up(&opal_async_wait);

	return 0;
}

static struct notifier_block opal_async_comp_nb = {
		.notifier_call	= opal_async_comp_event,
		.next		= NULL,
		.priority	= 0,
};

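/*
 * Read how many async tokens the firmware supports from the
 * "opal-msg-async-num" property of /ibm,opal (capped at N_ASYNC_COMPLETIONS),
 * register the completion notifier, allocate the response buffer and
 * initialize the semaphore.
 */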
int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
				__func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	if (opal_max_async_tokens > N_ASYNC_COMPLETIONS)
		opal_max_async_tokens = N_ASYNC_COMPLETIONS;

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
			&opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
				__func__, err);
		goto out_opal_node;
	}

	opal_async_responses = kzalloc(
			sizeof(*opal_async_responses) * opal_max_async_tokens,
			GFP_KERNEL);
	if (!opal_async_responses) {
		pr_err("%s: Out of memory, failed to do asynchronous completion init\n",
				__func__);
		err = -ENOMEM;
		goto out_opal_node;
	}

	/* Initialize the semaphore to one less than the maximum number of
	 * tokens available, as one may need to be taken in an emergency via
	 * a synchronous call to __opal_async_get_token().
	 */
	sema_init(&opal_async_sem, opal_max_async_tokens - 1);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}