xref: /linux/arch/powerpc/platforms/powernv/opal-async.c (revision 86cd6d98020924f65a6773784c66c5b842e3e320)
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/opal.h>

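/*
 * Token lifecycle: opal_async_get_token_interruptible() moves a token from
 * UNALLOCATED to ALLOCATED, opal_async_comp_event() marks it COMPLETED when
 * OPAL posts the matching completion message, and opal_async_release_token()
 * returns it to UNALLOCATED.
 */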
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_COMPLETED
};

struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};

static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;

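/*
 * Find an unallocated token, mark it allocated and return its index, or
 * -EBUSY if every token is in use.  Callers are expected to have taken a
 * slot on opal_async_sem first (see opal_async_get_token_interruptible()).
 */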
static int __opal_async_get_token(void)
{
	unsigned long flags;
	int i, token = -EBUSY;

	spin_lock_irqsave(&opal_async_comp_lock, flags);

	for (i = 0; i < opal_max_async_tokens; i++) {
		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
			token = i;
			break;
		}
	}

	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}

/*
 * Note: If the returned token is used in an OPAL call and OPAL returns
 * OPAL_ASYNC_COMPLETION you MUST call opal_async_wait_response() before
 * calling any other opal_async_* function.
 */
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
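
/*
 * Hypothetical usage sketch (illustration only, not compiled here): the
 * pattern a caller is expected to follow around an asynchronous OPAL call.
 * opal_frobnicate() is a made-up stand-in for whichever token-taking OPAL
 * call the driver issues; opal_get_async_rc() and opal_error_code() are
 * helpers provided elsewhere in the OPAL support code.
 *
 *	struct opal_msg msg;
 *	int token, rc;
 *
 *	token = opal_async_get_token_interruptible();
 *	if (token < 0)
 *		return token;
 *
 *	rc = opal_frobnicate(..., token);
 *	if (rc == OPAL_ASYNC_COMPLETION) {
 *		rc = opal_async_wait_response(token, &msg);
 *		if (!rc)
 *			rc = opal_error_code(opal_get_async_rc(msg));
 *	} else {
 *		rc = opal_error_code(rc);
 *	}
 *
 *	opal_async_release_token(token);
 *	return rc;
 */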

static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}

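/*
 * Hand a token back to the pool.  Returns 0 on success (in which case the
 * semaphore is bumped so another opal_async_get_token_interruptible()
 * caller can proceed), -EINVAL for an out-of-range token, or 1 if the
 * token was not allocated in the first place.
 */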
int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (!ret)
		up(&opal_async_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);

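/*
 * Sleep (uninterruptibly) until OPAL posts the completion message for
 * @token, then copy that message into @msg.  Returns 0 on success, or
 * -EINVAL for a bad token or message pointer.
 */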
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/* Wake up the poller before we wait for events, to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
			== ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);
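
/*
 * Note: a 0 return from opal_async_wait_response() only means the
 * completion message arrived; the firmware's status for the request itself
 * normally has to be dug out of the message, e.g. (sketch):
 *
 *	rc = opal_error_code(opal_get_async_rc(msg));
 */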

/*
 * Called from interrupt context (OPAL message notifier).  The async
 * completion message carries the token in params[0]; stash the whole
 * message for the waiter, mark the token completed and wake any waiters.
 */
static int opal_async_comp_event(struct notifier_block *nb,
		unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	token = be64_to_cpu(comp_msg->params[0]);
	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	wake_up(&opal_async_wait);

	return 0;
}

static struct notifier_block opal_async_comp_nb = {
		.notifier_call	= opal_async_comp_event,
		.next		= NULL,
		.priority	= 0,
};

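/*
 * Read the number of async completion tokens supported by firmware from
 * the "opal-msg-async-num" device-tree property, allocate the token table,
 * register for OPAL_MSG_ASYNC_COMP messages, and size opal_async_sem to
 * the number of tokens.
 */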
int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
				__func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
			sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
			&opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
				__func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}
219