// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

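/*
 * Interrupt handler for the request channel: consume all pending responses
 * from the shared ring and complete the waiters that match the response id.
 */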
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
			/* fall through */
		case XENSND_OP_CLOSE:
			/* fall through */
		case XENSND_OP_READ:
			/* fall through */
		case XENSND_OP_WRITE:
			/* fall through */
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

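/*
 * Interrupt handler for the event channel: consume all pending incoming
 * events (e.g. current playback/capture position updates) from the event
 * page and forward them to the ALSA layer.
 */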
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure the updated consumer index is visible to the backend. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

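/*
 * Push the request that was just placed on the ring to the backend and
 * kick it via the event channel if a notification is required.
 */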
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}

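/*
 * Tear down a single channel: wake up any waiters with -EIO, unbind the
 * IRQ, free the Xen event channel and release the shared page.
 */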
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	unsigned long page = 0;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release everyone still waiting for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	if (channel->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(channel->gref, 0, page);
	else
		free_page(page);

	memset(channel, 0, sizeof(*channel));
}

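/* Free all request/event channel pairs allocated for this frontend. */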
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}

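/*
 * Allocate and initialize a single channel: grant a shared page to the
 * backend (either a request/response ring or an event page), allocate a
 * Xen event channel and bind its IRQ to the matching interrupt handler.
 */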
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}

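/*
 * Allocate a request/event channel pair for every configured playback and
 * capture stream. On error all channels allocated so far are freed.
 */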
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}

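/*
 * Publish a single channel in XenStore: write its ring reference under
 * node_ring and its event channel number under node_chnl at the given path.
 */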
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the event channel number. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}

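/*
 * Publish all allocated channels in XenStore within a single transaction,
 * retrying the transaction if it ends with -EAGAIN.
 */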
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

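/* Set both channels of a pair to the connected or disconnected state. */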
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

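/* Reset the next expected request/event id counters of a channel pair. */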
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}