xref: /linux/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c (revision 66da65005aa819e0b8d3a08f5ec1491b7690cb67)
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

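/*
 * Interrupt handler for the control (request/response) channel: walk the
 * shared ring, match responses against the id of the currently pending
 * request and complete the waiter with the response status.
 */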
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

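/*
 * Interrupt handler for the event channel: consume backend events from the
 * shared event page and report page flip completions to the frontend.
 */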
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

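/*
 * Tear down a single channel: release any waiters with -EIO, unbind the
 * interrupt, free the event channel and the shared page.
 */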
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	void *page = NULL;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = evtchnl->u.evt.page;
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release everyone still waiting for a response, if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	xenbus_teardown_ring(&page, 1, &evtchnl->gref);

	memset(evtchnl, 0, sizeof(*evtchnl));
}

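/*
 * Allocate and initialize a channel of the given type: grant a shared page
 * (request ring or event page), allocate an event channel and bind it to
 * the corresponding interrupt handler.
 */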
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	void *page;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
				1, &evtchnl->gref);
	if (ret)
		goto fail;

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = page;
		XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		handler = evtchnl_interrupt_ctrl;
	} else {
		evtchnl->u.evt.page = page;
		handler = evtchnl_interrupt_evt;
	}

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}

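/*
 * Allocate a request/event channel pair for every configured connector;
 * on failure all channels created so far are freed.
 */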
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}
	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}

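/*
 * Publish the ring grant reference and event channel number of a single
 * channel at the given XenStore path.
 */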
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write the shared ring grant reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write the event channel number */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}

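/*
 * Publish all channel pairs to XenStore in one transaction, retrying the
 * transaction if it ends with -EAGAIN.
 */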
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}

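/*
 * Push the just queued request to the shared ring and kick the backend if
 * it expects a notification.
 */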
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

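/* Set the state of all request/event channels while holding io_lock. */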
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

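/* Free all channel pairs and the array holding them. */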
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}