/**************************************************************************
 *
 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1
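/*
 * Overlay support requires both the video and escape FIFO capabilities,
 * see vmw_overlay_available() below.
 */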
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

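/**
 * struct vmw_stream - A single overlay stream.
 *
 * @buf: buffer currently bound to the stream, NULL when fully stopped.
 * @claimed: set while a client has claimed the stream.
 * @paused: set when the stream has been temporarily stopped but should
 * be resumed later.
 * @saved: the last control arguments, used to resume a paused stream.
 */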
struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}
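
/*
 * Note: both vmw_overlay_send_put() and vmw_overlay_send_stop() below
 * follow the SET_REGS escape with one of these flush escapes for the
 * same stream, presumably so the device picks up the register changes.
 */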

/**
 * Send put command to hw.
 *
 * Returns
 * 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* The register defines are indices, so num_items is the last index + 1 */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

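	/*
	 * Layout of the reserved FIFO space: the SET_REGS escape and
	 * stream header first, then num_items registerId/value pairs,
	 * and finally the flush escape.
	 */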
	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* The escape body covers the stream header plus the items; the
	 * header is the same size as one item.
	 */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* The register IDs are numbered consecutively, matching the index */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value     = true;
	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value      = arg->format;
	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
	items[SVGA_VIDEO_SIZE].value        = arg->size;
	items[SVGA_VIDEO_WIDTH].value       = arg->width;
	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
	if (have_so) {
		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * 0 on success, -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects, buffers can now be
 * used with GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf,
				   bool pin, bool inter)
{
	if (!pin)
		return vmw_dmabuf_unpin(dev_priv, buf, inter);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);

	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id: which stream to stop/pause.
 * @pause: true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* With no buffer attached the stream is already completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We only remove the NO_EVICT flag, so no -ENOMEM can occur */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if the buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; there is nothing else to do.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * This might return -ENOMEM if the buffer can't fit in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pause all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
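
/*
 * Example (a minimal sketch, not code from this driver): the kms code is
 * expected to bracket a scanout buffer move with the two helpers above,
 * roughly like this:
 *
 *	vmw_overlay_pause_all(dev_priv);
 *	... move the new scanout buffer to vram ...
 *	vmw_overlay_resume_all(dev_priv);
 */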


static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
	return (dev_priv->overlay_priv != NULL &&
		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
		 VMW_OVERLAY_CAP_MASK));
}

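/**
 * Control stream ioctl.
 *
 * The ioctl argument is a struct drm_vmw_control_stream_arg: if
 * enabled is cleared the stream is stopped, otherwise the dma buffer
 * identified by handle is looked up and the stream is (re)programmed
 * with the new arguments.
 */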
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!vmw_overlay_available(dev_priv))
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!vmw_overlay_available(dev_priv))
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}
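
/*
 * Example (a minimal sketch, not code from this driver): a caller that
 * needs a hardware overlay would typically pair the two functions above,
 * roughly like this (error handling abbreviated):
 *
 *	uint32_t stream_id;
 *
 *	ret = vmw_overlay_claim(dev_priv, &stream_id);
 *	if (ret)
 *		return ret;
 *	... program the stream via the control stream ioctl ...
 *	vmw_overlay_unref(dev_priv, stream_id);
 */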

int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}