xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c (revision 0a95fab36a660021c3127476a8df6518fe47a23e)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

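/* Number of failed eviction attempts to tolerate before giving up. */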
#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

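	/*
	 * Descend the rb tree to find the insertion point, keeping the
	 * tree ordered by backup_offset.
	 */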
	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set @res->id to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
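	/*
	 * If there is a backup buffer, unbind from it, drop dirty tracking
	 * and release the buffer reference.
	 */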
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

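	/*
	 * Preload the idr so that the GFP_NOWAIT allocation under the
	 * spinlock is unlikely to fail.
	 */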
	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 *
 * Returns 0 on success and a negative error code on failure.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Returns ERR_PTR(-ESRCH) if the handle can't be found, and ERR_PTR(-EINVAL)
 * if it is associated with an incorrect resource type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

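	/*
	 * No backup buffer yet: allocate one, placed according to the
	 * resource's preferred backup placement.
	 */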
	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

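	/*
	 * Bind if a buffer is supplied and either the resource needs a
	 * backup mob that isn't attached yet, or it doesn't use a backup
	 * at all.
	 */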
	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

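	/*
	 * Switch out the old backup buffer for the new one, moving the
	 * mob attachment and dirty tracking along with it.
	 */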
	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

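	/*
	 * Only evictable, device-registered and unpinned resources go
	 * back on the LRU list.
	 */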
	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

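	/*
	 * If the resource is not yet bound to the mob, the buffer holds
	 * no data to preserve, so skip validating its placement.
	 */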
	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether to wait interruptibly.
 * @no_backup:      Whether to skip allocating a backup buffer even if the
 *                  resource needs one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
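	/* Destroy the hardware resource, evicting it from the device. */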
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptibly if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
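	/*
	 * Retry validation, evicting one LRU resource of the same type
	 * on each -EBUSY, until it succeeds or the eviction error budget
	 * is exhausted.
	 */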
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backing mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
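	/*
	 * Repeatedly unbind the tree root until the tree is empty;
	 * vmw_resource_mob_detach() erases the detached node.
	 */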
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If the BO is being moved from MOB to system memory */
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else {
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/*
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

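	/*
	 * Only the first pin reference needs to validate and pin the
	 * backing buffer; later pins just bump the count.
	 */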
	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed prefaulting and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_excl_fence(bo->base.resv));
	}

	return 0;
}
1178