// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_validation.h"

#include <linux/slab.h>

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, the number of new coherent
 * resources that will have this buffer as a backup buffer.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_guest_memory_bo: Non ref-counted pointer to the new guest memory buffer
 * to be assigned to a resource.
 * @new_guest_memory_offset: Offset into the new backup MOB for resources
 * that can share MOBs.
 * @no_buffer_needed: The kernel does not need to allocate a MOB during
 * validation; the command stream provides a MOB bind operation.
 * @switching_guest_memory_bo: The validation process is switching backup MOBs.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @dirty_set: Change the dirty status of the resource.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_bo *new_guest_memory_bo;
	unsigned long new_guest_memory_offset;
	u32 no_buffer_needed : 1;
	u32 switching_guest_memory_bo : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator.
 * @ctx: The validation context.
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
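
/*
 * Example (illustrative sketch, not part of the driver): successive
 * allocations are carved out of the same page until it is exhausted, after
 * which a fresh page is appended to @ctx->page_list. Sizes are rounded up
 * by vmw_validation_align(), so returned addresses are sizeof(long)
 * aligned:
 *
 *	u32 *a = vmw_validation_mem_alloc(ctx, sizeof(u32));
 *	u32 *b = vmw_validation_mem_alloc(ctx, sizeof(u32));
 *	// a and b typically come from the same page, sizeof(long) apart.
 *	// Both are reclaimed in bulk by vmw_validation_mem_free().
 */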

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) vbo;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				bo_node = container_of(hash, typeof(*bo_node), hash);
				break;
			}
		}
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->tbo) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) res;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				res_node = container_of(hash, typeof(*res_node), hash);
				break;
			}
		}
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (!bo_node) {
		struct ttm_validate_buffer *val_buf;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->sw_context) {
			bo_node->hash.key = (unsigned long) vbo;
			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
				     bo_node->hash.key);
		}
		val_buf = &bo_node->base;
		vmw_bo_reference(vbo);
		val_buf->bo = &vbo->tbo;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
	}

	return 0;
}
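
/*
 * Example (illustrative, not part of the driver): with @ctx->merge_dups
 * set, registering the same buffer object twice is harmless; the duplicate
 * lookup above turns the second call into a no-op:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo);
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, vbo);
 *	// Still a single vmw_validation_bo_node on ctx->bo_list.
 */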

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change the dirty status.
 * @p_node: Output pointer to the additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->sw_context) {
		node->hash.key = (unsigned long) res;
		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
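
/*
 * Example (illustrative sketch): a hypothetical caller reserving extra
 * per-resource bookkeeping via @priv_size and retrieving it through
 * @p_node. struct my_ctx_info is an invented caller-private payload, and
 * passing 0 for @dirty leaves the dirty status unchanged:
 *
 *	struct my_ctx_info { u32 cmd_offset; };
 *	void *val_private;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res,
 *					  sizeof(struct my_ctx_info),
 *					  0, &val_private, &first);
 *	if (!ret) {
 *		struct my_ctx_info *info = val_private;
 *
 *		info->cmd_offset = 0;
 *		// val_private also identifies the resource to
 *		// vmw_validation_res_set_dirty() and
 *		// vmw_validation_res_switch_backup() below.
 *	}
 */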

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @guest_memory_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_bo *vbo,
				      unsigned long guest_memory_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_guest_memory_bo = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_guest_memory_bo = vbo;
	val->new_guest_memory_offset = guest_memory_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->guest_memory_bo) {
			struct vmw_bo *vbo = res->guest_memory_bo;

			vmw_bo_placement_set(vbo,
					     res->func->domain,
					     res->func->busy_domain);
			ret = vmw_validation_add_bo(ctx, vbo);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_guest_memory_bo);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_guest_memory_bo,
						       val->new_guest_memory_bo,
						       val->new_guest_memory_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptibly if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
					     bool interruptible)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->tbo.pin_count > 0)
		return 0;

	ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try again, this time evicting
	 * previous contents.
	 */
	ctx.allow_res_evict = true;

	return ttm_bo_validate(bo, &vbo->placement, &ctx);
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);

		ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
		if (ret)
			return ret;

		/*
		 * Rather than have the resource code allocate the bo
		 * dirty tracker in resource_unreserve(), where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_bo *backup = res->guest_memory_bo;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffers */
		if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
			struct vmw_bo *vbo = res->guest_memory_bo;

			vmw_bo_placement_set(vbo, res->func->domain,
					     res->func->busy_domain);
			ret = vmw_validation_add_bo(ctx, vbo);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer-object registration,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources- and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->sw_context)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		hash_del_rcu(&entry->hash.head);

	list_for_each_entry(val, &ctx->resource_list, head)
		hash_del_rcu(&val->hash.head);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		hash_del_rcu(&val->hash.head);

	ctx->sw_context = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		drm_gem_object_put(&entry->base.bo->base);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
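
/*
 * Example (illustrative sketch of the intended call sequence; the
 * command-stream building is elided, error handling is abbreviated, and
 * dev_priv->cmdbuf_mutex is just one plausible choice of reservation
 * mutex):
 *
 *	ret = vmw_validation_prepare(ctx, &dev_priv->cmdbuf_mutex, true);
 *	if (ret) {
 *		// Nothing is reserved; drop the references taken at
 *		// registration time.
 *		vmw_validation_unref_lists(ctx);
 *		return ret;
 *	}
 *	... build and submit the command stream, obtaining @fence ...
 *	if (submit_failed)
 *		vmw_validation_revert(ctx);
 *	else
 *		vmw_validation_done(ctx, fence);
 */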

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Return: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Return: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
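
/*
 * Example (illustrative): preloading so that the subsequent registration
 * needs no page allocation and, per the guarantee above, will not sleep.
 * struct my_priv is a hypothetical caller-private payload:
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_priv));
 *	if (ret)
 *		return ret;
 *	... enter a section where sleeping is undesirable ...
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					  0, &val_private, NULL);
 *	... leave the section ...
 */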

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}
847