xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c (revision 156010ed9c2ac1e9df6c11b1f688cf8a6e0152e6)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 #include <linux/slab.h>
29 #include "vmwgfx_validation.h"
30 #include "vmwgfx_drv.h"
31 
32 
33 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
34 
35 /**
36  * struct vmw_validation_bo_node - Buffer object validation metadata.
37  * @base: Metadata used for TTM reservation and validation.
38  * @hash: A hash entry used for the duplicate detection hash table.
39  * @coherent_count: If switching backup buffers, number of new coherent
40  * resources that will have this buffer as a backup buffer.
41  * @as_mob: Validate as mob.
42  * @cpu_blit: Validate for cpu blit access.
43  *
44  * Bit fields are used since these structures are allocated and freed in
45  * large numbers and space conservation is desired.
46  */
47 struct vmw_validation_bo_node {
48 	struct ttm_validate_buffer base;
49 	struct vmwgfx_hash_item hash;
50 	unsigned int coherent_count;
51 	u32 as_mob : 1;
52 	u32 cpu_blit : 1;
53 };
54 /**
55  * struct vmw_validation_res_node - Resource validation metadata.
56  * @head: List head for the resource validation list.
57  * @hash: A hash entry used for the duplicate detection hash table.
58  * @res: Reference counted resource pointer.
59  * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
60  * to a resource.
61  * @new_backup_offset: Offset into the new backup mob for resources that can
62  * share MOBs.
63  * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
64  * the command stream provides a mob bind operation.
65  * @switching_backup: The validation process is switching backup MOB.
66  * @first_usage: True iff the resource has been seen only once in the current
67  * validation batch.
68  * @reserved: Whether the resource is currently reserved by this process.
69  * @dirty_set: Change dirty status of the resource.
70  * @dirty: Dirty information VMW_RES_DIRTY_XX.
71  * @private: Optionally additional memory for caller-private data.
72  *
73  * Bit fields are used since these structures are allocated and freed in
74  * large numbers and space conservation is desired.
75  */
76 struct vmw_validation_res_node {
77 	struct list_head head;
78 	struct vmwgfx_hash_item hash;
79 	struct vmw_resource *res;
80 	struct vmw_buffer_object *new_backup;
81 	unsigned long new_backup_offset;
82 	u32 no_buffer_needed : 1;
83 	u32 switching_backup : 1;
84 	u32 first_usage : 1;
85 	u32 reserved : 1;
86 	u32 dirty : 1;
87 	u32 dirty_set : 1;
88 	unsigned long private[];
89 };
90 
91 /**
92  * vmw_validation_mem_alloc - Allocate kernel memory from the validation
93  * context based allocator
94  * @ctx: The validation context
95  * @size: The number of bytes to allocate.
96  *
97  * The memory allocated may not exceed PAGE_SIZE, and the returned
98  * address is aligned to sizeof(long). All memory allocated this way is
99  * reclaimed after validation when calling any of the exported functions:
100  * vmw_validation_unref_lists()
101  * vmw_validation_revert()
102  * vmw_validation_done()
103  *
104  * Return: Pointer to the allocated memory on success. NULL on failure.
105  */
106 void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
107 			       unsigned int size)
108 {
109 	void *addr;
110 
111 	size = vmw_validation_align(size);
112 	if (size > PAGE_SIZE)
113 		return NULL;
114 
115 	if (ctx->mem_size_left < size) {
116 		struct page *page;
117 
118 		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
119 			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
120 			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
121 		}
122 
123 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
124 		if (!page)
125 			return NULL;
126 
127 		if (ctx->vm)
128 			ctx->vm_size_left -= PAGE_SIZE;
129 
130 		list_add_tail(&page->lru, &ctx->page_list);
131 		ctx->page_address = page_address(page);
132 		ctx->mem_size_left = PAGE_SIZE;
133 	}
134 
135 	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
136 	ctx->mem_size_left -= size;
137 
138 	return addr;
139 }
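
/*
 * Example of how the context allocator above is meant to be used (an
 * illustrative sketch only; the struct name is made up). Node metadata is
 * suballocated from per-context pages and is never freed individually:
 *
 *	struct example_meta {
 *		u32 flags;
 *	} *meta;
 *
 *	meta = vmw_validation_mem_alloc(ctx, sizeof(*meta));
 *	if (!meta)
 *		return -ENOMEM;
 *
 * The memory is reclaimed in bulk by vmw_validation_mem_free(), which runs
 * as part of vmw_validation_unref_lists(), vmw_validation_revert() and
 * vmw_validation_done().
 */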
140 
141 /**
142  * vmw_validation_mem_free - Free all memory allocated using
143  * vmw_validation_mem_alloc()
144  * @ctx: The validation context
145  *
146  * All memory previously allocated for this context using
147  * vmw_validation_mem_alloc() is freed.
148  */
149 static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
150 {
151 	struct page *entry, *next;
152 
153 	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
154 		list_del_init(&entry->lru);
155 		__free_page(entry);
156 	}
157 
158 	ctx->mem_size_left = 0;
159 	if (ctx->vm && ctx->total_mem) {
160 		ctx->total_mem = 0;
161 		ctx->vm_size_left = 0;
162 	}
163 }
164 
165 /**
166  * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
167  * validation context's lists.
168  * @ctx: The validation context to search.
169  * @vbo: The buffer object to search for.
170  *
171  * Return: Pointer to the struct vmw_validation_bo_node referencing the
172  * duplicate, or NULL if none found.
173  */
174 static struct vmw_validation_bo_node *
175 vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
176 			   struct vmw_buffer_object *vbo)
177 {
178 	struct vmw_validation_bo_node *bo_node = NULL;
179 
180 	if (!ctx->merge_dups)
181 		return NULL;
182 
183 	if (ctx->sw_context) {
184 		struct vmwgfx_hash_item *hash;
185 		unsigned long key = (unsigned long) vbo;
186 
187 		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
188 			if (hash->key == key) {
189 				bo_node = container_of(hash, typeof(*bo_node), hash);
190 				break;
191 			}
192 		}
193 	} else {
194 		struct vmw_validation_bo_node *entry;
195 
196 		list_for_each_entry(entry, &ctx->bo_list, base.head) {
197 			if (entry->base.bo == &vbo->base) {
198 				bo_node = entry;
199 				break;
200 			}
201 		}
202 	}
203 
204 	return bo_node;
205 }
206 
207 /**
208  * vmw_validation_find_res_dup - Find a duplicate resource entry in the
209  * validation context's lists.
210  * @ctx: The validation context to search.
211  * @res: The resource to search for.
212  *
213  * Return: Pointer to the struct vmw_validation_res_node referencing the
214  * duplicate, or NULL if none found.
215  */
216 static struct vmw_validation_res_node *
217 vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
218 			    struct vmw_resource *res)
219 {
220 	struct vmw_validation_res_node *res_node = NULL;
221 
222 	if (!ctx->merge_dups)
223 		return NULL;
224 
225 	if (ctx->sw_context) {
226 		struct vmwgfx_hash_item *hash;
227 		unsigned long key = (unsigned long) res;
228 
229 		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
230 			if (hash->key == key) {
231 				res_node = container_of(hash, typeof(*res_node), hash);
232 				break;
233 			}
234 		}
235 	} else {
236 		struct vmw_validation_res_node *entry;
237 
238 		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
239 			if (entry->res == res) {
240 				res_node = entry;
241 				goto out;
242 			}
243 		}
244 
245 		list_for_each_entry(entry, &ctx->resource_list, head) {
246 			if (entry->res == res) {
247 				res_node = entry;
248 				break;
249 			}
250 		}
251 
252 	}
253 out:
254 	return res_node;
255 }
256 
257 /**
258  * vmw_validation_add_bo - Add a buffer object to the validation context.
259  * @ctx: The validation context.
260  * @vbo: The buffer object.
261  * @as_mob: Validate as mob, otherwise suitable for GMR operations.
262  * @cpu_blit: Validate in a page-mappable location.
263  *
264  * Return: Zero on success, negative error code otherwise.
265  */
266 int vmw_validation_add_bo(struct vmw_validation_context *ctx,
267 			  struct vmw_buffer_object *vbo,
268 			  bool as_mob,
269 			  bool cpu_blit)
270 {
271 	struct vmw_validation_bo_node *bo_node;
272 
273 	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
274 	if (bo_node) {
275 		if (bo_node->as_mob != as_mob ||
276 		    bo_node->cpu_blit != cpu_blit) {
277 			DRM_ERROR("Inconsistent buffer usage.\n");
278 			return -EINVAL;
279 		}
280 	} else {
281 		struct ttm_validate_buffer *val_buf;
282 
283 		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
284 		if (!bo_node)
285 			return -ENOMEM;
286 
287 		if (ctx->sw_context) {
288 			bo_node->hash.key = (unsigned long) vbo;
289 			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
290 				bo_node->hash.key);
291 		}
292 		val_buf = &bo_node->base;
293 		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
294 		if (!val_buf->bo)
295 			return -ESRCH;
296 		val_buf->num_shared = 0;
297 		list_add_tail(&val_buf->head, &ctx->bo_list);
298 		bo_node->as_mob = as_mob;
299 		bo_node->cpu_blit = cpu_blit;
300 	}
301 
302 	return 0;
303 }
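
/*
 * Illustrative usage sketch for vmw_validation_add_bo() (not part of the
 * driver; error handling abbreviated). Registering the same buffer object
 * more than once is fine as long as @as_mob and @cpu_blit are consistent,
 * since duplicates are merged into a single validation node:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	if (ret)
 *		return ret;
 *	// A second, consistent registration of @vbo reuses the same node.
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 */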
304 
305 /**
306  * vmw_validation_add_resource - Add a resource to the validation context.
307  * @ctx: The validation context.
308  * @res: The resource.
309  * @priv_size: Size of private, additional metadata.
310  * @dirty: Whether to change dirty status.
311  * @p_node: Output pointer to the additional metadata address.
312  * @first_usage: Whether this was the first time this resource was seen.
313  *
314  * Return: Zero on success, negative error code otherwise.
315  */
316 int vmw_validation_add_resource(struct vmw_validation_context *ctx,
317 				struct vmw_resource *res,
318 				size_t priv_size,
319 				u32 dirty,
320 				void **p_node,
321 				bool *first_usage)
322 {
323 	struct vmw_validation_res_node *node;
324 
325 	node = vmw_validation_find_res_dup(ctx, res);
326 	if (node) {
327 		node->first_usage = 0;
328 		goto out_fill;
329 	}
330 
331 	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
332 	if (!node) {
333 		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
334 		return -ENOMEM;
335 	}
336 
337 	if (ctx->sw_context) {
338 		node->hash.key = (unsigned long) res;
339 		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
340 	}
341 	node->res = vmw_resource_reference_unless_doomed(res);
342 	if (!node->res)
343 		return -ESRCH;
344 
345 	node->first_usage = 1;
346 	if (!res->dev_priv->has_mob) {
347 		list_add_tail(&node->head, &ctx->resource_list);
348 	} else {
349 		switch (vmw_res_type(res)) {
350 		case vmw_res_context:
351 		case vmw_res_dx_context:
352 			list_add(&node->head, &ctx->resource_ctx_list);
353 			break;
354 		case vmw_res_cotable:
355 			list_add_tail(&node->head, &ctx->resource_ctx_list);
356 			break;
357 		default:
358 			list_add_tail(&node->head, &ctx->resource_list);
359 			break;
360 		}
361 	}
362 
363 out_fill:
364 	if (dirty) {
365 		node->dirty_set = 1;
366 		/* Overwriting previous information here is intentional! */
367 		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
368 	}
369 	if (first_usage)
370 		*first_usage = node->first_usage;
371 	if (p_node)
372 		*p_node = &node->private;
373 
374 	return 0;
375 }
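
/*
 * Illustrative sketch of registering a resource together with caller-private
 * metadata (not part of the driver; struct my_priv is hypothetical):
 *
 *	struct my_priv {
 *		u32 cmd_offset;
 *	} *priv;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_SET, (void **)&priv,
 *					  &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		priv->cmd_offset = 0;	// Set up private data only once.
 */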
376 
377 /**
378  * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
379  * validation.
380  * @ctx: The validation context.
381  * @val_private: The additional meta-data pointer returned when the
382  * resource was registered with the validation context. Used to identify
383  * the resource.
384  * @dirty: Dirty information VMW_RES_DIRTY_XX
385  */
386 void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
387 				  void *val_private, u32 dirty)
388 {
389 	struct vmw_validation_res_node *val;
390 
391 	if (!dirty)
392 		return;
393 
394 	val = container_of(val_private, typeof(*val), private);
395 	val->dirty_set = 1;
396 	/* Overwriting previous information here is intentional! */
397 	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
398 }
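
/*
 * Example (sketch): after emitting a command that makes the device write to a
 * resource, a caller can update the dirty state through the private pointer
 * returned by vmw_validation_add_resource():
 *
 *	vmw_validation_res_set_dirty(ctx, priv, VMW_RES_DIRTY_SET);
 */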
399 
400 /**
401  * vmw_validation_res_switch_backup - Register a backup MOB switch during
402  * validation.
403  * @ctx: The validation context.
404  * @val_private: The additional meta-data pointer returned when the
405  * resource was registered with the validation context. Used to identify
406  * the resource.
407  * @vbo: The new backup buffer object MOB. This buffer object needs to have
408  * already been registered with the validation context.
409  * @backup_offset: Offset into the new backup MOB.
410  */
411 void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
412 				      void *val_private,
413 				      struct vmw_buffer_object *vbo,
414 				      unsigned long backup_offset)
415 {
416 	struct vmw_validation_res_node *val;
417 
418 	val = container_of(val_private, typeof(*val), private);
419 
420 	val->switching_backup = 1;
421 	if (val->first_usage)
422 		val->no_buffer_needed = 1;
423 
424 	val->new_backup = vbo;
425 	val->new_backup_offset = backup_offset;
426 }
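
/*
 * Usage sketch (illustrative): when the command stream rebinds a resource to
 * a new backup MOB, the new buffer object must have been registered with the
 * context before the switch is recorded:
 *
 *	ret = vmw_validation_add_bo(ctx, new_vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, priv, new_vbo, 0);
 */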
427 
428 /**
429  * vmw_validation_res_reserve - Reserve all resources registered with this
430  * validation context.
431  * @ctx: The validation context.
432  * @intr: Use interruptible waits when possible.
433  *
434  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
435  * code on failure.
436  */
437 int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
438 			       bool intr)
439 {
440 	struct vmw_validation_res_node *val;
441 	int ret = 0;
442 
443 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
444 
445 	list_for_each_entry(val, &ctx->resource_list, head) {
446 		struct vmw_resource *res = val->res;
447 
448 		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
449 		if (ret)
450 			goto out_unreserve;
451 
452 		val->reserved = 1;
453 		if (res->backup) {
454 			struct vmw_buffer_object *vbo = res->backup;
455 
456 			ret = vmw_validation_add_bo
457 				(ctx, vbo, vmw_resource_needs_backup(res),
458 				 false);
459 			if (ret)
460 				goto out_unreserve;
461 		}
462 
463 		if (val->switching_backup && val->new_backup &&
464 		    res->coherent) {
465 			struct vmw_validation_bo_node *bo_node =
466 				vmw_validation_find_bo_dup(ctx,
467 							   val->new_backup);
468 
469 			if (WARN_ON(!bo_node)) {
470 				ret = -EINVAL;
471 				goto out_unreserve;
472 			}
473 			bo_node->coherent_count++;
474 		}
475 	}
476 
477 	return 0;
478 
479 out_unreserve:
480 	vmw_validation_res_unreserve(ctx, true);
481 	return ret;
482 }
483 
484 /**
485  * vmw_validation_res_unreserve - Unreserve all reserved resources
486  * registered with this validation context.
487  * @ctx: The validation context.
488  * @backoff: Whether this is a backoff- or a commit-type operation. This
489  * is used to determine whether to switch backup MOBs or not.
490  */
491 void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
492 				 bool backoff)
493 {
494 	struct vmw_validation_res_node *val;
495 
496 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
497 	if (backoff)
498 		list_for_each_entry(val, &ctx->resource_list, head) {
499 			if (val->reserved)
500 				vmw_resource_unreserve(val->res,
501 						       false, false, false,
502 						       NULL, 0);
503 		}
504 	else
505 		list_for_each_entry(val, &ctx->resource_list, head) {
506 			if (val->reserved)
507 				vmw_resource_unreserve(val->res,
508 						       val->dirty_set,
509 						       val->dirty,
510 						       val->switching_backup,
511 						       val->new_backup,
512 						       val->new_backup_offset);
513 		}
514 }
515 
516 /**
517  * vmw_validation_bo_validate_single - Validate a single buffer object.
518  * @bo: The TTM buffer object base.
519  * @interruptible: Whether to perform waits interruptible if possible.
520  * @validate_as_mob: Whether to validate in MOB memory.
521  *
522  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
523  * code on failure.
524  */
525 int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
526 				      bool interruptible,
527 				      bool validate_as_mob)
528 {
529 	struct vmw_buffer_object *vbo =
530 		container_of(bo, struct vmw_buffer_object, base);
531 	struct ttm_operation_ctx ctx = {
532 		.interruptible = interruptible,
533 		.no_wait_gpu = false
534 	};
535 	int ret;
536 
537 	if (atomic_read(&vbo->cpu_writers))
538 		return -EBUSY;
539 
540 	if (vbo->base.pin_count > 0)
541 		return 0;
542 
543 	if (validate_as_mob)
544 		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
545 
546 	/*
547 	 * Put BO in VRAM if there is space, otherwise as a GMR.
548 	 * If there is no space in VRAM and GMR ids are all used up,
549 	 * start evicting GMRs to make room. If the DMA buffer can't be
550 	 * used as a GMR, this will return -ENOMEM.
551 	 */
552 
553 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
554 	if (ret == 0 || ret == -ERESTARTSYS)
555 		return ret;
556 
557 	/*
558 	 * If that failed, try VRAM again, this time evicting
559 	 * previous contents.
560 	 */
561 
562 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
563 	return ret;
564 }
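
/*
 * Placement fallback used by vmw_validation_bo_validate_single(), summarized:
 * MOB buffers go to vmw_mob_placement; everything else is first tried with
 * vmw_vram_gmr_placement and, unless that fails with -ERESTARTSYS, retried
 * with vmw_vram_placement, evicting previous VRAM contents if needed.
 */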
565 
566 /**
567  * vmw_validation_bo_validate - Validate all buffer objects registered with
568  * the validation context.
569  * @ctx: The validation context.
570  * @intr: Whether to perform waits interruptible if possible.
571  *
572  * Return: Zero on success, -ERESTARTSYS if interrupted,
573  * negative error code on failure.
574  */
575 int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
576 {
577 	struct vmw_validation_bo_node *entry;
578 	int ret;
579 
580 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
581 		struct vmw_buffer_object *vbo =
582 			container_of(entry->base.bo, typeof(*vbo), base);
583 
584 		if (entry->cpu_blit) {
585 			struct ttm_operation_ctx ttm_ctx = {
586 				.interruptible = intr,
587 				.no_wait_gpu = false
588 			};
589 
590 			ret = ttm_bo_validate(entry->base.bo,
591 					      &vmw_nonfixed_placement, &ttm_ctx);
592 		} else {
593 			ret = vmw_validation_bo_validate_single
594 			(entry->base.bo, intr, entry->as_mob);
595 		}
596 		if (ret)
597 			return ret;
598 
599 		/*
600 		 * Rather than having the resource code allocate the bo dirty
601 		 * tracker in resource_unreserve(), where we can't fail, do it
602 		 * here when validating the buffer object.
603 		 */
604 		if (entry->coherent_count) {
605 			unsigned int coherent_count = entry->coherent_count;
606 
607 			while (coherent_count) {
608 				ret = vmw_bo_dirty_add(vbo);
609 				if (ret)
610 					return ret;
611 
612 				coherent_count--;
613 			}
614 			entry->coherent_count -= coherent_count;
615 		}
616 
617 		if (vbo->dirty)
618 			vmw_bo_dirty_scan(vbo);
619 	}
620 	return 0;
621 }
622 
623 /**
624  * vmw_validation_res_validate - Validate all resources registered with the
625  * validation context.
626  * @ctx: The validation context.
627  * @intr: Whether to perform waits interruptible if possible.
628  *
629  * Before this function is called, all resource backup buffers must have
630  * been validated.
631  *
632  * Return: Zero on success, -ERESTARTSYS if interrupted,
633  * negative error code on failure.
634  */
635 int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
636 {
637 	struct vmw_validation_res_node *val;
638 	int ret;
639 
640 	list_for_each_entry(val, &ctx->resource_list, head) {
641 		struct vmw_resource *res = val->res;
642 		struct vmw_buffer_object *backup = res->backup;
643 
644 		ret = vmw_resource_validate(res, intr, val->dirty_set &&
645 					    val->dirty);
646 		if (ret) {
647 			if (ret != -ERESTARTSYS)
648 				DRM_ERROR("Failed to validate resource.\n");
649 			return ret;
650 		}
651 
652 		/* Check if the resource switched backup buffer */
653 		if (backup && res->backup && (backup != res->backup)) {
654 			struct vmw_buffer_object *vbo = res->backup;
655 
656 			ret = vmw_validation_add_bo
657 				(ctx, vbo, vmw_resource_needs_backup(res),
658 				 false);
659 			if (ret)
660 				return ret;
661 		}
662 	}
663 	return 0;
664 }
665 
666 /**
667  * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
668  * and unregister it from this validation context.
669  * @ctx: The validation context.
670  *
671  * The hash table used for duplicate finding is an expensive resource and
672  * may be protected by mutexes that may cause deadlocks during resource
673  * unreferencing if held. After resource- and buffer object registering,
674  * there is no longer any use for this hash table, so allow freeing it
675  * either to shorten any mutex locking time, or before resources- and
676  * buffer objects are freed during validation context cleanup.
677  */
678 void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
679 {
680 	struct vmw_validation_bo_node *entry;
681 	struct vmw_validation_res_node *val;
682 
683 	if (!ctx->sw_context)
684 		return;
685 
686 	list_for_each_entry(entry, &ctx->bo_list, base.head)
687 		hash_del_rcu(&entry->hash.head);
688 
689 	list_for_each_entry(val, &ctx->resource_list, head)
690 		hash_del_rcu(&val->hash.head);
691 
692 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
693 		hash_del_rcu(&val->hash.head);
694 
695 	ctx->sw_context = NULL;
696 }
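
/*
 * Ordering sketch (illustrative): the duplicate-detection hash table is only
 * needed while nodes are being added, so a typical caller drops it as soon as
 * registration is done and before the context is torn down:
 *
 *	// ... vmw_validation_add_bo() / vmw_validation_add_resource() calls
 *	vmw_validation_drop_ht(ctx);
 *	// ... later: vmw_validation_done(ctx, fence) or
 *	//            vmw_validation_revert(ctx)
 */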
697 
698 /**
699  * vmw_validation_unref_lists - Unregister previously registered buffer
700  * objects and resources.
701  * @ctx: The validation context.
702  *
703  * Note that this function may cause buffer object- and resource destructors
704  * to be invoked.
705  */
706 void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
707 {
708 	struct vmw_validation_bo_node *entry;
709 	struct vmw_validation_res_node *val;
710 
711 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
712 		ttm_bo_put(entry->base.bo);
713 		entry->base.bo = NULL;
714 	}
715 
716 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
717 	list_for_each_entry(val, &ctx->resource_list, head)
718 		vmw_resource_unreference(&val->res);
719 
720 	/*
721 	 * No need to detach each list entry since they are all freed with
722 	 * vmw_validation_mem_free(). Just make them inaccessible.
723 	 */
724 	INIT_LIST_HEAD(&ctx->bo_list);
725 	INIT_LIST_HEAD(&ctx->resource_list);
726 
727 	vmw_validation_mem_free(ctx);
728 }
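
/*
 * Error-path sketch (illustrative): a caller that fails while still building
 * its command stream, before vmw_validation_prepare() has been called, only
 * needs to drop the references taken during registration:
 *
 *	vmw_validation_drop_ht(ctx);
 *	vmw_validation_unref_lists(ctx);
 */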
729 
730 /**
731  * vmw_validation_prepare - Prepare a validation context for command
732  * submission.
733  * @ctx: The validation context.
734  * @mutex: The mutex used to protect resource reservation.
735  * @intr: Whether to perform waits interruptible if possible.
736  *
737  * Note that the single reservation mutex @mutex is an unfortunate
738  * construct. Ideally resource reservation should be moved to per-resource
739  * ww_mutexes.
740  * If this function doesn't return zero to indicate success, all resources
741  * are left unreserved but still referenced.
742  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
743  * on error.
744  */
745 int vmw_validation_prepare(struct vmw_validation_context *ctx,
746 			   struct mutex *mutex,
747 			   bool intr)
748 {
749 	int ret = 0;
750 
751 	if (mutex) {
752 		if (intr)
753 			ret = mutex_lock_interruptible(mutex);
754 		else
755 			mutex_lock(mutex);
756 		if (ret)
757 			return -ERESTARTSYS;
758 	}
759 
760 	ctx->res_mutex = mutex;
761 	ret = vmw_validation_res_reserve(ctx, intr);
762 	if (ret)
763 		goto out_no_res_reserve;
764 
765 	ret = vmw_validation_bo_reserve(ctx, intr);
766 	if (ret)
767 		goto out_no_bo_reserve;
768 
769 	ret = vmw_validation_bo_validate(ctx, intr);
770 	if (ret)
771 		goto out_no_validate;
772 
773 	ret = vmw_validation_res_validate(ctx, intr);
774 	if (ret)
775 		goto out_no_validate;
776 
777 	return 0;
778 
779 out_no_validate:
780 	vmw_validation_bo_backoff(ctx);
781 out_no_bo_reserve:
782 	vmw_validation_res_unreserve(ctx, true);
783 out_no_res_reserve:
784 	if (mutex)
785 		mutex_unlock(mutex);
786 
787 	return ret;
788 }
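
/*
 * Typical command-submission flow built on top of vmw_validation_prepare()
 * (an illustrative sketch, not a verbatim copy of any caller; @dev_priv and
 * the fence setup are assumed to exist elsewhere):
 *
 *	ret = vmw_validation_prepare(ctx, &dev_priv->cmdbuf_mutex, true);
 *	if (ret)
 *		goto out_unref;	// Unreserved but still referenced.
 *
 *	// ... copy commands to the device, create @fence ...
 *
 *	if (ret)
 *		vmw_validation_revert(ctx);
 *	else
 *		vmw_validation_done(ctx, fence);
 *	return ret;
 *
 * out_unref:
 *	vmw_validation_unref_lists(ctx);
 *	return ret;
 */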
789 
790 /**
791  * vmw_validation_revert - Revert validation actions if command submission
792  * failed.
793  *
794  * @ctx: The validation context.
795  *
796  * The caller still needs to unref resources after a call to this function.
797  */
798 void vmw_validation_revert(struct vmw_validation_context *ctx)
799 {
800 	vmw_validation_bo_backoff(ctx);
801 	vmw_validation_res_unreserve(ctx, true);
802 	if (ctx->res_mutex)
803 		mutex_unlock(ctx->res_mutex);
804 	vmw_validation_unref_lists(ctx);
805 }
806 
807 /**
808  * vmw_validation_done - Commit validation actions after command submission
809  * success.
810  * @ctx: The validation context.
811  * @fence: Fence with which to fence all buffer objects taking part in the
812  * command submission.
813  *
814  * The caller does NOT need to unref resources after a call to this function.
815  */
816 void vmw_validation_done(struct vmw_validation_context *ctx,
817 			 struct vmw_fence_obj *fence)
818 {
819 	vmw_validation_bo_fence(ctx, fence);
820 	vmw_validation_res_unreserve(ctx, false);
821 	if (ctx->res_mutex)
822 		mutex_unlock(ctx->res_mutex);
823 	vmw_validation_unref_lists(ctx);
824 }
825 
826 /**
827  * vmw_validation_preload_bo - Preload the validation memory allocator for a
828  * call to vmw_validation_add_bo().
829  * @ctx: Pointer to the validation context.
830  *
831  * Iff this function returns successfully, the next call to
832  * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
833  * but voids the guarantee.
834  *
835  * Return: Zero if successful, %-ENOMEM otherwise.
836  */
837 int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
838 {
839 	unsigned int size = sizeof(struct vmw_validation_bo_node);
840 
841 	if (!vmw_validation_mem_alloc(ctx, size))
842 		return -ENOMEM;
843 
844 	ctx->mem_size_left += size;
845 	return 0;
846 }
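
/*
 * Sketch of the intended preload pattern (illustrative): reserve allocator
 * space while sleeping is still allowed, so that the following
 * vmw_validation_add_bo() is guaranteed not to sleep:
 *
 *	ret = vmw_validation_preload_bo(ctx);
 *	if (ret)
 *		return ret;
 *	// ... enter the non-sleeping section ...
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 */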
847 
848 /**
849  * vmw_validation_preload_res - Preload the validation memory allocator for a
850  * call to vmw_validation_add_resource().
851  * @ctx: Pointer to the validation context.
852  * @size: Size of the validation node extra data. See below.
853  *
854  * Iff this function returns successfully, the next call to
855  * vmw_validation_add_resource() with the same or smaller @size is guaranteed
856  * not to sleep. An error is not fatal but voids the guarantee.
857  *
858  * Return: Zero if successful, %-ENOMEM otherwise.
859  */
860 int vmw_validation_preload_res(struct vmw_validation_context *ctx,
861 			       unsigned int size)
862 {
863 	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
864 				    size) +
865 		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
866 	if (!vmw_validation_mem_alloc(ctx, size))
867 		return -ENOMEM;
868 
869 	ctx->mem_size_left += size;
870 	return 0;
871 }
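
/*
 * Similar sketch for resources (illustrative; struct my_priv is
 * hypothetical). The preloaded @size must be at least as large as the private
 * data later handed to vmw_validation_add_resource():
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_priv));
 *	if (ret)
 *		return ret;
 *	// Later, in a path that must not sleep; pass 0 as @dirty to leave
 *	// the dirty state unchanged.
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					  0, (void **)&priv, &first);
 */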
872 
873 /**
874  * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
875  * validation context
876  * @ctx: The validation context
877  *
878  * This function unreserves the buffer objects previously reserved using
879  * vmw_validation_bo_reserve. It's typically used as part of an error path
880  */
881 void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
882 {
883 	struct vmw_validation_bo_node *entry;
884 
885 	/*
886 	 * Switching coherent resource backup buffers failed.
887 	 * Release corresponding buffer object dirty trackers.
888 	 */
889 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
890 		if (entry->coherent_count) {
891 			unsigned int coherent_count = entry->coherent_count;
892 			struct vmw_buffer_object *vbo =
893 				container_of(entry->base.bo, typeof(*vbo),
894 					     base);
895 
896 			while (coherent_count--)
897 				vmw_bo_dirty_release(vbo);
898 		}
899 	}
900 
901 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
902 }
903