xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c (revision 0fdebc5ec2ca492d69df2d93a6a7abade4941aae)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 #include <linux/slab.h>
29 #include "vmwgfx_validation.h"
30 #include "vmwgfx_drv.h"
31 
32 
33 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
34 
35 /**
36  * struct vmw_validation_bo_node - Buffer object validation metadata.
37  * @base: Metadata used for TTM reservation and validation.
38  * @hash: A hash entry used for the duplicate detection hash table.
39  * @coherent_count: If switching backup buffers, number of new coherent
40  * resources that will have this buffer as a backup buffer.
41  * @as_mob: Validate as mob.
42  * @cpu_blit: Validate for cpu blit access.
43  *
44  * Bit fields are used since these structures are allocated and freed in
45  * large numbers and space conservation is desired.
46  */
47 struct vmw_validation_bo_node {
48 	struct ttm_validate_buffer base;
49 	struct vmwgfx_hash_item hash;
50 	unsigned int coherent_count;
51 	u32 as_mob : 1;
52 	u32 cpu_blit : 1;
53 };
54 /**
55  * struct vmw_validation_res_node - Resource validation metadata.
56  * @head: List head for the resource validation list.
57  * @hash: A hash entry used for the duplicate detection hash table.
58  * @res: Reference counted resource pointer.
59  * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
60  * to a resource.
61  * @new_backup_offset: Offset into the new backup mob for resources that can
62  * share MOBs.
63  * @no_buffer_needed: The kernel does not need to allocate a MOB during
64  * validation; the command stream provides a MOB bind operation.
65  * @switching_backup: The validation process is switching backup MOB.
66  * @first_usage: True iff the resource has been seen only once in the current
67  * validation batch.
68  * @reserved: Whether the resource is currently reserved by this process.
69  * @dirty_set: Change dirty status of the resource.
70  * @dirty: Dirty information VMW_RES_DIRTY_XX.
71  * @private: Optionally additional memory for caller-private data.
72  *
73  * Bit fields are used since these structures are allocated and freed in
74  * large numbers and space conservation is desired.
75  */
76 struct vmw_validation_res_node {
77 	struct list_head head;
78 	struct vmwgfx_hash_item hash;
79 	struct vmw_resource *res;
80 	struct vmw_buffer_object *new_backup;
81 	unsigned long new_backup_offset;
82 	u32 no_buffer_needed : 1;
83 	u32 switching_backup : 1;
84 	u32 first_usage : 1;
85 	u32 reserved : 1;
86 	u32 dirty : 1;
87 	u32 dirty_set : 1;
88 	unsigned long private[];
89 };
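
/*
 * Illustrative note (not part of the driver): the @private flexible array is
 * what vmw_validation_add_resource() hands back through its @p_node argument.
 * Callers treat it as opaque per-resource storage, and this file recovers the
 * enclosing node from such a pointer with container_of(), for example:
 *
 *	struct vmw_validation_res_node *val =
 *		container_of(val_private, typeof(*val), private);
 */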
90 
91 /**
92  * vmw_validation_mem_alloc - Allocate kernel memory from the validation
93  * context-based allocator
94  * @ctx: The validation context
95  * @size: The number of bytes to allocate.
96  *
97  * The memory allocated may not exceed PAGE_SIZE, and the returned
98  * address is aligned to sizeof(long). All memory allocated this way is
99  * reclaimed after validation when calling any of the exported functions:
100  * vmw_validation_unref_lists()
101  * vmw_validation_revert()
102  * vmw_validation_done()
103  *
104  * Return: Pointer to the allocated memory on success. NULL on failure.
105  */
106 void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
107 			       unsigned int size)
108 {
109 	void *addr;
110 
111 	size = vmw_validation_align(size);
112 	if (size > PAGE_SIZE)
113 		return NULL;
114 
115 	if (ctx->mem_size_left < size) {
116 		struct page *page;
117 
118 		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
119 			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
120 			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
121 		}
122 
123 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
124 		if (!page)
125 			return NULL;
126 
127 		if (ctx->vm)
128 			ctx->vm_size_left -= PAGE_SIZE;
129 
130 		list_add_tail(&page->lru, &ctx->page_list);
131 		ctx->page_address = page_address(page);
132 		ctx->mem_size_left = PAGE_SIZE;
133 	}
134 
135 	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
136 	ctx->mem_size_left -= size;
137 
138 	return addr;
139 }
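
/*
 * Usage sketch (illustrative only; "bo_node" stands for whatever node struct
 * a caller embeds): allocations come from the context's page pool, are never
 * freed individually and are reclaimed by vmw_validation_unref_lists(),
 * vmw_validation_revert() or vmw_validation_done().
 *
 *	bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
 *	if (!bo_node)
 *		return -ENOMEM;
 */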
140 
141 /**
142  * vmw_validation_mem_free - Free all memory allocated using
143  * vmw_validation_mem_alloc()
144  * @ctx: The validation context
145  *
146  * All memory previously allocated for this context using
147  * vmw_validation_mem_alloc() is freed.
148  */
149 static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
150 {
151 	struct page *entry, *next;
152 
153 	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
154 		list_del_init(&entry->lru);
155 		__free_page(entry);
156 	}
157 
158 	ctx->mem_size_left = 0;
159 	if (ctx->vm && ctx->total_mem) {
160 		ctx->total_mem = 0;
161 		ctx->vm_size_left = 0;
162 	}
163 }
164 
165 /**
166  * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
167  * validation context's lists.
168  * @ctx: The validation context to search.
169  * @vbo: The buffer object to search for.
170  *
171  * Return: Pointer to the struct vmw_validation_bo_node referencing the
172  * duplicate, or NULL if none found.
173  */
174 static struct vmw_validation_bo_node *
175 vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
176 			   struct vmw_buffer_object *vbo)
177 {
178 	struct vmw_validation_bo_node *bo_node = NULL;
179 
180 	if (!ctx->merge_dups)
181 		return NULL;
182 
183 	if (ctx->ht) {
184 		struct vmwgfx_hash_item *hash;
185 
186 		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
187 			bo_node = container_of(hash, typeof(*bo_node), hash);
188 	} else {
189 		struct vmw_validation_bo_node *entry;
190 
191 		list_for_each_entry(entry, &ctx->bo_list, base.head) {
192 			if (entry->base.bo == &vbo->base) {
193 				bo_node = entry;
194 				break;
195 			}
196 		}
197 	}
198 
199 	return bo_node;
200 }
201 
202 /**
203  * vmw_validation_find_res_dup - Find a duplicate resource entry in the
204  * validation context's lists.
205  * @ctx: The validation context to search.
206  * @res: Reference counted resource pointer.
207  *
208  * Return: Pointer to the struct vmw_validation_res_node referencing the
209  * duplicate, or NULL if none found.
210  */
211 static struct vmw_validation_res_node *
212 vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
213 			    struct vmw_resource *res)
214 {
215 	struct vmw_validation_res_node *res_node = NULL;
216 
217 	if (!ctx->merge_dups)
218 		return NULL;
219 
220 	if (ctx->ht) {
221 		struct vmwgfx_hash_item *hash;
222 
223 		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
224 			res_node = container_of(hash, typeof(*res_node), hash);
225 	} else {
226 		struct vmw_validation_res_node *entry;
227 
228 		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
229 			if (entry->res == res) {
230 				res_node = entry;
231 				goto out;
232 			}
233 		}
234 
235 		list_for_each_entry(entry, &ctx->resource_list, head) {
236 			if (entry->res == res) {
237 				res_node = entry;
238 				break;
239 			}
240 		}
241 
242 	}
243 out:
244 	return res_node;
245 }
246 
247 /**
248  * vmw_validation_add_bo - Add a buffer object to the validation context.
249  * @ctx: The validation context.
250  * @vbo: The buffer object.
251  * @as_mob: Validate as mob, otherwise suitable for GMR operations.
252  * @cpu_blit: Validate in a page-mappable location.
253  *
254  * Return: Zero on success, negative error code otherwise.
255  */
256 int vmw_validation_add_bo(struct vmw_validation_context *ctx,
257 			  struct vmw_buffer_object *vbo,
258 			  bool as_mob,
259 			  bool cpu_blit)
260 {
261 	struct vmw_validation_bo_node *bo_node;
262 
263 	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
264 	if (bo_node) {
265 		if (bo_node->as_mob != as_mob ||
266 		    bo_node->cpu_blit != cpu_blit) {
267 			DRM_ERROR("Inconsistent buffer usage.\n");
268 			return -EINVAL;
269 		}
270 	} else {
271 		struct ttm_validate_buffer *val_buf;
272 		int ret;
273 
274 		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
275 		if (!bo_node)
276 			return -ENOMEM;
277 
278 		if (ctx->ht) {
279 			bo_node->hash.key = (unsigned long) vbo;
280 			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
281 			if (ret) {
282 				DRM_ERROR("Failed to initialize a buffer "
283 					  "validation entry.\n");
284 				return ret;
285 			}
286 		}
287 		val_buf = &bo_node->base;
288 		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
289 		if (!val_buf->bo)
290 			return -ESRCH;
291 		val_buf->num_shared = 0;
292 		list_add_tail(&val_buf->head, &ctx->bo_list);
293 		bo_node->as_mob = as_mob;
294 		bo_node->cpu_blit = cpu_blit;
295 	}
296 
297 	return 0;
298 }
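
/*
 * Usage sketch (illustrative; "vbo" is a hypothetical caller-held buffer
 * object): a buffer may be added any number of times, but the @as_mob and
 * @cpu_blit arguments must match the first registration or -EINVAL is
 * returned.
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (ret)
 *		return ret;
 */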
299 
300 /**
301  * vmw_validation_add_resource - Add a resource to the validation context.
302  * @ctx: The validation context.
303  * @res: The resource.
304  * @priv_size: Size of private, additional metadata.
305  * @dirty: Whether to change dirty status.
306  * @p_node: Output pointer to the additional metadata address.
307  * @first_usage: Whether this was the first time this resource was seen.
308  *
309  * Return: Zero on success, negative error code otherwise.
310  */
311 int vmw_validation_add_resource(struct vmw_validation_context *ctx,
312 				struct vmw_resource *res,
313 				size_t priv_size,
314 				u32 dirty,
315 				void **p_node,
316 				bool *first_usage)
317 {
318 	struct vmw_validation_res_node *node;
319 	int ret;
320 
321 	node = vmw_validation_find_res_dup(ctx, res);
322 	if (node) {
323 		node->first_usage = 0;
324 		goto out_fill;
325 	}
326 
327 	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
328 	if (!node) {
329 		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
330 		return -ENOMEM;
331 	}
332 
333 	if (ctx->ht) {
334 		node->hash.key = (unsigned long) res;
335 		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
336 		if (ret) {
337 			DRM_ERROR("Failed to initialize a resource validation "
338 				  "entry.\n");
339 			return ret;
340 		}
341 	}
342 	node->res = vmw_resource_reference_unless_doomed(res);
343 	if (!node->res)
344 		return -ESRCH;
345 
346 	node->first_usage = 1;
347 	if (!res->dev_priv->has_mob) {
348 		list_add_tail(&node->head, &ctx->resource_list);
349 	} else {
350 		switch (vmw_res_type(res)) {
351 		case vmw_res_context:
352 		case vmw_res_dx_context:
353 			list_add(&node->head, &ctx->resource_ctx_list);
354 			break;
355 		case vmw_res_cotable:
356 			list_add_tail(&node->head, &ctx->resource_ctx_list);
357 			break;
358 		default:
359 			list_add_tail(&node->head, &ctx->resource_list);
360 			break;
361 		}
362 	}
363 
364 out_fill:
365 	if (dirty) {
366 		node->dirty_set = 1;
367 		/* Overwriting previous information here is intentional! */
368 		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
369 	}
370 	if (first_usage)
371 		*first_usage = node->first_usage;
372 	if (p_node)
373 		*p_node = &node->private;
374 
375 	return 0;
376 }
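
/*
 * Usage sketch (illustrative; "res" and struct my_priv are hypothetical):
 * @priv_size bytes are reserved in the node's @private area and handed back
 * through @p_node, valid until the validation context is torn down.
 *
 *	struct my_priv *priv;
 *	void *node;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_SET, &node, &first);
 *	if (ret)
 *		return ret;
 *	priv = node;
 */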
377 
378 /**
379  * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
380  * validation.
381  * @ctx: The validation context.
382  * @val_private: The additional meta-data pointer returned when the
383  * resource was registered with the validation context. Used to identify
384  * the resource.
385  * @dirty: Dirty information VMW_RES_DIRTY_XX
386  */
387 void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
388 				  void *val_private, u32 dirty)
389 {
390 	struct vmw_validation_res_node *val;
391 
392 	if (!dirty)
393 		return;
394 
395 	val = container_of(val_private, typeof(*val), private);
396 	val->dirty_set = 1;
397 	/* Overwriting previous information here is intentional! */
398 	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
399 }
400 
401 /**
402  * vmw_validation_res_switch_backup - Register a backup MOB switch during
403  * validation.
404  * @ctx: The validation context.
405  * @val_private: The additional meta-data pointer returned when the
406  * resource was registered with the validation context. Used to identify
407  * the resource.
408  * @vbo: The new backup buffer object MOB. This buffer object needs to have
409  * already been registered with the validation context.
410  * @backup_offset: Offset into the new backup MOB.
411  */
412 void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
413 				      void *val_private,
414 				      struct vmw_buffer_object *vbo,
415 				      unsigned long backup_offset)
416 {
417 	struct vmw_validation_res_node *val;
418 
419 	val = container_of(val_private, typeof(*val), private);
420 
421 	val->switching_backup = 1;
422 	if (val->first_usage)
423 		val->no_buffer_needed = 1;
424 
425 	val->new_backup = vbo;
426 	val->new_backup_offset = backup_offset;
427 }
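
/*
 * Usage sketch (illustrative; "new_vbo" and "val_private" are hypothetical
 * caller-held values): the new backup buffer must already have been added to
 * the context, since vmw_validation_res_reserve() later looks it up to bump
 * its coherent count for coherent resources.
 *
 *	ret = vmw_validation_add_bo(ctx, new_vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, val_private, new_vbo, 0);
 */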
428 
429 /**
430  * vmw_validation_res_reserve - Reserve all resources registered with this
431  * validation context.
432  * @ctx: The validation context.
433  * @intr: Use interruptible waits when possible.
434  *
435  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
436  * code on failure.
437  */
438 int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
439 			       bool intr)
440 {
441 	struct vmw_validation_res_node *val;
442 	int ret = 0;
443 
444 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
445 
446 	list_for_each_entry(val, &ctx->resource_list, head) {
447 		struct vmw_resource *res = val->res;
448 
449 		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
450 		if (ret)
451 			goto out_unreserve;
452 
453 		val->reserved = 1;
454 		if (res->backup) {
455 			struct vmw_buffer_object *vbo = res->backup;
456 
457 			ret = vmw_validation_add_bo
458 				(ctx, vbo, vmw_resource_needs_backup(res),
459 				 false);
460 			if (ret)
461 				goto out_unreserve;
462 		}
463 
464 		if (val->switching_backup && val->new_backup &&
465 		    res->coherent) {
466 			struct vmw_validation_bo_node *bo_node =
467 				vmw_validation_find_bo_dup(ctx,
468 							   val->new_backup);
469 
470 			if (WARN_ON(!bo_node)) {
471 				ret = -EINVAL;
472 				goto out_unreserve;
473 			}
474 			bo_node->coherent_count++;
475 		}
476 	}
477 
478 	return 0;
479 
480 out_unreserve:
481 	vmw_validation_res_unreserve(ctx, true);
482 	return ret;
483 }
484 
485 /**
486  * vmw_validation_res_unreserve - Unreserve all reserved resources
487  * registered with this validation context.
488  * @ctx: The validation context.
489  * @backoff: Whether this is a backoff- or a commit-type operation. This
490  * is used to determine whether to switch backup MOBs or not.
491  */
492 void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
493 				 bool backoff)
494 {
495 	struct vmw_validation_res_node *val;
496 
497 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
498 	if (backoff)
499 		list_for_each_entry(val, &ctx->resource_list, head) {
500 			if (val->reserved)
501 				vmw_resource_unreserve(val->res,
502 						       false, false, false,
503 						       NULL, 0);
504 		}
505 	else
506 		list_for_each_entry(val, &ctx->resource_list, head) {
507 			if (val->reserved)
508 				vmw_resource_unreserve(val->res,
509 						       val->dirty_set,
510 						       val->dirty,
511 						       val->switching_backup,
512 						       val->new_backup,
513 						       val->new_backup_offset);
514 		}
515 }
516 
517 /**
518  * vmw_validation_bo_validate_single - Validate a single buffer object.
519  * @bo: The TTM buffer object base.
520  * @interruptible: Whether to perform waits interruptible if possible.
521  * @validate_as_mob: Whether to validate in MOB memory.
522  *
523  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
524  * code on failure.
525  */
526 int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
527 				      bool interruptible,
528 				      bool validate_as_mob)
529 {
530 	struct vmw_buffer_object *vbo =
531 		container_of(bo, struct vmw_buffer_object, base);
532 	struct ttm_operation_ctx ctx = {
533 		.interruptible = interruptible,
534 		.no_wait_gpu = false
535 	};
536 	int ret;
537 
538 	if (atomic_read(&vbo->cpu_writers))
539 		return -EBUSY;
540 
541 	if (vbo->base.pin_count > 0)
542 		return 0;
543 
544 	if (validate_as_mob)
545 		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
546 
547 	/*
548 	 * Put BO in VRAM if there is space, otherwise as a GMR.
549 	 * If there is no space in VRAM and GMR ids are all used up,
550 	 * start evicting GMRs to make room. If the DMA buffer can't be
551 	 * used as a GMR, this will return -ENOMEM.
552 	 */
553 
554 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
555 	if (ret == 0 || ret == -ERESTARTSYS)
556 		return ret;
557 
558 	/*
559 	 * If that failed, try VRAM again, this time evicting
560 	 * previous contents.
561 	 */
562 
563 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
564 	return ret;
565 }
566 
567 /**
568  * vmw_validation_bo_validate - Validate all buffer objects registered with
569  * the validation context.
570  * @ctx: The validation context.
571  * @intr: Whether to perform waits interruptible if possible.
572  *
573  * Return: Zero on success, -ERESTARTSYS if interrupted,
574  * negative error code on failure.
575  */
576 int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
577 {
578 	struct vmw_validation_bo_node *entry;
579 	int ret;
580 
581 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
582 		struct vmw_buffer_object *vbo =
583 			container_of(entry->base.bo, typeof(*vbo), base);
584 
585 		if (entry->cpu_blit) {
586 			struct ttm_operation_ctx ttm_ctx = {
587 				.interruptible = intr,
588 				.no_wait_gpu = false
589 			};
590 
591 			ret = ttm_bo_validate(entry->base.bo,
592 					      &vmw_nonfixed_placement, &ttm_ctx);
593 		} else {
594 			ret = vmw_validation_bo_validate_single
595 			(entry->base.bo, intr, entry->as_mob);
596 		}
597 		if (ret)
598 			return ret;
599 
600 		/*
601 		 * Rather than having the resource code allocate the bo
602 		 * dirty tracker in resource_unreserve() where we can't fail,
603 		 * do it here when validating the buffer object.
604 		 */
605 		if (entry->coherent_count) {
606 			unsigned int coherent_count = entry->coherent_count;
607 
608 			while (coherent_count) {
609 				ret = vmw_bo_dirty_add(vbo);
610 				if (ret)
611 					return ret;
612 
613 				coherent_count--;
614 			}
615 			entry->coherent_count -= coherent_count;
616 		}
617 
618 		if (vbo->dirty)
619 			vmw_bo_dirty_scan(vbo);
620 	}
621 	return 0;
622 }
623 
624 /**
625  * vmw_validation_res_validate - Validate all resources registered with the
626  * validation context.
627  * @ctx: The validation context.
628  * @intr: Whether to perform waits interruptible if possible.
629  *
630  * Before this function is called, all resource backup buffers must have
631  * been validated.
632  *
633  * Return: Zero on success, -ERESTARTSYS if interrupted,
634  * negative error code on failure.
635  */
636 int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
637 {
638 	struct vmw_validation_res_node *val;
639 	int ret;
640 
641 	list_for_each_entry(val, &ctx->resource_list, head) {
642 		struct vmw_resource *res = val->res;
643 		struct vmw_buffer_object *backup = res->backup;
644 
645 		ret = vmw_resource_validate(res, intr, val->dirty_set &&
646 					    val->dirty);
647 		if (ret) {
648 			if (ret != -ERESTARTSYS)
649 				DRM_ERROR("Failed to validate resource.\n");
650 			return ret;
651 		}
652 
653 		/* Check if the resource switched backup buffer */
654 		if (backup && res->backup && (backup != res->backup)) {
655 			struct vmw_buffer_object *vbo = res->backup;
656 
657 			ret = vmw_validation_add_bo
658 				(ctx, vbo, vmw_resource_needs_backup(res),
659 				 false);
660 			if (ret)
661 				return ret;
662 		}
663 	}
664 	return 0;
665 }
666 
667 /**
668  * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
669  * and unregister it from this validation context.
670  * @ctx: The validation context.
671  *
672  * The hash table used for duplicate finding is an expensive resource and
673  * may be protected by mutexes that may cause deadlocks during resource
674  * unreferencing if held. After resource and buffer object registration,
675  * there is no longer any use for this hash table, so allow freeing it
676  * either to shorten any mutex locking time, or before resources and
677  * buffer objects are freed during validation context cleanup.
678  */
679 void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
680 {
681 	struct vmw_validation_bo_node *entry;
682 	struct vmw_validation_res_node *val;
683 
684 	if (!ctx->ht)
685 		return;
686 
687 	list_for_each_entry(entry, &ctx->bo_list, base.head)
688 		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
689 
690 	list_for_each_entry(val, &ctx->resource_list, head)
691 		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
692 
693 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
694 		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
695 
696 	ctx->ht = NULL;
697 }
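
/*
 * Ordering sketch (illustrative): drop the hash table as soon as all buffer
 * objects and resources have been registered, and in any case before a
 * teardown that may run destructors while a mutex protecting the table is
 * held.
 *
 *	vmw_validation_drop_ht(ctx);
 *	...
 *	vmw_validation_unref_lists(ctx);
 */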
698 
699 /**
700  * vmw_validation_unref_lists - Unregister previously registered buffer
701  * objects and resources.
702  * @ctx: The validation context.
703  *
704  * Note that this function may cause buffer object and resource destructors
705  * to be invoked.
706  */
707 void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
708 {
709 	struct vmw_validation_bo_node *entry;
710 	struct vmw_validation_res_node *val;
711 
712 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
713 		ttm_bo_put(entry->base.bo);
714 		entry->base.bo = NULL;
715 	}
716 
717 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
718 	list_for_each_entry(val, &ctx->resource_list, head)
719 		vmw_resource_unreference(&val->res);
720 
721 	/*
722 	 * No need to detach each list entry since they are all freed with
723 	 * vmw_validation_mem_free(). Just make them inaccessible.
724 	 */
725 	INIT_LIST_HEAD(&ctx->bo_list);
726 	INIT_LIST_HEAD(&ctx->resource_list);
727 
728 	vmw_validation_mem_free(ctx);
729 }
730 
731 /**
732  * vmw_validation_prepare - Prepare a validation context for command
733  * submission.
734  * @ctx: The validation context.
735  * @mutex: The mutex used to protect resource reservation.
736  * @intr: Whether to perform waits interruptible if possible.
737  *
738  * Note that the single reservation mutex @mutex is an unfortunate
739  * construct. Ideally resource reservation should be moved to per-resource
740  * ww_mutexes.
741  * If this function doesn't return zero to indicate success, all resources
742  * are left unreserved but still referenced.
743  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
744  * on error.
745  */
746 int vmw_validation_prepare(struct vmw_validation_context *ctx,
747 			   struct mutex *mutex,
748 			   bool intr)
749 {
750 	int ret = 0;
751 
752 	if (mutex) {
753 		if (intr)
754 			ret = mutex_lock_interruptible(mutex);
755 		else
756 			mutex_lock(mutex);
757 		if (ret)
758 			return -ERESTARTSYS;
759 	}
760 
761 	ctx->res_mutex = mutex;
762 	ret = vmw_validation_res_reserve(ctx, intr);
763 	if (ret)
764 		goto out_no_res_reserve;
765 
766 	ret = vmw_validation_bo_reserve(ctx, intr);
767 	if (ret)
768 		goto out_no_bo_reserve;
769 
770 	ret = vmw_validation_bo_validate(ctx, intr);
771 	if (ret)
772 		goto out_no_validate;
773 
774 	ret = vmw_validation_res_validate(ctx, intr);
775 	if (ret)
776 		goto out_no_validate;
777 
778 	return 0;
779 
780 out_no_validate:
781 	vmw_validation_bo_backoff(ctx);
782 out_no_bo_reserve:
783 	vmw_validation_res_unreserve(ctx, true);
784 out_no_res_reserve:
785 	if (mutex)
786 		mutex_unlock(mutex);
787 
788 	return ret;
789 }
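
/*
 * Typical call flow (illustrative sketch, assuming the DECLARE_VAL_CONTEXT()
 * initializer from vmwgfx_validation.h; "dev_priv", "vbo", "res" and "fence"
 * are hypothetical caller-held objects). A failing vmw_validation_prepare()
 * leaves everything unreserved but still referenced, so that path only needs
 * vmw_validation_unref_lists():
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (!ret)
 *		ret = vmw_validation_add_resource(&val_ctx, res, 0, 0,
 *						  NULL, NULL);
 *	if (!ret)
 *		ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex,
 *					     true);
 *	if (ret)
 *		goto out_unref;
 *
 *	(build and submit the command stream here; on a submission failure
 *	 call vmw_validation_revert(&val_ctx) instead of vmw_validation_done())
 *
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 *
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */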
790 
791 /**
792  * vmw_validation_revert - Revert validation actions if command submission
793  * failed.
794  *
795  * @ctx: The validation context.
796  *
797  * The caller still needs to unref resources after a call to this function.
798  */
799 void vmw_validation_revert(struct vmw_validation_context *ctx)
800 {
801 	vmw_validation_bo_backoff(ctx);
802 	vmw_validation_res_unreserve(ctx, true);
803 	if (ctx->res_mutex)
804 		mutex_unlock(ctx->res_mutex);
805 	vmw_validation_unref_lists(ctx);
806 }
807 
808 /**
809  * vmw_validation_done - Commit validation actions after command submission
810  * success.
811  * @ctx: The validation context.
812  * @fence: Fence with which to fence all buffer objects taking part in the
813  * command submission.
814  *
815  * The caller does NOT need to unref resources after a call to this function.
816  */
817 void vmw_validation_done(struct vmw_validation_context *ctx,
818 			 struct vmw_fence_obj *fence)
819 {
820 	vmw_validation_bo_fence(ctx, fence);
821 	vmw_validation_res_unreserve(ctx, false);
822 	if (ctx->res_mutex)
823 		mutex_unlock(ctx->res_mutex);
824 	vmw_validation_unref_lists(ctx);
825 }
826 
827 /**
828  * vmw_validation_preload_bo - Preload the validation memory allocator for a
829  * call to vmw_validation_add_bo().
830  * @ctx: Pointer to the validation context.
831  *
832  * Iff this function returns successfully, the next call to
833  * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
834  * but voids the guarantee.
835  *
836  * Returns: Zero if successful, %-ENOMEM otherwise.
837  */
838 int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
839 {
840 	unsigned int size = sizeof(struct vmw_validation_bo_node);
841 
842 	if (!vmw_validation_mem_alloc(ctx, size))
843 		return -ENOMEM;
844 
845 	ctx->mem_size_left += size;
846 	return 0;
847 }
848 
849 /**
850  * vmw_validation_preload_res - Preload the validation memory allocator for a
851  * call to vmw_validation_add_resource().
852  * @ctx: Pointer to the validation context.
853  * @size: Size of the validation node extra data. See below.
854  *
855  * Iff this function returns successfully, the next call to
856  * vmw_validation_add_resource() with the same or smaller @size is guaranteed
857  * not to sleep. An error is not fatal but voids the guarantee.
858  *
859  * Returns: Zero if successful, %-ENOMEM otherwise.
860  */
861 int vmw_validation_preload_res(struct vmw_validation_context *ctx,
862 			       unsigned int size)
863 {
864 	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
865 				    size) +
866 		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
867 	if (!vmw_validation_mem_alloc(ctx, size))
868 		return -ENOMEM;
869 
870 	ctx->mem_size_left += size;
871 	return 0;
872 }
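
/*
 * Usage sketch (illustrative; struct my_priv is hypothetical): preloading
 * reserves pool space up front so that the registration that follows neither
 * sleeps nor fails with -ENOMEM.
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_priv));
 *	if (ret)
 *		return ret;
 *	(lookup of the resource, then vmw_validation_add_resource())
 */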
873 
874 /**
875  * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
876  * validation context
877  * @ctx: The validation context
878  *
879  * This function unreserves the buffer objects previously reserved using
880  * vmw_validation_bo_reserve(). It's typically used as part of an error path.
881  */
882 void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
883 {
884 	struct vmw_validation_bo_node *entry;
885 
886 	/*
887 	 * Switching coherent resource backup buffers failed.
888 	 * Release corresponding buffer object dirty trackers.
889 	 */
890 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
891 		if (entry->coherent_count) {
892 			unsigned int coherent_count = entry->coherent_count;
893 			struct vmw_buffer_object *vbo =
894 				container_of(entry->base.bo, typeof(*vbo),
895 					     base);
896 
897 			while (coherent_count--)
898 				vmw_bo_dirty_release(vbo);
899 		}
900 	}
901 
902 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
903 }
904