xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c (revision db5d28c0bfe566908719bec8e25443aabecbb802)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_validation.h"

#include <linux/slab.h>

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, the number of new coherent
 * resources that will have this buffer as a backup buffer.
 *
 * These structures are allocated and freed in large numbers, so space
 * conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_guest_memory_bo: Non-refcounted pointer to a new guest memory buffer
 * to be assigned to a resource.
 * @new_guest_memory_offset: Offset into the new backup MOB for resources
 * that can share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a MOB bind operation.
 * @switching_guest_memory_bo: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_bo *new_guest_memory_bo;
	unsigned long new_guest_memory_offset;
	u32 no_buffer_needed : 1;
	u32 switching_guest_memory_bo : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};
86 
87 /**
88  * vmw_validation_mem_alloc - Allocate kernel memory from the validation
89  * context based allocator
90  * @ctx: The validation context
91  * @size: The number of bytes to allocated.
92  *
93  * The memory allocated may not exceed PAGE_SIZE, and the returned
94  * address is aligned to sizeof(long). All memory allocated this way is
95  * reclaimed after validation when calling any of the exported functions:
96  * vmw_validation_unref_lists()
97  * vmw_validation_revert()
98  * vmw_validation_done()
99  *
100  * Return: Pointer to the allocated memory on success. NULL on failure.
101  */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
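
/*
 * Example (illustrative sketch, not part of the original file): the
 * allocator above behaves like a simple bump allocator whose pages are
 * reclaimed wholesale at context teardown. A hypothetical caller copying
 * command data into context-lifetime storage might look like:
 *
 *	static int example_stash_cmd(struct vmw_validation_context *ctx,
 *				     const void *cmd, unsigned int size)
 *	{
 *		void *copy = vmw_validation_mem_alloc(ctx, size);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		memcpy(copy, cmd, size);
 *		return 0;
 *	}
 *
 * The copy needs no explicit free; it is reclaimed by
 * vmw_validation_unref_lists(), vmw_validation_revert() or
 * vmw_validation_done().
 */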

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) vbo;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				bo_node = container_of(hash, typeof(*bo_node), hash);
				break;
			}
		}
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->tbo) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) res;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				res_node = container_of(hash, typeof(*res_node), hash);
				break;
			}
		}
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (!bo_node) {
		struct ttm_validate_buffer *val_buf;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->sw_context) {
			bo_node->hash.key = (unsigned long) vbo;
			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
				bo_node->hash.key);
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
	}

	return 0;
}
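
/*
 * Example (hypothetical caller, for illustration only): a command parser
 * that encounters a buffer object simply registers it. Duplicate
 * registrations of the same object are merged when the context was set up
 * with @merge_dups, so this is safe to call once per reference in the
 * command stream:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo);
 *	if (ret)
 *		return ret;
 */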

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->sw_context) {
		node->hash.key = (unsigned long) res;
		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
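
/*
 * Example (illustrative sketch; the payload type is hypothetical):
 * registering a resource together with a small piece of caller-private
 * metadata. The pointer returned through @p_node refers to @priv_size
 * bytes of per-node storage that lives until the context is torn down.
 *
 *	struct example_priv {
 *		u32 cmd_offset;
 *	} *priv;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_NONE,
 *					  (void **)&priv, &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		priv->cmd_offset = 0;
 */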

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}
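
/*
 * Example (illustrative): a caller that discovers, after registration,
 * that a command will modify the resource can update the recorded dirty
 * state using the private pointer obtained from
 * vmw_validation_add_resource():
 *
 *	vmw_validation_res_set_dirty(ctx, priv, VMW_RES_DIRTY_SET);
 */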

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @guest_memory_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_bo *vbo,
				      unsigned long guest_memory_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_guest_memory_bo = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_guest_memory_bo = vbo;
	val->new_guest_memory_offset = guest_memory_offset;
}
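
/*
 * Example (illustrative sketch): as the kernel-doc above requires, the new
 * backup buffer object must already be known to the context before the
 * switch is registered:
 *
 *	ret = vmw_validation_add_bo(ctx, new_vbo);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, priv, new_vbo, 0);
 */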

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->guest_memory_bo) {
			struct vmw_bo *vbo = res->guest_memory_bo;

			vmw_bo_placement_set(vbo,
					     res->func->domain,
					     res->func->busy_domain);
			ret = vmw_validation_add_bo(ctx, vbo);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_guest_memory_bo);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				 bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_guest_memory_bo,
						       val->new_guest_memory_bo,
						       val->new_guest_memory_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
					     bool interruptible)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->tbo.pin_count > 0)
		return 0;

	ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try again, this time evicting
	 * previous contents.
	 */
	ctx.allow_res_evict = true;

	return ttm_bo_validate(bo, &vbo->placement, &ctx);
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);

		ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve(), where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}
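
/*
 * Example (illustrative, mirroring the buffer-object stage that
 * vmw_validation_prepare() below drives): callers dealing only with
 * buffer objects can pair this with the reservation helpers declared in
 * vmwgfx_validation.h:
 *
 *	ret = vmw_validation_bo_reserve(ctx, intr);
 *	if (ret)
 *		return ret;
 *	ret = vmw_validation_bo_validate(ctx, intr);
 *	if (ret)
 *		vmw_validation_bo_backoff(ctx);
 */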

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_bo *backup = res->guest_memory_bo;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
			struct vmw_bo *vbo = res->guest_memory_bo;

			vmw_bo_placement_set(vbo, res->func->domain,
					     res->func->busy_domain);
			ret = vmw_validation_add_bo(ctx, vbo);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->sw_context)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		hash_del_rcu(&entry->hash.head);

	list_for_each_entry(val, &ctx->resource_list, head)
		hash_del_rcu(&val->hash.head);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		hash_del_rcu(&val->hash.head);

	ctx->sw_context = NULL;
}
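
/*
 * Example (sketch of the ordering implied by the kernel-doc above): an
 * execbuf-style caller drops the hash table as soon as all buffer objects
 * and resources have been registered, so that the later teardown runs
 * without the hash table's locks held:
 *
 *	vmw_validation_drop_ht(ctx);
 *	vmw_validation_unref_lists(ctx);
 */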

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
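
/*
 * Example (illustrative sketch of the overall lifecycle; assumes the
 * DECLARE_VAL_CONTEXT() initializer from vmwgfx_validation.h, and elides
 * command submission and fence creation):
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *	struct vmw_fence_obj *fence;
 *	int ret;
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo);
 *	if (ret)
 *		goto out_unref;
 *	ret = vmw_validation_prepare(&val_ctx, NULL, true);
 *	if (ret)
 *		goto out_unref;
 *
 *	(submit commands and obtain @fence here)
 *
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */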

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
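
/*
 * Example (hypothetical, relying only on the guarantee documented above):
 * preloading allows the subsequent registration to run in an atomic
 * section without the allocator sleeping. The lock is illustrative.
 *
 *	ret = vmw_validation_preload_res(ctx, priv_size);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&example_lock);
 *	ret = vmw_validation_add_resource(ctx, res, priv_size, dirty,
 *					  &p_node, &first_usage);
 *	spin_unlock(&example_lock);
 */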

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}
848