xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c (revision 1e5d41b981bc550f41b198706e259a45686f3b5a)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 #include "vmwgfx_bo.h"
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_resource_priv.h"
31 #include "vmwgfx_validation.h"
32 
33 #include <linux/slab.h>
34 
35 /**
36  * struct vmw_validation_bo_node - Buffer object validation metadata.
37  * @base: Metadata used for TTM reservation and validation.
38  * @hash: A hash entry used for the duplicate detection hash table.
39  * @coherent_count: If switching backup buffers, number of new coherent
40  * resources that will have this buffer as a backup buffer.
41  *
42  * Bit fields are used since these structures are allocated and freed in
43  * large numbers and space conservation is desired.
44  */
45 struct vmw_validation_bo_node {
46 	struct ttm_validate_buffer base;
47 	struct vmwgfx_hash_item hash;
48 	unsigned int coherent_count;
49 };
50 /**
51  * struct vmw_validation_res_node - Resource validation metadata.
52  * @head: List head for the resource validation list.
53  * @hash: A hash entry used for the duplicate detection hash table.
54  * @res: Reference counted resource pointer.
55  * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer
56  * to be assigned to a resource.
57  * @new_guest_memory_offset: Offset into the new backup mob for resources
58  * that can share MOBs.
59  * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
60  * the command stream provides a mob bind operation.
61  * @switching_guest_memory_bo: The validation process is switching backup MOB.
62  * @first_usage: True iff the resource has been seen only once in the current
63  * validation batch.
64  * @reserved: Whether the resource is currently reserved by this process.
65  * @dirty_set: Change dirty status of the resource.
66  * @dirty: Dirty information VMW_RES_DIRTY_XX.
67  * @private: Optional additional memory for caller-private data.
68  *
69  * Bit fields are used since these structures are allocated and freed in
70  * large numbers and space conservation is desired.
71  */
72 struct vmw_validation_res_node {
73 	struct list_head head;
74 	struct vmwgfx_hash_item hash;
75 	struct vmw_resource *res;
76 	struct vmw_bo *new_guest_memory_bo;
77 	unsigned long new_guest_memory_offset;
78 	u32 no_buffer_needed : 1;
79 	u32 switching_guest_memory_bo : 1;
80 	u32 first_usage : 1;
81 	u32 reserved : 1;
82 	u32 dirty : 1;
83 	u32 dirty_set : 1;
84 	unsigned long private[];
85 };
86 
87 /**
88  * vmw_validation_mem_alloc - Allocate kernel memory from the validation
89  * context-based allocator
90  * @ctx: The validation context
91  * @size: The number of bytes to allocate.
92  *
93  * The memory allocated may not exceed PAGE_SIZE, and the returned
94  * address is aligned to sizeof(long). All memory allocated this way is
95  * reclaimed after validation when calling any of the exported functions:
96  * vmw_validation_unref_lists()
97  * vmw_validation_revert()
98  * vmw_validation_done()
99  *
100  * Return: Pointer to the allocated memory on success. NULL on failure.
101  */
102 void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
103 			       unsigned int size)
104 {
105 	void *addr;
106 
107 	size = vmw_validation_align(size);
108 	if (size > PAGE_SIZE)
109 		return NULL;
110 
111 	if (ctx->mem_size_left < size) {
112 		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
113 		if (!page)
114 			return NULL;
115 
116 		list_add_tail(&page->lru, &ctx->page_list);
117 		ctx->page_address = page_address(page);
118 		ctx->mem_size_left = PAGE_SIZE;
119 	}
120 
121 	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
122 	ctx->mem_size_left -= size;
123 
124 	return addr;
125 }
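
/*
 * Example (editorial sketch, not part of the original file): a typical use
 * of the context-based allocator above. Allocations are bump-allocated from
 * per-context pages and reclaimed wholesale by vmw_validation_unref_lists(),
 * vmw_validation_revert() or vmw_validation_done(); there is no
 * per-allocation free. The type my_tag and the helper are hypothetical
 * caller-side names.
 */
struct my_tag {
	unsigned long id;
};

static struct my_tag *my_alloc_tag(struct vmw_validation_context *ctx)
{
	/* Returns a sizeof(long)-aligned chunk, or NULL when the size would
	 * exceed PAGE_SIZE or a fresh page cannot be allocated.
	 */
	return vmw_validation_mem_alloc(ctx, sizeof(struct my_tag));
}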
126 
127 /**
128  * vmw_validation_mem_free - Free all memory allocated using
129  * vmw_validation_mem_alloc()
130  * @ctx: The validation context
131  *
132  * All memory previously allocated for this context using
133  * vmw_validation_mem_alloc() is freed.
134  */
135 static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
136 {
137 	struct page *entry, *next;
138 
139 	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
140 		list_del_init(&entry->lru);
141 		__free_page(entry);
142 	}
143 
144 	ctx->mem_size_left = 0;
145 }
146 
147 /**
148  * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
149  * validation context's lists.
150  * @ctx: The validation context to search.
151  * @vbo: The buffer object to search for.
152  *
153  * Return: Pointer to the struct vmw_validation_bo_node referencing the
154  * duplicate, or NULL if none found.
155  */
156 static struct vmw_validation_bo_node *
157 vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
158 			   struct vmw_bo *vbo)
159 {
160 	struct  vmw_validation_bo_node *bo_node = NULL;
161 
162 	if (!ctx->merge_dups)
163 		return NULL;
164 
165 	if (ctx->sw_context) {
166 		struct vmwgfx_hash_item *hash;
167 		unsigned long key = (unsigned long) vbo;
168 
169 		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
170 			if (hash->key == key) {
171 				bo_node = container_of(hash, typeof(*bo_node), hash);
172 				break;
173 			}
174 		}
175 	} else {
176 		struct  vmw_validation_bo_node *entry;
177 
178 		list_for_each_entry(entry, &ctx->bo_list, base.head) {
179 			if (entry->base.bo == &vbo->tbo) {
180 				bo_node = entry;
181 				break;
182 			}
183 		}
184 	}
185 
186 	return bo_node;
187 }
188 
189 /**
190  * vmw_validation_find_res_dup - Find a duplicate resource entry in the
191  * validation context's lists.
192  * @ctx: The validation context to search.
193  * @res: Reference counted resource pointer.
194  *
195  * Return: Pointer to the struct vmw_validation_res_node referencing the
196  * duplicate, or NULL if none found.
197  */
198 static struct vmw_validation_res_node *
199 vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
200 			    struct vmw_resource *res)
201 {
202 	struct  vmw_validation_res_node *res_node = NULL;
203 
204 	if (!ctx->merge_dups)
205 		return NULL;
206 
207 	if (ctx->sw_context) {
208 		struct vmwgfx_hash_item *hash;
209 		unsigned long key = (unsigned long) res;
210 
211 		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
212 			if (hash->key == key) {
213 				res_node = container_of(hash, typeof(*res_node), hash);
214 				break;
215 			}
216 		}
217 	} else {
218 		struct  vmw_validation_res_node *entry;
219 
220 		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
221 			if (entry->res == res) {
222 				res_node = entry;
223 				goto out;
224 			}
225 		}
226 
227 		list_for_each_entry(entry, &ctx->resource_list, head) {
228 			if (entry->res == res) {
229 				res_node = entry;
230 				break;
231 			}
232 		}
233 
234 	}
235 out:
236 	return res_node;
237 }
238 
239 /**
240  * vmw_validation_add_bo - Add a buffer object to the validation context.
241  * @ctx: The validation context.
242  * @vbo: The buffer object.
243  *
244  * Return: Zero on success, negative error code otherwise.
245  */
246 int vmw_validation_add_bo(struct vmw_validation_context *ctx,
247 			  struct vmw_bo *vbo)
248 {
249 	struct vmw_validation_bo_node *bo_node;
250 
251 	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
252 	if (!bo_node) {
253 		struct ttm_validate_buffer *val_buf;
254 
255 		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
256 		if (!bo_node)
257 			return -ENOMEM;
258 
259 		if (ctx->sw_context) {
260 			bo_node->hash.key = (unsigned long) vbo;
261 			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
262 				bo_node->hash.key);
263 		}
264 		val_buf = &bo_node->base;
265 		vmw_bo_reference(vbo);
266 		val_buf->bo = &vbo->tbo;
267 		val_buf->num_shared = 0;
268 		list_add_tail(&val_buf->head, &ctx->bo_list);
269 	}
270 
271 	return 0;
272 }
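
/*
 * Example (editorial sketch, not part of the original file): registering a
 * buffer object. When the context merges duplicates, re-adding the same vbo
 * just finds the existing node via vmw_validation_find_bo_dup() and returns
 * 0 without taking another reference. my_register_bo is a hypothetical name.
 */
static int my_register_bo(struct vmw_validation_context *ctx,
			  struct vmw_bo *vbo)
{
	int ret = vmw_validation_add_bo(ctx, vbo);

	if (ret)
		return ret;

	/* Duplicate add: merged, no new node and no extra reference. */
	return vmw_validation_add_bo(ctx, vbo);
}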
273 
274 /**
275  * vmw_validation_add_resource - Add a resource to the validation context.
276  * @ctx: The validation context.
277  * @res: The resource.
278  * @priv_size: Size of private, additional metadata.
279  * @dirty: Whether to change dirty status.
280  * @p_node: Output pointer of additional metadata address.
281  * @first_usage: Whether this was the first time this resource was seen.
282  *
283  * Return: Zero on success, negative error code otherwise.
284  */
285 int vmw_validation_add_resource(struct vmw_validation_context *ctx,
286 				struct vmw_resource *res,
287 				size_t priv_size,
288 				u32 dirty,
289 				void **p_node,
290 				bool *first_usage)
291 {
292 	struct vmw_validation_res_node *node;
293 
294 	node = vmw_validation_find_res_dup(ctx, res);
295 	if (node) {
296 		node->first_usage = 0;
297 		goto out_fill;
298 	}
299 
300 	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
301 	if (!node) {
302 		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
303 		return -ENOMEM;
304 	}
305 
306 	if (ctx->sw_context) {
307 		node->hash.key = (unsigned long) res;
308 		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
309 	}
310 	node->res = vmw_resource_reference_unless_doomed(res);
311 	if (!node->res) {
312 		hash_del_rcu(&node->hash.head);
313 		return -ESRCH;
314 	}
315 
316 	node->first_usage = 1;
317 	if (!res->dev_priv->has_mob) {
318 		list_add_tail(&node->head, &ctx->resource_list);
319 	} else {
320 		switch (vmw_res_type(res)) {
321 		case vmw_res_context:
322 		case vmw_res_dx_context:
323 			list_add(&node->head, &ctx->resource_ctx_list);
324 			break;
325 		case vmw_res_cotable:
326 			list_add_tail(&node->head, &ctx->resource_ctx_list);
327 			break;
328 		default:
329 			list_add_tail(&node->head, &ctx->resource_list);
330 			break;
331 		}
332 	}
333 
334 out_fill:
335 	if (dirty) {
336 		node->dirty_set = 1;
337 		/* Overwriting previous information here is intentional! */
338 		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
339 	}
340 	if (first_usage)
341 		*first_usage = node->first_usage;
342 	if (p_node)
343 		*p_node = &node->private;
344 
345 	return 0;
346 }
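
/*
 * Example (editorial sketch, not part of the original file): registering a
 * resource with caller-private per-entry data. The private area lives in
 * validation-context memory and is returned through @p_node. The type
 * my_priv and the helper are hypothetical caller-side names.
 */
struct my_priv {
	u32 cmd_offset;
};

static int my_register_res(struct vmw_validation_context *ctx,
			   struct vmw_resource *res)
{
	struct my_priv *priv;
	bool first_usage;
	int ret;

	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
					  VMW_RES_DIRTY_SET,
					  (void **)&priv, &first_usage);
	if (ret)
		return ret;

	if (first_usage)
		priv->cmd_offset = 0;	/* Initialize private data once. */

	return 0;
}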
347 
348 /**
349  * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
350  * validation.
351  * @ctx: The validation context.
352  * @val_private: The additional meta-data pointer returned when the
353  * resource was registered with the validation context. Used to identify
354  * the resource.
355  * @dirty: Dirty information VMW_RES_DIRTY_XX
356  */
357 void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
358 				  void *val_private, u32 dirty)
359 {
360 	struct vmw_validation_res_node *val;
361 
362 	if (!dirty)
363 		return;
364 
365 	val = container_of(val_private, typeof(*val), private);
366 	val->dirty_set = 1;
367 	/* Overwriting previous information here is intentional! */
368 	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
369 }
370 
371 /**
372  * vmw_validation_res_switch_backup - Register a backup MOB switch during
373  * validation.
374  * @ctx: The validation context.
375  * @val_private: The additional meta-data pointer returned when the
376  * resource was registered with the validation context. Used to identify
377  * the resource.
378  * @vbo: The new backup buffer object MOB. This buffer object needs to have
379  * already been registered with the validation context.
380  * @guest_memory_offset: Offset into the new backup MOB.
381  */
382 void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
383 				      void *val_private,
384 				      struct vmw_bo *vbo,
385 				      unsigned long guest_memory_offset)
386 {
387 	struct vmw_validation_res_node *val;
388 
389 	val = container_of(val_private, typeof(*val), private);
390 
391 	val->switching_guest_memory_bo = 1;
392 	if (val->first_usage)
393 		val->no_buffer_needed = 1;
394 
395 	val->new_guest_memory_bo = vbo;
396 	val->new_guest_memory_offset = guest_memory_offset;
397 }
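
/*
 * Example (editorial sketch, not part of the original file): switching a
 * resource to a new guest memory buffer. Only a non-refcounted pointer to
 * the new buffer is stored, so it must be kept alive by registering it with
 * the context first. @val_private is the pointer previously returned by
 * vmw_validation_add_resource(); my_switch_backup is a hypothetical name.
 */
static int my_switch_backup(struct vmw_validation_context *ctx,
			    void *val_private, struct vmw_bo *new_vbo)
{
	int ret = vmw_validation_add_bo(ctx, new_vbo);

	if (ret)
		return ret;

	vmw_validation_res_switch_backup(ctx, val_private, new_vbo, 0);
	return 0;
}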
398 
399 /**
400  * vmw_validation_res_reserve - Reserve all resources registered with this
401  * validation context.
402  * @ctx: The validation context.
403  * @intr: Use interruptible waits when possible.
404  *
405  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
406  * code on failure.
407  */
408 int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
409 			       bool intr)
410 {
411 	struct vmw_validation_res_node *val;
412 	int ret = 0;
413 
414 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
415 
416 	list_for_each_entry(val, &ctx->resource_list, head) {
417 		struct vmw_resource *res = val->res;
418 
419 		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
420 		if (ret)
421 			goto out_unreserve;
422 
423 		val->reserved = 1;
424 		if (res->guest_memory_bo) {
425 			struct vmw_bo *vbo = res->guest_memory_bo;
426 
427 			vmw_bo_placement_set(vbo,
428 					     res->func->domain,
429 					     res->func->busy_domain);
430 			ret = vmw_validation_add_bo(ctx, vbo);
431 			if (ret)
432 				goto out_unreserve;
433 		}
434 
435 		if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
436 		    res->coherent) {
437 			struct vmw_validation_bo_node *bo_node =
438 				vmw_validation_find_bo_dup(ctx,
439 							   val->new_guest_memory_bo);
440 
441 			if (WARN_ON(!bo_node)) {
442 				ret = -EINVAL;
443 				goto out_unreserve;
444 			}
445 			bo_node->coherent_count++;
446 		}
447 	}
448 
449 	return 0;
450 
451 out_unreserve:
452 	vmw_validation_res_unreserve(ctx, true);
453 	return ret;
454 }
455 
456 /**
457  * vmw_validation_res_unreserve - Unreserve all reserved resources
458  * registered with this validation context.
459  * @ctx: The validation context.
460  * @backoff: Whether this is a backoff- or a commit-type operation. This
461  * is used to determine whether to switch backup MOBs or not.
462  */
463 void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
464 				 bool backoff)
465 {
466 	struct vmw_validation_res_node *val;
467 
468 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
469 	if (backoff)
470 		list_for_each_entry(val, &ctx->resource_list, head) {
471 			if (val->reserved)
472 				vmw_resource_unreserve(val->res,
473 						       false, false, false,
474 						       NULL, 0);
475 		}
476 	else
477 		list_for_each_entry(val, &ctx->resource_list, head) {
478 			if (val->reserved)
479 				vmw_resource_unreserve(val->res,
480 						       val->dirty_set,
481 						       val->dirty,
482 						       val->switching_guest_memory_bo,
483 						       val->new_guest_memory_bo,
484 						       val->new_guest_memory_offset);
485 		}
486 }
487 
488 /**
489  * vmw_validation_bo_validate_single - Validate a single buffer object.
490  * @bo: The TTM buffer object base.
491  * @interruptible: Whether to perform waits interruptible if possible.
492  *
493  * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
494  * code on failure.
495  */
496 static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
497 					     bool interruptible)
498 {
499 	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
500 	struct ttm_operation_ctx ctx = {
501 		.interruptible = interruptible,
502 		.no_wait_gpu = false
503 	};
504 	int ret;
505 
506 	if (atomic_read(&vbo->cpu_writers))
507 		return -EBUSY;
508 
509 	if (vbo->tbo.pin_count > 0)
510 		return 0;
511 
512 	ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
513 	if (ret == 0 || ret == -ERESTARTSYS)
514 		return ret;
515 
516 	/*
517 	 * If that failed, try again, this time evicting
518 	 * previous contents.
519 	 */
520 	ctx.allow_res_evict = true;
521 
522 	return ttm_bo_validate(bo, &vbo->placement, &ctx);
523 }
524 
525 /**
526  * vmw_validation_bo_validate - Validate all buffer objects registered with
527  * the validation context.
528  * @ctx: The validation context.
529  * @intr: Whether to perform waits interruptible if possible.
530  *
531  * Return: Zero on success, -ERESTARTSYS if interrupted,
532  * negative error code on failure.
533  */
534 int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
535 {
536 	struct vmw_validation_bo_node *entry;
537 	int ret;
538 
539 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
540 		struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
541 
542 		ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
543 
544 		if (ret)
545 			return ret;
546 
547 		/*
548 		 * Rather than having the resource code allocate the bo
549 		 * dirty tracker in resource_unreserve(), where we can't fail,
550 		 * do it here when validating the buffer object.
551 		 */
552 		if (entry->coherent_count) {
553 			unsigned int coherent_count = entry->coherent_count;
554 
555 			while (coherent_count) {
556 				ret = vmw_bo_dirty_add(vbo);
557 				if (ret)
558 					return ret;
559 
560 				coherent_count--;
561 			}
562 			entry->coherent_count -= coherent_count;
563 		}
564 
565 		if (vbo->dirty)
566 			vmw_bo_dirty_scan(vbo);
567 	}
568 	return 0;
569 }
570 
571 /**
572  * vmw_validation_res_validate - Validate all resources registered with the
573  * validation context.
574  * @ctx: The validation context.
575  * @intr: Whether to perform waits interruptible if possible.
576  *
577  * Before this function is called, all resource backup buffers must have
578  * been validated.
579  *
580  * Return: Zero on success, -ERESTARTSYS if interrupted,
581  * negative error code on failure.
582  */
583 int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
584 {
585 	struct vmw_validation_res_node *val;
586 	int ret;
587 
588 	list_for_each_entry(val, &ctx->resource_list, head) {
589 		struct vmw_resource *res = val->res;
590 		struct vmw_bo *backup = res->guest_memory_bo;
591 
592 		ret = vmw_resource_validate(res, intr, val->dirty_set &&
593 					    val->dirty);
594 		if (ret) {
595 			if (ret != -ERESTARTSYS)
596 				DRM_ERROR("Failed to validate resource.\n");
597 			return ret;
598 		}
599 
600 		/* Check if the resource switched backup buffer */
601 		if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
602 			struct vmw_bo *vbo = res->guest_memory_bo;
603 
604 			vmw_bo_placement_set(vbo, res->func->domain,
605 					     res->func->busy_domain);
606 			ret = vmw_validation_add_bo(ctx, vbo);
607 			if (ret)
608 				return ret;
609 		}
610 	}
611 	return 0;
612 }
613 
614 /**
615  * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
616  * and unregister it from this validation context.
617  * @ctx: The validation context.
618  *
619  * The hash table used for duplicate finding is an expensive resource and
620  * may be protected by mutexes that can cause deadlocks during resource
621  * unreferencing if held. After resource- and buffer-object registration,
622  * there is no longer any use for this hash table, so allow freeing it
623  * early, either to shorten mutex hold times or before resources and
624  * buffer objects are freed during validation context cleanup.
625  */
626 void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
627 {
628 	struct vmw_validation_bo_node *entry;
629 	struct vmw_validation_res_node *val;
630 
631 	if (!ctx->sw_context)
632 		return;
633 
634 	list_for_each_entry(entry, &ctx->bo_list, base.head)
635 		hash_del_rcu(&entry->hash.head);
636 
637 	list_for_each_entry(val, &ctx->resource_list, head)
638 		hash_del_rcu(&val->hash.head);
639 
640 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
641 		hash_del_rcu(&val->hash.head);
642 
643 	ctx->sw_context = NULL;
644 }
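
/*
 * Example (editorial sketch, not part of the original file): dropping the
 * hash table as soon as duplicate lookups are done, so that no
 * hash-protecting mutex needs to be held across the potentially expensive
 * unreferencing step. my_teardown is a hypothetical name.
 */
static void my_teardown(struct vmw_validation_context *ctx)
{
	vmw_validation_drop_ht(ctx);		/* No more duplicate lookups. */
	vmw_validation_unref_lists(ctx);	/* May invoke destructors. */
}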
645 
646 /**
647  * vmw_validation_unref_lists - Unregister previously registered buffer
648  * object and resources.
649  * @ctx: The validation context.
650  *
651  * Note that this function may cause buffer object- and resource destructors
652  * to be invoked.
653  */
654 void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
655 {
656 	struct vmw_validation_bo_node *entry;
657 	struct vmw_validation_res_node *val;
658 
659 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
660 		drm_gem_object_put(&entry->base.bo->base);
661 		entry->base.bo = NULL;
662 	}
663 
664 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
665 	list_for_each_entry(val, &ctx->resource_list, head)
666 		vmw_resource_unreference(&val->res);
667 
668 	/*
669 	 * No need to detach each list entry since they are all freed with
670 	 * vmw_validation_mem_free(). Just make the lists inaccessible.
671 	 */
672 	INIT_LIST_HEAD(&ctx->bo_list);
673 	INIT_LIST_HEAD(&ctx->resource_list);
674 
675 	vmw_validation_mem_free(ctx);
676 }
677 
678 /**
679  * vmw_validation_prepare - Prepare a validation context for command
680  * submission.
681  * @ctx: The validation context.
682  * @mutex: The mutex used to protect resource reservation.
683  * @intr: Whether to perform waits interruptible if possible.
684  *
685  * Note that the single reservation mutex @mutex is an unfortunate
686  * construct. Ideally resource reservation should be moved to per-resource
687  * ww_mutexes.
688  * If this function doesn't return zero to indicate success, all resources
689  * are left unreserved but still referenced.
690  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
691  * on error.
692  */
693 int vmw_validation_prepare(struct vmw_validation_context *ctx,
694 			   struct mutex *mutex,
695 			   bool intr)
696 {
697 	int ret = 0;
698 
699 	if (mutex) {
700 		if (intr)
701 			ret = mutex_lock_interruptible(mutex);
702 		else
703 			mutex_lock(mutex);
704 		if (ret)
705 			return -ERESTARTSYS;
706 	}
707 
708 	ctx->res_mutex = mutex;
709 	ret = vmw_validation_res_reserve(ctx, intr);
710 	if (ret)
711 		goto out_no_res_reserve;
712 
713 	ret = vmw_validation_bo_reserve(ctx, intr);
714 	if (ret)
715 		goto out_no_bo_reserve;
716 
717 	ret = vmw_validation_bo_validate(ctx, intr);
718 	if (ret)
719 		goto out_no_validate;
720 
721 	ret = vmw_validation_res_validate(ctx, intr);
722 	if (ret)
723 		goto out_no_validate;
724 
725 	return 0;
726 
727 out_no_validate:
728 	vmw_validation_bo_backoff(ctx);
729 out_no_bo_reserve:
730 	vmw_validation_res_unreserve(ctx, true);
731 out_no_res_reserve:
732 	if (mutex)
733 		mutex_unlock(mutex);
734 
735 	return ret;
736 }
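
/*
 * Example (editorial sketch, not part of the original file): the intended
 * life cycle of a validation context around a command submission.
 * my_emit_commands() and my_submit() are hypothetical stand-ins for the
 * caller's command-buffer logic.
 */
static int my_emit_commands(void);	/* Hypothetical. */

static int my_submit(struct vmw_validation_context *ctx,
		     struct mutex *res_mutex,
		     struct vmw_fence_obj *fence)
{
	int ret = vmw_validation_prepare(ctx, res_mutex, true);

	if (ret)
		return ret;	/* Everything left unreserved but referenced. */

	ret = my_emit_commands();
	if (ret) {
		/* Backs off, unlocks res_mutex and unrefs the lists. */
		vmw_validation_revert(ctx);
		return ret;
	}

	/* Fences the buffer objects, commits and unrefs the lists. */
	vmw_validation_done(ctx, fence);
	return 0;
}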
737 
738 /**
739  * vmw_validation_revert - Revert validation actions if command submission
740  * failed.
741  *
742  * @ctx: The validation context.
743  *
744  * The caller still needs to unref resources after a call to this function.
745  */
746 void vmw_validation_revert(struct vmw_validation_context *ctx)
747 {
748 	vmw_validation_bo_backoff(ctx);
749 	vmw_validation_res_unreserve(ctx, true);
750 	if (ctx->res_mutex)
751 		mutex_unlock(ctx->res_mutex);
752 	vmw_validation_unref_lists(ctx);
753 }
754 
755 /**
756  * vmw_validation_done - Commit validation actions after command submission
757  * success.
758  * @ctx: The validation context.
759  * @fence: Fence with which to fence all buffer objects taking part in the
760  * command submission.
761  *
762  * The caller does NOT need to unref resources after a call to this function.
763  */
764 void vmw_validation_done(struct vmw_validation_context *ctx,
765 			 struct vmw_fence_obj *fence)
766 {
767 	vmw_validation_bo_fence(ctx, fence);
768 	vmw_validation_res_unreserve(ctx, false);
769 	if (ctx->res_mutex)
770 		mutex_unlock(ctx->res_mutex);
771 	vmw_validation_unref_lists(ctx);
772 }
773 
774 /**
775  * vmw_validation_preload_bo - Preload the validation memory allocator for a
776  * call to vmw_validation_add_bo().
777  * @ctx: Pointer to the validation context.
778  *
779  * Iff this function returns successfully, the next call to
780  * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
781  * but voids the guarantee.
782  *
783  * Returns: Zero if successful, %-ENOMEM otherwise.
784  */
785 int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
786 {
787 	unsigned int size = sizeof(struct vmw_validation_bo_node);
788 
789 	if (!vmw_validation_mem_alloc(ctx, size))
790 		return -ENOMEM;
791 
792 	ctx->mem_size_left += size;
793 	return 0;
794 }
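
/*
 * Example (editorial sketch, not part of the original file): preloading so
 * that the following vmw_validation_add_bo() is guaranteed not to sleep,
 * e.g. while a hypothetical caller-side spinlock is held.
 */
static int my_add_bo_locked(struct vmw_validation_context *ctx,
			    struct vmw_bo *vbo, spinlock_t *lock)
{
	int ret = vmw_validation_preload_bo(ctx);

	if (ret)
		return ret;

	spin_lock(lock);
	ret = vmw_validation_add_bo(ctx, vbo);	/* Cannot sleep now. */
	spin_unlock(lock);

	return ret;
}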
795 
796 /**
797  * vmw_validation_preload_res - Preload the validation memory allocator for a
798  * call to vmw_validation_add_resource().
799  * @ctx: Pointer to the validation context.
800  * @size: Size of the validation node extra data. See below.
801  *
802  * Iff this function returns successfully, the next call to
803  * vmw_validation_add_resource() with the same or smaller @size is guaranteed
804  * sleep. An error is not fatal but voids the guarantee.
805  *
806  * Returns: Zero if successful, %-ENOMEM otherwise.
807  */
808 int vmw_validation_preload_res(struct vmw_validation_context *ctx,
809 			       unsigned int size)
810 {
811 	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
812 				    size) +
813 		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
814 	if (!vmw_validation_mem_alloc(ctx, size))
815 		return -ENOMEM;
816 
817 	ctx->mem_size_left += size;
818 	return 0;
819 }
820 
821 /**
822  * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
823  * validation context
824  * @ctx: The validation context
825  *
826  * This function unreserves the buffer objects previously reserved using
827  * vmw_validation_bo_reserve. It's typically used as part of an error path
828  */
829 void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
830 {
831 	struct vmw_validation_bo_node *entry;
832 
833 	/*
834 	 * Switching coherent resource backup buffers failed.
835 	 * Release corresponding buffer object dirty trackers.
836 	 */
837 	list_for_each_entry(entry, &ctx->bo_list, base.head) {
838 		if (entry->coherent_count) {
839 			unsigned int coherent_count = entry->coherent_count;
840 			struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
841 
842 			while (coherent_count--)
843 				vmw_bo_dirty_release(vbo);
844 		}
845 	}
846 
847 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
848 }
849