xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

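/**
 * struct vmwgfx_gmrid_man - manager for GMR and MOB id placements
 * @manager: The embedded TTM resource manager.
 * @lock: Protects the page accounting fields below.
 * @gmr_ida: Allocator for the ids handed out as resource start values.
 * @max_gmr_ids: Maximum number of ids available for this placement type.
 * @max_gmr_pages: Soft limit on the total number of backing pages.
 * @used_gmr_pages: Number of pages currently accounted against that limit.
 * @type: The placement type, VMW_PL_GMR or VMW_PL_MOB.
 */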
struct vmwgfx_gmrid_man {
	struct ttm_resource_manager manager;
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
	uint8_t type;
};

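/* Resolve the vmwgfx_gmrid_man that embeds the given resource manager. */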
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmwgfx_gmrid_man, manager);
}

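/*
 * vmw_gmrid_man_get_node - Allocate an id and account pages for a resource.
 *
 * Allocates a ttm_resource for @bo, assigns it a free id from the ida
 * (stored in the resource's start field) and charges the resource's pages
 * against the soft page limit. On overflow the limit is grown, up to half
 * of system RAM, before the allocation is failed with -ENOSPC.
 */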
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
	int id;

	*res = kmalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;

	ttm_resource_init(bo, place, *res);

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0) {
		ttm_resource_fini(man, *res);
		kfree(*res);
		return id;
	}

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += PFN_UP((*res)->size);
		/*
		 * Because the graphics memory is a soft limit we can try to
		 * expand it instead of letting userspace apps crash.
		 * We're just going to have a sane limit (half of RAM)
		 * on the number of MOBs that we create and will try to keep
		 * the system running until we reach that.
		 */
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) {
			const unsigned long max_graphics_pages = totalram_pages() / 2;
			uint32_t new_max_pages = 0;

			DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
			vmw_host_printf("vmwgfx, warning: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");

			if (gman->max_gmr_pages > (max_graphics_pages / 2)) {
				DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n");
				new_max_pages = max_graphics_pages;
			} else {
				new_max_pages = gman->max_gmr_pages * 2;
			}

			if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
				DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
					 ((new_max_pages) << (PAGE_SHIFT - 10)));

				gman->max_gmr_pages = new_max_pages;
			} else {
				char buf[256];

				snprintf(buf, sizeof(buf),
					 "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
					 ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
				vmw_host_printf(buf);
				DRM_WARN("%s", buf);
				goto nospace;
			}
		}
	}

	(*res)->start = id;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= PFN_UP((*res)->size);
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	ttm_resource_fini(man, *res);
	kfree(*res);
	return -ENOSPC;
}

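/*
 * vmw_gmrid_man_put_node - Release a resource's id and its page accounting.
 */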
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ida_free(&gman->gmr_ida, res->start);
	spin_lock(&gman->lock);
	gman->used_gmr_pages -= PFN_UP(res->size);
	spin_unlock(&gman->lock);
	ttm_resource_fini(man, res);
	kfree(res);
}

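/*
 * vmw_gmrid_man_debug - Print id and page usage for this manager.
 */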
static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
				struct drm_printer *printer)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);

	drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
		   (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
		   gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func;

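/*
 * vmw_gmrid_man_init - Create a GMR or MOB id manager and register it with
 * the TTM device, picking the id count and page limits from the device
 * capabilities according to @type.
 */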
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man;
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	man = &gman->manager;

	man->func = &vmw_gmrid_manager_func;
	man->use_tt = true;
	ttm_resource_manager_init(man, &dev_priv->bdev, 0);
	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);
	gman->type = type;

	switch (type) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

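/*
 * vmw_gmrid_man_fini - Disable the manager, evict all of its resources,
 * unregister it from the TTM device and free it.
 */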
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ttm_resource_manager_set_used(man, false);

	ttm_resource_manager_evict_all(&dev_priv->bdev, man);

	ttm_resource_manager_cleanup(man);

	ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
	ida_destroy(&gman->gmr_ida);
	kfree(gman);
}

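/* TTM resource manager callbacks for the GMR and MOB id placements. */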
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,
	.free = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};