xref: /freebsd/sys/dev/drm2/ttm/ttm_memory.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
1*592ffb21SWarner Losh /**************************************************************************
2*592ffb21SWarner Losh  *
3*592ffb21SWarner Losh  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4*592ffb21SWarner Losh  * All Rights Reserved.
5*592ffb21SWarner Losh  *
6*592ffb21SWarner Losh  * Permission is hereby granted, free of charge, to any person obtaining a
7*592ffb21SWarner Losh  * copy of this software and associated documentation files (the
8*592ffb21SWarner Losh  * "Software"), to deal in the Software without restriction, including
9*592ffb21SWarner Losh  * without limitation the rights to use, copy, modify, merge, publish,
10*592ffb21SWarner Losh  * distribute, sub license, and/or sell copies of the Software, and to
11*592ffb21SWarner Losh  * permit persons to whom the Software is furnished to do so, subject to
12*592ffb21SWarner Losh  * the following conditions:
13*592ffb21SWarner Losh  *
14*592ffb21SWarner Losh  * The above copyright notice and this permission notice (including the
15*592ffb21SWarner Losh  * next paragraph) shall be included in all copies or substantial portions
16*592ffb21SWarner Losh  * of the Software.
17*592ffb21SWarner Losh  *
18*592ffb21SWarner Losh  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*592ffb21SWarner Losh  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*592ffb21SWarner Losh  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*592ffb21SWarner Losh  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*592ffb21SWarner Losh  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*592ffb21SWarner Losh  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*592ffb21SWarner Losh  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*592ffb21SWarner Losh  *
26*592ffb21SWarner Losh  **************************************************************************/
27*592ffb21SWarner Losh 
28*592ffb21SWarner Losh #include <sys/cdefs.h>
29*592ffb21SWarner Losh #include <dev/drm2/drmP.h>
30*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_memory.h>
31*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_module.h>
32*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_page_alloc.h>
33*592ffb21SWarner Losh 
34*592ffb21SWarner Losh #define TTM_MEMORY_ALLOC_RETRIES 4
35*592ffb21SWarner Losh 
36*592ffb21SWarner Losh struct ttm_mem_zone {
37*592ffb21SWarner Losh 	u_int kobj_ref;
38*592ffb21SWarner Losh 	struct ttm_mem_global *glob;
39*592ffb21SWarner Losh 	const char *name;
40*592ffb21SWarner Losh 	uint64_t zone_mem;
41*592ffb21SWarner Losh 	uint64_t emer_mem;
42*592ffb21SWarner Losh 	uint64_t max_mem;
43*592ffb21SWarner Losh 	uint64_t swap_limit;
44*592ffb21SWarner Losh 	uint64_t used_mem;
45*592ffb21SWarner Losh };
46*592ffb21SWarner Losh 
47*592ffb21SWarner Losh MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");
48*592ffb21SWarner Losh 
ttm_mem_zone_kobj_release(struct ttm_mem_zone * zone)49*592ffb21SWarner Losh static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
50*592ffb21SWarner Losh {
51*592ffb21SWarner Losh 
52*592ffb21SWarner Losh 	printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
53*592ffb21SWarner Losh 		zone->name, (unsigned long long)zone->used_mem >> 10);
54*592ffb21SWarner Losh 	free(zone, M_TTM_ZONE);
55*592ffb21SWarner Losh }
56*592ffb21SWarner Losh 
57*592ffb21SWarner Losh #if 0
58*592ffb21SWarner Losh /* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
60*592ffb21SWarner Losh 				 struct attribute *attr,
61*592ffb21SWarner Losh 				 char *buffer)
62*592ffb21SWarner Losh {
63*592ffb21SWarner Losh 	uint64_t val = 0;
64*592ffb21SWarner Losh 
65*592ffb21SWarner Losh 	mtx_lock(&zone->glob->lock);
66*592ffb21SWarner Losh 	if (attr == &ttm_mem_sys)
67*592ffb21SWarner Losh 		val = zone->zone_mem;
68*592ffb21SWarner Losh 	else if (attr == &ttm_mem_emer)
69*592ffb21SWarner Losh 		val = zone->emer_mem;
70*592ffb21SWarner Losh 	else if (attr == &ttm_mem_max)
71*592ffb21SWarner Losh 		val = zone->max_mem;
72*592ffb21SWarner Losh 	else if (attr == &ttm_mem_swap)
73*592ffb21SWarner Losh 		val = zone->swap_limit;
74*592ffb21SWarner Losh 	else if (attr == &ttm_mem_used)
75*592ffb21SWarner Losh 		val = zone->used_mem;
76*592ffb21SWarner Losh 	mtx_unlock(&zone->glob->lock);
77*592ffb21SWarner Losh 
78*592ffb21SWarner Losh 	return snprintf(buffer, PAGE_SIZE, "%llu\n",
79*592ffb21SWarner Losh 			(unsigned long long) val >> 10);
80*592ffb21SWarner Losh }
81*592ffb21SWarner Losh #endif
82*592ffb21SWarner Losh 
83*592ffb21SWarner Losh static void ttm_check_swapping(struct ttm_mem_global *glob);
84*592ffb21SWarner Losh 
85*592ffb21SWarner Losh #if 0
86*592ffb21SWarner Losh /* XXXKIB sysctl */
87*592ffb21SWarner Losh static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
88*592ffb21SWarner Losh 				  struct attribute *attr,
89*592ffb21SWarner Losh 				  const char *buffer,
90*592ffb21SWarner Losh 				  size_t size)
91*592ffb21SWarner Losh {
92*592ffb21SWarner Losh 	int chars;
93*592ffb21SWarner Losh 	unsigned long val;
94*592ffb21SWarner Losh 	uint64_t val64;
95*592ffb21SWarner Losh 
96*592ffb21SWarner Losh 	chars = sscanf(buffer, "%lu", &val);
97*592ffb21SWarner Losh 	if (chars == 0)
98*592ffb21SWarner Losh 		return size;
99*592ffb21SWarner Losh 
100*592ffb21SWarner Losh 	val64 = val;
101*592ffb21SWarner Losh 	val64 <<= 10;
102*592ffb21SWarner Losh 
103*592ffb21SWarner Losh 	mtx_lock(&zone->glob->lock);
104*592ffb21SWarner Losh 	if (val64 > zone->zone_mem)
105*592ffb21SWarner Losh 		val64 = zone->zone_mem;
106*592ffb21SWarner Losh 	if (attr == &ttm_mem_emer) {
107*592ffb21SWarner Losh 		zone->emer_mem = val64;
108*592ffb21SWarner Losh 		if (zone->max_mem > val64)
109*592ffb21SWarner Losh 			zone->max_mem = val64;
110*592ffb21SWarner Losh 	} else if (attr == &ttm_mem_max) {
111*592ffb21SWarner Losh 		zone->max_mem = val64;
112*592ffb21SWarner Losh 		if (zone->emer_mem < val64)
113*592ffb21SWarner Losh 			zone->emer_mem = val64;
114*592ffb21SWarner Losh 	} else if (attr == &ttm_mem_swap)
115*592ffb21SWarner Losh 		zone->swap_limit = val64;
116*592ffb21SWarner Losh 	mtx_unlock(&zone->glob->lock);
117*592ffb21SWarner Losh 
118*592ffb21SWarner Losh 	ttm_check_swapping(zone->glob);
119*592ffb21SWarner Losh 
120*592ffb21SWarner Losh 	return size;
121*592ffb21SWarner Losh }
122*592ffb21SWarner Losh #endif
123*592ffb21SWarner Losh 
/*
 * Final-release callback for the global accounting object.
 * Intentionally empty: nothing here owns the ttm_mem_global storage,
 * so there is nothing to free when the last reference is dropped.
 */
static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
}
127*592ffb21SWarner Losh 
/*
 * Return true if any zone's usage exceeds its applicable threshold:
 * the swap limit when called from the swap work handler, the emergency
 * limit for threads holding PRIV_VM_MLOCK, or the normal maximum
 * otherwise.  "extra" is a pending allocation size; a zone whose
 * threshold is smaller than "extra" is always considered over target.
 */
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	struct ttm_mem_zone *zone;
	uint64_t threshold;
	unsigned int idx;

	for (idx = 0; idx < glob->num_zones; ++idx) {
		zone = glob->zones[idx];

		if (from_wq) {
			threshold = zone->swap_limit;
		} else if (priv_check(curthread, PRIV_VM_MLOCK) == 0) {
			threshold = zone->emer_mem;
		} else {
			threshold = zone->max_mem;
		}

		/* A pending allocation larger than the limit forces a hit. */
		if (extra > threshold)
			threshold = 0ULL;

		if (zone->used_mem > threshold)
			return true;
	}
	return false;
}
152*592ffb21SWarner Losh 
153*592ffb21SWarner Losh /**
154*592ffb21SWarner Losh  * At this point we only support a single shrink callback.
155*592ffb21SWarner Losh  * Extend this if needed, perhaps using a linked list of callbacks.
156*592ffb21SWarner Losh  * Note that this function is reentrant:
157*592ffb21SWarner Losh  * many threads may try to swap out at any given time.
158*592ffb21SWarner Losh  */
159*592ffb21SWarner Losh 
ttm_shrink(struct ttm_mem_global * glob,bool from_wq,uint64_t extra)160*592ffb21SWarner Losh static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
161*592ffb21SWarner Losh 		       uint64_t extra)
162*592ffb21SWarner Losh {
163*592ffb21SWarner Losh 	int ret;
164*592ffb21SWarner Losh 	struct ttm_mem_shrink *shrink;
165*592ffb21SWarner Losh 
166*592ffb21SWarner Losh 	mtx_lock(&glob->lock);
167*592ffb21SWarner Losh 	if (glob->shrink == NULL)
168*592ffb21SWarner Losh 		goto out;
169*592ffb21SWarner Losh 
170*592ffb21SWarner Losh 	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
171*592ffb21SWarner Losh 		shrink = glob->shrink;
172*592ffb21SWarner Losh 		mtx_unlock(&glob->lock);
173*592ffb21SWarner Losh 		ret = shrink->do_shrink(shrink);
174*592ffb21SWarner Losh 		mtx_lock(&glob->lock);
175*592ffb21SWarner Losh 		if (unlikely(ret != 0))
176*592ffb21SWarner Losh 			goto out;
177*592ffb21SWarner Losh 	}
178*592ffb21SWarner Losh out:
179*592ffb21SWarner Losh 	mtx_unlock(&glob->lock);
180*592ffb21SWarner Losh }
181*592ffb21SWarner Losh 
182*592ffb21SWarner Losh 
183*592ffb21SWarner Losh 
/*
 * Taskqueue handler: run the shrinker from the swap work queue,
 * targeting the per-zone swap limits (from_wq == true, no extra).
 */
static void ttm_shrink_work(void *arg, int pending __unused)
{
	ttm_shrink((struct ttm_mem_global *)arg, true, 0ULL);
}
190*592ffb21SWarner Losh 
/*
 * Create the "kernel" accounting zone sized by "mem" bytes.  Limits:
 * max at 1/2 of the zone, emergency at 3/4, swap trigger 1/8 below
 * max.  Cannot fail (M_WAITOK allocation); always returns 0.
 */
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
    uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
	zone->glob = glob;
	zone->name = "kernel";
	zone->used_mem = 0;
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = zone->max_mem + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	refcount_init(&zone->kobj_ref, 1);
	glob->zone_kernel = zone;
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
210*592ffb21SWarner Losh 
/*
 * Create the "dma32" accounting zone for 32-bit DMA-addressable
 * memory.  Skipped entirely when total memory fits below 4GiB, since
 * every page is then dma32-capable.  Always returns 0.
 */
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
    uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);

	/* No special dma32 zone needed. */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		free(zone, M_TTM_ZONE);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->glob = glob;
	zone->name = "dma32";
	zone->used_mem = 0;
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = zone->max_mem + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	refcount_init(&zone->kobj_ref, 1);
	glob->zone_dma32 = zone;
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
246*592ffb21SWarner Losh 
ttm_mem_global_init(struct ttm_mem_global * glob)247*592ffb21SWarner Losh int ttm_mem_global_init(struct ttm_mem_global *glob)
248*592ffb21SWarner Losh {
249*592ffb21SWarner Losh 	u_int64_t mem;
250*592ffb21SWarner Losh 	int ret;
251*592ffb21SWarner Losh 	int i;
252*592ffb21SWarner Losh 	struct ttm_mem_zone *zone;
253*592ffb21SWarner Losh 
254*592ffb21SWarner Losh 	mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
255*592ffb21SWarner Losh 	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
256*592ffb21SWarner Losh 	    taskqueue_thread_enqueue, &glob->swap_queue);
257*592ffb21SWarner Losh 	taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
258*592ffb21SWarner Losh 	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
259*592ffb21SWarner Losh 
260*592ffb21SWarner Losh 	refcount_init(&glob->kobj_ref, 1);
261*592ffb21SWarner Losh 
262*592ffb21SWarner Losh 	mem = physmem * PAGE_SIZE;
263*592ffb21SWarner Losh 
264*592ffb21SWarner Losh 	ret = ttm_mem_init_kernel_zone(glob, mem);
265*592ffb21SWarner Losh 	if (unlikely(ret != 0))
266*592ffb21SWarner Losh 		goto out_no_zone;
267*592ffb21SWarner Losh 	ret = ttm_mem_init_dma32_zone(glob, mem);
268*592ffb21SWarner Losh 	if (unlikely(ret != 0))
269*592ffb21SWarner Losh 		goto out_no_zone;
270*592ffb21SWarner Losh 	for (i = 0; i < glob->num_zones; ++i) {
271*592ffb21SWarner Losh 		zone = glob->zones[i];
272*592ffb21SWarner Losh 		printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
273*592ffb21SWarner Losh 			zone->name, (unsigned long long)zone->max_mem >> 10);
274*592ffb21SWarner Losh 	}
275*592ffb21SWarner Losh 	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
276*592ffb21SWarner Losh 	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
277*592ffb21SWarner Losh 	return 0;
278*592ffb21SWarner Losh out_no_zone:
279*592ffb21SWarner Losh 	ttm_mem_global_release(glob);
280*592ffb21SWarner Losh 	return ret;
281*592ffb21SWarner Losh }
282*592ffb21SWarner Losh 
ttm_mem_global_release(struct ttm_mem_global * glob)283*592ffb21SWarner Losh void ttm_mem_global_release(struct ttm_mem_global *glob)
284*592ffb21SWarner Losh {
285*592ffb21SWarner Losh 	unsigned int i;
286*592ffb21SWarner Losh 	struct ttm_mem_zone *zone;
287*592ffb21SWarner Losh 
288*592ffb21SWarner Losh 	/* let the page allocator first stop the shrink work. */
289*592ffb21SWarner Losh 	ttm_page_alloc_fini();
290*592ffb21SWarner Losh 	ttm_dma_page_alloc_fini();
291*592ffb21SWarner Losh 
292*592ffb21SWarner Losh 	taskqueue_drain(glob->swap_queue, &glob->work);
293*592ffb21SWarner Losh 	taskqueue_free(glob->swap_queue);
294*592ffb21SWarner Losh 	glob->swap_queue = NULL;
295*592ffb21SWarner Losh 	for (i = 0; i < glob->num_zones; ++i) {
296*592ffb21SWarner Losh 		zone = glob->zones[i];
297*592ffb21SWarner Losh 		if (refcount_release(&zone->kobj_ref))
298*592ffb21SWarner Losh 			ttm_mem_zone_kobj_release(zone);
299*592ffb21SWarner Losh 	}
300*592ffb21SWarner Losh 	if (refcount_release(&glob->kobj_ref))
301*592ffb21SWarner Losh 		ttm_mem_global_kobj_release(glob);
302*592ffb21SWarner Losh }
303*592ffb21SWarner Losh 
ttm_check_swapping(struct ttm_mem_global * glob)304*592ffb21SWarner Losh static void ttm_check_swapping(struct ttm_mem_global *glob)
305*592ffb21SWarner Losh {
306*592ffb21SWarner Losh 	bool needs_swapping = false;
307*592ffb21SWarner Losh 	unsigned int i;
308*592ffb21SWarner Losh 	struct ttm_mem_zone *zone;
309*592ffb21SWarner Losh 
310*592ffb21SWarner Losh 	mtx_lock(&glob->lock);
311*592ffb21SWarner Losh 	for (i = 0; i < glob->num_zones; ++i) {
312*592ffb21SWarner Losh 		zone = glob->zones[i];
313*592ffb21SWarner Losh 		if (zone->used_mem > zone->swap_limit) {
314*592ffb21SWarner Losh 			needs_swapping = true;
315*592ffb21SWarner Losh 			break;
316*592ffb21SWarner Losh 		}
317*592ffb21SWarner Losh 	}
318*592ffb21SWarner Losh 
319*592ffb21SWarner Losh 	mtx_unlock(&glob->lock);
320*592ffb21SWarner Losh 
321*592ffb21SWarner Losh 	if (unlikely(needs_swapping))
322*592ffb21SWarner Losh 		taskqueue_enqueue(glob->swap_queue, &glob->work);
323*592ffb21SWarner Losh 
324*592ffb21SWarner Losh }
325*592ffb21SWarner Losh 
/*
 * Subtract "amount" bytes of accounted usage.  With single_zone NULL
 * the amount is credited back to every zone; otherwise only to the
 * named zone.
 */
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	struct ttm_mem_zone *zone;
	unsigned int idx;

	mtx_lock(&glob->lock);
	for (idx = 0; idx < glob->num_zones; ++idx) {
		zone = glob->zones[idx];
		if (single_zone == NULL || zone == single_zone)
			zone->used_mem -= amount;
	}
	mtx_unlock(&glob->lock);
}
342*592ffb21SWarner Losh 
/*
 * Release "amount" bytes of accounted memory from all zones.
 *
 * Fix: the original body was "return ttm_mem_global_free_zone(...);".
 * A return statement with an expression is a constraint violation in
 * a function returning void (C11 6.8.6.4p1); call the helper plainly.
 */
void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
348*592ffb21SWarner Losh 
/*
 * Try to account "amount" bytes against the zones (all of them, or
 * only single_zone when non-NULL).  Two passes under the lock: first
 * refuse with -ENOMEM if any relevant zone is already over its limit,
 * then, when "reserve" is set, commit the amount to each zone.
 * Always re-checks the swap trigger before returning.
 */
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	struct ttm_mem_zone *zone;
	uint64_t threshold;
	unsigned int idx;
	int ret;

	ret = -ENOMEM;
	mtx_lock(&glob->lock);

	/* Pass 1: fail if any relevant zone is over its limit. */
	for (idx = 0; idx < glob->num_zones; ++idx) {
		zone = glob->zones[idx];
		if (single_zone != NULL && zone != single_zone)
			continue;

		/* Privileged threads may dip into the emergency pool. */
		if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
			threshold = zone->emer_mem;
		else
			threshold = zone->max_mem;

		if (zone->used_mem > threshold)
			goto out_unlock;
	}

	/* Pass 2: commit the reservation. */
	if (reserve) {
		for (idx = 0; idx < glob->num_zones; ++idx) {
			zone = glob->zones[idx];
			if (single_zone != NULL && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	mtx_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}
387*592ffb21SWarner Losh 
388*592ffb21SWarner Losh 
/*
 * Reserve "memory" bytes, shrinking and retrying up to
 * TTM_MEMORY_ALLOC_RETRIES times when over limit.  Returns 0 on
 * success or -ENOMEM when no_wait is set or the retries run out.
 * NOTE(review): "interruptible" is currently unused here.
 */
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int retries;

	retries = TTM_MEMORY_ALLOC_RETRIES;
	for (;;) {
		if (ttm_mem_global_reserve(glob, single_zone, memory,
		    true) == 0)
			return 0;
		if (no_wait)
			return -ENOMEM;
		if (unlikely(retries-- == 0))
			return -ENOMEM;
		/* Ask for the amount plus 25% slack (and a little more). */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}
}
409*592ffb21SWarner Losh 
/*
 * Account "memory" bytes against every zone.  May shrink and retry
 * when over limit; returns 0 on success or -ENOMEM on failure.
 * NOTE(review): "interruptible" is forwarded but unused by the
 * retry loop in ttm_mem_global_alloc_zone.
 */
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
421*592ffb21SWarner Losh 
422*592ffb21SWarner Losh #define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))
423*592ffb21SWarner Losh 
/*
 * Account one page.  When a dma32 zone exists and the page lies above
 * pfn 0x100000 (the 4GiB boundary with 4KiB pages), charge only the
 * kernel zone; otherwise charge all zones.
 */
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct vm_page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone;

	zone = NULL;
	if (glob->zone_dma32 != NULL && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}
441*592ffb21SWarner Losh 
ttm_mem_global_free_page(struct ttm_mem_global * glob,struct vm_page * page)442*592ffb21SWarner Losh void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
443*592ffb21SWarner Losh {
444*592ffb21SWarner Losh 	struct ttm_mem_zone *zone = NULL;
445*592ffb21SWarner Losh 
446*592ffb21SWarner Losh 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
447*592ffb21SWarner Losh 		zone = glob->zone_kernel;
448*592ffb21SWarner Losh 	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
449*592ffb21SWarner Losh }
450*592ffb21SWarner Losh 
451*592ffb21SWarner Losh 
/*
 * Round an allocation size for accounting: powers of two (and zero)
 * are returned unchanged, sizes above PAGE_SIZE are page-aligned, and
 * everything else is rounded up to the next power of two, minimum 4.
 *
 * Fix: dropped the unreachable trailing "return 0;" -- every branch
 * of the if/else chain already returns.
 */
size_t ttm_round_pot(size_t size)
{
	size_t pot;

	if ((size & (size - 1)) == 0)
		return size;
	if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);

	for (pot = 4; pot < size; pot <<= 1)
		continue;
	return pot;
}
468