/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dma-buf.h>
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#define nouveau_gem_pushbuf_sync(chan) 0

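/*
 * GEM object constructor hook.  All per-object state is set up by
 * nouveau_gem_new(), so there is nothing left to do here.
 */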
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;

	bo = &nvbo->bo;
	nvbo->gem = NULL;

	/* drop any leftover pin so the buffer can actually be destroyed */
	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

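/*
 * Called when a client gains a handle to the object: on chips with
 * per-client virtual memory, look up (or create) a mapping of the
 * buffer in that client's VM and take a reference on it.
 */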
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

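/*
 * Called when a client's handle goes away: drop its reference on the
 * VM mapping and tear the mapping down once the last reference is gone.
 */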
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

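/*
 * Allocate a new buffer object and wrap it in a GEM object.  The
 * requested GEM domains are translated into TTM placement flags here.
 */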
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

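/*
 * Fill in the info reply for a GEM object: domain, size, mmap handle,
 * tiling state, and the offset a client should use (its per-VM virtual
 * address when the client has a VM, the raw BO offset otherwise).
 */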
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

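/*
 * Work out the TTM placement for a buffer from the domains a pushbuf
 * entry allows, preferring whichever valid domain the buffer already
 * resides in so that validation avoids needless migration.
 */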
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

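/*
 * Undo a (partial) validation pass: fence each buffer if a fence is
 * supplied, drop any temporary kmap, unreserve the BO and release the
 * GEM reference taken by validate_init().
 */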
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

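/*
 * Reserve every buffer referenced by the pushbuf and sort each one onto
 * the VRAM, GART or "either" list according to its valid domains.  If a
 * reservation would deadlock (-EAGAIN), everything reserved so far is
 * backed off and the whole pass is retried with the same sequence number.
 */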
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

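/*
 * Validate every buffer on one list: sync against its current fence,
 * apply the requested placement, have TTM (re)validate it, and on
 * pre-NV50 chips copy any changed "presumed" offset/domain back to
 * userspace and count it so relocations get applied.
 */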
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

/*
 * Copy a userspace array into a kernel allocation.  Callers bound nmemb
 * with the NOUVEAU_GEM_MAX_* limits before calling, which keeps the
 * nmemb * size multiplication from overflowing.
 */
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

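/*
 * Patch relocations into the pushbuf buffers.  Each reloc rewrites one
 * 32-bit word of a buffer with the low or high half of another buffer's
 * presumed GPU offset (optionally OR'd with domain-dependent bits),
 * waiting for the target buffer to go idle before writing through a
 * kernel mapping.
 */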
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

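/*
 * Main command submission ioctl.  The flow is: copy in the push and
 * buffer arrays, reserve and validate every buffer, apply relocations
 * if any are needed, emit the push buffers on the channel (the method
 * depends on the chipset), then fence the submission and release the
 * validation state.
 */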
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

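/* Translate GEM domain flags into the equivalent TTM placement flags. */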
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}