xref: /linux/drivers/gpu/drm/nouveau/nouveau_gem.c (revision 52990390f91c1c39ca742fc8f390b29891d95127)
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

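/*
 * Page-fault handler for mmap()ed GEM objects.  The BO is reserved
 * first so that nouveau_ttm_fault_reserve_notify() can migrate it to a
 * CPU-mappable placement, and it is taken off the IO-reserve LRU while
 * the shared TTM helper fills in the PTEs, so its BAR window cannot be
 * reclaimed mid-fault.
 */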
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

error_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

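/*
 * Final unref for a GEM object: tears down any PRIME import state and
 * drops the underlying TTM reference.  The device is woken first
 * because freeing the backing store may touch the hardware (e.g. to
 * unbind GART mappings).
 */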
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

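/*
 * Called for each DRM client that obtains a handle to this object.
 * On NV50 and newer (per-client virtual address spaces) a VMA in the
 * client's VMM is created, or its refcount bumped if one already
 * exists; pre-NV50 chips share a global address space, so there is
 * nothing to do.
 */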
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

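/*
 * Unmap a VMA once it is idle.  If the GPU may still be using the
 * mapping (vma->fence is set), deletion is deferred to client-work
 * context until after the fence signals; if the work item cannot be
 * allocated, fall back to a bounded synchronous wait.
 */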
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

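/*
 * Counterpart to nouveau_gem_object_open(): drops the client's VMA
 * reference and unmaps the VMA when the last reference goes away.
 */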
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
	.free = nouveau_gem_object_del,
	.open = nouveau_gem_object_open,
	.close = nouveau_gem_object_close,
	.pin = nouveau_gem_prime_pin,
	.unpin = nouveau_gem_prime_unpin,
	.get_sg_table = nouveau_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.vm_ops = &nouveau_ttm_vm_ops,
};

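/*
 * Allocate a new BO with an embedded GEM object.  A buffer that
 * requests neither VRAM nor GART is given CPU-only placement, and the
 * caller receives a single GEM reference rather than a raw TTM one.
 */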
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret;

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;

	/* We restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  Not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	*pnvbo = nvbo;
	return 0;
}

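/*
 * Fill in the userspace view of a buffer: current domain, GPU/VMA
 * offset, mmap handle, size and tiling state.  The tile_flags encoding
 * of kind/comp/zeta depends on the chipset generation.
 */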
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.base.size;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

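/*
 * DRM_NOUVEAU_GEM_NEW: allocate a buffer and return a handle plus its
 * info block.  The allocation reference is dropped at the end; the
 * handle created for the client keeps the object alive.
 */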
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

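/*
 * Work out the placement for a pushbuf validation: intersect what the
 * client asked for with the domains the buffer allows, preferring to
 * leave the buffer where it already resides to avoid migration.
 */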
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->resource->mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->resource->mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

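/*
 * Undo a validation pass: fence each buffer (attaching the new fence
 * to its VMA on nv50+ so object_close can wait for it), drop any kmap
 * left over from reloc patching, then unreserve and unreference.
 */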
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

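/*
 * Look up and reserve every buffer on the pushbuf's validation list
 * under one ww_acquire_ctx.  On -EDEADLK the whole list is backed
 * off, the contended buffer is reserved via the slowpath, and the
 * loop restarts: the standard wound/wait pattern for locking many
 * buffers without deadlocking against other submitters.
 */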
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

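/*
 * Second phase of validation: place each reserved buffer in an
 * acceptable domain, wait for conflicting fences on the target
 * channel, and on pre-NV50 chips record new presumed offsets so
 * userspace relocations can be patched.  Returns the number of
 * buffers whose presumed state changed, or a negative error.
 */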
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

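/*
 * Reserve and validate the full buffer list for a pushbuf submission.
 * *apply_relocs is set when validate_list() reported stale presumed
 * offsets, i.e. when userspace relocations must actually be applied.
 */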
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

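/*
 * Copy a userspace array into a kernel allocation.  All callers have
 * already clamped nmemb against the NOUVEAU_GEM_MAX_* limits, so the
 * size multiplication cannot overflow; the result must be released
 * with u_free().
 */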
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

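/*
 * Patch relocations into buffers whose presumed offsets turned out to
 * be stale.  Each target buffer is kmapped on demand, waited on until
 * idle, and then has the recomputed low/high address bits (optionally
 * OR'd with domain-specific values) written in place.
 */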
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;
		long lret;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.base.size)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
					     DMA_RESV_USAGE_BOOKKEEP,
					     false, 15 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;

		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
				  ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}

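/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command-submission ioctl.  After
 * sanity-checking the push/buffer/reloc counts, the buffer list is
 * validated, relocations are applied if needed, and the pushes are
 * submitted in whichever way the chipset supports (IB ring on nv50+,
 * CALL on >=nv25, JUMP otherwise) before a fence is emitted.  Note
 * the ABI quirk: the NOUVEAU_GEM_PUSHBUF_SYNC flag is carried in via
 * req->vram_available, which is then reused to report the current
 * VRAM figure back to userspace.
 */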
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  PFN_UP(nvbo->bo.base.size),
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

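/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for pending
 * GPU access to finish, then sync the buffer for CPU access.
 */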
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
				     dma_resv_usage_rw(write), true,
				     no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

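/*
 * DRM_NOUVEAU_GEM_CPU_FINI: hand the buffer back to the device after
 * CPU access, flushing CPU caches as needed.
 */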
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

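/*
 * DRM_NOUVEAU_GEM_INFO: report a buffer's current placement, offsets
 * and tiling state to userspace.
 */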
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}