/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

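/* Per-client ABI16 state, allocated lazily on first use.  The embedded
 * device object targets the client's default device, i.e. the one belonging
 * to the DRM fd that was opened.
 */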
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_ctor(&cli->base.object, "abi16Device",
					     0, NV_DEVICE, &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}
	}
	return cli->abi16;
}

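/* nouveau_abi16_get() returns the client's ABI16 state with cli->mutex held;
 * nouveau_abi16_put() drops the mutex and passes its return code through so
 * ioctl handlers can end with "return nouveau_abi16_put(abi16, ret);".
 */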
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	mutex_unlock(&cli->mutex);
	return ret;
}

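/* Map the device's chipset family to the software object class implementing
 * ABI16 semantics for it; returns 0x0000 for families without one.
 */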
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

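/* Destroy a notifier/engine object created through ABI16: drop the nvif
 * object, return its suballocation (if any) to the channel's notifier heap,
 * then unlink and free the tracking structure.
 */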
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

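/* Tear down an ABI16 channel: stop the scheduler entity so no new jobs are
 * queued, wait for the channel to go idle, destroy the scheduler, release
 * all notifier/engine objects and the notifier BO, and finally destroy the
 * channel object itself.
 */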
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* Cancel all jobs from the entity's queue. */
	if (chan->sched)
		drm_sched_entity_fini(&chan->sched->entity);

	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	if (chan->sched)
		nouveau_sched_destroy(&chan->sched);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

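/* Final teardown of a client's ABI16 state: destroy every remaining channel,
 * drop the device object and free the structure.
 */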
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_dtor(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

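/* If the device exposes none of the legacy DMA-mode channel classes it uses
 * IB-mode channels, so report NV50_DMA_IB_MAX; otherwise report 0.
 */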
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

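/* DRM_IOCTL_NOUVEAU_GETPARAM handler.  Userspace fills in
 * drm_nouveau_getparam::param and reads the result back from ::value.
 * Roughly (illustrative libdrm sketch only, error handling omitted):
 *
 *	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp));
 *
 * Unknown parameters fail with -EINVAL.
 */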
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

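/* DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC handler.  Creates a FIFO channel on the
 * requested engine's runlist, a scheduler entity when the client uses the
 * VM_BIND uAPI, plus a one-page GART notifier BO with a heap for
 * suballocating notifier objects.  The channel id, valid pushbuf domains and
 * notifier handle are returned to userspace.
 */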
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm hasn't been initialized yet, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	device = &abi16->device;
	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case NOUVEAU_FIFO_ENGINE_GR:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
				break;
			case NOUVEAU_FIFO_ENGINE_VP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
				break;
			case NOUVEAU_FIFO_ENGINE_PPP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
				break;
			case NOUVEAU_FIFO_ENGINE_BSP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
				break;
			case NOUVEAU_FIFO_ENGINE_CE:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
				break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

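	/* Determine the set of runlists capable of executing the requested
	 * engine class; copy engines have their own lookup helper.
	 */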
	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	/* If we're not using the VM_BIND uAPI, we don't need a scheduler.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	if (nouveau_cli_uvmm(cli)) {
		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
					   chan->chan->dma.ib_max);
		if (ret)
			goto done;
	}

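	/* Fill in the reply: channel id, the memory domains the pushbuf may
	 * be placed in and, on pre-Celsius chips, the software object
	 * subchannel.
	 */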
	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Work around the "nvc0" gallium driver using classes it doesn't
	 * allocate on Kepler and above.  NVKM no longer always sets
	 * CE_CTX_VALID as part of channel init, now that we know what that
	 * stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, as the CE context is stored in
	 * NV_RAMIN there.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Notifier memory area: a page of GART memory suballocated via chan->heap. */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

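/* Route nvif ioctls coming in via the ABI16 owner: only object allocation,
 * method calls and sclass queries are permitted, and the token is resolved
 * to the matching channel object, or to the ABI16 device object when the
 * token is ~0ULL.
 */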
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16 *abi16;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		switch (args->v0.type) {
		case NVIF_IOCTL_V0_NEW:
		case NVIF_IOCTL_V0_MTHD:
		case NVIF_IOCTL_V0_SCLASS:
			break;
		default:
			return -EACCES;
		}
	} else
		return ret;

	if (!(abi16 = nouveau_abi16(file_priv)))
		return -ENOMEM;

	if (args->v0.token != ~0ULL) {
		if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
			return -EINVAL;
		args->v0.object = nvif_handle(&chan->chan->user);
		args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
		return 0;
	}

	args->v0.object = nvif_handle(&abi16->device.object);
	args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
	return 0;
}

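/* DRM_IOCTL_NOUVEAU_CHANNEL_FREE handler: look up the channel by id and tear
 * it down.
 */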
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

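/* DRM_IOCTL_NOUVEAU_GROBJ_ALLOC handler.  Allocates an engine object on a
 * channel; legacy class numbers used by old userspace (0x*6e software,
 * 0x*b1/0x*b2/0x*b3 video) are translated to whichever matching class the
 * channel actually exposes.
 */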
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

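/* DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC handler, pre-Fermi only: suballocate a
 * region of the channel's notifier BO and wrap it in an NV_DMA_IN_MEMORY
 * object; the returned offset is relative to the notifier BO.
 */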
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device;
	struct nvif_client *client;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* Only dereference abi16 after the NULL check above. */
	device = &abi16->device;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

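/* DRM_IOCTL_NOUVEAU_GPUOBJ_FREE handler: idle the channel, then destroy the
 * object with the given handle if it was allocated through ABI16.
 */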
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}