/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

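/* Lazily allocate the per-client ABI16 state on first use and construct an
 * nvif device object against the client's default device (the one belonging
 * to the fd that was opened).  Returns NULL if either step fails.
 */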
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_ctor(&cli->base.object, "abi16Device",
					     0, NV_DEVICE, &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}
	}
	return cli->abi16;
}

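/* Take the client mutex and return (allocating on demand) the client's ABI16
 * state; returns NULL with the mutex dropped again if allocation failed.  The
 * matching nouveau_abi16_put() releases the mutex and passes through the
 * given return code.
 */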
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	mutex_unlock(&cli->mutex);
	return ret;
}

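/* Map the device family to the software (nvsw) class used for ABI16 channel
 * compatibility; returns 0x0000 for families without one.
 */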
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

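/* Tear down a notifier or engine object created through ABI16: destroy the
 * nvif object, return its notifier-heap node (if any) and unlink it from the
 * channel.
 */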
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

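/* Tear down an ABI16 channel: cancel any jobs queued on its scheduler entity,
 * idle the hardware channel, destroy notifier/engine objects and the notifier
 * buffer, then delete the channel object itself.
 */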
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* Cancel all jobs from the entity's queue. */
	if (chan->sched)
		drm_sched_entity_fini(&chan->sched->entity);

	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	if (chan->sched)
		nouveau_sched_destroy(&chan->sched);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

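/* Final ABI16 teardown for a client: destroy all remaining channels and the
 * nvif device object, then free the state allocated by nouveau_abi16().
 */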
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_dtor(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

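/* Report the IB ring limit used for EXEC push buffers: zero when the device
 * supports one of the pre-NV50 DMA channel classes (no IB ring),
 * NV50_DMA_IB_MAX otherwise.
 */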
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

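/* DRM_NOUVEAU_GETPARAM: report device/driver properties (chipset, bus type,
 * memory sizes, timer, push limits, ...) to userspace.
 */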
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
		getparam->value = 1;
		break;
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

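/* DRM_NOUVEAU_CHANNEL_ALLOC: create a GPU channel for the client, including
 * its pushbuf/fence state, the notifier buffer object and (when the VM_BIND
 * uAPI is in use) a scheduler instance.
 */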
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm wasn't initialized until now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	device = &abi16->device;
	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case NOUVEAU_FIFO_ENGINE_GR:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
				break;
			case NOUVEAU_FIFO_ENGINE_VP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
				break;
			case NOUVEAU_FIFO_ENGINE_PPP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
				break;
			case NOUVEAU_FIFO_ENGINE_BSP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
				break;
			case NOUVEAU_FIFO_ENGINE_CE:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
				break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	/* If we're not using the VM_BIND uAPI, we don't need a scheduler.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	if (nouveau_cli_uvmm(cli)) {
		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
					   chan->chan->dma.ib_max);
		if (ret)
			goto done;
	}

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Work around the "nvc0" gallium driver using classes it doesn't
	 * allocate on Kepler and above.  NVKM no longer always sets
	 * CE_CTX_VALID as part of channel init, now that we know what that
	 * stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, as the CE context is stored in
	 * NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

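/* Look up an ABI16 channel by its channel id; returns NULL if the client does
 * not own a channel with that id.
 */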
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

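/* Validate and translate an NVIF ioctl aimed at ABI16 state: only NEW, MTHD
 * and SCLASS requests are allowed, and the request is pointed either at the
 * ABI16 device object or, when a channel token is supplied, at the matching
 * channel object.
 */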
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16 *abi16;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		switch (args->v0.type) {
		case NVIF_IOCTL_V0_NEW:
		case NVIF_IOCTL_V0_MTHD:
		case NVIF_IOCTL_V0_SCLASS:
			break;
		default:
			return -EACCES;
		}
	} else
		return ret;

	if (!(abi16 = nouveau_abi16(file_priv)))
		return -ENOMEM;

	if (args->v0.token != ~0ULL) {
		if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
			return -EINVAL;
		args->v0.object = nvif_handle(&chan->chan->user);
		args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
		return 0;
	}

	args->v0.object = nvif_handle(&abi16->device.object);
	args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
	return 0;
}

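/* DRM_NOUVEAU_CHANNEL_FREE: destroy a previously allocated channel. */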
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

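/* DRM_NOUVEAU_GROBJ_ALLOC: allocate an engine object on a channel.  Legacy
 * class identifiers (software, msvld, mspdec, msppp) are translated to the
 * classes the device actually exposes before the object is created.
 */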
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

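/* DRM_NOUVEAU_NOTIFIEROBJ_ALLOC: carve a notifier out of the channel's
 * notifier buffer and wrap it in a DMA object for pre-Fermi userspace.
 */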
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device;
	struct nvif_client *client;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	device = &abi16->device;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

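/* DRM_NOUVEAU_GPUOBJ_FREE: idle the channel and destroy one of the objects
 * previously created through ABI16 on it.
 */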
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}