/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

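/* Lazily allocate the per-client ABI16 state.  Called with cli->mutex held
 * by nouveau_abi16_get().
 */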
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			abi16->cli = cli;
			INIT_LIST_HEAD(&abi16->channels);
			INIT_LIST_HEAD(&abi16->objects);
		}
	}
	return cli->abi16;
}

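/* Return the client's ABI16 state with cli->mutex held on success; the
 * caller must drop the lock via nouveau_abi16_put().  Returns NULL, with
 * the lock released, if the state could not be allocated.
 */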
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = abi16->cli;
	mutex_unlock(&cli->mutex);
	return ret;
}

/* Tracks objects created via the DRM_NOUVEAU_NVIF ioctl.
 *
 * The only two types of object that userspace ever allocated via this
 * interface are 'device', in order to retrieve basic device info, and
 * 'engine objects', which instantiate HW classes on a channel.
 *
 * The remainder of what used to be available via DRM_NOUVEAU_NVIF has
 * been removed, but these object types need to be tracked to maintain
 * compatibility with userspace.
 */
struct nouveau_abi16_obj {
	enum nouveau_abi16_obj_type {
		DEVICE,
		ENGOBJ,
	} type;
	u64 object;

	struct nvif_object engobj;

	struct list_head head; /* protected by nouveau_abi16.cli.mutex */
};

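/* Look up a tracked object by the NVIF object token userspace passed in.
 * The caller holds cli->mutex.
 */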
static struct nouveau_abi16_obj *
nouveau_abi16_obj_find(struct nouveau_abi16 *abi16, u64 object)
{
	struct nouveau_abi16_obj *obj;

	list_for_each_entry(obj, &abi16->objects, head) {
		if (obj->object == object)
			return obj;
	}

	return NULL;
}

static void
nouveau_abi16_obj_del(struct nouveau_abi16_obj *obj)
{
	list_del(&obj->head);
	kfree(obj);
}

static struct nouveau_abi16_obj *
nouveau_abi16_obj_new(struct nouveau_abi16 *abi16, enum nouveau_abi16_obj_type type, u64 object)
{
	struct nouveau_abi16_obj *obj;

	obj = nouveau_abi16_obj_find(abi16, object);
	if (obj)
		return ERR_PTR(-EEXIST);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->type = type;
	obj->object = object;
	list_add_tail(&obj->head, &abi16->objects);
	return obj;
}

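/* Map the GPU family to its NVIF software-object class; returns 0x0000 for
 * unrecognised families.
 */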
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

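/* Tear down a notifier/engine object created through ABI16 and return its
 * allocation to the channel's notifier heap.
 */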
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* Cancel all jobs from the entity's queue. */
	if (chan->sched)
		drm_sched_entity_fini(&chan->sched->entity);

	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	if (chan->sched)
		nouveau_sched_destroy(&chan->sched);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = abi16->cli;
	struct nouveau_abi16_chan *chan, *temp;
	struct nouveau_abi16_obj *obj, *tmp;

	/* cleanup objects */
	list_for_each_entry_safe(obj, tmp, &abi16->objects, head) {
		nouveau_abi16_obj_del(obj);
	}

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

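/* Largest IB entry count reported to userspace: NV50_DMA_IB_MAX on channels
 * with an indirect buffer, 0 for the older DMA-only channel classes.
 */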
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(drm);
	struct nvkm_gr *gr = nvxx_gr(drm);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
		getparam->value = 1;
		break;
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

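/* DRM_NOUVEAU_CHANNEL_ALLOC: create a GPU channel for the client, along
 * with its notifier buffer object and, when the VM_BIND uAPI is in use,
 * a scheduler entity.
 */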
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device = &cli->device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm hasn't been initialized by now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case NOUVEAU_FIFO_ENGINE_GR:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
				break;
			case NOUVEAU_FIFO_ENGINE_VP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
				break;
			case NOUVEAU_FIFO_ENGINE_PPP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
				break;
			case NOUVEAU_FIFO_ENGINE_BSP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
				break;
			case NOUVEAU_FIFO_ENGINE_CE:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
				break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	/* If we're not using the VM_BIND uAPI, we don't need a scheduler.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	if (nouveau_cli_uvmm(cli)) {
		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
					   chan->chan->dma.ib_max);
		if (ret)
			goto done;
	}

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Work around the "nvc0" gallium driver using classes it doesn't
	 * allocate on Kepler and above.  NVKM no longer always sets
	 * CE_CTX_VALID as part of channel init, now that we know what that
	 * stuff actually is.
	 *
	 * This doesn't matter for Kepler/Pascal, where the CE context is
	 * stored in NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

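/* Look up an ABI16 channel by its channel id.  The caller holds cli->mutex. */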
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

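/* DRM_NOUVEAU_GROBJ_ALLOC: instantiate an engine object on a channel.  The
 * class requested by userspace may be translated for compatibility with
 * older identifiers before the object is created.
 */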
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

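/* DRM_NOUVEAU_NOTIFIEROBJ_ALLOC: carve a slice out of the channel's
 * notifier buffer and wrap it in a DMA object the channel can reference.
 * Rejected on Fermi and newer, where it's unnecessary.
 */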
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;
	device = &abi16->cli->device;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}

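/* Handle NVIF_IOCTL_V0_MTHD.  The only method still supported is
 * NV_DEVICE_V0_INFO on the 'device' object, which is answered from the
 * client's cached device info.
 */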
static int
nouveau_abi16_ioctl_mthd(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nouveau_cli *cli = abi16->cli;
	struct nvif_ioctl_mthd_v0 *args;
	struct nouveau_abi16_obj *obj;
	struct nv_device_info_v0 *info;

	if (ioctl->route || argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	obj = nouveau_abi16_obj_find(abi16, ioctl->object);
	if (!obj || obj->type != DEVICE)
		return -EINVAL;

	if (args->method != NV_DEVICE_V0_INFO ||
	    argc != sizeof(*info))
		return -EINVAL;

	info = (void *)args->data;
	if (info->version != 0x00)
		return -EINVAL;

	info = &cli->device.info;
	memcpy(args->data, info, sizeof(*info));
	return 0;
}

static int
nouveau_abi16_ioctl_del(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nouveau_abi16_obj *obj;

	if (ioctl->route || argc)
		return -EINVAL;

	obj = nouveau_abi16_obj_find(abi16, ioctl->object);
	if (obj) {
		if (obj->type == ENGOBJ)
			nvif_object_dtor(&obj->engobj);
		nouveau_abi16_obj_del(obj);
	}

	return 0;
}

static int
nouveau_abi16_ioctl_new(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nvif_ioctl_new_v0 *args;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_obj *obj;
	int ret;

	if (argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	if (args->version != 0)
		return -EINVAL;

	if (!ioctl->route) {
		if (ioctl->object || args->oclass != NV_DEVICE)
			return -EINVAL;

		obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		return 0;
	}

	chan = nouveau_abi16_chan(abi16, ioctl->token);
	if (!chan)
		return -EINVAL;

	obj = nouveau_abi16_obj_new(abi16, ENGOBJ, args->object);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
			       NULL, 0, &obj->engobj);
	if (ret)
		nouveau_abi16_obj_del(obj);

	return ret;
}

static int
nouveau_abi16_ioctl_sclass(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nvif_ioctl_sclass_v0 *args;
	struct nouveau_abi16_chan *chan;
	struct nvif_sclass *sclass;
	int ret;

	if (!ioctl->route || argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	if (argc != args->count * sizeof(args->oclass[0]))
		return -EINVAL;

	chan = nouveau_abi16_chan(abi16, ioctl->token);
	if (!chan)
		return -EINVAL;

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return ret;

	for (int i = 0; i < min_t(u8, args->count, ret); i++) {
		args->oclass[i].oclass = sclass[i].oclass;
		args->oclass[i].minver = sclass[i].minver;
		args->oclass[i].maxver = sclass[i].maxver;
	}
	args->count = ret;

	nvif_object_sclass_put(&sclass);
	return 0;
}

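/* Entry point for the reduced DRM_NOUVEAU_NVIF ioctl: copy in the request,
 * validate the header, dispatch to one of the handlers above and copy the
 * result back to userspace on success.
 */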
int
nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
{
	struct nvif_ioctl_v0 *ioctl;
	struct nouveau_abi16 *abi16;
	u32 argc = size;
	int ret;

	if (argc < sizeof(*ioctl))
		return -EINVAL;
	argc -= sizeof(*ioctl);

	ioctl = kmalloc(size, GFP_KERNEL);
	if (!ioctl)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(ioctl, user, size))
		goto done_free;

	if (ioctl->version != 0x00 ||
	    (ioctl->route && ioctl->route != 0xff)) {
		ret = -EINVAL;
		goto done_free;
	}

	abi16 = nouveau_abi16_get(filp);
	if (unlikely(!abi16)) {
		ret = -ENOMEM;
		goto done_free;
	}

	switch (ioctl->type) {
	case NVIF_IOCTL_V0_SCLASS: ret = nouveau_abi16_ioctl_sclass(abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_NEW   : ret = nouveau_abi16_ioctl_new   (abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_DEL   : ret = nouveau_abi16_ioctl_del   (abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_MTHD  : ret = nouveau_abi16_ioctl_mthd  (abi16, ioctl, argc); break;
	default:
		ret = -EINVAL;
		break;
	}

	nouveau_abi16_put(abi16, 0);

	if (ret == 0) {
		if (copy_to_user(user, ioctl, size))
			ret = -EFAULT;
	}

done_free:
	kfree(ioctl);
	return ret;
}