1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

/* Lazily allocate the per-client ABI16 state.  Called with cli->mutex
 * already held (see nouveau_abi16_get()).
 */
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc_obj(*abi16);
		if (cli->abi16) {
			abi16->cli = cli;
			INIT_LIST_HEAD(&abi16->channels);
			INIT_LIST_HEAD(&abi16->objects);
		}
	}
	return cli->abi16;
}

/* Return the client's ABI16 state with cli->mutex held, or NULL (with
 * the mutex released) if allocation failed.
 */
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

/* Drop the lock taken by nouveau_abi16_get().  Passes 'ret' straight
 * through so callers can write "return nouveau_abi16_put(abi16, ret);".
 */
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = abi16->cli;
	mutex_unlock(&cli->mutex);
	return ret;
}

/* Tracks objects created via the DRM_NOUVEAU_NVIF ioctl.
 *
 * The only two types of object that userspace ever allocated via this
 * interface are 'device', in order to retrieve basic device info, and
 * 'engine objects', which instantiate HW classes on a channel.
 *
 * The remainder of what used to be available via DRM_NOUVEAU_NVIF has
 * been removed, but these object types need to be tracked to maintain
 * compatibility with userspace.
 */
struct nouveau_abi16_obj {
	enum nouveau_abi16_obj_type {
		DEVICE,
		ENGOBJ,
	} type;
	u64 object;		/* userspace-supplied object token */

	struct nvif_object engobj;	/* only valid when type == ENGOBJ */

	struct list_head head; /* protected by nouveau_abi16.cli.mutex */
};

/* Look up a tracked object by its userspace token; NULL if not found. */
static struct nouveau_abi16_obj *
nouveau_abi16_obj_find(struct nouveau_abi16 *abi16, u64 object)
{
	struct nouveau_abi16_obj *obj;

	list_for_each_entry(obj, &abi16->objects, head) {
		if (obj->object == object)
			return obj;
	}

	return NULL;
}

/* Unlink and free a tracked object. */
static void
nouveau_abi16_obj_del(struct nouveau_abi16_obj *obj)
{
	list_del(&obj->head);
	kfree(obj);
}

/* Begin tracking a new object; -EEXIST if the token is already in use. */
static struct nouveau_abi16_obj *
nouveau_abi16_obj_new(struct nouveau_abi16 *abi16, enum nouveau_abi16_obj_type type, u64 object)
{
	struct nouveau_abi16_obj *obj;

	obj = nouveau_abi16_obj_find(abi16, object);
	if (obj)
		return ERR_PTR(-EEXIST);

	obj = kzalloc_obj(*obj);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->type = type;
	obj->object = object;
	list_add_tail(&obj->head, &abi16->objects);
	return obj;
}

/* Select the software class matching the device's chipset family. */
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

/* Destroy an engine/notifier object and release its notifier heap node. */
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);

	list_del(&ntfy->head);
	kfree(ntfy);
}

/* Tear down an abi16 channel and everything created on it. */
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* Cancel all jobs from the entity's queue. */
	if (chan->sched)
		drm_sched_entity_fini(&chan->sched->entity);

	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	if (chan->sched)
		nouveau_sched_destroy(&chan->sched);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

/* Release all ABI16 state for a client: tracked objects, channels, and
 * the abi16 struct itself.  Caller is expected to hold cli->mutex
 * (objects/channels lists are protected by it — see struct above).
 */
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = abi16->cli;
	struct nouveau_abi16_chan *chan, *temp;
	struct nouveau_abi16_obj *obj, *tmp;

	/* cleanup objects */
	list_for_each_entry_safe(obj, tmp, &abi16->objects, head) {
		nouveau_abi16_obj_del(obj);
	}

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

/* Report the push-buffer IB limit: if the device matches none of the
 * pre-NV50 DMA channel classes it uses IB-style channels and the limit
 * is NV50_DMA_IB_MAX, otherwise there is no IB (0).
 */
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ?
	       NV50_DMA_IB_MAX : 0;
}

/* DRM_NOUVEAU_GETPARAM: query assorted device/driver properties. */
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(drm);
	struct nvkm_gr *gr = nvxx_gr(drm);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
		getparam->value = 1;
		break;
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

/* DRM_NOUVEAU_GET_ZCULL_INFO: export the GR unit's ZCULL layout info,
 * or -ENOTTY if this device has none.
 */
int
nouveau_abi16_ioctl_get_zcull_info(ABI16_IOCTL_ARGS)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_gr *gr = nvxx_gr(drm);
	struct drm_nouveau_get_zcull_info *out = data;

	if (gr->has_zcull_info) {
		const struct nvkm_gr_zcull_info *i = &gr->zcull_info;

		out->width_align_pixels = i->width_align_pixels;
		out->height_align_pixels = i->height_align_pixels;
		out->pixel_squares_by_aliquots = i->pixel_squares_by_aliquots;
		out->aliquot_total = i->aliquot_total;
		out->zcull_region_byte_multiplier = i->zcull_region_byte_multiplier;
		out->zcull_region_header_size = i->zcull_region_header_size;
		out->zcull_subregion_header_size = i->zcull_subregion_header_size;
		out->subregion_count = i->subregion_count;
		out->subregion_width_align_pixels = i->subregion_width_align_pixels;
		out->subregion_height_align_pixels = i->subregion_height_align_pixels;
		out->ctxsw_size = i->ctxsw_size;
		out->ctxsw_align = i->ctxsw_align;

		return 0;
	} else {
		return -ENOTTY;
	}
}

/* DRM_NOUVEAU_CHANNEL_ALLOC: create a GPU channel plus the legacy
 * per-channel state userspace expects (notifier BO, subchannel info,
 * optional scheduler entity when the VM_BIND uAPI is in use).
 */
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device = &cli->device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm wasn't initialized until now disable it completely to prevent
	 * userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case NOUVEAU_FIFO_ENGINE_GR:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
				break;
			case NOUVEAU_FIFO_ENGINE_VP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
				break;
			case NOUVEAU_FIFO_ENGINE_PPP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
				break;
			case NOUVEAU_FIFO_ENGINE_BSP:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
				break;
			case NOUVEAU_FIFO_ENGINE_CE:
				engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
				break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc_obj(*chan);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	/* If we're not using the VM_BIND uAPI, we don't need a scheduler.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	if (nouveau_cli_uvmm(cli)) {
		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
					   chan->chan->chan.gpfifo.max);
		if (ret)
			goto done;
	}

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
	 * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
	 * channel init, now we know what that stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	/* sub-allocation heap for notifier objects within the ntfy BO */
	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

/* Find an abi16 channel by channel id; NULL if this client doesn't own it. */
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

/* DRM_NOUVEAU_CHANNEL_FREE: destroy a channel created by channel_alloc. */
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

/* DRM_NOUVEAU_GROBJ_ALLOC: instantiate a HW class on a channel, with
 * several compatibility fixups for class idents used by old userspace.
 */
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	/* on success, ret is the number of classes in the sclass array */
	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc_obj(*ntfy);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

/* DRM_NOUVEAU_NOTIFIEROBJ_ALLOC: sub-allocate notifier memory on a
 * channel and wrap it in a DMA object.  Pre-Fermi only.
 */
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;
	device = &abi16->cli->device;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc_obj(*ntfy);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	/* DMA object window covers just this sub-allocation */
	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

/* DRM_NOUVEAU_GPUOBJ_FREE: destroy an object created via grobj_alloc or
 * notifierobj_alloc; -ENOENT if no object on the channel has the handle.
 */
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}

/* NVIF_IOCTL_V0_MTHD: the only method still supported is NV_DEVICE_V0_INFO
 * on a tracked DEVICE object; the reply is written into args->data (which
 * nouveau_abi16_ioctl() copies back to userspace on success).
 */
static int
nouveau_abi16_ioctl_mthd(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nouveau_cli *cli = abi16->cli;
	struct nvif_ioctl_mthd_v0 *args;
	struct nouveau_abi16_obj *obj;
	struct nv_device_info_v0 *info;

	if (ioctl->route || argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	obj = nouveau_abi16_obj_find(abi16, ioctl->object);
	if (!obj || obj->type != DEVICE)
		return -EINVAL;

	if (args->method != NV_DEVICE_V0_INFO ||
	    argc != sizeof(*info))
		return -EINVAL;

	info = (void *)args->data;
	if (info->version != 0x00)
		return -EINVAL;

	info = &cli->device.info;
	memcpy(args->data, info, sizeof(*info));
	return 0;
}

/* NVIF_IOCTL_V0_DEL: stop tracking an object; also destroys the real
 * engine object for ENGOBJ entries.  Succeeds even if the object does
 * not exist.
 */
static int
nouveau_abi16_ioctl_del(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nouveau_abi16_obj *obj;

	if (ioctl->route || argc)
		return -EINVAL;

	obj = nouveau_abi16_obj_find(abi16, ioctl->object);
	if (obj) {
		if (obj->type == ENGOBJ)
			nvif_object_dtor(&obj->engobj);
		nouveau_abi16_obj_del(obj);
	}

	return 0;
}

/* NVIF_IOCTL_V0_NEW: route 0 creates (and merely tracks) the NV_DEVICE
 * object; any other route creates an engine object on the channel named
 * by ioctl->token.
 */
static int
nouveau_abi16_ioctl_new(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nvif_ioctl_new_v0 *args;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_obj *obj;
	int ret;

	if (argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	if (args->version != 0)
		return -EINVAL;

	if (!ioctl->route) {
		if (ioctl->object || args->oclass != NV_DEVICE)
			return -EINVAL;

		obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		return 0;
	}

	chan = nouveau_abi16_chan(abi16, ioctl->token);
	if (!chan)
		return -EINVAL;

	obj = nouveau_abi16_obj_new(abi16, ENGOBJ, args->object);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
			       NULL, 0, &obj->engobj);
	if (ret)
		nouveau_abi16_obj_del(obj);

	return ret;
}

/* NVIF_IOCTL_V0_SCLASS: list the classes available on a channel.  Copies
 * at most args->count entries, but always reports the full count back.
 */
static int
nouveau_abi16_ioctl_sclass(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
{
	struct nvif_ioctl_sclass_v0 *args;
	struct nouveau_abi16_chan *chan;
	struct nvif_sclass *sclass;
	int ret;

	if (!ioctl->route || argc < sizeof(*args))
		return -EINVAL;
	args = (void *)ioctl->data;
	argc -= sizeof(*args);

	if (argc != args->count * sizeof(args->oclass[0]))
		return -EINVAL;

	chan = nouveau_abi16_chan(abi16, ioctl->token);
	if (!chan)
		return -EINVAL;

	/* on success, ret is the number of classes in the sclass array */
	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return ret;

	for (int i = 0; i < min_t(u8, args->count, ret); i++) {
		args->oclass[i].oclass = sclass[i].oclass;
		args->oclass[i].minver = sclass[i].minver;
		args->oclass[i].maxver = sclass[i].maxver;
	}
	args->count = ret;

	nvif_object_sclass_put(&sclass);
	return 0;
}

/* DRM_NOUVEAU_NVIF entry point: copy the ioctl blob in from userspace,
 * validate the header (only version 0 and route 0/0xff are accepted),
 * dispatch by type, and copy the blob back on success.
 */
int
nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
{
	struct nvif_ioctl_v0 *ioctl;
	struct nouveau_abi16 *abi16;
	u32 argc = size;
	int ret;

	if (argc < sizeof(*ioctl))
		return -EINVAL;
	argc -= sizeof(*ioctl);

	ioctl = kmalloc(size, GFP_KERNEL);
	if (!ioctl)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(ioctl, user, size))
		goto done_free;

	if (ioctl->version != 0x00 ||
	    (ioctl->route && ioctl->route != 0xff)) {
		ret = -EINVAL;
		goto done_free;
	}

	abi16 = nouveau_abi16_get(filp);
	if (unlikely(!abi16)) {
		ret = -ENOMEM;
		goto done_free;
	}

	switch (ioctl->type) {
	case NVIF_IOCTL_V0_SCLASS: ret = nouveau_abi16_ioctl_sclass(abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_NEW   : ret = nouveau_abi16_ioctl_new   (abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_DEL   : ret = nouveau_abi16_ioctl_del   (abi16, ioctl, argc); break;
	case NVIF_IOCTL_V0_MTHD  : ret = nouveau_abi16_ioctl_mthd  (abi16, ioctl, argc); break;
	default:
		ret = -EINVAL;
		break;
	}

	nouveau_abi16_put(abi16, 0);

	if (ret == 0) {
		if (copy_to_user(user, ioctl, size))
			ret = -EFAULT;
	}

done_free:
	kfree(ioctl);
	return ret;
}