// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_irq.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring-up prior to the display hardware
 * being usable. We previously emulated a legacy KMS interface, but there was
 * a desire to move to the atomic KMS interface. The vkms driver did
 * everything we needed, but we wanted KMS support natively in the driver
 * without buffer sharing and the ability to support an instance of VKMS per
 * device. We first looked at splitting vkms into a stub driver and a helper
 * module that other drivers could use to implement a virtual display, but
 * this strategy ended up being messy due to driver-specific callbacks needed
 * for buffer management. Ultimately, it proved easier to import the vkms code
 * as it mostly used core drm helpers anyway.
 */

static const u32 amdgpu_vkms_formats[] = {
        DRM_FORMAT_XRGB8888,
};

static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
        struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
        struct drm_crtc *crtc = &amdgpu_crtc->base;
        struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
        u64 ret_overrun;
        bool ret;

        ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
                                          output->period_ns);
        if (ret_overrun != 1)
                drm_warn(amdgpu_crtc->base.dev,
                         "%s: vblank timer overrun count: %llu\n",
                         __func__, ret_overrun);

        ret = drm_crtc_handle_vblank(crtc);
        /* Don't queue the timer again when vblank is disabled. */
        if (!ret)
                return HRTIMER_NORESTART;

        return HRTIMER_RESTART;
}
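/*
 * Vblank emulation: with no real display hardware, vblank interrupts are
 * simulated with a per-CRTC hrtimer. Enabling vblank computes the frame
 * duration for the current mode and arms the timer; each expiry forwards
 * the timer by one period and feeds the DRM vblank machinery, much like a
 * hardware vblank interrupt would.
 *
 * As a rough worked example (not from this file): for a 60 Hz mode the
 * core computes framedur_ns ~= 10^9 / 60 ~= 16666667 ns, so the timer
 * fires about every 16.67 ms.
 */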
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
        struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        drm_calc_timestamping_constants(crtc, &crtc->mode);

        out->period_ns = ktime_set(0, vblank->framedur_ns);
        hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);

        return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
                                             int *max_error,
                                             ktime_t *vblank_time,
                                             bool in_vblank_irq)
{
        struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        if (!READ_ONCE(vblank->enabled)) {
                *vblank_time = ktime_get();
                return true;
        }

        *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

        if (WARN_ON(*vblank_time == vblank->time))
                return true;

        /*
         * To prevent races we roll the hrtimer forward before we do any
         * interrupt processing - this is how real hw works (the interrupt is
         * only generated after all the vblank registers are updated) and what
         * the vblank core expects. Therefore we need to always correct the
         * timestamp by one frame.
         */
        *vblank_time -= output->period_ns;

        return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = drm_crtc_cleanup,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank = amdgpu_vkms_enable_vblank,
        .disable_vblank = amdgpu_vkms_disable_vblank,
        .get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
                                           struct drm_atomic_state *state)
{
        drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
                                            struct drm_atomic_state *state)
{
        drm_crtc_vblank_off(crtc);
}

static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                                          struct drm_atomic_state *state)
{
        unsigned long flags;

        if (crtc->state->event) {
                spin_lock_irqsave(&crtc->dev->event_lock, flags);

                if (drm_crtc_vblank_get(crtc) != 0)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                else
                        drm_crtc_arm_vblank_event(crtc, crtc->state->event);

                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

                crtc->state->event = NULL;
        }
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
        .atomic_flush = amdgpu_vkms_crtc_atomic_flush,
        .atomic_enable = amdgpu_vkms_crtc_atomic_enable,
        .atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};
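/*
 * CRTC setup: registers the CRTC with its primary/cursor planes, hooks up
 * the atomic helpers above, records the CRTC in adev->mode_info so the
 * rest of amdgpu can find it, and prepares (but does not arm) the vblank
 * hrtimer. The timer is only started once userspace enables vblanks.
 */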
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                                 struct drm_plane *primary, struct drm_plane *cursor)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        int ret;

        ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                        &amdgpu_vkms_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to init CRTC\n");
                return ret;
        }

        drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

        amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
        adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
        amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

        hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate,
                      CLOCK_MONOTONIC, HRTIMER_MODE_REL);

        return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode = NULL;
        unsigned int i;
        static const struct mode_size {
                int w;
                int h;
        } common_modes[] = {
                { 640,  480},
                { 720,  480},
                { 800,  600},
                { 848,  480},
                {1024,  768},
                {1152,  768},
                {1280,  720},
                {1280,  800},
                {1280,  854},
                {1280,  960},
                {1280, 1024},
                {1440,  900},
                {1400, 1050},
                {1680, 1050},
                {1600, 1200},
                {1920, 1080},
                {1920, 1200},
                {2560, 1440},
                {4096, 3112},
                {3656, 2664},
                {3840, 2160},
                {4096, 2160},
        };

        for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
                mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
                if (!mode)
                        continue;
                drm_mode_probed_add(connector, mode);
        }

        drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

        return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
        .get_modes = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = drm_plane_cleanup,
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
                                            struct drm_atomic_state *old_state)
{
        /* Nothing to scan out on a virtual display. */
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
        struct drm_crtc_state *crtc_state;
        int ret;

        if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
                return 0;

        crtc_state = drm_atomic_get_crtc_state(state,
                                               new_plane_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  false, true);
        if (ret != 0)
                return ret;

        /* For now, the primary plane must be visible and full screen. */
        if (!new_plane_state->visible)
                return -EINVAL;

        return 0;
}
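/*
 * Framebuffer pinning: even though nothing is scanned out, framebuffers
 * go through the same life cycle as on real hardware so the rest of the
 * driver sees consistent state: reserve the BO, reserve a fence slot,
 * pin the BO into a scanout-capable domain, bind it into the GART, and
 * record the resulting GPU address in the amdgpu framebuffer.
 */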
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
                                  struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        uint32_t domain;
        int r;

        if (!new_state->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
        afb = to_amdgpu_framebuffer(new_state->fb);

        obj = drm_gem_fb_get_obj(new_state->fb, 0);
        if (!obj) {
                DRM_ERROR("Failed to get obj from framebuffer\n");
                return -EINVAL;
        }

        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
                return r;
        }

        r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
        if (r) {
                dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
                goto error_unlock;
        }

        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev, rbo->flags);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                goto error_unlock;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                DRM_ERROR("%p bind failed\n", rbo);
                goto error_unpin;
        }

        amdgpu_bo_unreserve(rbo);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        amdgpu_bo_ref(rbo);

        return 0;

error_unpin:
        amdgpu_bo_unpin(rbo);

error_unlock:
        amdgpu_bo_unreserve(rbo);
        return r;
}

static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
                                   struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        struct drm_gem_object *obj;
        int r;

        if (!old_state->fb)
                return;

        obj = drm_gem_fb_get_obj(old_state->fb, 0);
        if (!obj) {
                DRM_ERROR("Failed to get obj from framebuffer\n");
                return;
        }

        rbo = gem_to_amdgpu_bo(obj);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
        amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
        .atomic_update = amdgpu_vkms_plane_atomic_update,
        .atomic_check = amdgpu_vkms_plane_atomic_check,
        .prepare_fb = amdgpu_vkms_prepare_fb,
        .cleanup_fb = amdgpu_vkms_cleanup_fb,
};

static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
                                                enum drm_plane_type type,
                                                int index)
{
        struct drm_plane *plane;
        int ret;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);

        ret = drm_universal_plane_init(dev, plane, 1 << index,
                                       &amdgpu_vkms_plane_funcs,
                                       amdgpu_vkms_formats,
                                       ARRAY_SIZE(amdgpu_vkms_formats),
                                       NULL, type, NULL);
        if (ret) {
                kfree(plane);
                return ERR_PTR(ret);
        }

        drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

        return plane;
}
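/*
 * Output wiring: each virtual output is a fixed chain, mapped 1:1 to a
 * CRTC by index (possible_crtcs is simply 1 << index):
 *
 *      primary plane -> CRTC -> virtual encoder -> virtual connector
 *
 * No cursor plane is created for a virtual display.
 */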
static int amdgpu_vkms_output_init(struct drm_device *dev,
                                   struct amdgpu_vkms_output *output, int index)
{
        struct drm_connector *connector = &output->connector;
        struct drm_encoder *encoder = &output->encoder;
        struct drm_crtc *crtc = &output->crtc.base;
        struct drm_plane *primary, *cursor = NULL;
        int ret;

        primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
        if (IS_ERR(primary))
                return PTR_ERR(primary);

        ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
        if (ret)
                goto err_crtc;

        ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to init connector\n");
                goto err_connector;
        }

        drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

        ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to init encoder\n");
                goto err_encoder;
        }
        encoder->possible_crtcs = 1 << index;

        ret = drm_connector_attach_encoder(connector, encoder);
        if (ret) {
                DRM_ERROR("Failed to attach connector to encoder\n");
                goto err_attach;
        }

        drm_mode_config_reset(dev);

        return 0;

err_attach:
        drm_encoder_cleanup(encoder);

err_encoder:
        drm_connector_cleanup(connector);

err_connector:
        drm_crtc_cleanup(crtc);

err_crtc:
        drm_plane_cleanup(primary);

        return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
        int r, i;
        struct amdgpu_device *adev = ip_block->adev;

        adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
                                           sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
        if (!adev->amdgpu_vkms_output)
                return -ENOMEM;

        /* There is no hardware vblank counter; timestamps come from hrtimers. */
        adev_to_drm(adev)->max_vblank_count = 0;

        adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

        adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
        adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

        adev_to_drm(adev)->mode_config.preferred_depth = 24;
        adev_to_drm(adev)->mode_config.prefer_shadow = 1;

        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;

        /* allocate crtcs, encoders, connectors */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
                if (r)
                        return r;
        }

        r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
        if (r)
                return r;

        drm_kms_helper_poll_init(adev_to_drm(adev));

        adev->mode_info.mode_config_initialized = true;
        return 0;
}

static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int i = 0;

        for (i = 0; i < adev->mode_info.num_crtc; i++)
                if (adev->mode_info.crtcs[i])
                        hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

        drm_kms_helper_poll_fini(adev_to_drm(adev));
        drm_mode_config_cleanup(adev_to_drm(adev));

        adev->mode_info.mode_config_initialized = false;

        drm_edid_free(adev->mode_info.bios_hardcoded_edid);
        kfree(adev->amdgpu_vkms_output);
        return 0;
}
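/*
 * hw_init: on ASICs that do have a display controller (DCE), the real
 * engine is explicitly disabled so it cannot keep scanning out or raising
 * interrupts while the virtual KMS is in charge. ASICs without DCE
 * (e.g. TOPAZ) need no action here.
 */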
static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
                dce_v6_0_disable_dce(adev);
                break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
                dce_v8_0_disable_dce(adev);
                break;
#endif
        case CHIP_FIJI:
        case CHIP_TONGA:
                dce_v10_0_disable_dce(adev);
                break;
        case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_HAINAN:
#endif
                /* no DCE */
                break;
        default:
                break;
        }
        return 0;
}

static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
        return 0;
}

static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int r;

        r = drm_mode_config_helper_suspend(adev_to_drm(adev));
        if (r)
                return r;

        return 0;
}

static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = amdgpu_vkms_hw_init(ip_block);
        if (r)
                return r;

        return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}

static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
        return true;
}

static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                             enum amd_clockgating_state state)
{
        return 0;
}

static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                             enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
        .name = "amdgpu_vkms",
        .sw_init = amdgpu_vkms_sw_init,
        .sw_fini = amdgpu_vkms_sw_fini,
        .hw_init = amdgpu_vkms_hw_init,
        .hw_fini = amdgpu_vkms_hw_fini,
        .suspend = amdgpu_vkms_suspend,
        .resume = amdgpu_vkms_resume,
        .is_idle = amdgpu_vkms_is_idle,
        .set_clockgating_state = amdgpu_vkms_set_clockgating_state,
        .set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_vkms_ip_funcs,
};
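/*
 * Registration sketch (illustration only; the actual hookup lives in the
 * per-ASIC IP-block setup elsewhere in amdgpu): this block stands in for
 * the DCE IP when virtual display is requested, roughly:
 *
 *      if (adev->enable_virtual_display)
 *              amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 */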