/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include <kunit/test-bug.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

extern const struct drm_driver vc4_drm_driver;
extern const struct drm_driver vc5_drm_driver;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	struct vc4_dev *dev;

	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[] __counted_by(ncounters);
};
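
/*
 * An informal sketch of the refcounting scheme above (illustrative, not
 * a verbatim excerpt from the driver): a submit that attaches a perfmon
 * takes a reference for the duration of the job, on top of the reference
 * held by the userspace-visible ID:
 *
 *	vc4_perfmon_get(perfmon);	// job now holds a reference
 *	... the job runs, counters accumulate ...
 *	vc4_perfmon_put(perfmon);	// job done; freed on the last put
 *
 * vc4_perfmon_get()/vc4_perfmon_put() are declared at the bottom of this
 * header.
 */
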
struct vc4_dev {
	struct drm_device base;
	struct device *dev;

	bool is_vc5;

	unsigned int irq;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to guess
	 * at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

#define to_vc4_dev(_dev)					\
	container_of_const(_dev, struct vc4_dev, base)
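
/*
 * An informal sketch of the seqno scheme above (illustrative, not a
 * verbatim excerpt): emit_seqno advances when a job is queued, and
 * finished_seqno trails it as jobs retire, so with job_lock held a job
 * with sequence number N is known to be complete once
 *
 *	vc4->finished_seqno >= N
 *
 * which is the test that the seqno wait ioctls and seqno callbacks
 * build upon.
 */
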
struct vc4_bo {
	struct drm_gem_dma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

#define to_vc4_bo(_bo)							\
	container_of_const(to_drm_gem_dma_obj(_bo), struct vc4_bo, base)
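
/*
 * Illustrative use of the upcast above (a hypothetical snippet): since
 * struct vc4_bo embeds the DMA GEM object as its first member, a
 * &struct drm_gem_object handed out by the GEM core converts back to
 * the driver BO without any lookup:
 *
 *	struct vc4_bo *bo = to_vc4_bo(gem_obj);
 *
 * (updates to bo->madv would additionally take bo->madv_lock).
 */
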
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

#define to_vc4_fence(_fence)					\
	container_of_const(_fence, struct vc4_fence, base)

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	unsigned long max_core_rate;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/*
	 * Even if HDMI0 on the RPi4 can output modes requiring a pixel
	 * rate higher than 297MHz, it needs some adjustments in the
	 * config.txt file to be able to do so and thus won't always be
	 * available.
	 */
	bool vc5_hdmi_enable_hdmi_20;

	/*
	 * 4096x2160@60 requires a core overclock to work, so register
	 * whether that is sufficient.
	 */
	bool vc5_hdmi_enable_4096by2160;
};

#define HVS_NUM_CHANNELS 3

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

#define to_vc4_hvs_state(_state)				\
	container_of_const(_state, struct vc4_hvs_state, base)

struct vc4_hvs_state *vc4_hvs_get_global_state(struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_old_global_state(const struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_new_global_state(const struct drm_atomic_state *state);
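
/*
 * Typical atomic_check-time lookup of the global HVS state (an informal
 * sketch; the getter is assumed to follow the usual ERR_PTR convention):
 *
 *	struct vc4_hvs_state *hvs_state;
 *
 *	hvs_state = vc4_hvs_get_global_state(state);
 *	if (IS_ERR(hvs_state))
 *		return PTR_ERR(hvs_state);
 */
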
struct vc4_plane {
	struct drm_plane base;
};

#define to_vc4_plane(_plane)					\
	container_of_const(_plane, struct vc4_plane, base)

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in
	 * HVS cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

#define to_vc4_plane_state(_state)				\
	container_of_const(_state, struct vc4_plane_state, base)

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
	VC4_ENCODER_TYPE_TXP,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

#define to_vc4_encoder(_encoder)				\
	container_of_const(_encoder, struct vc4_encoder, base)
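
/*
 * The per-encoder hooks above bracket the pixelvalve sequencing done in
 * vc4_crtc.c. An informal sketch of the intended ordering, inferred from
 * the hook names (see vc4_crtc.c for the authoritative sequence):
 *
 *	enable:  pre_crtc_configure -> pre_crtc_enable ->
 *		 (pixelvalve enabled) -> post_crtc_enable
 *	disable: (pixelvalve stopped) -> post_crtc_disable ->
 *		 post_crtc_powerdown
 */
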
static inline
struct drm_encoder *vc4_find_encoder_by_type(struct drm_device *drm,
					     enum vc4_encoder_type type)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, drm) {
		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);

		if (vc4_encoder->type == type)
			return encoder;
	}

	return NULL;
}

struct vc4_crtc_data {
	const char *name;

	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

extern const struct vc4_crtc_data vc4_txp_crtc_data;

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

extern const struct vc4_pv_data bcm2835_pv0_data;
extern const struct vc4_pv_data bcm2835_pv1_data;
extern const struct vc4_pv_data bcm2835_pv2_data;
extern const struct vc4_pv_data bcm2711_pv0_data;
extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

#define to_vc4_crtc(_crtc)					\
	container_of_const(_crtc, struct vc4_crtc, base)

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of_const(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

#define to_vc4_crtc_state(_state)				\
	container_of_const(_state, struct vc4_crtc_state, base)

#define V3D_READ(offset)						\
	({								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		readl(vc4->v3d->regs + (offset));			\
	})

#define V3D_WRITE(offset, val)						\
	do {								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		writel(val, vc4->v3d->regs + (offset));			\
	} while (0)

#define HVS_READ(offset)						\
	({								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		readl(hvs->regs + (offset));				\
	})

#define HVS_WRITE(offset, val)						\
	do {								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		writel(val, hvs->regs + (offset));			\
	} while (0)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
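
/*
 * VC4_REG32() initializes one debugfs_reg32 entry from a register
 * #define. An illustrative (hypothetical) regset for dumping a few
 * registers through vc4_debugfs_add_regset32():
 *
 *	static const struct debugfs_reg32 foo_regs[] = {
 *		VC4_REG32(FOO_CTRL),
 *		VC4_REG32(FOO_STATUS),
 *	};
 *
 * where FOO_CTRL/FOO_STATUS stand in for real register offsets from
 * vc4_regs.h.
 */
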
struct vc4_exec_info {
	struct vc4_dev *dev;

	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_dma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_dma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};
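
/*
 * Lifetime of a job, as an informal sketch pieced together from the
 * list comments in struct vc4_dev (vc4_gem.c and vc4_irq.c hold the
 * authoritative logic):
 *
 *	submit_cl ioctl -> bin_job_list    (vc4_submit_next_bin_job())
 *	binning done    -> render_job_list (vc4_move_job_to_render())
 *	render done     -> job_done_list   (freed from job_done_work,
 *			   finished_seqno advances)
 */
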
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct vc4_dev *dev;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
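
/*
 * The job lists these helpers walk are protected by vc4->job_lock (see
 * struct vc4_dev), so a typical caller looks like this informal sketch:
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	... inspect or requeue exec ...
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */
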
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar and we've never had a
 * chance to check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({				\
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
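
/*
 * wait_for() polls COND with exponential backoff until it holds or MS
 * milliseconds elapse, returning 0 on success and -ETIMEDOUT otherwise.
 * An informal sketch with hypothetical register/bit names:
 *
 *	ret = wait_for(HVS_READ(SOME_STATUS_REG) & SOME_READY_BIT, 10);
 *	if (ret)
 *		drm_err(drm, "timed out waiting for engine\n");
 */
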
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
int vc4_bo_debugfs_init(struct drm_minor *minor);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int __vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
		    struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
		    struct drm_plane *primary_plane,
		    const struct drm_crtc_funcs *crtc_funcs,
		    const struct drm_crtc_helper_funcs *crtc_helper_funcs,
		    bool feeds_txp);
int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
		  struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs,
		  bool feeds_txp);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
int vc4_crtc_atomic_check(struct drm_crtc *crtc,
			  struct drm_atomic_state *state);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
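
/*
 * vc4_queue_seqno_cb() arranges for cb->func to run from a workqueue
 * once finished_seqno reaches the given seqno (see seqno_cb_list in
 * struct vc4_dev). An informal sketch, with a hypothetical callback:
 *
 *	static void my_seqno_done(struct vc4_seqno_cb *cb)
 *	{
 *		... seqno has passed; release resources ...
 *	}
 *
 *	ret = vc4_queue_seqno_cb(dev, &cb, exec->seqno, my_seqno_done);
 */
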
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev);
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
int vc4_hvs_debugfs_init(struct drm_minor *minor);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type,
				 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
int vc4_v3d_debugfs_init(struct drm_minor *minor);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_dma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_dma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */