/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
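
/*
 * Illustrative sketch (not part of the driver): the refcounting pattern
 * implied by the comment above, using the vc4_perfmon_get()/_put()
 * helpers declared at the bottom of this header.  Each user that keeps
 * a pointer to the perfmon (a submitted job, for instance) holds one
 * reference, and the last vc4_perfmon_put() frees the object:
 *
 *	vc4_perfmon_get(perfmon);	// the job keeps a pointer
 *	exec->perfmon = perfmon;
 *	...
 *	vc4_perfmon_put(exec->perfmon);	// in the job-done path; the
 *					// last put frees the perfmon
 */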

struct vc4_dev {
	struct drm_device base;

	unsigned int irq;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happens after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to
	 * guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}
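
/*
 * Illustrative sketch (not part of the driver): how the emit_seqno and
 * finished_seqno counters above relate.  A job with sequence number N
 * is complete exactly when finished_seqno >= N; both counters start at
 * 0 and only ever grow.  Readers outside the IRQ handler synchronize
 * with job_lock, per the comments above:
 *
 *	static bool vc4_seqno_done(struct vc4_dev *vc4, uint64_t seqno)
 *	{
 *		return vc4->finished_seqno >= seqno;
 *	}
 */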

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}
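
/*
 * Illustrative sketch (not part of the driver): the usecnt guard
 * described above, using the vc4_bo_inc_usecnt()/vc4_bo_dec_usecnt()
 * helpers declared later in this header.  While usecnt is non-zero the
 * BO cannot be moved to the purgeable pool, so users bracket any access
 * that needs the backing memory to stay resident:
 *
 *	ret = vc4_bo_inc_usecnt(bo);	// may fail if already purged
 *	if (ret)
 *		return ret;
 *	... access bo->base.paddr ...
 *	vc4_bo_dec_usecnt(bo);
 */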

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/* Set when the HVS is version 5, which requires updated dlist
	 * structures.
	 */
	bool hvs5;
};
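
/*
 * Illustrative sketch (not part of the driver): allocating display-list
 * space from dlist_mm above.  Sizes are in dwords, and mm_lock (which
 * protects both memory managers) must be held across the allocation:
 *
 *	spin_lock_irqsave(&hvs->mm_lock, flags);
 *	ret = drm_mm_insert_node(&hvs->dlist_mm, &node, dlist_count);
 *	spin_unlock_irqrestore(&hvs->mm_lock, flags);
 */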

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
	const char *debugfs_name;
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
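
/*
 * Illustrative sketch (not part of the driver): the accessors above
 * deliberately rely on a local variable named "vc4" or "hvs" being in
 * scope, which keeps call sites short.  SOME_REG and SOME_FLAG are
 * placeholder names, not real register definitions:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 val = V3D_READ(SOME_REG);
 *
 *	V3D_WRITE(SOME_REG, val | SOME_FLAG);
 */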

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr (shader_rec_p) gets
	 * incremented as CL packets are relocated in
	 * validate_gl_shader_state, and the vaddrs (u and v) get
	 * incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};
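
/*
 * Illustrative sketch (not part of the driver): the cursor bookkeeping
 * described for the shader_rec_* and uniforms_* members above.
 * Validation consumes one record of rec_size bytes (a made-up name
 * here) from the unvalidated buffer (u), stores it at its validated
 * home (v), and advances the hardware paddr (p) while shrinking the
 * remaining size:
 *
 *	memcpy(exec->shader_rec_v, exec->shader_rec_u, rec_size);
 *	exec->shader_rec_u += rec_size;
 *	exec->shader_rec_v += rec_size;
 *	exec->shader_rec_p += rec_size;
 *	exec->shader_rec_size -= rec_size;
 */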

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};
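
/*
 * Illustrative sketch (not part of the driver): consuming the ~0
 * "unused" markers documented in vc4_texture_sample_info above.  Only
 * the parameter words that userspace actually provided get relocated;
 * relocate_texture_word() is a made-up helper name:
 *
 *	for (i = 0; i < 4; i++) {
 *		if (sample->p_offset[i] == ~0)
 *			continue;	// unprovided; the HW treats it as 0
 *		relocate_texture_word(uniforms + sample->p_offset[i]);
 *	}
 */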

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
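
/*
 * Illustrative sketch (not part of the driver): a typical wait_for()
 * call site.  SOME_REG and SOME_DONE_BIT are placeholder names.  The
 * macro evaluates to 0 on success, or -ETIMEDOUT if the condition
 * never became true within the budget (10 ms here):
 *
 *	ret = wait_for(V3D_READ(SOME_REG) & SOME_DONE_BIT, 10);
 *	if (ret)
 *		DRM_ERROR("timed out waiting for V3D\n");
 */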

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
			  const char *filename,
			  int (*show)(struct seq_file*, void*),
			  void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
					const char *filename,
					int (*show)(struct seq_file*, void*),
					void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
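
/*
 * Illustrative sketch (not part of the driver): queueing a callback
 * with vc4_queue_seqno_cb() above.  The callback fires from a
 * workqueue once finished_seqno passes the given seqno; my_cb and
 * my_state are made-up names.  The vc4_seqno_cb is embedded in a
 * caller-owned structure and recovered with container_of():
 *
 *	static void my_cb(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_state *s = container_of(cb, struct my_state, cb);
 *		...
 *	}
 *
 *	ret = vc4_queue_seqno_cb(dev, &state->cb, seqno, my_cb);
 */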

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */