/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

#include "radeon_object.h"

/* TODO: Here are things that need to be done:
 * - surface allocator & initializer: (a bit like scratch reg) should
 *   initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *   related to surfaces
 * - WB: write back stuff (do it a bit like scratch reg things)
 * - Vblank: look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs: clean cs ioctl, use bitmap & things like that.
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such
 * a situation the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal errors. Here is the init callchain:
 *   radeon_device_init  performs common structure and mutex
 *                       initialization
 *   asic_init           sets up the GPU memory layout and performs all
 *                       one-time initialization (failures in this
 *                       function are considered fatal)
 *   asic_startup        sets up GPU acceleration; to follow the
 *                       guideline, the first thing this function should
 *                       do is set up the GPU memory controller (only MC
 *                       setup failures are considered fatal)
 */

#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
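/*
 * Illustrative sketch (not part of the driver) of how a hypothetical caller
 * would honour the init contract described above. Only the ordering and the
 * "failures after MC setup are not fatal" rule come from the comment; the
 * error handling shown here is an assumption:
 *
 *	r = radeon_device_init(rdev, ddev, pdev, flags);
 *	if (r)
 *		return r;                       common/asic init failure is fatal
 *	if (acceleration startup failed)
 *		rdev->accel_working = false;    keep modesetting alive without accel
 */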
/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;

/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_NUM_FILES	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG		= 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS	= 0x00000002,
	CHIP_ERRATA_PLL_DELAY		= 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
bool radeon_get_bios(struct radeon_device *rdev);


/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t		scratch_reg;
	atomic_t		seq;
	uint32_t		last_seq;
	unsigned long		count_timeout;
	wait_queue_head_t	queue;
	rwlock_t		lock;
	struct list_head	created;
	struct list_head	emited;
	struct list_head	signaled;
};

struct radeon_fence {
	struct radeon_device	*rdev;
	struct kref		kref;
	struct list_head	list;
	/* protected by radeon_fence.lock */
	uint32_t		seq;
	unsigned long		timeout;
	bool			emited;
	bool			signaled;
};

int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_object *robj;
};

#define RADEON_GEM_MAX_SURFACES 8
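/*
 * Illustrative sketch of the fence API declared above (error handling
 * trimmed, locking assumptions omitted). A fence is created, emitted to
 * the GPU, waited on and then dereferenced:
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_create(rdev, &fence);
 *	r = radeon_fence_emit(rdev, fence);     make the GPU signal it later
 *	r = radeon_fence_wait(fence, true);     interruptible wait
 *	radeon_fence_unref(&fence);             drops the kref, NULLs the pointer
 */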
/*
 * Radeon buffer.
 */
struct radeon_object;

struct radeon_object_list {
	struct list_head	list;
	struct radeon_object	*robj;
	uint64_t		gpu_offset;
	unsigned		rdomain;
	unsigned		wdomain;
	uint32_t		tiling_flags;
};

int radeon_object_init(struct radeon_device *rdev);
void radeon_object_fini(struct radeon_device *rdev);
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr);
int radeon_object_kmap(struct radeon_object *robj, void **ptr);
void radeon_object_kunmap(struct radeon_object *robj);
void radeon_object_unref(struct radeon_object **robj);
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr);
void radeon_object_unpin(struct radeon_object *robj);
int radeon_object_wait(struct radeon_object *robj);
int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
int radeon_object_evict_vram(struct radeon_device *rdev);
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
void radeon_object_force_delete(struct radeon_device *rdev);
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head);
int radeon_object_list_validate(struct list_head *head, void *fence);
void radeon_object_list_unvalidate(struct list_head *head);
void radeon_object_list_clean(struct list_head *head);
int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma);
unsigned long radeon_object_size(struct radeon_object *robj);
void radeon_object_clear_surface_reg(struct radeon_object *robj);
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
			       bool force_drop);
void radeon_object_set_tiling_flags(struct radeon_object *robj,
				    uint32_t tiling_flags, uint32_t pitch);
void radeon_object_get_tiling_flags(struct radeon_object *robj,
				    uint32_t *tiling_flags, uint32_t *pitch);
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
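/*
 * Illustrative sketch of the buffer object API above (assumptions:
 * RADEON_GEM_DOMAIN_VRAM comes from radeon_drm.h and is the placement
 * wanted; error paths trimmed). A kernel BO is created, pinned for GPU
 * access and mapped for CPU access:
 *
 *	struct radeon_object *robj;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = radeon_object_create(rdev, NULL, size, true,
 *				 RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	r = radeon_object_kmap(robj, &cpu_ptr);
 *	...
 *	radeon_object_kunmap(robj);
 *	radeon_object_unpin(robj);
 *	radeon_object_unref(&robj);
 */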
/*
 * GEM objects.
 */
struct radeon_gem {
	struct list_head	objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);


/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

struct radeon_gart_table_ram {
	volatile uint32_t	*ptr;
};

struct radeon_gart_table_vram {
	struct radeon_object	*robj;
	volatile uint32_t	*ptr;
};

union radeon_gart_table {
	struct radeon_gart_table_ram	ram;
	struct radeon_gart_table_vram	vram;
};

#define RADEON_GPU_PAGE_SIZE 4096

struct radeon_gart {
	dma_addr_t		table_addr;
	unsigned		num_gpu_pages;
	unsigned		num_cpu_pages;
	unsigned		table_size;
	union radeon_gart_table	table;
	struct page		**pages;
	dma_addr_t		*pages_addr;
	bool			ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about the vram size near the mc fb location */
	u64			mc_vram_size;
	u64			gtt_location;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_location;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	bool			vram_is_ddr;
};

int radeon_mc_setup(struct radeon_device *rdev);


/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned	num_reg;
	bool		free[32];
	uint32_t	reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);


/*
 * IRQS.
 */
struct radeon_irq {
	bool		installed;
	bool		sw_int;
	/* FIXME: use a define for the max number of crtcs rather than hardcoding it */
	bool		crtc_vblank_int[2];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
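/*
 * Illustrative sketch of the scratch register helpers above (error handling
 * trimmed; WREG32() is defined further down in this header):
 *
 *	uint32_t scratch;
 *
 *	r = radeon_scratch_get(rdev, &scratch);   reserve a free scratch reg
 *	WREG32(scratch, 0xCAFEDEAD);              use it, e.g. for a ring test
 *	...
 *	radeon_scratch_free(rdev, scratch);       give it back
 */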
/*
 * CP & ring.
 */
struct radeon_ib {
	struct list_head	list;
	unsigned long		idx;
	uint64_t		gpu_addr;
	struct radeon_fence	*fence;
	uint32_t		*ptr;
	uint32_t		length_dw;
};

/*
 * locking -
 * mutex protects scheduled_ibs, ready, alloc_bm
 */
struct radeon_ib_pool {
	struct mutex		mutex;
	struct radeon_object	*robj;
	struct list_head	scheduled_ibs;
	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
	bool			ready;
	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
};

struct radeon_cp {
	struct radeon_object	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	struct mutex		mutex;
	bool			ready;
};

struct r600_blit {
	struct radeon_object	*shader_obj;
	u64			shader_gpu_addr;
	u32			vs_offset, ps_offset;
	u32			state_offset;
	u32			state_len;
	u32			vb_used, vb_total;
	struct radeon_ib	*vb_ib;
};

int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
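/*
 * Illustrative sketch of the indirect buffer (IB) API above (assumptions:
 * 'ndw' command dwords already sit in a local 'cmds' array; error handling
 * trimmed; fencing and safe reuse of the IB are handled by the pool code):
 *
 *	struct radeon_ib *ib;
 *
 *	r = radeon_ib_get(rdev, &ib);
 *	memcpy(ib->ptr, cmds, ndw * 4);     fill the CPU-visible copy
 *	ib->length_dw = ndw;
 *	r = radeon_ib_schedule(rdev, ib);   hand the IB to the CP
 *	radeon_ib_free(rdev, &ib);
 */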
/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object		*gobj;
	struct radeon_object		*robj;
	struct radeon_object_list	lobj;
	uint32_t			handle;
	uint32_t			flags;
};

struct radeon_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	int		kpage_idx[2];
	uint32_t	*kpage[2];
	uint32_t	*kdata;
	void __user	*user_ptr;
	int		last_copied_page;
	int		last_page_index;
};

struct radeon_cs_parser {
	struct radeon_device	*rdev;
	struct drm_file		*filp;
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;
	/* IB */
	unsigned		idx;
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;
	struct list_head	validated;
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	struct radeon_ib	*ib;
	void			*track;
	unsigned		family;
	int			parser_error;
};

extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);


/* Read one dword at index 'idx' of the IB chunk, going through the chunk's
 * two-entry kmapped page cache; on a miss, radeon_cs_update_pages() pulls
 * the needed page in and returns which cache slot it used. */
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

struct radeon_cs_packet {
	unsigned	idx;
	unsigned	type;
	unsigned	reg;
	unsigned	opcode;
	int		count;
	unsigned	one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);
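/*
 * Illustrative sketch (hypothetical callback, not part of the driver) of how
 * a radeon_packet0_check_t implementation would read the value written to a
 * register out of the IB via radeon_get_ib_value():
 *
 *	static int example_packet0_check(struct radeon_cs_parser *p,
 *					 struct radeon_cs_packet *pkt,
 *					 unsigned idx, unsigned reg)
 *	{
 *		u32 value = radeon_get_ib_value(p, idx);
 *
 *		switch (reg) {
 *		case SOME_REGISTER:     hypothetical register
 *			check or patch 'value' here
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */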
/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_object	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
};

/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to take power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The equation between GPU/memory clock and available bandwidth is hw
 * dependent (type of memory, bus size, efficiency, ...)
 */
struct radeon_pm {
	fixed20_12	max_bandwidth;
	fixed20_12	igp_sideport_mclk;
	fixed20_12	igp_system_mclk;
	fixed20_12	igp_ht_link_clk;
	fixed20_12	igp_ht_link_width;
	fixed20_12	k8_bandwidth;
	fixed20_12	sideport_bandwidth;
	fixed20_12	ht_bandwidth;
	fixed20_12	core_bandwidth;
	fixed20_12	sclk;
	fixed20_12	needed_bandwidth;
};


/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
int r100_debugfs_cp_init(struct radeon_device *rdev);
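/*
 * Illustrative sketch of registering a debugfs file through the helper
 * above (hypothetical names; the drm_info_list layout and show() convention
 * come from the DRM core):
 *
 *	static int example_info(struct seq_file *m, void *data)
 *	{
 *		seq_printf(m, "hello\n");
 *		return 0;
 *	}
 *
 *	static struct drm_info_list example_list[] = {
 *		{"radeon_example_info", example_info, 0, NULL},
 *	};
 *
 *	r = radeon_debugfs_add_files(rdev, example_list,
 *				     ARRAY_SIZE(example_list));
 */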
/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*gpu_reset)(struct radeon_device *rdev);
	void (*gart_tlb_flush)(struct radeon_device *rdev);
	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
	void (*cp_fini)(struct radeon_device *rdev);
	void (*cp_disable)(struct radeon_device *rdev);
	void (*cp_commit)(struct radeon_device *rdev);
	void (*ring_start)(struct radeon_device *rdev);
	int (*ring_test)(struct radeon_device *rdev);
	void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
	int (*irq_set)(struct radeon_device *rdev);
	int (*irq_process)(struct radeon_device *rdev);
	u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
	int (*cs_parse)(struct radeon_cs_parser *p);
	int (*copy_blit)(struct radeon_device *rdev,
			 uint64_t src_offset,
			 uint64_t dst_offset,
			 unsigned num_pages,
			 struct radeon_fence *fence);
	int (*copy_dma)(struct radeon_device *rdev,
			uint64_t src_offset,
			uint64_t dst_offset,
			unsigned num_pages,
			struct radeon_fence *fence);
	int (*copy)(struct radeon_device *rdev,
		    uint64_t src_offset,
		    uint64_t dst_offset,
		    unsigned num_pages,
		    struct radeon_fence *fence);
	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
	uint32_t (*get_memory_clock)(struct radeon_device *rdev);
	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	int (*set_surface_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
	void (*bandwidth_update)(struct radeon_device *rdev);
};

/*
 * Asic structures
 */
struct r100_asic {
	const unsigned	*reg_safe_bm;
	unsigned	reg_safe_bm_size;
};

struct r300_asic {
	const unsigned	*reg_safe_bm;
	unsigned	reg_safe_bm_size;
};

struct r600_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
};

struct rv770_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_fize;
};

union radeon_asic_config {
	struct r300_asic	r300;
	struct r100_asic	r100;
	struct r600_asic	r600;
	struct rv770_asic	rv770;
};


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

struct radeon_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	/* ASIC */
	union radeon_asic_config	config;
	enum radeon_family		family;
	unsigned long			flags;
	int				usec_timeout;
	enum radeon_pll_errata		pll_errata;
	int				num_gb_pipes;
	int				num_z_pipes;
	int				disp_priority;
	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	struct radeon_object		*stollen_vga_memory;
	struct fb_info			*fbdev_info;
	struct radeon_object		*fbdev_robj;
	struct radeon_framebuffer	*fbdev_rfb;
	/* Register mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void				*rmmio;
	radeon_rreg_t			mc_rreg;
	radeon_wreg_t			mc_wreg;
	radeon_rreg_t			pll_rreg;
	radeon_wreg_t			pll_wreg;
	uint32_t			pcie_reg_mask;
	radeon_rreg_t			pciep_rreg;
	radeon_wreg_t			pciep_wreg;
	struct radeon_clock		clock;
	struct radeon_mc		mc;
	struct radeon_gart		gart;
	struct radeon_mode_info		mode_info;
	struct radeon_scratch		scratch;
	struct radeon_mman		mman;
	struct radeon_fence_driver	fence_drv;
	struct radeon_cp		cp;
	struct radeon_ib_pool		ib_pool;
	struct radeon_irq		irq;
	struct radeon_asic		*asic;
	struct radeon_gem		gem;
	struct radeon_pm		pm;
	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct mutex			cs_mutex;
	struct radeon_wb		wb;
	struct radeon_dummy_page	dummy_page;
	bool				gpu_lockup;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	struct radeon_surface_reg	surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware		*me_fw;		/* all family ME firmware */
	const struct firmware		*pfp_fw;	/* r6/700 PFP firmware */
	struct r600_blit		r600_blit;
	int				msi_enabled;	/* msi enabled */
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes);

/* Registers below 0x10000 are read/written through the direct mapping;
 * higher register offsets go through the MM_INDEX/MM_DATA indirect pair. */
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	if (reg < 0x10000)
		return readl(((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	if (reg < 0x10000)
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}
/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))

/*
 * Indirect registers accessors
 */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASIC helpers.
 */
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			    (rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
			     (rdev->family == CHIP_RS200) || \
			     (rdev->family == CHIP_RV250) || \
			     (rdev->family == CHIP_RV280) || \
			     (rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  || \
			    (rdev->family == CHIP_RV350) || \
			    (rdev->family == CHIP_R350)  || \
			    (rdev->family == CHIP_RV380) || \
			    (rdev->family == CHIP_R420)  || \
			    (rdev->family == CHIP_R423)  || \
			    (rdev->family == CHIP_RV410) || \
			    (rdev->family == CHIP_RS400) || \
			    (rdev->family == CHIP_RS480))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))


/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);
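/*
 * Illustrative sketch of the register helpers above (FOO, FOO_EN_MASK and
 * FOO_EN_SHIFT are hypothetical names, shown only for the macro mechanics):
 *
 *	tmp = RREG32(FOO);                      plain 32-bit MMIO read
 *	en = REG_GET(FOO_EN, tmp);              extract a field value
 *	WREG32(FOO, REG_SET(FOO_EN, 1));        build a register value
 *	WREG32_P(FOO, 0, ~FOO_EN_MASK);         clear one field, keep the rest
 */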
/*
 * RING helpers.
 */
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (rdev->cp.count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	rdev->cp.ring[rdev->cp.wptr++] = v;
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	rdev->cp.count_dw--;
	rdev->cp.ring_free_dw--;
}


/*
 * ASIC macros.
 */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))

/* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
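/*
 * Illustrative sketch of emitting commands with the ring helpers declared
 * earlier and radeon_ring_write() above (PACKET0() comes from the register
 * headers; SOME_REG is a hypothetical register, error handling trimmed):
 *
 *	r = radeon_ring_lock(rdev, 2);          reserve 2 dwords, takes cp.mutex
 *	radeon_ring_write(rdev, PACKET0(SOME_REG, 0));
 *	radeon_ring_write(rdev, value);
 *	radeon_ring_unlock_commit(rdev);        bumps wptr so the CP fetches it
 */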
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
	u32	GENMO_WT;
	u32	CRTC_EXT_CNTL;
	u32	CRTC_GEN_CNTL;
	u32	CRTC2_GEN_CNTL;
	u32	CUR_OFFSET;
	u32	CUR2_OFFSET;
};
extern void r100_cp_disable(struct radeon_device *rdev);
extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
extern void r100_cp_fini(struct radeon_device *rdev);
extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
extern int r100_pci_gart_init(struct radeon_device *rdev);
extern void r100_pci_gart_fini(struct radeon_device *rdev);
extern int r100_pci_gart_enable(struct radeon_device *rdev);
extern void r100_pci_gart_disable(struct radeon_device *rdev);
extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
extern void r100_ib_fini(struct radeon_device *rdev);
extern int r100_ib_init(struct radeon_device *rdev);
extern void r100_irq_disable(struct radeon_device *rdev);
extern int r100_irq_set(struct radeon_device *rdev);
extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
extern void r100_vram_init_sizes(struct radeon_device *rdev);
extern void r100_wb_disable(struct radeon_device *rdev);
extern void r100_wb_fini(struct radeon_device *rdev);
extern int r100_wb_init(struct radeon_device *rdev);
extern void r100_hdp_reset(struct radeon_device *rdev);
extern int r100_rb2d_reset(struct radeon_device *rdev);
extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
						struct radeon_cs_packet *pkt,
						struct radeon_object *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt,
				 const unsigned *auth, unsigned n,
				 radeon_packet0_check_t check);
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx);

/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);

/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_vram_info(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);

/* r420,r423,rv410 */
extern int r420_mc_init(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
extern void r420_pipes_init(struct radeon_device *rdev);
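/*
 * Illustrative sketch of how the MC save/stop/resume helpers
 * (r100_mc_stop()/r100_mc_resume() above, and the rv515 equivalents below)
 * are meant to bracket memory-controller reprogramming; the MC programming
 * in the middle is chip specific and omitted here:
 *
 *	struct r100_mc_save save;
 *
 *	r100_mc_stop(rdev, &save);      save CRTC state, stop display access
 *	... reprogram the MC (VRAM/GTT location) while it is idle ...
 *	r100_mc_resume(rdev, &save);    restore CRTC state, re-enable display
 */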
/* rv515 */
struct rv515_mc_save {
	u32 d1vga_control;
	u32 d2vga_control;
	u32 vga_render_control;
	u32 vga_hdp_control;
	u32 d1crtc_control;
	u32 d2crtc_control;
};
extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
extern void rv515_vga_render_disable(struct radeon_device *rdev);
extern void rv515_set_safe_registers(struct radeon_device *rdev);
extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
extern void rv515_clock_startup(struct radeon_device *rdev);
extern void rv515_debugfs(struct radeon_device *rdev);
extern int rv515_suspend(struct radeon_device *rdev);

/* rs400 */
extern int rs400_gart_init(struct radeon_device *rdev);
extern int rs400_gart_enable(struct radeon_device *rdev);
extern void rs400_gart_adjust_size(struct radeon_device *rdev);
extern void rs400_gart_disable(struct radeon_device *rdev);
extern void rs400_gart_fini(struct radeon_device *rdev);

/* rs600 */
extern void rs600_set_safe_registers(struct radeon_device *rdev);
extern int rs600_irq_set(struct radeon_device *rdev);
extern void rs600_irq_disable(struct radeon_device *rdev);

/* rs690, rs740 */
extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
				     struct drm_display_mode *mode1,
				     struct drm_display_mode *mode2);

/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
extern void r600_wb_fini(struct radeon_device *rdev);
extern int r600_wb_enable(struct radeon_device *rdev);
extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_cp_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);

#endif