amdgpu_ttm.c (e862b08b4650be6d5196c191baceff3c43dfddef) amdgpu_ttm.c (17ffdc482982af92bddb59692af1c5e1de23d184)
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,

--- 46 unchanged lines hidden (view full) ---

55#include "amdgpu.h"
56#include "amdgpu_object.h"
57#include "amdgpu_trace.h"
58#include "amdgpu_amdkfd.h"
59#include "amdgpu_sdma.h"
60#include "amdgpu_ras.h"
61#include "bif/bif_4_1_d.h"
62
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,

--- 46 unchanged lines hidden (view full) ---

55#include "amdgpu.h"
56#include "amdgpu_object.h"
57#include "amdgpu_trace.h"
58#include "amdgpu_amdkfd.h"
59#include "amdgpu_sdma.h"
60#include "amdgpu_ras.h"
61#include "bif/bif_4_1_d.h"
62
63#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
64
65static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
66 struct ttm_mem_reg *mem, unsigned num_pages,
67 uint64_t offset, unsigned window,
68 struct amdgpu_ring *ring,
69 uint64_t *addr);
70
63static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
64 struct ttm_mem_reg *mem, unsigned num_pages,
65 uint64_t offset, unsigned window,
66 struct amdgpu_ring *ring,
67 uint64_t *addr);
68
69static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
70static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
71
72static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
73{
74 return 0;
75}
76
71/**
72 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
73 * memory request.
74 *
75 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
76 * @type: The type of memory requested
77 * @man: The memory type manager for each domain
78 *

--- 686 unchanged lines hidden (view full) ---

765#endif
766};
767
768#ifdef CONFIG_DRM_AMDGPU_USERPTR
769/* flags used by HMM internal, not related to CPU/GPU PTE flags */
770static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
771 (1 << 0), /* HMM_PFN_VALID */
772 (1 << 1), /* HMM_PFN_WRITE */
77/**
78 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
79 * memory request.
80 *
81 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
82 * @type: The type of memory requested
83 * @man: The memory type manager for each domain
84 *

--- 686 unchanged lines hidden (view full) ---

771#endif
772};
773
774#ifdef CONFIG_DRM_AMDGPU_USERPTR
775/* flags used by HMM internal, not related to CPU/GPU PTE flags */
776static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
777 (1 << 0), /* HMM_PFN_VALID */
778 (1 << 1), /* HMM_PFN_WRITE */
773 0 /* HMM_PFN_DEVICE_PRIVATE */
774};
775
776static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
777 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
778 0, /* HMM_PFN_NONE */
779 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
780};
781

--- 241 unchanged lines hidden (view full) ---

1023 struct ttm_buffer_object *tbo,
1024 uint64_t flags)
1025{
1026 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1027 struct ttm_tt *ttm = tbo->ttm;
1028 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1029 int r;
1030
779};
780
781static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
782 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
783 0, /* HMM_PFN_NONE */
784 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
785};
786

--- 241 unchanged lines hidden (view full) ---

1028 struct ttm_buffer_object *tbo,
1029 uint64_t flags)
1030{
1031 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1032 struct ttm_tt *ttm = tbo->ttm;
1033 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1034 int r;
1035
1031 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
1036 if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
1032 uint64_t page_idx = 1;
1033
1034 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1035 ttm->pages, gtt->ttm.dma_address, flags);
1036 if (r)
1037 goto gart_bind_fail;
1038
1037 uint64_t page_idx = 1;
1038
1039 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1040 ttm->pages, gtt->ttm.dma_address, flags);
1041 if (r)
1042 goto gart_bind_fail;
1043
1039 /* The memory type of the first page defaults to UC. Now
1040 * modify the memory type to NC from the second page of
1041 * the BO onward.
1042 */
1044 /* Patch mtype of the second part BO */
1043 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1044 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1045
1046 r = amdgpu_gart_bind(adev,
1047 gtt->offset + (page_idx << PAGE_SHIFT),
1048 ttm->num_pages - page_idx,
1049 &ttm->pages[page_idx],
1050 &(gtt->ttm.dma_address[page_idx]), flags);

--- 537 unchanged lines hidden (view full) ---

1588 if (bo->mem.mem_type != TTM_PL_VRAM)
1589 return -EIO;
1590
1591 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1592 pos = (nodes->start << PAGE_SHIFT) + offset;
1593
1594 while (len && pos < adev->gmc.mc_vram_size) {
1595 uint64_t aligned_pos = pos & ~(uint64_t)3;
1045 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1046 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1047
1048 r = amdgpu_gart_bind(adev,
1049 gtt->offset + (page_idx << PAGE_SHIFT),
1050 ttm->num_pages - page_idx,
1051 &ttm->pages[page_idx],
1052 &(gtt->ttm.dma_address[page_idx]), flags);

--- 537 unchanged lines hidden (view full) ---

1590 if (bo->mem.mem_type != TTM_PL_VRAM)
1591 return -EIO;
1592
1593 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1594 pos = (nodes->start << PAGE_SHIFT) + offset;
1595
1596 while (len && pos < adev->gmc.mc_vram_size) {
1597 uint64_t aligned_pos = pos & ~(uint64_t)3;
1596 uint64_t bytes = 4 - (pos & 3);
1598 uint32_t bytes = 4 - (pos & 3);
1597 uint32_t shift = (pos & 3) * 8;
1598 uint32_t mask = 0xffffffff << shift;
1599
1600 if (len < bytes) {
1601 mask &= 0xffffffff >> (bytes - len) * 8;
1602 bytes = len;
1603 }
1604
1599 uint32_t shift = (pos & 3) * 8;
1600 uint32_t mask = 0xffffffff << shift;
1601
1602 if (len < bytes) {
1603 mask &= 0xffffffff >> (bytes - len) * 8;
1604 bytes = len;
1605 }
1606
1605 if (mask != 0xffffffff) {
1606 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1607 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1608 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1609 if (!write || mask != 0xffffffff)
1610 value = RREG32_NO_KIQ(mmMM_DATA);
1611 if (write) {
1612 value &= ~mask;
1613 value |= (*(uint32_t *)buf << shift) & mask;
1614 WREG32_NO_KIQ(mmMM_DATA, value);
1615 }
1616 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1617 if (!write) {
1618 value = (value & mask) >> shift;
1619 memcpy(buf, &value, bytes);
1620 }
1621 } else {
1622 bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1623 bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1624
1625 amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1626 bytes, write);
1607 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1608 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1609 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1610 if (!write || mask != 0xffffffff)
1611 value = RREG32_NO_KIQ(mmMM_DATA);
1612 if (write) {
1613 value &= ~mask;
1614 value |= (*(uint32_t *)buf << shift) & mask;
1615 WREG32_NO_KIQ(mmMM_DATA, value);
1627 }
1616 }
1617 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1618 if (!write) {
1619 value = (value & mask) >> shift;
1620 memcpy(buf, &value, bytes);
1621 }
1628
1629 ret += bytes;
1630 buf = (uint8_t *)buf + bytes;
1631 pos += bytes;
1632 len -= bytes;
1633 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1634 ++nodes;
1635 pos = (nodes->start << PAGE_SHIFT);
1636 }
1637 }
1638
1639 return ret;
1640}
1641
1642static struct ttm_bo_driver amdgpu_bo_driver = {
1643 .ttm_tt_create = &amdgpu_ttm_tt_create,
1644 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1645 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1622
1623 ret += bytes;
1624 buf = (uint8_t *)buf + bytes;
1625 pos += bytes;
1626 len -= bytes;
1627 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1628 ++nodes;
1629 pos = (nodes->start << PAGE_SHIFT);
1630 }
1631 }
1632
1633 return ret;
1634}
1635
1636static struct ttm_bo_driver amdgpu_bo_driver = {
1637 .ttm_tt_create = &amdgpu_ttm_tt_create,
1638 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1639 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1640 .invalidate_caches = &amdgpu_invalidate_caches,
1646 .init_mem_type = &amdgpu_init_mem_type,
1647 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1648 .evict_flags = &amdgpu_evict_flags,
1649 .move = &amdgpu_bo_move,
1650 .verify_access = &amdgpu_verify_access,
1651 .move_notify = &amdgpu_bo_move_notify,
1652 .release_notify = &amdgpu_bo_release_notify,
1653 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,

--- 181 unchanged lines hidden (view full) ---

1835 if (r) {
1836 return r;
1837 }
1838
1839 /*
1840 *The reserved vram for memory training must be pinned to the specified
1841 *place on the VRAM, so reserve it early.
1842 */
1641 .init_mem_type = &amdgpu_init_mem_type,
1642 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1643 .evict_flags = &amdgpu_evict_flags,
1644 .move = &amdgpu_bo_move,
1645 .verify_access = &amdgpu_verify_access,
1646 .move_notify = &amdgpu_bo_move_notify,
1647 .release_notify = &amdgpu_bo_release_notify,
1648 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,

--- 181 unchanged lines hidden (view full) ---

1830 if (r) {
1831 return r;
1832 }
1833
1834 /*
1835 *The reserved vram for memory training must be pinned to the specified
1836 *place on the VRAM, so reserve it early.
1837 */
1843 if (!amdgpu_sriov_vf(adev)) {
1844 r = amdgpu_ttm_training_reserve_vram_init(adev);
1845 if (r)
1846 return r;
1847 }
1838 r = amdgpu_ttm_training_reserve_vram_init(adev);
1839 if (r)
1840 return r;
1848
1849 /* allocate memory as required for VGA
1850 * This is used for VGA emulation and pre-OS scanout buffers to
1851 * avoid display artifacts while transitioning between pre-OS
1852 * and driver. */
1853 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1854 AMDGPU_GEM_DOMAIN_VRAM,
1855 &adev->stolen_vga_memory,

--- 56 unchanged lines hidden (view full) ---

1912
1913 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1914 adev->gds.oa_size);
1915 if (r) {
1916 DRM_ERROR("Failed initializing oa heap.\n");
1917 return r;
1918 }
1919
1841
1842 /* allocate memory as required for VGA
1843 * This is used for VGA emulation and pre-OS scanout buffers to
1844 * avoid display artifacts while transitioning between pre-OS
1845 * and driver. */
1846 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1847 AMDGPU_GEM_DOMAIN_VRAM,
1848 &adev->stolen_vga_memory,

--- 56 unchanged lines hidden (view full) ---

1905
1906 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1907 adev->gds.oa_size);
1908 if (r) {
1909 DRM_ERROR("Failed initializing oa heap.\n");
1910 return r;
1911 }
1912
1913 /* Register debugfs entries for amdgpu_ttm */
1914 r = amdgpu_ttm_debugfs_init(adev);
1915 if (r) {
1916 DRM_ERROR("Failed to init debugfs\n");
1917 return r;
1918 }
1920 return 0;
1921}
1922
1923/**
1924 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1925 */
1926void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1927{

--- 5 unchanged lines hidden (view full) ---

1933/**
1934 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1935 */
1936void amdgpu_ttm_fini(struct amdgpu_device *adev)
1937{
1938 if (!adev->mman.initialized)
1939 return;
1940
1919 return 0;
1920}
1921
1922/**
1923 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1924 */
1925void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1926{

--- 5 unchanged lines hidden (view full) ---

1932/**
1933 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1934 */
1935void amdgpu_ttm_fini(struct amdgpu_device *adev)
1936{
1937 if (!adev->mman.initialized)
1938 return;
1939
1940 amdgpu_ttm_debugfs_fini(adev);
1941 amdgpu_ttm_training_reserve_vram_fini(adev);
1942 /* return the IP Discovery TMR memory back to VRAM */
1943 amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
1944 amdgpu_ttm_fw_reserve_vram_fini(adev);
1945
1946 if (adev->mman.aper_base_kaddr)
1947 iounmap(adev->mman.aper_base_kaddr);
1948 adev->mman.aper_base_kaddr = NULL;

--- 158 unchanged lines hidden (view full) ---

2107 return r;
2108
2109 if (vm_needs_flush) {
2110 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2111 job->vm_needs_flush = true;
2112 }
2113 if (resv) {
2114 r = amdgpu_sync_resv(adev, &job->sync, resv,
1941 amdgpu_ttm_training_reserve_vram_fini(adev);
1942 /* return the IP Discovery TMR memory back to VRAM */
1943 amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
1944 amdgpu_ttm_fw_reserve_vram_fini(adev);
1945
1946 if (adev->mman.aper_base_kaddr)
1947 iounmap(adev->mman.aper_base_kaddr);
1948 adev->mman.aper_base_kaddr = NULL;

--- 158 unchanged lines hidden (view full) ---

2107 return r;
2108
2109 if (vm_needs_flush) {
2110 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2111 job->vm_needs_flush = true;
2112 }
2113 if (resv) {
2114 r = amdgpu_sync_resv(adev, &job->sync, resv,
2115 AMDGPU_SYNC_ALWAYS,
2116 AMDGPU_FENCE_OWNER_UNDEFINED);
2115 AMDGPU_FENCE_OWNER_UNDEFINED,
2116 false);
2117 if (r) {
2118 DRM_ERROR("sync failed (%d).\n", r);
2119 goto error_free;
2120 }
2121 }
2122
2123 for (i = 0; i < num_loops; i++) {
2124 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

--- 67 unchanged lines hidden (view full) ---

2192 num_dw += 64;
2193
2194 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2195 if (r)
2196 return r;
2197
2198 if (resv) {
2199 r = amdgpu_sync_resv(adev, &job->sync, resv,
2117 if (r) {
2118 DRM_ERROR("sync failed (%d).\n", r);
2119 goto error_free;
2120 }
2121 }
2122
2123 for (i = 0; i < num_loops; i++) {
2124 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

--- 67 unchanged lines hidden (view full) ---

2192 num_dw += 64;
2193
2194 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2195 if (r)
2196 return r;
2197
2198 if (resv) {
2199 r = amdgpu_sync_resv(adev, &job->sync, resv,
2200 AMDGPU_SYNC_ALWAYS,
2201 AMDGPU_FENCE_OWNER_UNDEFINED);
2200 AMDGPU_FENCE_OWNER_UNDEFINED, false);
2202 if (r) {
2203 DRM_ERROR("sync failed (%d).\n", r);
2204 goto error_free;
2205 }
2206 }
2207
2208 num_pages = bo->tbo.num_pages;
2209 mm_node = bo->tbo.mem.mm_node;

--- 64 unchanged lines hidden (view full) ---

2274 *
2275 * Accesses VRAM via MMIO for debugging purposes.
2276 */
2277static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2278 size_t size, loff_t *pos)
2279{
2280 struct amdgpu_device *adev = file_inode(f)->i_private;
2281 ssize_t result = 0;
2201 if (r) {
2202 DRM_ERROR("sync failed (%d).\n", r);
2203 goto error_free;
2204 }
2205 }
2206
2207 num_pages = bo->tbo.num_pages;
2208 mm_node = bo->tbo.mem.mm_node;

--- 64 unchanged lines hidden (view full) ---

2273 *
2274 * Accesses VRAM via MMIO for debugging purposes.
2275 */
2276static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2277 size_t size, loff_t *pos)
2278{
2279 struct amdgpu_device *adev = file_inode(f)->i_private;
2280 ssize_t result = 0;
2281 int r;
2282
2283 if (size & 0x3 || *pos & 0x3)
2284 return -EINVAL;
2285
2286 if (*pos >= adev->gmc.mc_vram_size)
2287 return -ENXIO;
2288
2282
2283 if (size & 0x3 || *pos & 0x3)
2284 return -EINVAL;
2285
2286 if (*pos >= adev->gmc.mc_vram_size)
2287 return -ENXIO;
2288
2289 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2290 while (size) {
2289 while (size) {
2291 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2292 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2290 unsigned long flags;
2291 uint32_t value;
2293
2292
2294 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2295 if (copy_to_user(buf, value, bytes))
2296 return -EFAULT;
2293 if (*pos >= adev->gmc.mc_vram_size)
2294 return result;
2297
2295
2298 result += bytes;
2299 buf += bytes;
2300 *pos += bytes;
2301 size -= bytes;
2296 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2297 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2298 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2299 value = RREG32_NO_KIQ(mmMM_DATA);
2300 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2301
2302 r = put_user(value, (uint32_t *)buf);
2303 if (r)
2304 return r;
2305
2306 result += 4;
2307 buf += 4;
2308 *pos += 4;
2309 size -= 4;
2302 }
2303
2304 return result;
2305}
2306
2307/**
2308 * amdgpu_ttm_vram_write - Linear write access to VRAM
2309 *

--- 220 unchanged lines hidden (view full) ---

2530#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2531 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2532#endif
2533 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2534};
2535
2536#endif
2537
2310 }
2311
2312 return result;
2313}
2314
2315/**
2316 * amdgpu_ttm_vram_write - Linear write access to VRAM
2317 *

--- 220 unchanged lines hidden (view full) ---

2538#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2539 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2540#endif
2541 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2542};
2543
2544#endif
2545
2538int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2546static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2539{
2540#if defined(CONFIG_DEBUG_FS)
2541 unsigned count;
2542
2543 struct drm_minor *minor = adev->ddev->primary;
2544 struct dentry *ent, *root = minor->debugfs_root;
2545
2546 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {

--- 18 unchanged lines hidden (view full) ---

2565 --count;
2566#endif
2567
2568 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2569#else
2570 return 0;
2571#endif
2572}
2547{
2548#if defined(CONFIG_DEBUG_FS)
2549 unsigned count;
2550
2551 struct drm_minor *minor = adev->ddev->primary;
2552 struct dentry *ent, *root = minor->debugfs_root;
2553
2554 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {

--- 18 unchanged lines hidden (view full) ---

2573 --count;
2574#endif
2575
2576 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2577#else
2578 return 0;
2579#endif
2580}
2581
/*
 * amdgpu_ttm_debugfs_fini - remove the TTM debugfs entries for this device.
 *
 * Walks the dentries cached in adev->mman.debugfs_entries (one slot per
 * entry in ttm_debugfs_entries) and removes each from debugfs. Compiles
 * to an empty function when CONFIG_DEBUG_FS is not set.
 */
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}