// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_sa.h"

#include <linux/kernel.h>

#include <drm/drm_managed.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_map.h"

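/*
 * Suballocation of a pinned, GGTT-mapped BO, built on the generic
 * drm_suballoc helpers. Typical flow (a sketch, not taken verbatim from any
 * caller): xe_sa_bo_manager_init() once, then per use xe_sa_bo_new(), write
 * through xe_sa_bo_cpu_addr(), xe_sa_bo_flush_write(), and finally
 * xe_sa_bo_free() with the job's fence.
 */

/*
 * drm_managed release action: tear down the suballocation manager and free
 * the CPU staging buffer used for iomem-backed BOs. The backing BO itself
 * is released separately, via xe_managed_bo_create_pin_map().
 */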
static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
{
	struct xe_sa_manager *sa_manager = arg;
	struct xe_bo *bo = sa_manager->bo;

	if (!bo) {
		drm_err(drm, "no bo for sa manager\n");
		return;
	}

	drm_suballoc_manager_fini(&sa_manager->base);

	if (sa_manager->is_iomem)
		kvfree(sa_manager->cpu_ptr);

	sa_manager->bo = NULL;
}

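/**
 * xe_sa_bo_manager_init() - Create a suballocation manager backed by a BO
 * @tile: the &xe_tile to allocate the backing BO on
 * @size: size in bytes of the backing BO
 * @align: required alignment for each suballocation
 *
 * The BO is pinned and GGTT-mapped. SZ_4K of @size is held back, so the
 * suballocator manages only @size - SZ_4K bytes. For iomem-backed BOs,
 * writes are staged in a CPU buffer that xe_sa_bo_flush_write() copies into
 * the BO. Teardown is handled by a drm_managed release action.
 *
 * Return: pointer to the new &xe_sa_manager, or an ERR_PTR() on failure.
 */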
struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
{
	struct xe_device *xe = tile_to_xe(tile);
	u32 managed_size = size - SZ_4K;
	struct xe_sa_manager *sa_manager;
	struct xe_bo *bo;
	int ret;

	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
	if (!sa_manager)
		return ERR_PTR(-ENOMEM);

	sa_manager->bo = NULL;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
			PTR_ERR(bo));
		return ERR_CAST(bo);
	}
	sa_manager->bo = bo;
	sa_manager->is_iomem = bo->vmap.is_iomem;

	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
	sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);

	if (bo->vmap.is_iomem) {
		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
		if (!sa_manager->cpu_ptr) {
			sa_manager->bo = NULL;
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sa_manager->cpu_ptr = bo->vmap.vaddr;
		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
	}

	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
				       sa_manager);
	if (ret)
		return ERR_PTR(ret);

	return sa_manager;
}

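/**
 * xe_sa_bo_new() - Make a suballocation
 * @sa_manager: the &xe_sa_manager to allocate from
 * @size: number of bytes to suballocate
 *
 * Waits interruptibly for space if the manager is currently full.
 *
 * Return: a new &drm_suballoc, or an ERR_PTR(); -ENOBUFS if @size exceeds
 * the managed area and the request should be split into smaller chunks.
 */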
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
				  unsigned int size)
{
	/*
	 * BB too large, return -ENOBUFS indicating the user should split
	 * the array of binds into smaller chunks.
	 */
	if (size > sa_manager->base.size)
		return ERR_PTR(-ENOBUFS);

	return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}

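/**
 * xe_sa_bo_flush_write() - Copy staged CPU writes into the backing BO
 * @sa_bo: the &drm_suballoc to flush
 *
 * Only needed for iomem-backed managers, where suballocations are written
 * through a CPU staging buffer; a no-op for system-memory BOs, which are
 * written in place.
 */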
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	if (!sa_manager->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
			 xe_sa_bo_cpu_addr(sa_bo),
			 drm_suballoc_size(sa_bo));
}

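/**
 * xe_sa_bo_free() - Free a suballocation
 * @sa_bo: the &drm_suballoc to free
 * @fence: fence after which the suballocated range may be reused
 *
 * The range is handed back to the manager, but is only recycled once
 * @fence signals.
 */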
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
		   struct dma_fence *fence)
{
	drm_suballoc_free(sa_bo, fence);
}