xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c (revision e66774dd6f6a3d44559599e4eeb785734c28d034)
1ea1f5729SLucas Stach /*
2ea1f5729SLucas Stach  * Copyright (C) 2017 Etnaviv Project
3ea1f5729SLucas Stach  *
4ea1f5729SLucas Stach  * This program is free software; you can redistribute it and/or modify it
5ea1f5729SLucas Stach  * under the terms of the GNU General Public License version 2 as published by
6ea1f5729SLucas Stach  * the Free Software Foundation.
7ea1f5729SLucas Stach  *
8ea1f5729SLucas Stach  * This program is distributed in the hope that it will be useful, but WITHOUT
9ea1f5729SLucas Stach  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10ea1f5729SLucas Stach  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11ea1f5729SLucas Stach  * more details.
12ea1f5729SLucas Stach  *
13ea1f5729SLucas Stach  * You should have received a copy of the GNU General Public License along with
14ea1f5729SLucas Stach  * this program.  If not, see <http://www.gnu.org/licenses/>.
15ea1f5729SLucas Stach  */
16ea1f5729SLucas Stach 
17*e66774ddSLucas Stach #include <drm/drm_mm.h>
18*e66774ddSLucas Stach 
19ea1f5729SLucas Stach #include "etnaviv_cmdbuf.h"
20ea1f5729SLucas Stach #include "etnaviv_gpu.h"
21ea1f5729SLucas Stach #include "etnaviv_mmu.h"
22ea1f5729SLucas Stach 
/* One shared DMA buffer is carved into fixed-size granules for cmdbufs. */
#define SUBALLOC_SIZE		SZ_256K
#define SUBALLOC_GRANULE	SZ_4K
#define SUBALLOC_GRANULES	(SUBALLOC_SIZE / SUBALLOC_GRANULE)

/*
 * Suballocator state for command buffers: a single write-combined DMA
 * allocation, mapped once into the GPU address space, with a bitmap
 * tracking which 4K granules are in use.
 */
struct etnaviv_cmdbuf_suballoc {
	/* suballocated dma buffer properties */
	struct etnaviv_gpu *gpu;
	void *vaddr;		/* CPU virtual address of the DMA buffer */
	dma_addr_t paddr;	/* DMA (bus) address of the buffer */

	/* GPU mapping */
	u32 iova;			/* GPU virtual address of the buffer */
	struct drm_mm_node vram_node; /* only used on MMUv2 */

	/* allocation management */
	struct mutex lock;	/* protects granule_map and free_space */
	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
	int free_space;		/* wait condition: set when a region is freed */
	wait_queue_head_t free_event;
};
43*e66774ddSLucas Stach 
44*e66774ddSLucas Stach struct etnaviv_cmdbuf_suballoc *
45*e66774ddSLucas Stach etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
46*e66774ddSLucas Stach {
47*e66774ddSLucas Stach 	struct etnaviv_cmdbuf_suballoc *suballoc;
48*e66774ddSLucas Stach 	int ret;
49*e66774ddSLucas Stach 
50*e66774ddSLucas Stach 	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
51*e66774ddSLucas Stach 	if (!suballoc)
52*e66774ddSLucas Stach 		return ERR_PTR(-ENOMEM);
53*e66774ddSLucas Stach 
54*e66774ddSLucas Stach 	suballoc->gpu = gpu;
55*e66774ddSLucas Stach 	mutex_init(&suballoc->lock);
56*e66774ddSLucas Stach 	init_waitqueue_head(&suballoc->free_event);
57*e66774ddSLucas Stach 
58*e66774ddSLucas Stach 	suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
59*e66774ddSLucas Stach 				       &suballoc->paddr, GFP_KERNEL);
60*e66774ddSLucas Stach 	if (!suballoc->vaddr)
61*e66774ddSLucas Stach 		goto free_suballoc;
62*e66774ddSLucas Stach 
63*e66774ddSLucas Stach 	ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
64*e66774ddSLucas Stach 					    &suballoc->vram_node, SUBALLOC_SIZE,
65*e66774ddSLucas Stach 					    &suballoc->iova);
66*e66774ddSLucas Stach 	if (ret)
67*e66774ddSLucas Stach 		goto free_dma;
68*e66774ddSLucas Stach 
69*e66774ddSLucas Stach 	return suballoc;
70*e66774ddSLucas Stach 
71*e66774ddSLucas Stach free_dma:
72*e66774ddSLucas Stach 	dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
73*e66774ddSLucas Stach free_suballoc:
74*e66774ddSLucas Stach 	kfree(suballoc);
75*e66774ddSLucas Stach 
76*e66774ddSLucas Stach 	return NULL;
77*e66774ddSLucas Stach }
78*e66774ddSLucas Stach 
79*e66774ddSLucas Stach void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
80*e66774ddSLucas Stach {
81*e66774ddSLucas Stach 	etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
82*e66774ddSLucas Stach 				      SUBALLOC_SIZE, suballoc->iova);
83*e66774ddSLucas Stach 	dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
84*e66774ddSLucas Stach 		    suballoc->paddr);
85*e66774ddSLucas Stach 	kfree(suballoc);
86*e66774ddSLucas Stach }
87*e66774ddSLucas Stach 
88*e66774ddSLucas Stach struct etnaviv_cmdbuf *
89*e66774ddSLucas Stach etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
90ea1f5729SLucas Stach 		   size_t nr_bos)
91ea1f5729SLucas Stach {
92ea1f5729SLucas Stach 	struct etnaviv_cmdbuf *cmdbuf;
93ea1f5729SLucas Stach 	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
94ea1f5729SLucas Stach 				 sizeof(*cmdbuf));
95*e66774ddSLucas Stach 	int granule_offs, order, ret;
96ea1f5729SLucas Stach 
97ea1f5729SLucas Stach 	cmdbuf = kzalloc(sz, GFP_KERNEL);
98ea1f5729SLucas Stach 	if (!cmdbuf)
99ea1f5729SLucas Stach 		return NULL;
100ea1f5729SLucas Stach 
101*e66774ddSLucas Stach 	cmdbuf->suballoc = suballoc;
102*e66774ddSLucas Stach 	cmdbuf->size = size;
103ea1f5729SLucas Stach 
104*e66774ddSLucas Stach 	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
105*e66774ddSLucas Stach retry:
106*e66774ddSLucas Stach 	mutex_lock(&suballoc->lock);
107*e66774ddSLucas Stach 	granule_offs = bitmap_find_free_region(suballoc->granule_map,
108*e66774ddSLucas Stach 					SUBALLOC_GRANULES, order);
109*e66774ddSLucas Stach 	if (granule_offs < 0) {
110*e66774ddSLucas Stach 		suballoc->free_space = 0;
111*e66774ddSLucas Stach 		mutex_unlock(&suballoc->lock);
112*e66774ddSLucas Stach 		ret = wait_event_interruptible_timeout(suballoc->free_event,
113*e66774ddSLucas Stach 						       suballoc->free_space,
114*e66774ddSLucas Stach 						       msecs_to_jiffies(10 * 1000));
115*e66774ddSLucas Stach 		if (!ret) {
116*e66774ddSLucas Stach 			dev_err(suballoc->gpu->dev,
117*e66774ddSLucas Stach 				"Timeout waiting for cmdbuf space\n");
118ea1f5729SLucas Stach 			return NULL;
119ea1f5729SLucas Stach 		}
120*e66774ddSLucas Stach 		goto retry;
121*e66774ddSLucas Stach 	}
122*e66774ddSLucas Stach 	mutex_unlock(&suballoc->lock);
123*e66774ddSLucas Stach 	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
124*e66774ddSLucas Stach 	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
125ea1f5729SLucas Stach 
126ea1f5729SLucas Stach 	return cmdbuf;
127ea1f5729SLucas Stach }
128ea1f5729SLucas Stach 
129ea1f5729SLucas Stach void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
130ea1f5729SLucas Stach {
131*e66774ddSLucas Stach 	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
132*e66774ddSLucas Stach 	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
133*e66774ddSLucas Stach 				 SUBALLOC_GRANULE);
134*e66774ddSLucas Stach 
135*e66774ddSLucas Stach 	mutex_lock(&suballoc->lock);
136*e66774ddSLucas Stach 	bitmap_release_region(suballoc->granule_map,
137*e66774ddSLucas Stach 			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
138*e66774ddSLucas Stach 			      order);
139*e66774ddSLucas Stach 	suballoc->free_space = 1;
140*e66774ddSLucas Stach 	mutex_unlock(&suballoc->lock);
141*e66774ddSLucas Stach 	wake_up_all(&suballoc->free_event);
142ea1f5729SLucas Stach 	kfree(cmdbuf);
143ea1f5729SLucas Stach }
144c3ef4b8cSLucas Stach 
145c3ef4b8cSLucas Stach u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
146c3ef4b8cSLucas Stach {
147*e66774ddSLucas Stach 	return buf->suballoc->iova + buf->suballoc_offset;
148c3ef4b8cSLucas Stach }
1499912b4dbSLucas Stach 
1509912b4dbSLucas Stach dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
1519912b4dbSLucas Stach {
152*e66774ddSLucas Stach 	return buf->suballoc->paddr + buf->suballoc_offset;
1539912b4dbSLucas Stach }
154