xref: /linux/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c (revision b08494a8f7416e5f09907318c5460ad6f6e2a548)
179819d9aSAlex Deucher // SPDX-License-Identifier: MIT
279819d9aSAlex Deucher /*
379819d9aSAlex Deucher  * Copyright 2024 Advanced Micro Devices, Inc.
479819d9aSAlex Deucher  *
579819d9aSAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
679819d9aSAlex Deucher  * copy of this software and associated documentation files (the "Software"),
779819d9aSAlex Deucher  * to deal in the Software without restriction, including without limitation
879819d9aSAlex Deucher  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
979819d9aSAlex Deucher  * and/or sell copies of the Software, and to permit persons to whom the
1079819d9aSAlex Deucher  * Software is furnished to do so, subject to the following conditions:
1179819d9aSAlex Deucher  *
1279819d9aSAlex Deucher  * The above copyright notice and this permission notice shall be included in
1379819d9aSAlex Deucher  * all copies or substantial portions of the Software.
1479819d9aSAlex Deucher  *
1579819d9aSAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1679819d9aSAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1779819d9aSAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1879819d9aSAlex Deucher  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
1979819d9aSAlex Deucher  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2079819d9aSAlex Deucher  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2179819d9aSAlex Deucher  * OTHER DEALINGS IN THE SOFTWARE.
2279819d9aSAlex Deucher  *
2379819d9aSAlex Deucher  */
2479819d9aSAlex Deucher #include "amdgpu.h"
2579819d9aSAlex Deucher #include "amdgpu_gfx.h"
2679819d9aSAlex Deucher #include "mes_userqueue.h"
2779819d9aSAlex Deucher #include "amdgpu_userq_fence.h"
2879819d9aSAlex Deucher 
2979819d9aSAlex Deucher #define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
3079819d9aSAlex Deucher #define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
3179819d9aSAlex Deucher 
3279819d9aSAlex Deucher static int
3379819d9aSAlex Deucher mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
3479819d9aSAlex Deucher {
3579819d9aSAlex Deucher 	int ret;
3679819d9aSAlex Deucher 
3779819d9aSAlex Deucher 	ret = amdgpu_bo_reserve(bo, true);
3879819d9aSAlex Deucher 	if (ret) {
3979819d9aSAlex Deucher 		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
4079819d9aSAlex Deucher 		goto err_reserve_bo_failed;
4179819d9aSAlex Deucher 	}
4279819d9aSAlex Deucher 
4379819d9aSAlex Deucher 	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
4479819d9aSAlex Deucher 	if (ret) {
4579819d9aSAlex Deucher 		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
4679819d9aSAlex Deucher 		goto err_map_bo_gart_failed;
4779819d9aSAlex Deucher 	}
4879819d9aSAlex Deucher 
4979819d9aSAlex Deucher 	amdgpu_bo_unreserve(bo);
5079819d9aSAlex Deucher 	bo = amdgpu_bo_ref(bo);
5179819d9aSAlex Deucher 
5279819d9aSAlex Deucher 	return 0;
5379819d9aSAlex Deucher 
5479819d9aSAlex Deucher err_map_bo_gart_failed:
5579819d9aSAlex Deucher 	amdgpu_bo_unreserve(bo);
5679819d9aSAlex Deucher err_reserve_bo_failed:
5779819d9aSAlex Deucher 	return ret;
5879819d9aSAlex Deucher }
5979819d9aSAlex Deucher 
6079819d9aSAlex Deucher static int
6179819d9aSAlex Deucher mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
6279819d9aSAlex Deucher 			      struct amdgpu_usermode_queue *queue,
6379819d9aSAlex Deucher 			      uint64_t wptr)
6479819d9aSAlex Deucher {
6579819d9aSAlex Deucher 	struct amdgpu_bo_va_mapping *wptr_mapping;
6679819d9aSAlex Deucher 	struct amdgpu_vm *wptr_vm;
6779819d9aSAlex Deucher 	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
6879819d9aSAlex Deucher 	int ret;
6979819d9aSAlex Deucher 
7079819d9aSAlex Deucher 	wptr_vm = queue->vm;
7179819d9aSAlex Deucher 	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
7279819d9aSAlex Deucher 	if (ret)
7379819d9aSAlex Deucher 		return ret;
7479819d9aSAlex Deucher 
7579819d9aSAlex Deucher 	wptr &= AMDGPU_GMC_HOLE_MASK;
7679819d9aSAlex Deucher 	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
7779819d9aSAlex Deucher 	amdgpu_bo_unreserve(wptr_vm->root.bo);
7879819d9aSAlex Deucher 	if (!wptr_mapping) {
7979819d9aSAlex Deucher 		DRM_ERROR("Failed to lookup wptr bo\n");
8079819d9aSAlex Deucher 		return -EINVAL;
8179819d9aSAlex Deucher 	}
8279819d9aSAlex Deucher 
8379819d9aSAlex Deucher 	wptr_obj->obj = wptr_mapping->bo_va->base.bo;
8479819d9aSAlex Deucher 	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
8579819d9aSAlex Deucher 		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
8679819d9aSAlex Deucher 		return -EINVAL;
8779819d9aSAlex Deucher 	}
8879819d9aSAlex Deucher 
8979819d9aSAlex Deucher 	ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
9079819d9aSAlex Deucher 	if (ret) {
9179819d9aSAlex Deucher 		DRM_ERROR("Failed to map wptr bo to GART\n");
9279819d9aSAlex Deucher 		return ret;
9379819d9aSAlex Deucher 	}
9479819d9aSAlex Deucher 
9579819d9aSAlex Deucher 	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
9679819d9aSAlex Deucher 	return 0;
9779819d9aSAlex Deucher }
9879819d9aSAlex Deucher 
9923a650bbSAlex Deucher static int convert_to_mes_priority(int priority)
10023a650bbSAlex Deucher {
10123a650bbSAlex Deucher 	switch (priority) {
10223a650bbSAlex Deucher 	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
10323a650bbSAlex Deucher 	default:
10423a650bbSAlex Deucher 		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
10523a650bbSAlex Deucher 	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
10623a650bbSAlex Deucher 		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
10723a650bbSAlex Deucher 	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
10823a650bbSAlex Deucher 		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
10923a650bbSAlex Deucher 	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
11023a650bbSAlex Deucher 		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
11123a650bbSAlex Deucher 	}
11223a650bbSAlex Deucher }
11323a650bbSAlex Deucher 
11479819d9aSAlex Deucher static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
11551a9ea45SAlex Deucher 			 struct amdgpu_usermode_queue *queue)
11679819d9aSAlex Deucher {
11779819d9aSAlex Deucher 	struct amdgpu_device *adev = uq_mgr->adev;
11879819d9aSAlex Deucher 	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
11951a9ea45SAlex Deucher 	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
12079819d9aSAlex Deucher 	struct mes_add_queue_input queue_input;
12179819d9aSAlex Deucher 	int r;
12279819d9aSAlex Deucher 
12379819d9aSAlex Deucher 	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
12479819d9aSAlex Deucher 
12579819d9aSAlex Deucher 	queue_input.process_va_start = 0;
12691acb5d4SChristian König 	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
12779819d9aSAlex Deucher 
12879819d9aSAlex Deucher 	/* set process quantum to 10 ms and gang quantum to 1 ms as default */
12979819d9aSAlex Deucher 	queue_input.process_quantum = 100000;
13079819d9aSAlex Deucher 	queue_input.gang_quantum = 10000;
13179819d9aSAlex Deucher 	queue_input.paging = false;
13279819d9aSAlex Deucher 
13379819d9aSAlex Deucher 	queue_input.process_context_addr = ctx->gpu_addr;
13479819d9aSAlex Deucher 	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
13579819d9aSAlex Deucher 	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
13623a650bbSAlex Deucher 	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
13779819d9aSAlex Deucher 
13879819d9aSAlex Deucher 	queue_input.process_id = queue->vm->pasid;
13979819d9aSAlex Deucher 	queue_input.queue_type = queue->queue_type;
14079819d9aSAlex Deucher 	queue_input.mqd_addr = queue->mqd.gpu_addr;
14179819d9aSAlex Deucher 	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
14279819d9aSAlex Deucher 	queue_input.queue_size = userq_props->queue_size >> 2;
14379819d9aSAlex Deucher 	queue_input.doorbell_offset = userq_props->doorbell_index;
14479819d9aSAlex Deucher 	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
14579819d9aSAlex Deucher 	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
14679819d9aSAlex Deucher 
14779819d9aSAlex Deucher 	amdgpu_mes_lock(&adev->mes);
14879819d9aSAlex Deucher 	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
14979819d9aSAlex Deucher 	amdgpu_mes_unlock(&adev->mes);
15079819d9aSAlex Deucher 	if (r) {
15179819d9aSAlex Deucher 		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
15279819d9aSAlex Deucher 		return r;
15379819d9aSAlex Deucher 	}
15479819d9aSAlex Deucher 
15579819d9aSAlex Deucher 	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
15679819d9aSAlex Deucher 	return 0;
15779819d9aSAlex Deucher }
15879819d9aSAlex Deucher 
15951a9ea45SAlex Deucher static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
16079819d9aSAlex Deucher 			   struct amdgpu_usermode_queue *queue)
16179819d9aSAlex Deucher {
16279819d9aSAlex Deucher 	struct amdgpu_device *adev = uq_mgr->adev;
16379819d9aSAlex Deucher 	struct mes_remove_queue_input queue_input;
16479819d9aSAlex Deucher 	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
16579819d9aSAlex Deucher 	int r;
16679819d9aSAlex Deucher 
16779819d9aSAlex Deucher 	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
16879819d9aSAlex Deucher 	queue_input.doorbell_offset = queue->doorbell_index;
16979819d9aSAlex Deucher 	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
17079819d9aSAlex Deucher 
17179819d9aSAlex Deucher 	amdgpu_mes_lock(&adev->mes);
17279819d9aSAlex Deucher 	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
17379819d9aSAlex Deucher 	amdgpu_mes_unlock(&adev->mes);
17479819d9aSAlex Deucher 	if (r)
17579819d9aSAlex Deucher 		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
17651a9ea45SAlex Deucher 	return r;
17779819d9aSAlex Deucher }
17879819d9aSAlex Deucher 
17979819d9aSAlex Deucher static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
18079819d9aSAlex Deucher 				      struct amdgpu_usermode_queue *queue,
18179819d9aSAlex Deucher 				      struct drm_amdgpu_userq_in *mqd_user)
18279819d9aSAlex Deucher {
18379819d9aSAlex Deucher 	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
18479819d9aSAlex Deucher 	int r, size;
18579819d9aSAlex Deucher 
18679819d9aSAlex Deucher 	/*
18779819d9aSAlex Deucher 	 * The FW expects at least one page space allocated for
18879819d9aSAlex Deucher 	 * process ctx and gang ctx each. Create an object
18979819d9aSAlex Deucher 	 * for the same.
19079819d9aSAlex Deucher 	 */
19179819d9aSAlex Deucher 	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
192*42a66677SAlex Deucher 	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
19379819d9aSAlex Deucher 	if (r) {
19479819d9aSAlex Deucher 		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
19579819d9aSAlex Deucher 		return r;
19679819d9aSAlex Deucher 	}
19779819d9aSAlex Deucher 
19879819d9aSAlex Deucher 	return 0;
19979819d9aSAlex Deucher }
20079819d9aSAlex Deucher 
20179819d9aSAlex Deucher static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
20279819d9aSAlex Deucher 				struct drm_amdgpu_userq_in *args_in,
20379819d9aSAlex Deucher 				struct amdgpu_usermode_queue *queue)
20479819d9aSAlex Deucher {
20579819d9aSAlex Deucher 	struct amdgpu_device *adev = uq_mgr->adev;
20679819d9aSAlex Deucher 	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
20779819d9aSAlex Deucher 	struct drm_amdgpu_userq_in *mqd_user = args_in;
20879819d9aSAlex Deucher 	struct amdgpu_mqd_prop *userq_props;
20979819d9aSAlex Deucher 	int r;
21079819d9aSAlex Deucher 
21179819d9aSAlex Deucher 	/* Structure to initialize MQD for userqueue using generic MQD init function */
21279819d9aSAlex Deucher 	userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
21379819d9aSAlex Deucher 	if (!userq_props) {
21479819d9aSAlex Deucher 		DRM_ERROR("Failed to allocate memory for userq_props\n");
21579819d9aSAlex Deucher 		return -ENOMEM;
21679819d9aSAlex Deucher 	}
21779819d9aSAlex Deucher 
21879819d9aSAlex Deucher 	if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
21979819d9aSAlex Deucher 	    !mqd_user->queue_va || mqd_user->queue_size == 0) {
22079819d9aSAlex Deucher 		DRM_ERROR("Invalid MQD parameters for userqueue\n");
22179819d9aSAlex Deucher 		r = -EINVAL;
22279819d9aSAlex Deucher 		goto free_props;
22379819d9aSAlex Deucher 	}
22479819d9aSAlex Deucher 
225*42a66677SAlex Deucher 	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
22679819d9aSAlex Deucher 	if (r) {
22779819d9aSAlex Deucher 		DRM_ERROR("Failed to create MQD object for userqueue\n");
22879819d9aSAlex Deucher 		goto free_props;
22979819d9aSAlex Deucher 	}
23079819d9aSAlex Deucher 
23179819d9aSAlex Deucher 	/* Initialize the MQD BO with user given values */
23279819d9aSAlex Deucher 	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
23379819d9aSAlex Deucher 	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
23479819d9aSAlex Deucher 	userq_props->queue_size = mqd_user->queue_size;
23579819d9aSAlex Deucher 	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
23679819d9aSAlex Deucher 	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
23779819d9aSAlex Deucher 	userq_props->use_doorbell = true;
23879819d9aSAlex Deucher 	userq_props->doorbell_index = queue->doorbell_index;
239dd5a376cSArunpravin Paneer Selvam 	userq_props->fence_address = queue->fence_drv->gpu_addr;
24079819d9aSAlex Deucher 
24179819d9aSAlex Deucher 	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
24279819d9aSAlex Deucher 		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
24379819d9aSAlex Deucher 
24479819d9aSAlex Deucher 		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
24579819d9aSAlex Deucher 			DRM_ERROR("Invalid compute IP MQD size\n");
24679819d9aSAlex Deucher 			r = -EINVAL;
24779819d9aSAlex Deucher 			goto free_mqd;
24879819d9aSAlex Deucher 		}
24979819d9aSAlex Deucher 
25079819d9aSAlex Deucher 		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
25179819d9aSAlex Deucher 		if (IS_ERR(compute_mqd)) {
25279819d9aSAlex Deucher 			DRM_ERROR("Failed to read user MQD\n");
25379819d9aSAlex Deucher 			r = -ENOMEM;
25479819d9aSAlex Deucher 			goto free_mqd;
25579819d9aSAlex Deucher 		}
25679819d9aSAlex Deucher 
25779819d9aSAlex Deucher 		userq_props->eop_gpu_addr = compute_mqd->eop_va;
25879819d9aSAlex Deucher 		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
25979819d9aSAlex Deucher 		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
26079819d9aSAlex Deucher 		userq_props->hqd_active = false;
26187ceff61SAlex Deucher 		userq_props->tmz_queue =
26287ceff61SAlex Deucher 			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
26379819d9aSAlex Deucher 		kfree(compute_mqd);
26479819d9aSAlex Deucher 	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
26579819d9aSAlex Deucher 		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
26679819d9aSAlex Deucher 
26779819d9aSAlex Deucher 		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
26879819d9aSAlex Deucher 			DRM_ERROR("Invalid GFX MQD\n");
26979819d9aSAlex Deucher 			r = -EINVAL;
27079819d9aSAlex Deucher 			goto free_mqd;
27179819d9aSAlex Deucher 		}
27279819d9aSAlex Deucher 
27379819d9aSAlex Deucher 		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
27479819d9aSAlex Deucher 		if (IS_ERR(mqd_gfx_v11)) {
27579819d9aSAlex Deucher 			DRM_ERROR("Failed to read user MQD\n");
27679819d9aSAlex Deucher 			r = -ENOMEM;
27779819d9aSAlex Deucher 			goto free_mqd;
27879819d9aSAlex Deucher 		}
27979819d9aSAlex Deucher 
28079819d9aSAlex Deucher 		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
28179819d9aSAlex Deucher 		userq_props->csa_addr = mqd_gfx_v11->csa_va;
28287ceff61SAlex Deucher 		userq_props->tmz_queue =
28387ceff61SAlex Deucher 			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
28479819d9aSAlex Deucher 		kfree(mqd_gfx_v11);
28579819d9aSAlex Deucher 	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
28679819d9aSAlex Deucher 		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
28779819d9aSAlex Deucher 
28879819d9aSAlex Deucher 		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
28979819d9aSAlex Deucher 			DRM_ERROR("Invalid SDMA MQD\n");
29079819d9aSAlex Deucher 			r = -EINVAL;
29179819d9aSAlex Deucher 			goto free_mqd;
29279819d9aSAlex Deucher 		}
29379819d9aSAlex Deucher 
29479819d9aSAlex Deucher 		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
29579819d9aSAlex Deucher 		if (IS_ERR(mqd_sdma_v11)) {
29679819d9aSAlex Deucher 			DRM_ERROR("Failed to read sdma user MQD\n");
29779819d9aSAlex Deucher 			r = -ENOMEM;
29879819d9aSAlex Deucher 			goto free_mqd;
29979819d9aSAlex Deucher 		}
30079819d9aSAlex Deucher 
30179819d9aSAlex Deucher 		userq_props->csa_addr = mqd_sdma_v11->csa_va;
30279819d9aSAlex Deucher 		kfree(mqd_sdma_v11);
30379819d9aSAlex Deucher 	}
30479819d9aSAlex Deucher 
30579819d9aSAlex Deucher 	queue->userq_prop = userq_props;
30679819d9aSAlex Deucher 
30779819d9aSAlex Deucher 	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
30879819d9aSAlex Deucher 	if (r) {
30979819d9aSAlex Deucher 		DRM_ERROR("Failed to initialize MQD for userqueue\n");
31079819d9aSAlex Deucher 		goto free_mqd;
31179819d9aSAlex Deucher 	}
31279819d9aSAlex Deucher 
31379819d9aSAlex Deucher 	/* Create BO for FW operations */
31479819d9aSAlex Deucher 	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
31579819d9aSAlex Deucher 	if (r) {
31679819d9aSAlex Deucher 		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
31779819d9aSAlex Deucher 		goto free_mqd;
31879819d9aSAlex Deucher 	}
31979819d9aSAlex Deucher 
32079819d9aSAlex Deucher 	/* FW expects WPTR BOs to be mapped into GART */
32179819d9aSAlex Deucher 	r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
32279819d9aSAlex Deucher 	if (r) {
32379819d9aSAlex Deucher 		DRM_ERROR("Failed to create WPTR mapping\n");
32479819d9aSAlex Deucher 		goto free_ctx;
32579819d9aSAlex Deucher 	}
32679819d9aSAlex Deucher 
32779819d9aSAlex Deucher 	return 0;
32879819d9aSAlex Deucher 
32979819d9aSAlex Deucher free_ctx:
330*42a66677SAlex Deucher 	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
33179819d9aSAlex Deucher 
33279819d9aSAlex Deucher free_mqd:
333*42a66677SAlex Deucher 	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
33479819d9aSAlex Deucher 
33579819d9aSAlex Deucher free_props:
33679819d9aSAlex Deucher 	kfree(userq_props);
33779819d9aSAlex Deucher 
33879819d9aSAlex Deucher 	return r;
33979819d9aSAlex Deucher }
34079819d9aSAlex Deucher 
/*
 * Tear down everything mes_userq_mqd_create() set up for @queue:
 * the FW ctx object, the queue properties allocation, and the MQD
 * object.  Mirrors the create-path cleanup labels.
 */
static void
mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
		      struct amdgpu_usermode_queue *queue)
{
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
34979819d9aSAlex Deucher 
/* User-queue backend ops for MES-scheduled hardware queues. */
const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
};
356