// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"

static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
				       struct qcm_process_device *qpd,
				       enum cache_policy default_policy,
				       enum cache_policy alternate_policy,
				       void __user *alternate_aperture_base,
				       uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
			 struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
			 struct queue *q,
			 struct qcm_process_device *qpd);

void device_queue_manager_init_vi(
	struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
	asic_ops->update_qpd = update_qpd_vi;
	asic_ops->init_sdma_vm = init_sdma_vm;
	asic_ops->mqd_manager_init = mqd_manager_init_vi;
}

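/*
 * Usage sketch: the generic DQM setup in kfd_device_queue_manager.c
 * selects this ops table for gfx8-family (VI) GPUs, roughly like so
 * (simplified; the real dispatch switches on the GPU family):
 *
 *	struct device_queue_manager_asic_ops ops = {};
 *
 *	device_queue_manager_init_vi(&ops);
 *	ops.update_qpd(dqm, qpd);	// dispatches to update_qpd_vi()
 */
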
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return top_address_nybble << 12 |
			(top_address_nybble << 12) <<
			SH_MEM_BASES__SHARED_BASE__SHIFT;
}

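/*
 * Worked example, assuming the gfx8 register layout where SH_MEM_BASES
 * packs PRIVATE_BASE in bits 15:0 and SHARED_BASE in bits 31:16
 * (i.e. SH_MEM_BASES__SHARED_BASE__SHIFT == 16):
 *
 *	compute_sh_mem_bases_64bit(8)
 *		= (8 << 12) | ((8 << 12) << 16)
 *		= 0x8000 | 0x80000000
 *		= 0x80008000
 *
 * The nybble lands in the top 4 bits of each 16-bit base field, placing
 * LDS/scratch and GPUVM at 0x80000000'00000000 per the map above.
 */
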
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd,
		enum cache_policy default_policy,
		enum cache_policy alternate_policy,
		void __user *alternate_aperture_base,
		uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_UC :
			MTYPE_NC;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_UC :
			MTYPE_NC;

	qpd->sh_mem_config =
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				   SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

	return true;
}

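/*
 * Example: a process asking for a coherent default policy and a
 * noncoherent alternate (APE1) policy ends up with MTYPE_UC for the
 * default aperture and MTYPE_NC for APE1; a call sketch, assuming the
 * caller manages the APE1 base/limit separately:
 *
 *	set_cache_memory_policy_vi(dqm, qpd, cache_policy_coherent,
 *				   cache_policy_noncoherent, NULL, 0);
 *	// qpd->sh_mem_config now selects UC (uncached, coherent) by
 *	// default and NC (noncoherent, cached) inside APE1.
 */
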
static int update_qpd_vi(struct device_queue_manager *dqm,
			 struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
				MTYPE_UC <<
					SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
				MTYPE_UC <<
					SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	temp = get_sh_mem_bases_nybble_64(pdd);
	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

	pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
		temp, qpd->sh_mem_bases);

	return 0;
}

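/*
 * Note: get_sh_mem_bases_nybble_64() (from kfd_device_queue_manager.h)
 * derives the nybble from the process's 64-bit GPUVM aperture base, so
 * a process whose apertures start at 0x80000000'00000000 would yield
 * temp == 8 and, per the worked example above, sh_mem_bases ==
 * 0x80008000.
 */
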
static void init_sdma_vm(struct device_queue_manager *dqm,
			 struct queue *q,
			 struct qcm_process_device *qpd)
{
	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	q->properties.sdma_vm_addr =
		((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
		 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
		SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}
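
/*
 * The same top-address nybble programmed into SH_MEM_BASES above is
 * shifted into the SHARED_BASE field of the per-queue SDMA virtual
 * address register and masked to the field width, so SDMA engines
 * resolve GPUVM addresses in the same aperture as compute shaders.
 */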