1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 20 * USE OR OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * The above copyright notice and this permission notice (including the 23 * next paragraph) shall be included in all copies or substantial portions 24 * of the Software. 25 * 26 */ 27 28 #include <linux/firmware.h> 29 #include <linux/mfd/core.h> 30 31 #include "amdgpu.h" 32 #include "amdgpu_isp.h" 33 #include "isp_v4_1_0.h" 34 #include "isp_v4_1_1.h" 35 36 #define ISP_MC_ADDR_ALIGN (1024 * 32) 37 38 /** 39 * isp_hw_init - start and test isp block 40 * 41 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
 *
 */
static int isp_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_isp *isp = &adev->isp;

	/* Delegate to the version-specific hook installed by isp_early_init() */
	if (isp->funcs->hw_init != NULL)
		return isp->funcs->hw_init(isp);

	return -ENODEV;
}

/**
 * isp_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_isp *isp = &ip_block->adev->isp;

	/* Delegate to the version-specific hook installed by isp_early_init() */
	if (isp->funcs->hw_fini != NULL)
		return isp->funcs->hw_fini(isp);

	return -ENODEV;
}

/**
 * isp_load_fw_by_psp - request isp firmware and register it for PSP loading
 *
 * @adev: amdgpu device pointer
 *
 * Reads the ISP firmware binary matching this IP version and records it in
 * the PSP ucode table so PSP loads it during hardware init.
 *
 * Returns:
 * 0 on success, negative error code if the firmware cannot be read.
 */
static int isp_load_fw_by_psp(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	char ucode_prefix[10];
	int r = 0;

	/* get isp fw binary name and path */
	amdgpu_ucode_ip_version_decode(adev, ISP_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	/* read isp fw */
	r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
				 "amdgpu/%s.bin", ucode_prefix);
	if (r) {
		/* drop any partially acquired fw reference before bailing out */
		amdgpu_ucode_release(&adev->isp.fw);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->isp.fw->data;

	/* register the image in the PSP ucode table under the ISP slot */
	adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].ucode_id =
		AMDGPU_UCODE_ID_ISP;
	adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].fw = adev->isp.fw;

	/* PSP accounting: fw regions are page aligned */
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

	return r;
}

/**
 * isp_early_init - select version-specific isp functions and load firmware
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Installs the isp function table matching the detected ISP IP version and
 * requests the isp firmware.
 *
 * Returns:
 * 0 on success, -EINVAL for an unsupported IP version, -ENOENT if the
 * firmware is unavailable.
 */
static int isp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_isp *isp = &adev->isp;

	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		isp_v4_1_0_set_isp_funcs(isp);
		break;
	case IP_VERSION(4, 1, 1):
		isp_v4_1_1_set_isp_funcs(isp);
		break;
	default:
		return -EINVAL;
	}

	isp->adev = adev;
	isp->parent = adev->dev;

	if (isp_load_fw_by_psp(adev)) {
		DRM_DEBUG_DRIVER("%s: isp fw load failed\n",
				 __func__);
		/*
		 * NOTE(review): the real error from isp_load_fw_by_psp() is
		 * deliberately collapsed to -ENOENT here — presumably so the
		 * IP-block core treats the ISP block as absent rather than
		 * failing device init; confirm against the caller before
		 * changing this.
		 */
		return -ENOENT;
	}

	return 0;
}

/* ISP has no idle state to poll; always report idle to the IP-block core */
static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

/* Clockgating is not controlled at this level; accept any state */
static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

/* Powergating is not controlled at this level; accept any state */
static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}

/*
 * Sanity check that the isp platform device handed in by the external V4L2
 * driver really hangs off this amdgpu device.
 */
static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
{
	if (isp_parent != amdgpu_dev)
		return -EINVAL;

	return 0;
}

/**
 * isp_user_buffer_alloc - create user buffer object (BO) for isp
 *
 * @dev: isp device handle
 * @dmabuf: DMABUF handle for isp buffer allocated in system memory
 * @buf_obj: GPU buffer object handle to initialize
 * @buf_addr: GPU addr of the pinned BO to initialize
 *
 * Imports isp DMABUF to allocate and pin a user BO for isp internal use. It does
 * GART alloc to generate GPU addr for BO to make it accessible through the
 * GART aperture for ISP HW.
 *
 * This function is exported to allow the V4L2 isp device external to drm device
 * to create and access the isp user BO.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
171 */ 172 int isp_user_buffer_alloc(struct device *dev, void *dmabuf, 173 void **buf_obj, u64 *buf_addr) 174 { 175 struct platform_device *ispdev = to_platform_device(dev); 176 const struct isp_platform_data *isp_pdata; 177 struct amdgpu_device *adev; 178 struct mfd_cell *mfd_cell; 179 struct amdgpu_bo *bo; 180 u64 gpu_addr; 181 int ret; 182 183 if (WARN_ON(!ispdev)) 184 return -ENODEV; 185 186 if (WARN_ON(!buf_obj)) 187 return -EINVAL; 188 189 if (WARN_ON(!buf_addr)) 190 return -EINVAL; 191 192 mfd_cell = &ispdev->mfd_cell[0]; 193 if (!mfd_cell) 194 return -ENODEV; 195 196 isp_pdata = mfd_cell->platform_data; 197 adev = isp_pdata->adev; 198 199 ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); 200 if (ret) 201 return ret; 202 203 ret = amdgpu_bo_create_isp_user(adev, dmabuf, 204 AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr); 205 if (ret) { 206 drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret); 207 return ret; 208 } 209 210 *buf_obj = (void *)bo; 211 *buf_addr = gpu_addr; 212 213 return 0; 214 } 215 EXPORT_SYMBOL(isp_user_buffer_alloc); 216 217 /** 218 * isp_user_buffer_free - free isp user buffer object (BO) 219 * 220 * @buf_obj: amdgpu isp user BO to free 221 * 222 * unpin and unref BO for isp internal use. 223 * 224 * This function is exported to allow the V4L2 isp device 225 * external to drm device to free the isp user BO. 226 */ 227 void isp_user_buffer_free(void *buf_obj) 228 { 229 amdgpu_bo_free_isp_user(buf_obj); 230 } 231 EXPORT_SYMBOL(isp_user_buffer_free); 232 233 /** 234 * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp 235 * 236 * @dev: isp device handle 237 * @size: size for the new BO 238 * @buf_obj: GPU BO handle to initialize 239 * @gpu_addr: GPU addr of the pinned BO 240 * @cpu_addr: CPU address mapping of BO 241 * 242 * Allocates and pins a kernel BO for internal isp firmware use. 
243 * 244 * This function is exported to allow the V4L2 isp device 245 * external to drm device to create and access the kernel BO. 246 * 247 * Returns: 248 * 0 on success, negative error code otherwise. 249 */ 250 int isp_kernel_buffer_alloc(struct device *dev, u64 size, 251 void **buf_obj, u64 *gpu_addr, void **cpu_addr) 252 { 253 struct platform_device *ispdev = to_platform_device(dev); 254 struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; 255 const struct isp_platform_data *isp_pdata; 256 struct amdgpu_device *adev; 257 struct mfd_cell *mfd_cell; 258 int ret; 259 260 if (WARN_ON(!ispdev)) 261 return -ENODEV; 262 263 if (WARN_ON(!buf_obj)) 264 return -EINVAL; 265 266 if (WARN_ON(!gpu_addr)) 267 return -EINVAL; 268 269 if (WARN_ON(!cpu_addr)) 270 return -EINVAL; 271 272 mfd_cell = &ispdev->mfd_cell[0]; 273 if (!mfd_cell) 274 return -ENODEV; 275 276 isp_pdata = mfd_cell->platform_data; 277 adev = isp_pdata->adev; 278 279 ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); 280 if (ret) 281 return ret; 282 283 ret = amdgpu_bo_create_kernel(adev, 284 size, 285 ISP_MC_ADDR_ALIGN, 286 AMDGPU_GEM_DOMAIN_GTT, 287 bo, 288 gpu_addr, 289 cpu_addr); 290 if (!cpu_addr || ret) { 291 drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret); 292 return ret; 293 } 294 295 return 0; 296 } 297 EXPORT_SYMBOL(isp_kernel_buffer_alloc); 298 299 /** 300 * isp_kernel_buffer_free - free isp kernel buffer object (BO) 301 * 302 * @buf_obj: amdgpu isp user BO to free 303 * @gpu_addr: GPU addr of isp kernel BO 304 * @cpu_addr: CPU addr of isp kernel BO 305 * 306 * unmaps and unpin a isp kernel BO. 307 * 308 * This function is exported to allow the V4L2 isp device 309 * external to drm device to free the kernel BO. 
 */
void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;

	amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
}
EXPORT_SYMBOL(isp_kernel_buffer_free);

/* Common IP-level hooks shared by all supported ISP hardware versions */
static const struct amd_ip_funcs isp_ip_funcs = {
	.name = "isp_ip",
	.early_init = isp_early_init,
	.hw_init = isp_hw_init,
	.hw_fini = isp_hw_fini,
	.is_idle = isp_is_idle,
	.set_clockgating_state = isp_set_clockgating_state,
	.set_powergating_state = isp_set_powergating_state,
};

/* ISP 4.1.0 IP block descriptor registered with the amdgpu IP-block core */
const struct amdgpu_ip_block_version isp_v4_1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ISP,
	.major = 4,
	.minor = 1,
	.rev = 0,
	.funcs = &isp_ip_funcs,
};

/* ISP 4.1.1 IP block descriptor registered with the amdgpu IP-block core */
const struct amdgpu_ip_block_version isp_v4_1_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ISP,
	.major = 4,
	.minor = 1,
	.rev = 1,
	.funcs = &isp_ip_funcs,
};