1 /* 2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 * SOFTWARE. 22 * 23 * Authors: 24 * Kevin Tian <kevin.tian@intel.com> 25 * Dexuan Cui 26 * 27 * Contributors: 28 * Pei Zhang <pei.zhang@intel.com> 29 * Min He <min.he@intel.com> 30 * Niu Bing <bing.niu@intel.com> 31 * Yulei Zhang <yulei.zhang@intel.com> 32 * Zhenyu Wang <zhenyuw@linux.intel.com> 33 * Zhi Wang <zhi.a.wang@intel.com> 34 * 35 */ 36 37 #include "i915_drv.h" 38 #include "gvt.h" 39 40 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 41 { 42 struct intel_gvt *gvt = vgpu->gvt; 43 struct drm_i915_private *dev_priv = gvt->dev_priv; 44 u32 alloc_flag, search_flag; 45 u64 start, end, size; 46 struct drm_mm_node *node; 47 int retried = 0; 48 int ret; 49 50 if (high_gm) { 51 search_flag = DRM_MM_SEARCH_BELOW; 52 alloc_flag = DRM_MM_CREATE_TOP; 53 node = &vgpu->gm.high_gm_node; 54 size = vgpu_hidden_sz(vgpu); 55 start = gvt_hidden_gmadr_base(gvt); 56 end = gvt_hidden_gmadr_end(gvt); 57 } else { 58 search_flag = DRM_MM_SEARCH_DEFAULT; 59 alloc_flag = DRM_MM_CREATE_DEFAULT; 60 node = &vgpu->gm.low_gm_node; 61 size = vgpu_aperture_sz(vgpu); 62 start = gvt_aperture_gmadr_base(gvt); 63 end = gvt_aperture_gmadr_end(gvt); 64 } 65 66 mutex_lock(&dev_priv->drm.struct_mutex); 67 search_again: 68 ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm, 69 node, size, 4096, 0, 70 start, end, search_flag, 71 alloc_flag); 72 if (ret) { 73 ret = i915_gem_evict_something(&dev_priv->ggtt.base, 74 size, 4096, 0, start, end, 0); 75 if (ret == 0 && ++retried < 3) 76 goto search_again; 77 78 gvt_err("fail to alloc %s gm space from host, retried %d\n", 79 high_gm ? "high" : "low", retried); 80 } 81 mutex_unlock(&dev_priv->drm.struct_mutex); 82 return ret; 83 } 84 85 static int alloc_vgpu_gm(struct intel_vgpu *vgpu) 86 { 87 struct intel_gvt *gvt = vgpu->gvt; 88 struct drm_i915_private *dev_priv = gvt->dev_priv; 89 int ret; 90 91 ret = alloc_gm(vgpu, false); 92 if (ret) 93 return ret; 94 95 ret = alloc_gm(vgpu, true); 96 if (ret) 97 goto out_free_aperture; 98 99 gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id, 100 vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu)); 101 102 gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id, 103 vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu)); 104 105 return 0; 106 out_free_aperture: 107 mutex_lock(&dev_priv->drm.struct_mutex); 108 drm_mm_remove_node(&vgpu->gm.low_gm_node); 109 mutex_unlock(&dev_priv->drm.struct_mutex); 110 return ret; 111 } 112 113 static void free_vgpu_gm(struct intel_vgpu *vgpu) 114 { 115 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 116 117 mutex_lock(&dev_priv->drm.struct_mutex); 118 drm_mm_remove_node(&vgpu->gm.low_gm_node); 119 drm_mm_remove_node(&vgpu->gm.high_gm_node); 120 mutex_unlock(&dev_priv->drm.struct_mutex); 121 } 122 123 /** 124 * intel_vgpu_write_fence - write fence registers owned by a vGPU 125 * @vgpu: vGPU instance 126 * @fence: vGPU fence register number 127 * @value: Fence register value to be written 128 * 129 * This function is used to write fence registers owned by a vGPU. The vGPU 130 * fence register number will be translated into HW fence register number. 131 * 132 */ 133 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 134 u32 fence, u64 value) 135 { 136 struct intel_gvt *gvt = vgpu->gvt; 137 struct drm_i915_private *dev_priv = gvt->dev_priv; 138 struct drm_i915_fence_reg *reg; 139 i915_reg_t fence_reg_lo, fence_reg_hi; 140 141 assert_rpm_wakelock_held(dev_priv); 142 143 if (WARN_ON(fence > vgpu_fence_sz(vgpu))) 144 return; 145 146 reg = vgpu->fence.regs[fence]; 147 if (WARN_ON(!reg)) 148 return; 149 150 fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); 151 fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); 152 153 I915_WRITE(fence_reg_lo, 0); 154 POSTING_READ(fence_reg_lo); 155 156 I915_WRITE(fence_reg_hi, upper_32_bits(value)); 157 I915_WRITE(fence_reg_lo, lower_32_bits(value)); 158 POSTING_READ(fence_reg_lo); 159 } 160 161 static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 162 { 163 int i; 164 165 for (i = 0; i < vgpu_fence_sz(vgpu); i++) 166 intel_vgpu_write_fence(vgpu, i, 0); 167 } 168 169 static void free_vgpu_fence(struct intel_vgpu *vgpu) 170 { 171 struct intel_gvt *gvt = vgpu->gvt; 172 struct drm_i915_private *dev_priv = gvt->dev_priv; 173 struct drm_i915_fence_reg *reg; 174 u32 i; 175 176 if (WARN_ON(!vgpu_fence_sz(vgpu))) 177 return; 178 179 intel_runtime_pm_get(dev_priv); 180 181 mutex_lock(&dev_priv->drm.struct_mutex); 182 _clear_vgpu_fence(vgpu); 183 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 184 reg = vgpu->fence.regs[i]; 185 list_add_tail(®->link, 186 &dev_priv->mm.fence_list); 187 } 188 mutex_unlock(&dev_priv->drm.struct_mutex); 189 190 intel_runtime_pm_put(dev_priv); 191 } 192 193 static int alloc_vgpu_fence(struct intel_vgpu *vgpu) 194 { 195 struct intel_gvt *gvt = vgpu->gvt; 196 struct drm_i915_private *dev_priv = gvt->dev_priv; 197 struct drm_i915_fence_reg *reg; 198 int i; 199 struct list_head *pos, *q; 200 201 intel_runtime_pm_get(dev_priv); 202 203 /* Request fences from host */ 204 mutex_lock(&dev_priv->drm.struct_mutex); 205 i = 0; 206 list_for_each_safe(pos, q, &dev_priv->mm.fence_list) { 207 reg = list_entry(pos, struct drm_i915_fence_reg, link); 208 if (reg->pin_count || reg->vma) 209 continue; 210 list_del(pos); 211 vgpu->fence.regs[i] = reg; 212 if (++i == vgpu_fence_sz(vgpu)) 213 break; 214 } 215 if (i != vgpu_fence_sz(vgpu)) 216 goto out_free_fence; 217 218 _clear_vgpu_fence(vgpu); 219 220 mutex_unlock(&dev_priv->drm.struct_mutex); 221 intel_runtime_pm_put(dev_priv); 222 return 0; 223 out_free_fence: 224 /* Return fences to host, if fail */ 225 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 226 reg = vgpu->fence.regs[i]; 227 if (!reg) 228 continue; 229 list_add_tail(®->link, 230 &dev_priv->mm.fence_list); 231 } 232 mutex_unlock(&dev_priv->drm.struct_mutex); 233 intel_runtime_pm_put(dev_priv); 234 return -ENOSPC; 235 } 236 237 static void free_resource(struct intel_vgpu *vgpu) 238 { 239 struct intel_gvt *gvt = vgpu->gvt; 240 241 gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu); 242 gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu); 243 gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu); 244 } 245 246 static int alloc_resource(struct intel_vgpu *vgpu, 247 struct intel_vgpu_creation_params *param) 248 { 249 struct intel_gvt *gvt = vgpu->gvt; 250 unsigned long request, avail, max, taken; 251 const char *item; 252 253 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { 254 gvt_err("Invalid vGPU creation params\n"); 255 return -EINVAL; 256 } 257 258 item = "low GM space"; 259 max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; 260 taken = gvt->gm.vgpu_allocated_low_gm_size; 261 avail = max - taken; 262 request = MB_TO_BYTES(param->low_gm_sz); 263 264 if (request > avail) 265 goto no_enough_resource; 266 267 vgpu_aperture_sz(vgpu) = request; 268 269 item = "high GM space"; 270 max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; 271 taken = gvt->gm.vgpu_allocated_high_gm_size; 272 avail = max - taken; 273 request = MB_TO_BYTES(param->high_gm_sz); 274 275 if (request > avail) 276 goto no_enough_resource; 277 278 vgpu_hidden_sz(vgpu) = request; 279 280 item = "fence"; 281 max = gvt_fence_sz(gvt) - HOST_FENCE; 282 taken = gvt->fence.vgpu_allocated_fence_num; 283 avail = max - taken; 284 request = param->fence_sz; 285 286 if (request > avail) 287 goto no_enough_resource; 288 289 vgpu_fence_sz(vgpu) = request; 290 291 gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz); 292 gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz); 293 gvt->fence.vgpu_allocated_fence_num += param->fence_sz; 294 return 0; 295 296 no_enough_resource: 297 gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); 298 gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", 299 vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), 300 BYTES_TO_MB(max), BYTES_TO_MB(taken)); 301 return -ENOSPC; 302 } 303 304 /** 305 * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU 306 * @vgpu: a vGPU 307 * 308 * This function is used to free the HW resource owned by a vGPU. 309 * 310 */ 311 void intel_vgpu_free_resource(struct intel_vgpu *vgpu) 312 { 313 free_vgpu_gm(vgpu); 314 free_vgpu_fence(vgpu); 315 free_resource(vgpu); 316 } 317 318 /** 319 * intel_vgpu_reset_resource - reset resource state owned by a vGPU 320 * @vgpu: a vGPU 321 * 322 * This function is used to reset resource state owned by a vGPU. 323 * 324 */ 325 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) 326 { 327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 328 329 intel_runtime_pm_get(dev_priv); 330 _clear_vgpu_fence(vgpu); 331 intel_runtime_pm_put(dev_priv); 332 } 333 334 /** 335 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU 336 * @vgpu: vGPU 337 * @param: vGPU creation params 338 * 339 * This function is used to allocate HW resource for a vGPU. User specifies 340 * the resource configuration through the creation params. 341 * 342 * Returns: 343 * zero on success, negative error code if failed. 344 * 345 */ 346 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 347 struct intel_vgpu_creation_params *param) 348 { 349 int ret; 350 351 ret = alloc_resource(vgpu, param); 352 if (ret) 353 return ret; 354 355 ret = alloc_vgpu_gm(vgpu); 356 if (ret) 357 goto out_free_resource; 358 359 ret = alloc_vgpu_fence(vgpu); 360 if (ret) 361 goto out_free_vgpu_gm; 362 363 return 0; 364 365 out_free_vgpu_gm: 366 free_vgpu_gm(vgpu); 367 out_free_resource: 368 free_resource(vgpu); 369 return ret; 370 } 371