/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Pei Zhang <pei.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	unsigned int flags;
	u64 start, end, size;
	struct drm_mm_node *node;
	int ret;

	if (high_gm) {
		node = &vgpu->gm.high_gm_node;
		size = vgpu_hidden_sz(vgpu);
		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_HIGH;
	} else {
		node = &vgpu->gm.low_gm_node;
		size = vgpu_aperture_sz(vgpu);
		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_MAPPABLE;
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
				  size, I915_GTT_PAGE_SIZE,
				  I915_COLOR_UNEVICTABLE,
				  start, end, flags);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (ret)
		gvt_err("fail to alloc %s gm space from host\n",
			high_gm ? "high" : "low");

	return ret;
}
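/*
 * alloc_vgpu_gm - reserve both the low (aperture/mappable) and the high
 * (hidden) graphics memory ranges of a vGPU in the host GGTT. If the high
 * range cannot be reserved, the already-reserved low range is released again.
 */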
"high" : "low"); 72 73 return ret; 74 } 75 76 static int alloc_vgpu_gm(struct intel_vgpu *vgpu) 77 { 78 struct intel_gvt *gvt = vgpu->gvt; 79 struct drm_i915_private *dev_priv = gvt->dev_priv; 80 int ret; 81 82 ret = alloc_gm(vgpu, false); 83 if (ret) 84 return ret; 85 86 ret = alloc_gm(vgpu, true); 87 if (ret) 88 goto out_free_aperture; 89 90 gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id, 91 vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu)); 92 93 gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id, 94 vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu)); 95 96 return 0; 97 out_free_aperture: 98 mutex_lock(&dev_priv->drm.struct_mutex); 99 drm_mm_remove_node(&vgpu->gm.low_gm_node); 100 mutex_unlock(&dev_priv->drm.struct_mutex); 101 return ret; 102 } 103 104 static void free_vgpu_gm(struct intel_vgpu *vgpu) 105 { 106 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 107 108 mutex_lock(&dev_priv->drm.struct_mutex); 109 drm_mm_remove_node(&vgpu->gm.low_gm_node); 110 drm_mm_remove_node(&vgpu->gm.high_gm_node); 111 mutex_unlock(&dev_priv->drm.struct_mutex); 112 } 113 114 /** 115 * intel_vgpu_write_fence - write fence registers owned by a vGPU 116 * @vgpu: vGPU instance 117 * @fence: vGPU fence register number 118 * @value: Fence register value to be written 119 * 120 * This function is used to write fence registers owned by a vGPU. The vGPU 121 * fence register number will be translated into HW fence register number. 122 * 123 */ 124 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 125 u32 fence, u64 value) 126 { 127 struct intel_gvt *gvt = vgpu->gvt; 128 struct drm_i915_private *dev_priv = gvt->dev_priv; 129 struct drm_i915_fence_reg *reg; 130 i915_reg_t fence_reg_lo, fence_reg_hi; 131 132 assert_rpm_wakelock_held(dev_priv); 133 134 if (WARN_ON(fence > vgpu_fence_sz(vgpu))) 135 return; 136 137 reg = vgpu->fence.regs[fence]; 138 if (WARN_ON(!reg)) 139 return; 140 141 fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); 142 fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); 143 144 I915_WRITE(fence_reg_lo, 0); 145 POSTING_READ(fence_reg_lo); 146 147 I915_WRITE(fence_reg_hi, upper_32_bits(value)); 148 I915_WRITE(fence_reg_lo, lower_32_bits(value)); 149 POSTING_READ(fence_reg_lo); 150 } 151 152 static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 153 { 154 int i; 155 156 for (i = 0; i < vgpu_fence_sz(vgpu); i++) 157 intel_vgpu_write_fence(vgpu, i, 0); 158 } 159 160 static void free_vgpu_fence(struct intel_vgpu *vgpu) 161 { 162 struct intel_gvt *gvt = vgpu->gvt; 163 struct drm_i915_private *dev_priv = gvt->dev_priv; 164 struct drm_i915_fence_reg *reg; 165 u32 i; 166 167 if (WARN_ON(!vgpu_fence_sz(vgpu))) 168 return; 169 170 intel_runtime_pm_get(dev_priv); 171 172 mutex_lock(&dev_priv->drm.struct_mutex); 173 _clear_vgpu_fence(vgpu); 174 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 175 reg = vgpu->fence.regs[i]; 176 list_add_tail(®->link, 177 &dev_priv->mm.fence_list); 178 } 179 mutex_unlock(&dev_priv->drm.struct_mutex); 180 181 intel_runtime_pm_put(dev_priv); 182 } 183 184 static int alloc_vgpu_fence(struct intel_vgpu *vgpu) 185 { 186 struct intel_gvt *gvt = vgpu->gvt; 187 struct drm_i915_private *dev_priv = gvt->dev_priv; 188 struct drm_i915_fence_reg *reg; 189 int i; 190 struct list_head *pos, *q; 191 192 intel_runtime_pm_get(dev_priv); 193 194 /* Request fences from host */ 195 mutex_lock(&dev_priv->drm.struct_mutex); 196 i = 0; 197 list_for_each_safe(pos, q, &dev_priv->mm.fence_list) { 198 reg = list_entry(pos, struct drm_i915_fence_reg, link); 199 if 
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct drm_i915_fence_reg *reg;
	int i;
	struct list_head *pos, *q;

	intel_runtime_pm_get(dev_priv);

	/* Request fences from host */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i = 0;
	list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
		reg = list_entry(pos, struct drm_i915_fence_reg, link);
		if (reg->pin_count || reg->vma)
			continue;
		list_del(pos);
		vgpu->fence.regs[i] = reg;
		if (++i == vgpu_fence_sz(vgpu))
			break;
	}
	if (i != vgpu_fence_sz(vgpu))
		goto out_free_fence;

	_clear_vgpu_fence(vgpu);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_put(dev_priv);
	return 0;
out_free_fence:
	/* Return the claimed fences to the host on failure */
	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = vgpu->fence.regs[i];
		if (!reg)
			continue;
		list_add_tail(&reg->link,
			      &dev_priv->mm.fence_list);
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_put(dev_priv);
	return -ENOSPC;
}

static void free_resource(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
	gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
	gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}
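/*
 * alloc_resource() below only does bookkeeping: for each of low GM, high GM
 * and fences it checks the request against what the host still has available
 * (avail = max - taken) and, if it fits, records the new allocation totals.
 *
 * Illustrative example (hypothetical numbers, not the real HOST_* reserves):
 * if the usable low GM were 512 MB in total and 384 MB had already been
 * handed out to other vGPUs, a request for 96 MB would succeed (96 <= 128),
 * while a request for 192 MB would fail with -ENOSPC.
 */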
static int alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned long request, avail, max, taken;
	const char *item;

	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
		gvt_vgpu_err("Invalid vGPU creation params\n");
		return -EINVAL;
	}

	item = "low GM space";
	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_low_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->low_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "high GM space";
	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_high_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->high_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "fence";
	max = gvt_fence_sz(gvt) - HOST_FENCE;
	taken = gvt->fence.vgpu_allocated_fence_num;
	avail = max - taken;
	request = param->fence_sz;

	if (request > avail)
		goto no_enough_resource;

	vgpu_fence_sz(vgpu) = request;

	gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
	gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
	gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
	return 0;

no_enough_resource:
	gvt_vgpu_err("fail to allocate resource %s\n", item);
	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
		     BYTES_TO_MB(request), BYTES_TO_MB(avail),
		     BYTES_TO_MB(max), BYTES_TO_MB(taken));
	return -ENOSPC;
}

/**
 * intel_vgpu_free_resource - free HW resource owned by a vGPU
 * @vgpu: a vGPU
 *
 * This function is used to free the HW resource owned by a vGPU.
 *
 */
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
	free_vgpu_gm(vgpu);
	free_vgpu_fence(vgpu);
	free_resource(vgpu);
}

/**
 * intel_vgpu_reset_resource - reset resource state owned by a vGPU
 * @vgpu: a vGPU
 *
 * This function is used to reset resource state owned by a vGPU.
 *
 */
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	intel_runtime_pm_get(dev_priv);
	_clear_vgpu_fence(vgpu);
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
 * @vgpu: vGPU
 * @param: vGPU creation params
 *
 * This function is used to allocate HW resource for a vGPU. User specifies
 * the resource configuration through the creation params.
 *
 * Returns:
 * zero on success, negative error code if failed.
 *
 */
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	int ret;

	ret = alloc_resource(vgpu, param);
	if (ret)
		return ret;

	ret = alloc_vgpu_gm(vgpu);
	if (ret)
		goto out_free_resource;

	ret = alloc_vgpu_fence(vgpu);
	if (ret)
		goto out_free_vgpu_gm;

	return 0;

out_free_vgpu_gm:
	free_vgpu_gm(vgpu);
out_free_resource:
	free_resource(vgpu);
	return ret;
}
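/*
 * Typical call sequence (illustrative sketch only; the real caller lives
 * elsewhere in GVT-g, e.g. the vGPU create/destroy path, and the parameter
 * values below are made up):
 *
 *	struct intel_vgpu_creation_params param = {
 *		.low_gm_sz  = 64,	// MB
 *		.high_gm_sz = 448,	// MB
 *		.fence_sz   = 4,
 *	};
 *
 *	ret = intel_vgpu_alloc_resource(vgpu, &param);
 *	if (ret)
 *		return ret;
 *
 *	// ... on vGPU reset ...
 *	intel_vgpu_reset_resource(vgpu);
 *
 *	// ... on vGPU destruction ...
 *	intel_vgpu_free_resource(vgpu);
 */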