// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"

unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma)) {
		addr = (void __force *)i915_vma_pin_iomap(vma);
	} else {
		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);

		addr = i915_gem_object_pin_map(vma->obj, type);
	}

	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}
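
/*
 * Illustrative sketch (not part of the driver; the function name is
 * hypothetical): how callers pair the pin/unpin entry points above. The
 * first intel_ring_pin() binds and maps the ring, populating ring->vaddr;
 * subsequent pins via __intel_ring_pin() are only a refcount bump. Every
 * pin must be balanced by intel_ring_unpin(), and the final unpin drops
 * the mapping and marks the backing store purgeable again.
 */
static int __maybe_unused example_ring_pin_lifecycle(struct intel_ring *ring,
						     struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);	/* first pin: bind and map */
	if (err)
		return err;

	__intel_ring_pin(ring);		/* nested pin: refcount bump only */

	intel_ring_unpin(ring);		/* drop the nested pin */
	intel_ring_unpin(ring);		/* last unpin: unmap, make purgeable */

	return 0;
}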

static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	/* Prefer device-local memory, falling back to stolen, then internal pages. */
	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from the GPU side (so no stray
	 * overwrites) if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	/* Bits above ilog2(size), used to sign-extend ring offset deltas. */
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
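
/*
 * Worked example of the space accounting above (values are hypothetical):
 * __intel_ring_space() in intel_ring.h computes
 * (head - tail - CACHELINE_BYTES) & (size - 1), deliberately keeping one
 * cacheline of slack so the hardware head and the software tail never sit
 * on the same cacheline. For a 4 KiB ring with head == 512 and
 * emit == 3584, that yields (512 - 3584 - 64) & 4095 == 960 bytes free;
 * a caller needing more than 960 bytes stalls in wait_for_space() until
 * an older request on this ring is retired past its postfix.
 */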

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of the ring buffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	/* cs is void *, so advance by bytes, not dwords */
	intel_ring_advance(rq, cs + num_dwords * sizeof(u32));

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif
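
/*
 * Illustrative sketch (not part of the driver; the function name is
 * hypothetical): the canonical pattern bracketed by intel_ring_begin()
 * and intel_ring_advance(). The dword count must be even (packets are
 * qword aligned), every reserved dword must be written, and the advanced
 * pointer must land exactly on ring->vaddr + ring->emit.
 */
static int __maybe_unused example_emit_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);	/* reserve two dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);	/* verifies cs == vaddr + emit */

	return 0;
}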