/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_RING_H
#define INTEL_RING_H

#include "i915_gem.h" /* GEM_BUG_ON */
#include "i915_request.h"
#include "intel_ring_types.h"

struct intel_engine_cs;

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);

unsigned int intel_ring_update_space(struct intel_ring *ring);

void __intel_ring_pin(struct intel_ring *ring);
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);

void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}

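/*
 * Illustrative sketch (added; not from the original header): take a
 * reference across any window where the ring might otherwise be freed.
 * The context pointer below is hypothetical.
 *
 *	struct intel_ring *ring = intel_ring_get(ce->ring);
 *
 *	... use ring ...
 *
 *	intel_ring_put(ring);
 */
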
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
	GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
}

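/*
 * Illustrative sketch (added; not from the original header): the
 * canonical pairing with intel_ring_begin(). The dword count passed to
 * intel_ring_begin() must match the number of dwords emitted before
 * intel_ring_advance(), which the GEM_BUG_ON above checks.
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(rq, cs);
 */
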
static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

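/*
 * Worked example (added; assuming a 4 KiB ring, i.e. ring->size == 4096):
 * intel_ring_wrap(ring, 4100) == 4100 & 4095 == 4, i.e. positions are
 * reduced modulo the power-of-two ring size.
 */
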
static inline int intel_ring_direction(const struct intel_ring *ring,
				       u32 next, u32 prev)
{
	typecheck(typeof(ring->size), next);
	typecheck(typeof(ring->size), prev);
	return (next - prev) << ring->wrap;
}

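/*
 * Note (added commentary): the shift by ring->wrap moves the top bit of
 * the modulo-ring-size difference into the sign bit, so the result is
 * positive when next lies ahead of prev (by less than half the ring),
 * negative when it lies behind, and zero when they are equal.
 */
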
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

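/*
 * Note (added commentary): for a power-of-two ring->size, -ring->size
 * has every bit at and above ilog2(size) set, so "pos & -ring->size" is
 * non-zero exactly when pos >= ring->size. E.g. for a 4 KiB ring,
 * -4096 == 0xfffff000 and pos == 4100 gives 4100 & 0xfffff000 == 0x1000,
 * hence the position is rejected.
 */
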
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

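/*
 * Worked example (added; assuming a 4 KiB ring): an addr just past the
 * last byte of the ring gives offset == 4096, which intel_ring_wrap()
 * reduces to 0, the safe encoding noted in the comment above.
 */
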
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	unsigned int head = READ_ONCE(ring->head);

	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual
	 * RING_HEAD; it may have advanced since, but in the worst case it
	 * is exactly equal to ring->head, and so we should never program
	 * RING_TAIL to advance into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}

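/*
 * Worked example (added; assuming CACHELINE_BYTES == 64): head == 70
 * and tail == 66 both round down to cacheline 64 with tail < head,
 * violating the rule quoted above and tripping the GEM_BUG_ON.
 */
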
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists: although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

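/*
 * Worked example (added; assuming CACHELINE_BYTES == 64 and a 4 KiB
 * ring): an empty ring with head == tail reports (0 - 0 - 64) & 4095 ==
 * 4032 bytes free, deliberately keeping one cacheline of slack so the
 * tail is never advanced into the cacheline holding the head.
 */
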
#endif /* INTEL_RING_H */