// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/pm_runtime.h>

#include "iris_core.h"
#include "iris_hfi_queue.h"
#include "iris_vpu_common.h"

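/*
 * Copy one HFI packet into the shared interface queue, wrapping around the
 * end of the queue buffer when needed, and publish the new write index for
 * the firmware. Returns -ENOSPC when the queue does not have enough room
 * for the packet.
 */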
static int iris_hfi_queue_write(struct iris_iface_q_info *qinfo, void *packet, u32 packet_size)
{
	struct iris_hfi_queue_header *queue = qinfo->qhdr;
	u32 write_idx = queue->write_idx * sizeof(u32);
	u32 read_idx = queue->read_idx * sizeof(u32);
	u32 empty_space, new_write_idx, residue;
	u32 *write_ptr;

	if (write_idx < read_idx)
		empty_space = read_idx - write_idx;
	else
		empty_space = IFACEQ_QUEUE_SIZE - (write_idx - read_idx);
	if (empty_space < packet_size)
		return -ENOSPC;

	queue->tx_req = 0;

	new_write_idx = write_idx + packet_size;
	write_ptr = (u32 *)((u8 *)qinfo->kernel_vaddr + write_idx);

	if (write_ptr < (u32 *)qinfo->kernel_vaddr ||
	    write_ptr > (u32 *)(qinfo->kernel_vaddr +
	    IFACEQ_QUEUE_SIZE))
		return -EINVAL;

	if (new_write_idx < IFACEQ_QUEUE_SIZE) {
		memcpy(write_ptr, packet, packet_size);
	} else {
		residue = new_write_idx - IFACEQ_QUEUE_SIZE;
		memcpy(write_ptr, packet, (packet_size - residue));
		memcpy(qinfo->kernel_vaddr,
		       packet + (packet_size - residue), residue);
		new_write_idx = residue;
	}

	/* Make sure packet is written before updating the write index */
	mb();
	queue->write_idx = new_write_idx / sizeof(u32);

	/* Make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}

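/*
 * Copy one HFI packet out of the shared interface queue into @packet,
 * handling wrap-around at the end of the queue buffer. Returns -ENODATA
 * when the queue is empty and -EBADMSG when the packet header advertises
 * a size larger than IFACEQ_CORE_PKT_SIZE, in which case the read index
 * is fast-forwarded to the write index to drop the bad data.
 */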
static int iris_hfi_queue_read(struct iris_iface_q_info *qinfo, void *packet)
{
	struct iris_hfi_queue_header *queue = qinfo->qhdr;
	u32 write_idx = queue->write_idx * sizeof(u32);
	u32 read_idx = queue->read_idx * sizeof(u32);
	u32 packet_size, receive_request = 0;
	u32 new_read_idx, residue;
	u32 *read_ptr;
	int ret = 0;

	if (queue->queue_type == IFACEQ_MSGQ_ID)
		receive_request = 1;

	if (read_idx == write_idx) {
		queue->rx_req = receive_request;
		/* Ensure qhdr is updated in main memory */
		mb();
		return -ENODATA;
	}

	read_ptr = qinfo->kernel_vaddr + read_idx;
	if (read_ptr < (u32 *)qinfo->kernel_vaddr ||
	    read_ptr > (u32 *)(qinfo->kernel_vaddr +
	    IFACEQ_QUEUE_SIZE - sizeof(*read_ptr)))
		return -ENODATA;

	packet_size = *read_ptr;
	if (!packet_size)
		return -EINVAL;

	new_read_idx = read_idx + packet_size;
	if (packet_size <= IFACEQ_CORE_PKT_SIZE) {
		if (new_read_idx < IFACEQ_QUEUE_SIZE) {
			memcpy(packet, read_ptr, packet_size);
		} else {
			residue = new_read_idx - IFACEQ_QUEUE_SIZE;
			memcpy(packet, read_ptr, (packet_size - residue));
			memcpy((packet + (packet_size - residue)),
			       qinfo->kernel_vaddr, residue);
			new_read_idx = residue;
		}
	} else {
		new_read_idx = write_idx;
		ret = -EBADMSG;
	}

	queue->rx_req = receive_request;

	queue->read_idx = new_read_idx / sizeof(u32);
	/* Ensure qhdr is updated in main memory */
	mb();

	return ret;
}

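/*
 * Write a command packet to the command queue and raise an interrupt to
 * the VPU so it picks the packet up. The caller is expected to hold
 * core->lock.
 */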
int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size)
{
	struct iris_iface_q_info *q_info = &core->command_queue;

	if (core->state == IRIS_CORE_ERROR || core->state == IRIS_CORE_DEINIT)
		return -EINVAL;

	if (!iris_hfi_queue_write(q_info, pkt, pkt_size)) {
		iris_vpu_raise_interrupt(core);
	} else {
		dev_err(core->dev, "queue full\n");
		return -ENODATA;
	}

	return 0;
}

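/*
 * Runtime-PM aware wrapper around iris_hfi_queue_cmd_write_locked():
 * resume the device, queue the command under core->lock and then let the
 * device autosuspend again once the write has completed.
 */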
int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
{
	int ret;

	ret = pm_runtime_resume_and_get(core->dev);
	if (ret < 0)
		goto exit;

	mutex_lock(&core->lock);
	ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt_size);
	if (ret) {
		mutex_unlock(&core->lock);
		goto exit;
	}
	mutex_unlock(&core->lock);

	pm_runtime_put_autosuspend(core->dev);

	return 0;

exit:
	pm_runtime_put_sync(core->dev);

	return ret;
}

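/*
 * Read one response packet from the message queue. Reads are only allowed
 * while the core is in the IRIS_CORE_INIT state.
 */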
int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt)
{
	struct iris_iface_q_info *q_info = &core->message_queue;
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->state != IRIS_CORE_INIT) {
		ret = -EINVAL;
		goto unlock;
	}

	if (iris_hfi_queue_read(q_info, pkt)) {
		ret = -ENODATA;
		goto unlock;
	}

unlock:
	mutex_unlock(&core->lock);

	return ret;
}

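/*
 * Read one debug packet from the debug queue. As with the message queue,
 * reads are only allowed while the core is in the IRIS_CORE_INIT state.
 */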
int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt)
{
	struct iris_iface_q_info *q_info = &core->debug_queue;
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->state != IRIS_CORE_INIT) {
		ret = -EINVAL;
		goto unlock;
	}

	if (iris_hfi_queue_read(q_info, pkt)) {
		ret = -ENODATA;
		goto unlock;
	}

unlock:
	mutex_unlock(&core->lock);

	return ret;
}

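/*
 * Fill in the queue header shared with the firmware: queue type, base
 * device address, queue size, watermarks and the read/write indices.
 */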
static void iris_hfi_queue_set_header(struct iris_core *core, u32 queue_id,
				      struct iris_iface_q_info *iface_q)
{
	iface_q->qhdr->status = 0x1;
	iface_q->qhdr->start_addr = iface_q->device_addr;
	iface_q->qhdr->header_type = IFACEQ_DFLT_QHDR;
	iface_q->qhdr->queue_type = queue_id;
	iface_q->qhdr->q_size = IFACEQ_QUEUE_SIZE / sizeof(u32);
	iface_q->qhdr->pkt_size = 0; /* variable packet size */
	iface_q->qhdr->rx_wm = 0x1;
	iface_q->qhdr->tx_wm = 0x1;
	iface_q->qhdr->rx_req = 0x1;
	iface_q->qhdr->tx_req = 0x0;
	iface_q->qhdr->rx_irq_status = 0x0;
	iface_q->qhdr->tx_irq_status = 0x0;
	iface_q->qhdr->read_idx = 0x0;
	iface_q->qhdr->write_idx = 0x0;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	if (queue_id == IFACEQ_DBGQ_ID)
		iface_q->qhdr->rx_req = 0;
}

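/*
 * Carve one interface queue out of the queue table allocation: the queue
 * payload follows the queue table header, one IFACEQ_QUEUE_SIZE slot per
 * queue id.
 */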
static void
iris_hfi_queue_init(struct iris_core *core, u32 queue_id, struct iris_iface_q_info *iface_q)
{
	struct iris_hfi_queue_table_header *q_tbl_hdr = core->iface_q_table_vaddr;
	u32 offset = sizeof(*q_tbl_hdr) + (queue_id * IFACEQ_QUEUE_SIZE);

	iface_q->device_addr = core->iface_q_table_daddr + offset;
	iface_q->kernel_vaddr =
		(void *)((char *)core->iface_q_table_vaddr + offset);
	iface_q->qhdr = &q_tbl_hdr->q_hdr[queue_id];

	iris_hfi_queue_set_header(core, queue_id, iface_q);
}

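/* Reset the bookkeeping for a single interface queue. */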
static void iris_hfi_queue_deinit(struct iris_iface_q_info *iface_q)
{
	iface_q->qhdr = NULL;
	iface_q->kernel_vaddr = NULL;
	iface_q->device_addr = 0;
}

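/*
 * Allocate the DMA memory shared with the firmware (queue table plus SFR
 * region), set up the command, message and debug queues and fill in the
 * queue table header.
 */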
int iris_hfi_queues_init(struct iris_core *core)
{
	struct iris_hfi_queue_table_header *q_tbl_hdr;
	u32 queue_size;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN((sizeof(*q_tbl_hdr) + (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ)), SZ_4K);
	core->iface_q_table_vaddr = dma_alloc_attrs(core->dev, queue_size,
						    &core->iface_q_table_daddr,
						    GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
	if (!core->iface_q_table_vaddr) {
		dev_err(core->dev, "queues alloc and map failed\n");
		return -ENOMEM;
	}

	core->sfr_vaddr = dma_alloc_attrs(core->dev, SFR_SIZE,
					  &core->sfr_daddr,
					  GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
	if (!core->sfr_vaddr) {
		dev_err(core->dev, "sfr alloc and map failed\n");
		/* Free with the same size the queue table was allocated with */
		dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
			       core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
		return -ENOMEM;
	}

	iris_hfi_queue_init(core, IFACEQ_CMDQ_ID, &core->command_queue);
	iris_hfi_queue_init(core, IFACEQ_MSGQ_ID, &core->message_queue);
	iris_hfi_queue_init(core, IFACEQ_DBGQ_ID, &core->debug_queue);

	q_tbl_hdr = (struct iris_hfi_queue_table_header *)core->iface_q_table_vaddr;
	q_tbl_hdr->version = 0;
	q_tbl_hdr->device_addr = (void *)core;
	strscpy(q_tbl_hdr->name, "iris-hfi-queues", sizeof(q_tbl_hdr->name));
	q_tbl_hdr->size = sizeof(*q_tbl_hdr);
	q_tbl_hdr->qhdr0_offset = sizeof(*q_tbl_hdr) -
		(IFACEQ_NUMQ * sizeof(struct iris_hfi_queue_header));
	q_tbl_hdr->qhdr_size = sizeof(q_tbl_hdr->q_hdr[0]);
	q_tbl_hdr->num_q = IFACEQ_NUMQ;
	q_tbl_hdr->num_active_q = IFACEQ_NUMQ;

	/* Write sfr size in first word to be used by firmware */
	*((u32 *)core->sfr_vaddr) = SFR_SIZE;

	return 0;
}

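/*
 * Free the shared SFR and queue table memory and clear the per-queue
 * bookkeeping. Does nothing if the queues were never allocated.
 */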
void iris_hfi_queues_deinit(struct iris_core *core)
{
	u32 queue_size;

	if (!core->iface_q_table_vaddr)
		return;

	iris_hfi_queue_deinit(&core->debug_queue);
	iris_hfi_queue_deinit(&core->message_queue);
	iris_hfi_queue_deinit(&core->command_queue);

	dma_free_attrs(core->dev, SFR_SIZE, core->sfr_vaddr,
		       core->sfr_daddr, DMA_ATTR_WRITE_COMBINE);

	core->sfr_vaddr = NULL;
	core->sfr_daddr = 0;

	queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
			   (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);

	dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
		       core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);

	core->iface_q_table_vaddr = NULL;
	core->iface_q_table_daddr = 0;
}