// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 */

#include "cx18-driver.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-scb.h"
#include "cx18-io.h"

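/*
 * MDL/buffer rotation implemented in this file: cx18_load_queues() builds
 * MDLs out of buf_pool buffers and enqueues them on q_free; _cx18_enqueue()
 * caps q_busy at the firmware limit CX18_MAX_FW_MDLS_PER_STREAM;
 * cx18_queue_get_mdl() pulls MDLs the firmware has completed off q_busy so
 * the caller can move them to q_full; flushing returns MDLs to q_free, and
 * cx18_unload_queues() parks everything on q_idle and back in buf_pool.
 */
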
void cx18_buf_swap(struct cx18_buffer *buf)
{
	int i;

	/* Byte swap every 32-bit word of payload in the buffer */
	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx18_buf_swap(buf);
	}
}

void cx18_queue_init(struct cx18_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	atomic_set(&q->depth, 0);
	q->bytesused = 0;
}

struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
				 struct cx18_queue *q, int to_front)
{
	/* clear the mdl if it is not to be enqueued to the full queue */
	if (q != &s->q_full) {
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
	}

	/* q_busy is restricted to a max buffer count imposed by firmware */
	if (q == &s->q_busy &&
	    atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
		q = &s->q_free;

	spin_lock(&q->lock);

	if (to_front)
		list_add(&mdl->list, &q->list); /* LIFO */
	else
		list_add_tail(&mdl->list, &q->list); /* FIFO */
	q->bytesused += mdl->bytesused - mdl->readpos;
	atomic_inc(&q->depth);

	spin_unlock(&q->lock);
	return q;
}

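/*
 * Note: callers below use cx18_enqueue() and cx18_push(); assuming the
 * declarations in cx18-queue.h match this file, they are thin inline
 * wrappers around _cx18_enqueue() with to_front == 0 (FIFO add to the
 * tail) and to_front != 0 (LIFO push to the front) respectively.
 */
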
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
	struct cx18_mdl *mdl = NULL;

	spin_lock(&q->lock);
	if (!list_empty(&q->list)) {
		mdl = list_first_entry(&q->list, struct cx18_mdl, list);
		list_del_init(&mdl->list);
		q->bytesused -= mdl->bytesused - mdl->readpos;
		mdl->skipped = 0;
		atomic_dec(&q->depth);
	}
	spin_unlock(&q->lock);
	return mdl;
}

static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
					  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;
	u32 buf_size = s->buf_size;
	u32 bytesused = mdl->bytesused;

	/* Distribute the MDL's byte count across its buffers in order */
	list_for_each_entry(buf, &mdl->buf_list, list) {
		buf->readpos = 0;
		if (bytesused >= buf_size) {
			buf->bytesused = buf_size;
			bytesused -= buf_size;
		} else {
			buf->bytesused = bytesused;
			bytesused = 0;
		}
		cx18_buf_sync_for_cpu(s, buf);
	}
}

static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
						struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		buf->bytesused = mdl->bytesused;
		buf->readpos = 0;
		cx18_buf_sync_for_cpu(s, buf);
	} else {
		_cx18_mdl_update_bufs_for_cpu(s, mdl);
	}
}

struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
				    u32 bytesused)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_mdl *tmp;
	struct cx18_mdl *ret = NULL;
	LIST_HEAD(sweep_up);

	/*
	 * The MDL the firmware just completed should be at the front of
	 * q_busy; an MDL that keeps getting passed over has likely missed
	 * its "done" message from the firmware.
	 */
	spin_lock(&s->q_busy.lock);
	list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
		if (mdl->id != id) {
			mdl->skipped++;
			if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
				CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
					  s->name, mdl->id,
					  mdl->skipped);
				/* Sweep it up to put it back into rotation */
				list_move_tail(&mdl->list, &sweep_up);
				atomic_dec(&s->q_busy.depth);
			}
			continue;
		}
		/* Found the MDL the firmware reported done; pull it off */
		list_del_init(&mdl->list);
		atomic_dec(&s->q_busy.depth);
		ret = mdl;
		break;
	}
	spin_unlock(&s->q_busy.lock);

	/* Get the found MDL ready for the caller to put on q_full */
	if (ret != NULL) {
		ret->bytesused = bytesused;
		ret->skipped = 0;
		cx18_mdl_update_bufs_for_cpu(s, ret);
		if (s->type != CX18_ENC_STREAM_TYPE_TS)
			set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
	}

	/* Put any MDLs the firmware is ignoring back into normal rotation */
	list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
		list_del_init(&mdl->list);
		cx18_enqueue(s, mdl, &s->q_free);
	}
	return ret;
}

/* Move all MDLs of a queue, while flushing each MDL */
static void cx18_queue_flush(struct cx18_stream *s,
			     struct cx18_queue *q_src, struct cx18_queue *q_dst)
{
	struct cx18_mdl *mdl;

	/* It only makes sense to flush to q_free or q_idle */
	if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
		return;

	spin_lock(&q_src->lock);
	spin_lock(&q_dst->lock);
	while (!list_empty(&q_src->list)) {
		mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
		list_move_tail(&mdl->list, &q_dst->list);
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
		atomic_inc(&q_dst->depth);
	}
	cx18_queue_init(q_src);
	spin_unlock(&q_src->lock);
	spin_unlock(&q_dst->lock);
}

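/*
 * cx18_queue_flush() above is the only spot in this file where two queue
 * locks are nested; the guard restricting q_dst to q_free or q_idle keeps
 * the nesting roles fixed, so no inverse lock ordering can arise here.
 */
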
void cx18_flush_queues(struct cx18_stream *s)
{
	cx18_queue_flush(s, &s->q_busy, &s->q_free);
	cx18_queue_flush(s, &s->q_full, &s->q_free);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
	struct cx18_queue *q_idle = &s->q_idle;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;

	/* Move all MDLs to q_idle */
	cx18_queue_flush(s, &s->q_busy, q_idle);
	cx18_queue_flush(s, &s->q_full, q_idle);
	cx18_queue_flush(s, &s->q_free, q_idle);

	/* Reset MDL id's and move all buffers back to the stream's buf_pool */
	spin_lock(&q_idle->lock);
	list_for_each_entry(mdl, &q_idle->list, list) {
		while (!list_empty(&mdl->buf_list)) {
			buf = list_first_entry(&mdl->buf_list,
					       struct cx18_buffer, list);
			list_move_tail(&buf->list, &s->buf_pool);
			buf->bytesused = 0;
			buf->readpos = 0;
		}
		mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
		/* all other mdl fields were cleared by cx18_queue_flush() */
	}
	spin_unlock(&q_idle->lock);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	int mdl_id;
	int i;
	u32 partial_buf_size;

	/*
	 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free;
	 * excess MDLs are left on q_idle, excess buffers in buf_pool.
	 */
	mdl_id = s->mdl_base_idx;
	for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
	     mdl != NULL && i == s->bufs_per_mdl;
	     mdl = cx18_dequeue(s, &s->q_idle)) {

		mdl->id = mdl_id;

		for (i = 0; i < s->bufs_per_mdl; i++) {
			if (list_empty(&s->buf_pool))
				break;

			buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
					       list);
			list_move_tail(&buf->list, &mdl->buf_list);

			/* update the firmware's MDL array with this buffer */
			cx18_writel(cx, buf->dma_handle,
				    &cx->scb->cpu_mdl[mdl_id + i].paddr);
			cx18_writel(cx, s->buf_size,
				    &cx->scb->cpu_mdl[mdl_id + i].length);
		}

		if (i == s->bufs_per_mdl) {
			/*
			 * The encoder doesn't honor s->mdl_size. So in the
			 * case of a non-integral number of buffers to meet
			 * mdl_size, we lie about the size of the last buffer
			 * in the MDL to get the encoder to really only send
			 * us mdl_size bytes per MDL transfer.
			 */
			partial_buf_size = s->mdl_size % s->buf_size;
			if (partial_buf_size)
				cx18_writel(cx, partial_buf_size,
					    &cx->scb->cpu_mdl[mdl_id + i - 1].length);
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			/* Not enough buffers for this MDL; stash it unused */
			cx18_push(s, mdl, &s->q_idle);
		}
		mdl_id += i;
	}
}

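/*
 * Worked example of the end-of-MDL adjustment above, with illustrative
 * numbers only: if s->buf_size = 0x6000, s->bufs_per_mdl = 3 and
 * s->mdl_size = 0x10000, then partial_buf_size = 0x10000 % 0x6000 = 0x4000,
 * so the last buffer's length is reported to the firmware as 0x4000 and an
 * MDL transfer delivers exactly 0x10000 bytes, not 3 * 0x6000 = 0x12000.
 */
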
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	int dma = s->dma;
	u32 buf_size = s->buf_size;
	struct pci_dev *pci_dev = s->cx->pci_dev;
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list)
		dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
					   buf_size, dma);
}

int cx18_stream_alloc(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	int i;

	if (s->buffers == 0)
		return 0;

	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
			s->name, s->buffers, s->buf_size,
			s->buffers * s->buf_size / 1024,
			(s->buffers * s->buf_size * 100 / 1024) % 100);

	if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
	     (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
		unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
				  ((char __iomem *)cx->scb->cpu_mdl));

		CX18_ERR("Too many buffers, cannot fit in SCB area\n");
		CX18_ERR("Max buffers = %zu\n",
			 bufsz / sizeof(struct cx18_mdl_ent));
		return -ENOMEM;
	}

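	/*
	 * Each MDL consumes one struct cx18_mdl_ent slot in the SCB's
	 * reserved area; the check above confirmed that this stream's
	 * s->buffers slots, starting at cx->free_mdl_idx, still fit inside
	 * SCB_RESERVED_SIZE before any slot indices are claimed below.
	 */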
	s->mdl_base_idx = cx->free_mdl_idx;

	/* allocate stream buffers and MDLs */
	for (i = 0; i < s->buffers; i++) {
		struct cx18_mdl *mdl;
		struct cx18_buffer *buf;

		/* 1 MDL per buffer to handle the worst & also default case */
		mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
		if (mdl == NULL)
			break;

		buf = kzalloc(sizeof(struct cx18_buffer),
			      GFP_KERNEL|__GFP_NOWARN);
		if (buf == NULL) {
			kfree(mdl);
			break;
		}

		buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(mdl);
			kfree(buf);
			break;
		}

		INIT_LIST_HEAD(&mdl->list);
		INIT_LIST_HEAD(&mdl->buf_list);
		mdl->id = s->mdl_base_idx; /* a somewhat safe value */
		cx18_enqueue(s, mdl, &s->q_idle);

		INIT_LIST_HEAD(&buf->list);
		buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
						 buf->buf, s->buf_size,
						 s->dma);
		cx18_buf_sync_for_cpu(s, buf);
		list_add_tail(&buf->list, &s->buf_pool);
	}
	if (i == s->buffers) {
		cx->free_mdl_idx += s->buffers;
		return 0;
	}
	CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	cx18_stream_free(s);
	return -ENOMEM;
}

void cx18_stream_free(struct cx18_stream *s)
{
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	struct cx18 *cx = s->cx;

	CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);

	/* move all buffers to buf_pool and all MDLs to q_idle */
	cx18_unload_queues(s);

	/* empty q_idle */
	while ((mdl = cx18_dequeue(s, &s->q_idle)))
		kfree(mdl);

	/* empty buf_pool */
	while (!list_empty(&s->buf_pool)) {
		buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
		list_del_init(&buf->list);

		dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
				 s->buf_size, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}
}