/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * dcam_ring_buff.c
 *
 * dcam1394 driver. Video frame ring buffer support.
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/ddidmareq.h>
#include <sys/inttypes.h>
#include <sys/tnf_probe.h>
#include <sys/cmn_err.h>

#include <sys/1394/targets/dcam1394/dcam.h>

/*
 * ring_buff_create
 *
 * - alloc ring_buff_t structure
 * - init ring_buff's num_buffs, buff_num_bytes, num_read_ptrs,
 *   read_ptr_pos
 * - alloc (num buffs) entries in ring_buff's buff_info_array_p
 *
 * - for each buff
 *   - alloc DMA handle; store DMA handle in buff's buff_info_array_p
 *   - alloc mem for DMA transfer; store base addr, data access handle
 *     in buff's buff_info_array_p entry
 *   - bind alloc'ed mem to DMA handle; store assoc info in buff's
 *     buff_info_array_p entry
 */
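/*
 * Illustrative caller's view (a sketch only; the real callers live
 * elsewhere in the dcam1394 driver):
 *
 *	softc_p->ring_buff_p = ring_buff_create(softc_p, num_buffs,
 *	    buff_num_bytes);
 *	if (softc_p->ring_buff_p == NULL)
 *		return (DDI_FAILURE);
 *	...
 *	ring_buff_free(softc_p, softc_p->ring_buff_p);
 */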
ring_buff_t *
ring_buff_create(dcam_state_t *softc_p, size_t num_buffs,
    size_t buff_num_bytes)
{
	buff_info_t	*buff_info_p;
	size_t		buff;
	int		i, rc;
	ring_buff_t	*ring_buff_p;
	size_t		num_bytes;

	num_bytes = sizeof (ring_buff_t);

	ring_buff_p = (ring_buff_t *)kmem_alloc(num_bytes, KM_SLEEP);

	ring_buff_p->num_buffs = num_buffs;
	ring_buff_p->buff_num_bytes = buff_num_bytes;
	ring_buff_p->write_ptr_pos = 0;
	ring_buff_p->num_read_ptrs = 0;
	ring_buff_p->read_ptr_incr_val = 1;

	for (i = 0; i < MAX_NUM_READ_PTRS; i++) {
		ring_buff_p->read_ptr_pos[i] = (size_t)-1;
	}

	num_bytes = num_buffs * sizeof (buff_info_t);

	ring_buff_p->buff_info_array_p =
	    (buff_info_t *)kmem_alloc(num_bytes, KM_SLEEP);

	for (buff = 0; buff < num_buffs; buff++) {

		buff_info_p = &(ring_buff_p->buff_info_array_p[buff]);

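		/* Allocate a DMA handle for this buffer. */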
		if ((ddi_dma_alloc_handle(
		    softc_p->dip,
		    &softc_p->attachinfo.dma_attr,
		    DDI_DMA_DONTWAIT,
		    NULL,
		    &(buff_info_p->dma_handle))) != DDI_SUCCESS) {
			ring_buff_free(softc_p, ring_buff_p);
			return (NULL);
		}

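		/* Allocate memory suitable for DMA into this buffer. */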
		if (ddi_dma_mem_alloc(
		    buff_info_p->dma_handle,
		    buff_num_bytes,
		    &softc_p->attachinfo.acc_attr,
		    DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT,
		    (caddr_t)NULL,
		    &(buff_info_p->kaddr_p),
		    &(buff_info_p->real_len),
		    &(buff_info_p->data_acc_handle)) != DDI_SUCCESS) {
			ring_buff_free(softc_p, ring_buff_p);

			/*
			 * Print a warning; this failure path triggered
			 * bug report #4423667. ddi_dma_mem_alloc() can
			 * fail when the SunVTS vmem test consumes all
			 * swap/virtual memory, so this is a resource
			 * shortage rather than a driver bug. The message
			 * lets future occurrences be diagnosed directly.
			 */
			cmn_err(CE_WARN,
			    "ddi_dma_mem_alloc() failed in ring_buff_create(),"
			    " insufficient memory resources.\n");
			return (NULL);
		}

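		/* Bind the allocated memory to the DMA handle. */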
		rc = ddi_dma_addr_bind_handle(
		    buff_info_p->dma_handle,
		    (struct as *)NULL,
		    (caddr_t)buff_info_p->kaddr_p,
		    buff_info_p->real_len,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT,
		    NULL,
		    &buff_info_p->dma_cookie,
		    &buff_info_p->dma_cookie_count);

		if (rc != DDI_DMA_MAPPED) {
			ring_buff_free(softc_p, ring_buff_p);
			return (NULL);
		}
	}

	return (ring_buff_p);
}

/*
 * ring_buff_free
 *
 * Unbind and free the DMA resources and memory for each buffer, free
 * the buffer info array and the ring_buff_t itself, and clear
 * softc_p->ring_buff_p.
 */
void
ring_buff_free(dcam_state_t *softc_p, ring_buff_t *ring_buff_p)
{
	buff_info_t	*buff_info_p;
	int		i;

	if (ring_buff_p == NULL) {
		softc_p->ring_buff_p = NULL;
		return;
	}

	if (ring_buff_p->buff_info_array_p != NULL) {
		for (i = 0; i < ring_buff_p->num_buffs; i++) {

			buff_info_p = &(ring_buff_p->buff_info_array_p[i]);

			(void) ddi_dma_unbind_handle(buff_info_p->dma_handle);
			ddi_dma_mem_free(&buff_info_p->data_acc_handle);
			ddi_dma_free_handle(&buff_info_p->dma_handle);
		}

		kmem_free(ring_buff_p->buff_info_array_p,
		    ring_buff_p->num_buffs * sizeof (buff_info_t));
	}

	kmem_free(ring_buff_p, sizeof (ring_buff_t));

	softc_p->ring_buff_p = NULL;
}

/*
 * ring_buff_read_ptr_add
 *
 * Allocate a free read pointer slot and return its id, or -1 if all
 * slots are in use.
 */
int
ring_buff_read_ptr_add(ring_buff_t *ring_buff_p)
{
	int	i;
	int	read_ptr_id;

	read_ptr_id = -1;

	for (i = 0; i < MAX_NUM_READ_PTRS; i++) {

		if (ring_buff_p->read_ptr_pos[i] == (size_t)-1) {
			ring_buff_p->read_ptr_pos[i] = 0;
			read_ptr_id = i;
			break;
		}
	}

	return (read_ptr_id);
}

/*
 * ring_buff_read_ptr_remove
 */
int
ring_buff_read_ptr_remove(ring_buff_t *ring_buff_p, int read_ptr_id)
{
	ring_buff_p->read_ptr_pos[read_ptr_id] = (size_t)-1;

	return (0);
}

/*
 * ring_buff_read_ptr_buff_get
 *
 * Return a pointer to the buffer that the given read pointer
 * associated with the ring buffer is pointing to.
 */
buff_info_t *
ring_buff_read_ptr_buff_get(ring_buff_t *ring_buff_p, int read_ptr_id)
{
	size_t		read_ptr_pos;
	buff_info_t	*buff_info_p;

	read_ptr_pos = ring_buff_p->read_ptr_pos[read_ptr_id];
	buff_info_p = &(ring_buff_p->buff_info_array_p[read_ptr_pos]);

	return (buff_info_p);
}


/*
 * ring_buff_read_ptr_pos_get
 */
size_t
ring_buff_read_ptr_pos_get(ring_buff_t *ring_buff_p, int read_ptr_id)
{
	return (ring_buff_p->read_ptr_pos[read_ptr_id]);
}

/*
 * ring_buff_read_ptr_incr
 */
void
ring_buff_read_ptr_incr(ring_buff_t *ring_buff_p, int read_ptr_id)
{
	size_t	read_ptr_pos;
#if defined(_ADDL_RING_BUFF_CHECK)
	size_t	lrp, lwp;	/* linear read, write positions */
#endif /* _ADDL_RING_BUFF_CHECK */

	/*
	 * Advance the read pointer. By default it moves forward one
	 * buffer; when _ADDL_RING_BUFF_CHECK is defined, the position
	 * is recomputed below using read_ptr_incr_val (which can vary
	 * from 1 to 10) and clamped to the write pointer.
	 */

	/* get current read pointer pos */
	read_ptr_pos = ring_buff_p->read_ptr_pos[read_ptr_id];

	ring_buff_p->read_ptr_pos[read_ptr_id] =
	    (read_ptr_pos + 1) % ring_buff_p->num_buffs;

#if defined(_ADDL_RING_BUFF_CHECK)
	if ((read_ptr_pos == 0) && (ring_buff_p->write_ptr_pos == 0)) {
		return;
	}

	if (read_ptr_pos < ring_buff_p->write_ptr_pos) {

		/* calculate new read pointer position */
		if ((read_ptr_pos + ring_buff_p->read_ptr_incr_val) <
		    ring_buff_p->write_ptr_pos) {

			/* there is still some valid frame data */
			ring_buff_p->read_ptr_pos[read_ptr_id] =
			    (read_ptr_pos +
			    ring_buff_p->read_ptr_incr_val) %
			    ring_buff_p->num_buffs;
		} else {
			/*
			 * we have skipped beyond available frame
			 * data, so the buffer is empty
			 */
			ring_buff_p->read_ptr_pos[read_ptr_id] =
			    ring_buff_p->write_ptr_pos;
		}
	} else {
		/*
		 * since read pointer is ahead of write pointer,
		 * it becomes easier to check for new read
		 * pointer position if we pretend that our data
		 * buffer is linear instead of circular
		 */

		lrp = read_ptr_pos + ring_buff_p->read_ptr_incr_val;
		lwp = ring_buff_p->num_buffs +
		    ring_buff_p->write_ptr_pos;

		if (lrp < lwp) {
			/* there is still some valid frame data */
			ring_buff_p->read_ptr_pos[read_ptr_id] =
			    (read_ptr_pos +
			    ring_buff_p->read_ptr_incr_val) %
			    ring_buff_p->num_buffs;
		} else {
			/*
			 * we have skipped beyond available
			 * frame data, so the buffer is empty
			 */
			ring_buff_p->read_ptr_pos[read_ptr_id] =
			    ring_buff_p->write_ptr_pos;
		}
	}
#endif /* _ADDL_RING_BUFF_CHECK */
}
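/*
 * Illustrative consumer sequence (a sketch only; the actual callers
 * live elsewhere in the dcam1394 driver):
 *
 *	buff_info_t *bi;
 *
 *	bi = ring_buff_read_ptr_buff_get(ring_buff_p, read_ptr_id);
 *	(copy the frame out of bi->kaddr_p)
 *	ring_buff_read_ptr_incr(ring_buff_p, read_ptr_id);
 */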


/*
 * ring_buff_write_ptr_pos_get
 */
size_t
ring_buff_write_ptr_pos_get(ring_buff_t *ring_buff_p)
{
	return (ring_buff_p->write_ptr_pos);
}


/*
 * ring_buff_write_ptr_incr
 */
void
ring_buff_write_ptr_incr(ring_buff_t *ring_buff_p)
{
	size_t	write_ptr_pos;

	write_ptr_pos = ring_buff_p->write_ptr_pos;

	ring_buff_p->write_ptr_pos =
	    ((write_ptr_pos + 1) % ring_buff_p->num_buffs);
}