// SPDX-License-Identifier: GPL-2.0-or-later
/*
 */

/* USX2Y "rawusb" aka hwdep_pcm implementation

   USB's inability to atomically handle power-of-2 period sized data chunks
   at standard sample rates is what led to this part of the usx2y module:
   It provides the alsa kernel half of the usx2y-alsa-jack driver pair.
   The pair uses a hardware dependent alsa-device for mmaped pcm transport.
   Advantage achieved:
	The usb_hc moves pcm data from/into memory via DMA.
	That memory is mmaped by jack's usx2y driver.
	Jack's usx2y driver is the first/last to read/write pcm data.
	Read/write is a combination of power-of-2 period shaping and
	float/int conversion.
	Compared to mainline alsa/jack we leave out the power-of-2 period
	shaping inside snd-usb-usx2y, which needs memcpy() and additional
	buffers.
	As a side effect, possible unwanted pcm-data corruption resulting from
	standard alsa's snd-usb-usx2y period shaping scheme falls away.
	The result is sane jack operation with buffering schemes down to
	128 frames, 2 periods.
	Plain usx2y alsa mode is able to achieve 64 frames, 4 periods, but only
	at the cost of more easily triggered xruns (e.g. with aeolus); 128 or
	256 frames, 2 periods works but is useless because of crackling.

   This is a first "proof of concept" implementation.
   Later, functionalities should migrate to more appropriate places:
   Userland:
	- jackd could mmap its float-pcm buffers directly from alsa-lib.
	- alsa-lib could provide power-of-2 period sized shaping combined with
	  int/float conversion.
	Currently the usx2y jack driver provides the above two services.
   Kernel:
	- rawusb dma pcm buffer transport should go to snd-usb-lib, so that
	  snd-usb-audio devices can use it as well.
	Currently rawusb dma pcm buffer transport (this file) is only available
	to snd-usb-usx2y.
*/
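
/*
 * For orientation, a minimal user-space sketch of how a client such as
 * jack's usx2y driver is expected to use this transport (illustrative only;
 * the hwdep device path and the lack of error handling are assumptions, not
 * defined by this file):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include "usx2yhwdeppcm.h"	// struct snd_usx2y_hwdep_pcm_shm
 *
 *	int fd = open("/dev/snd/hwC0D1", O_RDWR);  // this card's hwdep pcm node
 *	struct snd_usx2y_hwdep_pcm_shm *shm =
 *		mmap(NULL, sizeof(*shm), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Opening the node switches the card into rawusb mode.  Capture data then
 * appears in shm->capture0x8 / shm->capture0xA, playback data is written
 * into shm->playback, and the captured_iso[] ring maintained below tells
 * user space which offsets/lengths belong to which usb frame.
 */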

#include <linux/delay.h>
#include <linux/gfp.h>
#include "usbusx2yaudio.c"

#if defined(USX2Y_NRPACKS_VARIABLE) || USX2Y_NRPACKS == 1

#include <sound/hwdep.h>

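/*
 * Retire a completed capture urb: remember in the shared memory at which
 * captured_iso ring slot capture started, convert the packets'
 * actual_length into pcm frames, advance hwptr_done and signal a period
 * boundary to the pcm core once a full period has been transferred.
 */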
static int usx2y_usbpcm_urb_capt_retire(struct snd_usx2y_substream *subs)
{
	struct urb *urb = subs->completed_urb;
	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
	int i, lens = 0, hwptr_done = subs->hwptr_done;
	struct usx2ydev *usx2y = subs->usx2y;
	int head;

	if (usx2y->hwdep_pcm_shm->capture_iso_start < 0) { //FIXME
		head = usx2y->hwdep_pcm_shm->captured_iso_head + 1;
		if (head >= ARRAY_SIZE(usx2y->hwdep_pcm_shm->captured_iso))
			head = 0;
		usx2y->hwdep_pcm_shm->capture_iso_start = head;
		dev_dbg(&usx2y->dev->dev, "cap start %i\n", head);
	}
	for (i = 0; i < nr_of_packs(); i++) {
		if (urb->iso_frame_desc[i].status) { /* active? hmm, skip this */
			dev_err(&usx2y->dev->dev,
				"active frame status %i. Most probably some hardware problem.\n",
				urb->iso_frame_desc[i].status);
			return urb->iso_frame_desc[i].status;
		}
		lens += urb->iso_frame_desc[i].actual_length / usx2y->stride;
	}
	hwptr_done += lens;
	if (hwptr_done >= runtime->buffer_size)
		hwptr_done -= runtime->buffer_size;
	subs->hwptr_done = hwptr_done;
	subs->transfer_done += lens;
	/* update the pointer, call callback if necessary */
	if (subs->transfer_done >= runtime->period_size) {
		subs->transfer_done -= runtime->period_size;
		snd_pcm_period_elapsed(subs->pcm_substream);
	}
	return 0;
}

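/* number of 1 ms iso frames needed to cover one pcm buffer at the current rate */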
static int usx2y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
				       struct usx2ydev *usx2y)
{
	return (runtime->buffer_size * 1000) / usx2y->rate + 1; //FIXME: so far only correct period_size == 2^x ?
}

/*
 * prepare urb for playback data pipe
 *
 * we copy the data directly from the pcm buffer.
 * the current position to be copied is held in hwptr field.
 * since a urb can handle only a single linear buffer, if the total
 * transferred area overflows the buffer boundary, we cannot send
 * it directly from the buffer. thus the data is once copied to
 * a temporary buffer and urb points to that.
 */
static int usx2y_hwdep_urb_play_prepare(struct snd_usx2y_substream *subs,
					struct urb *urb)
{
	int count, counts, pack;
	struct usx2ydev *usx2y = subs->usx2y;
	struct snd_usx2y_hwdep_pcm_shm *shm = usx2y->hwdep_pcm_shm;
	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;

	if (shm->playback_iso_start < 0) {
		shm->playback_iso_start = shm->captured_iso_head -
			usx2y_iso_frames_per_buffer(runtime, usx2y);
		if (shm->playback_iso_start < 0)
			shm->playback_iso_start += ARRAY_SIZE(shm->captured_iso);
		shm->playback_iso_head = shm->playback_iso_start;
	}

	count = 0;
	for (pack = 0; pack < nr_of_packs(); pack++) {
		/* calculate the size of a packet */
		counts = shm->captured_iso[shm->playback_iso_head].length / usx2y->stride;
		if (counts < 43 || counts > 50) {
			dev_err(&usx2y->dev->dev, "should not be here with counts=%i\n", counts);
			return -EPIPE;
		}
		/* set up descriptor */
		urb->iso_frame_desc[pack].offset = shm->captured_iso[shm->playback_iso_head].offset;
		urb->iso_frame_desc[pack].length = shm->captured_iso[shm->playback_iso_head].length;
		if (atomic_read(&subs->state) != STATE_RUNNING)
			memset((char *)urb->transfer_buffer + urb->iso_frame_desc[pack].offset, 0,
			       urb->iso_frame_desc[pack].length);
		if (++shm->playback_iso_head >= ARRAY_SIZE(shm->captured_iso))
			shm->playback_iso_head = 0;
		count += counts;
	}
	urb->transfer_buffer_length = count * usx2y->stride;
	return 0;
}

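/*
 * Advance a capture urb's iso descriptors for its next round trip.  When
 * called with a non-NULL subs (the first capture substream), also log each
 * completed packet's frame number, offset and length into the captured_iso
 * ring so that user space and the playback prepare above can mirror it.
 */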
static void usx2y_usbpcm_urb_capt_iso_advance(struct snd_usx2y_substream *subs,
					      struct urb *urb)
{
	struct usb_iso_packet_descriptor *desc;
	struct snd_usx2y_hwdep_pcm_shm *shm;
	int pack, head;

	for (pack = 0; pack < nr_of_packs(); ++pack) {
		desc = urb->iso_frame_desc + pack;
		if (subs) {
			shm = subs->usx2y->hwdep_pcm_shm;
			head = shm->captured_iso_head + 1;
			if (head >= ARRAY_SIZE(shm->captured_iso))
				head = 0;
			shm->captured_iso[head].frame = urb->start_frame + pack;
			shm->captured_iso[head].offset = desc->offset;
			shm->captured_iso[head].length = desc->actual_length;
			shm->captured_iso_head = head;
			shm->captured_iso_frames++;
		}
		desc->offset += desc->length * NRURBS * nr_of_packs();
		if (desc->offset + desc->length >= SSS)
			desc->offset -= (SSS - desc->length);
	}
}

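/*
 * Handle one fully completed usb frame: prepare and resubmit the pending
 * playback urb (its packet layout mirrors the captured_iso ring), then
 * retire the capture data, advance the capture iso descriptors and
 * resubmit the capture urbs.
 */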
static int usx2y_usbpcm_usbframe_complete(struct snd_usx2y_substream *capsubs,
					  struct snd_usx2y_substream *capsubs2,
					  struct snd_usx2y_substream *playbacksubs,
					  int frame)
{
	int err, state;
	struct urb *urb = playbacksubs->completed_urb;

	state = atomic_read(&playbacksubs->state);
	if (urb) {
		if (state == STATE_RUNNING)
			usx2y_urb_play_retire(playbacksubs, urb);
		else if (state >= STATE_PRERUNNING)
			atomic_inc(&playbacksubs->state);
	} else {
		switch (state) {
		case STATE_STARTING1:
			urb = playbacksubs->urb[0];
			atomic_inc(&playbacksubs->state);
			break;
		case STATE_STARTING2:
			urb = playbacksubs->urb[1];
			atomic_inc(&playbacksubs->state);
			break;
		}
	}
	if (urb) {
		err = usx2y_hwdep_urb_play_prepare(playbacksubs, urb);
		if (err)
			return err;
		err = usx2y_urb_submit(playbacksubs, urb, frame);
		if (err)
			return err;
	}

	playbacksubs->completed_urb = NULL;

	state = atomic_read(&capsubs->state);
	if (state >= STATE_PREPARED) {
		if (state == STATE_RUNNING) {
			err = usx2y_usbpcm_urb_capt_retire(capsubs);
			if (err)
				return err;
		} else if (state >= STATE_PRERUNNING) {
			atomic_inc(&capsubs->state);
		}
		usx2y_usbpcm_urb_capt_iso_advance(capsubs, capsubs->completed_urb);
		if (capsubs2)
			usx2y_usbpcm_urb_capt_iso_advance(NULL, capsubs2->completed_urb);
		err = usx2y_urb_submit(capsubs, capsubs->completed_urb, frame);
		if (err)
			return err;
		if (capsubs2) {
			err = usx2y_urb_submit(capsubs2, capsubs2->completed_urb, frame);
			if (err)
				return err;
		}
	}
	capsubs->completed_urb = NULL;
	if (capsubs2)
		capsubs2->completed_urb = NULL;
	return 0;
}

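/*
 * Completion handler shared by all rawusb pcm urbs.  Each completing urb is
 * parked in its substream; once every active substream of the current usb
 * frame has completed, the whole frame is processed in one go by
 * usx2y_usbpcm_usbframe_complete().
 */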
static void i_usx2y_usbpcm_urb_complete(struct urb *urb)
{
	struct snd_usx2y_substream *subs = urb->context;
	struct usx2ydev *usx2y = subs->usx2y;
	struct snd_usx2y_substream *capsubs, *capsubs2, *playbacksubs;

	if (unlikely(atomic_read(&subs->state) < STATE_PREPARED)) {
		dev_dbg(&usx2y->dev->dev,
			"hcd_frame=%i ep=%i%s status=%i start_frame=%i\n",
			usb_get_current_frame_number(usx2y->dev),
			subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
			urb->status, urb->start_frame);
		return;
	}
	if (unlikely(urb->status)) {
		usx2y_error_urb_status(usx2y, subs, urb);
		return;
	}

	subs->completed_urb = urb;
	capsubs = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
	capsubs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
	playbacksubs = usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
	if (capsubs->completed_urb && atomic_read(&capsubs->state) >= STATE_PREPARED &&
	    (!capsubs2 || capsubs2->completed_urb) &&
	    (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < STATE_PREPARED)) {
		if (!usx2y_usbpcm_usbframe_complete(capsubs, capsubs2, playbacksubs, urb->start_frame)) {
			usx2y->wait_iso_frame += nr_of_packs();
		} else {
			usx2y_clients_stop(usx2y);
		}
	}
}

static void usx2y_hwdep_urb_release(struct urb **urb)
{
	usb_kill_urb(*urb);
	usb_free_urb(*urb);
	*urb = NULL;
}

/*
 * release a substream
 */
static void usx2y_usbpcm_urbs_release(struct snd_usx2y_substream *subs)
{
	int i;

	dev_dbg(&subs->usx2y->dev->dev,
		"%s() %i\n", __func__, subs->endpoint);
	for (i = 0; i < NRURBS; i++)
		usx2y_hwdep_urb_release(subs->urb + i);
}

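/*
 * Startup handshake: while a substream is being prepared, its urbs complete
 * into i_usx2y_usbpcm_subs_startup() below.  The first completion belonging
 * to the prepared substream bumps the substream states, switches the urbs
 * back to the normal completion handler and wakes the preparing task.
 */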
static void usx2y_usbpcm_subs_startup_finish(struct usx2ydev *usx2y)
{
	usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_urb_complete);
	usx2y->prepare_subs = NULL;
}

static void i_usx2y_usbpcm_subs_startup(struct urb *urb)
{
	struct snd_usx2y_substream *subs = urb->context;
	struct usx2ydev *usx2y = subs->usx2y;
	struct snd_usx2y_substream *prepare_subs = usx2y->prepare_subs;
	struct snd_usx2y_substream *cap_subs2;

	if (prepare_subs &&
	    urb->start_frame == prepare_subs->urb[0]->start_frame) {
		atomic_inc(&prepare_subs->state);
		if (prepare_subs == usx2y->subs[SNDRV_PCM_STREAM_CAPTURE]) {
			cap_subs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
			if (cap_subs2)
				atomic_inc(&cap_subs2->state);
		}
		usx2y_usbpcm_subs_startup_finish(usx2y);
		wake_up(&usx2y->prepare_wait_queue);
	}

	i_usx2y_usbpcm_urb_complete(urb);
}

/*
 * initialize a substream's urbs
 */
static int usx2y_usbpcm_urbs_allocate(struct snd_usx2y_substream *subs)
{
	int i;
	unsigned int pipe;
	int is_playback = subs == subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
	struct usb_device *dev = subs->usx2y->dev;
	struct urb **purb;

	pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
			usb_rcvisocpipe(dev, subs->endpoint);
	subs->maxpacksize = usb_maxpacket(dev, pipe);
	if (!subs->maxpacksize)
		return -EINVAL;

	/* allocate and initialize data urbs */
	for (i = 0; i < NRURBS; i++) {
		purb = subs->urb + i;
		if (*purb) {
			usb_kill_urb(*purb);
			continue;
		}
		*purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL);
		if (!*purb) {
			usx2y_usbpcm_urbs_release(subs);
			return -ENOMEM;
		}
		(*purb)->transfer_buffer = is_playback ?
			subs->usx2y->hwdep_pcm_shm->playback : (
			subs->endpoint == 0x8 ?
			subs->usx2y->hwdep_pcm_shm->capture0x8 :
			subs->usx2y->hwdep_pcm_shm->capture0xA);

		(*purb)->dev = dev;
		(*purb)->pipe = pipe;
		(*purb)->number_of_packets = nr_of_packs();
		(*purb)->context = subs;
		(*purb)->interval = 1;
		(*purb)->complete = i_usx2y_usbpcm_subs_startup;
	}
	return 0;
}

/*
 * free the buffer
 */
static int snd_usx2y_usbpcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_usx2y_substream *subs = runtime->private_data;
	struct snd_usx2y_substream *cap_subs;
	struct snd_usx2y_substream *playback_subs;
	struct snd_usx2y_substream *cap_subs2;

	mutex_lock(&subs->usx2y->pcm_mutex);
	dev_dbg(&subs->usx2y->dev->dev, "%s(%p)\n", __func__, substream);

	cap_subs2 = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		cap_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
		atomic_set(&subs->state, STATE_STOPPED);
		usx2y_usbpcm_urbs_release(subs);
		if (!cap_subs->pcm_substream ||
		    !cap_subs->pcm_substream->runtime ||
		    cap_subs->pcm_substream->runtime->state < SNDRV_PCM_STATE_PREPARED) {
			atomic_set(&cap_subs->state, STATE_STOPPED);
			if (cap_subs2)
				atomic_set(&cap_subs2->state, STATE_STOPPED);
			usx2y_usbpcm_urbs_release(cap_subs);
			if (cap_subs2)
				usx2y_usbpcm_urbs_release(cap_subs2);
		}
	} else {
		playback_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
		if (atomic_read(&playback_subs->state) < STATE_PREPARED) {
			atomic_set(&subs->state, STATE_STOPPED);
			if (cap_subs2)
				atomic_set(&cap_subs2->state, STATE_STOPPED);
			usx2y_usbpcm_urbs_release(subs);
			if (cap_subs2)
				usx2y_usbpcm_urbs_release(cap_subs2);
		}
	}
	mutex_unlock(&subs->usx2y->pcm_mutex);
	return 0;
}

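/*
 * Arm the startup handshake for subs: mark it as the substream being
 * prepared and route all urb completions through
 * i_usx2y_usbpcm_subs_startup() until its first urb has completed.
 */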
static void usx2y_usbpcm_subs_startup(struct snd_usx2y_substream *subs)
{
	struct usx2ydev *usx2y = subs->usx2y;

	usx2y->prepare_subs = subs;
	subs->urb[0]->start_frame = -1;
	smp_wmb();	// Make sure above modifications are seen by i_usx2y_usbpcm_subs_startup()
	usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_subs_startup);
}

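/*
 * Allocate urbs for the given stream direction (including the second
 * device's substream), submit capture urbs with their initial packet layout
 * or mark playback substreams as starting, then wait for the startup
 * handshake driven by i_usx2y_usbpcm_subs_startup().
 */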
static int usx2y_usbpcm_urbs_start(struct snd_usx2y_substream *subs)
{
	int p, u, err, stream = subs->pcm_substream->stream;
	struct usx2ydev *usx2y = subs->usx2y;
	struct urb *urb;
	unsigned long pack;

	if (stream == SNDRV_PCM_STREAM_CAPTURE) {
		usx2y->hwdep_pcm_shm->captured_iso_head = -1;
		usx2y->hwdep_pcm_shm->captured_iso_frames = 0;
	}

	for (p = 0; 3 >= (stream + p); p += 2) {
		struct snd_usx2y_substream *subs = usx2y->subs[stream + p];

		if (subs) {
			err = usx2y_usbpcm_urbs_allocate(subs);
			if (err < 0)
				return err;
			subs->completed_urb = NULL;
		}
	}

	for (p = 0; p < 4; p++) {
		struct snd_usx2y_substream *subs = usx2y->subs[p];

		if (subs && atomic_read(&subs->state) >= STATE_PREPARED)
			goto start;
	}

start:
	usx2y_usbpcm_subs_startup(subs);
	for (u = 0; u < NRURBS; u++) {
		for (p = 0; 3 >= (stream + p); p += 2) {
			struct snd_usx2y_substream *subs = usx2y->subs[stream + p];

			if (!subs)
				continue;
			urb = subs->urb[u];
			if (usb_pipein(urb->pipe)) {
				if (!u)
					atomic_set(&subs->state, STATE_STARTING3);
				urb->dev = usx2y->dev;
				for (pack = 0; pack < nr_of_packs(); pack++) {
					urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs());
					urb->iso_frame_desc[pack].length = subs->maxpacksize;
				}
				urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs();
				err = usb_submit_urb(urb, GFP_KERNEL);
				if (err < 0) {
					dev_err(&urb->dev->dev,
						"cannot usb_submit_urb() for urb %d, err = %d\n",
						u, err);
					err = -EPIPE;
					goto cleanup;
				} else {
					if (!u)
						usx2y->wait_iso_frame = urb->start_frame;
				}
				urb->transfer_flags = 0;
			} else {
				atomic_set(&subs->state, STATE_STARTING1);
				break;
			}
		}
	}
	err = 0;
	wait_event(usx2y->prepare_wait_queue, !usx2y->prepare_subs);
	if (atomic_read(&subs->state) != STATE_PREPARED)
		err = -EPIPE;

cleanup:
	if (err) {
		usx2y_subs_startup_finish(usx2y);	// Call it now
		usx2y_clients_stop(usx2y);		// something is completely wrong > stop everything
	}
	return err;
}

#define USX2Y_HWDEP_PCM_PAGES	\
	PAGE_ALIGN(sizeof(struct snd_usx2y_hwdep_pcm_shm))

/*
 * prepare callback
 *
 * set format and initialize urbs
 */
static int snd_usx2y_usbpcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_usx2y_substream *subs = runtime->private_data;
	struct usx2ydev *usx2y = subs->usx2y;
	struct snd_usx2y_substream *capsubs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
	int err = 0;

	dev_dbg(&usx2y->dev->dev, "%s(%p)\n", __func__, substream);

	mutex_lock(&usx2y->pcm_mutex);

	if (!usx2y->hwdep_pcm_shm) {
		usx2y->hwdep_pcm_shm = alloc_pages_exact(USX2Y_HWDEP_PCM_PAGES,
							 GFP_KERNEL);
		if (!usx2y->hwdep_pcm_shm) {
			err = -ENOMEM;
			goto up_prepare_mutex;
		}
		memset(usx2y->hwdep_pcm_shm, 0, USX2Y_HWDEP_PCM_PAGES);
	}

	usx2y_subs_prepare(subs);
	// Start hardware streams
	// SyncStream first....
	if (atomic_read(&capsubs->state) < STATE_PREPARED) {
		if (usx2y->format != runtime->format) {
			err = usx2y_format_set(usx2y, runtime->format);
			if (err < 0)
				goto up_prepare_mutex;
		}
		if (usx2y->rate != runtime->rate) {
			err = usx2y_rate_set(usx2y, runtime->rate);
			if (err < 0)
				goto up_prepare_mutex;
		}
		dev_dbg(&usx2y->dev->dev,
			"starting capture pipe for %s\n", subs == capsubs ?
			"self" : "playpipe");
		err = usx2y_usbpcm_urbs_start(capsubs);
		if (err < 0)
			goto up_prepare_mutex;
	}

	if (subs != capsubs) {
		usx2y->hwdep_pcm_shm->playback_iso_start = -1;
		if (atomic_read(&subs->state) < STATE_PREPARED) {
			while (usx2y_iso_frames_per_buffer(runtime, usx2y) >
			       usx2y->hwdep_pcm_shm->captured_iso_frames) {
				dev_dbg(&usx2y->dev->dev,
					"Wait: iso_frames_per_buffer=%i,captured_iso_frames=%i\n",
					usx2y_iso_frames_per_buffer(runtime, usx2y),
					usx2y->hwdep_pcm_shm->captured_iso_frames);
				if (msleep_interruptible(10)) {
					err = -ERESTARTSYS;
					goto up_prepare_mutex;
				}
			}
			err = usx2y_usbpcm_urbs_start(subs);
			if (err < 0)
				goto up_prepare_mutex;
		}
		dev_dbg(&usx2y->dev->dev,
			"Ready: iso_frames_per_buffer=%i,captured_iso_frames=%i\n",
			usx2y_iso_frames_per_buffer(runtime, usx2y),
			usx2y->hwdep_pcm_shm->captured_iso_frames);
	} else {
		usx2y->hwdep_pcm_shm->capture_iso_start = -1;
	}

up_prepare_mutex:
	mutex_unlock(&usx2y->pcm_mutex);
	return err;
}

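/* pcm hardware description used for 4-channel capture (both capture substreams present) */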
static const struct snd_pcm_hardware snd_usx2y_4c = {
	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP_VALID),
	.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE,
	.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min = 44100,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 4,
	.buffer_bytes_max = (2*128*1024),
	.period_bytes_min = 64,
	.period_bytes_max = (128*1024),
	.periods_min = 2,
	.periods_max = 1024,
	.fifo_size = 0
};

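/*
 * The rawusb pcm substreams can only be opened while the corresponding
 * hwdep device is held open, i.e. while the card is in "mmap pcm urbs" mode.
 */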
static int snd_usx2y_usbpcm_open(struct snd_pcm_substream *substream)
{
	struct snd_usx2y_substream *subs =
		((struct snd_usx2y_substream **)
		 snd_pcm_substream_chip(substream))[substream->stream];
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (!(subs->usx2y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS))
		return -EBUSY;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		runtime->hw = snd_usx2y_2c;
	else
		runtime->hw = (subs->usx2y->subs[3] ? snd_usx2y_4c : snd_usx2y_2c);
	runtime->private_data = subs;
	subs->pcm_substream = substream;
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000);
	return 0;
}

static int snd_usx2y_usbpcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_usx2y_substream *subs = runtime->private_data;

	subs->pcm_substream = NULL;
	return 0;
}

static const struct snd_pcm_ops snd_usx2y_usbpcm_ops = {
	.open = snd_usx2y_usbpcm_open,
	.close = snd_usx2y_usbpcm_close,
	.hw_params = snd_usx2y_pcm_hw_params,
	.hw_free = snd_usx2y_usbpcm_hw_free,
	.prepare = snd_usx2y_usbpcm_prepare,
	.trigger = snd_usx2y_pcm_trigger,
	.pointer = snd_usx2y_pcm_pointer,
};

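/*
 * Switching between normal and rawusb mode is only allowed while no pcm
 * substream of the card is busy.
 */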
static int usx2y_pcms_busy_check(struct snd_card *card)
{
	struct usx2ydev *dev = usx2y(card);
	struct snd_usx2y_substream *subs;
	int i;

	for (i = 0; i < dev->pcm_devs * 2; i++) {
		subs = dev->subs[i];
		if (subs && subs->pcm_substream &&
		    SUBSTREAM_BUSY(subs->pcm_substream))
			return -EBUSY;
	}
	return 0;
}

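/*
 * Opening the hwdep pcm device switches the card into rawusb mode; closing
 * it switches back.  Both transitions are refused while any pcm substream
 * of the card is busy.
 */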
static int snd_usx2y_hwdep_pcm_open(struct snd_hwdep *hw, struct file *file)
{
	struct snd_card *card = hw->card;
	int err;

	mutex_lock(&usx2y(card)->pcm_mutex);
	err = usx2y_pcms_busy_check(card);
	if (!err)
		usx2y(card)->chip_status |= USX2Y_STAT_CHIP_MMAP_PCM_URBS;
	mutex_unlock(&usx2y(card)->pcm_mutex);
	return err;
}

static int snd_usx2y_hwdep_pcm_release(struct snd_hwdep *hw, struct file *file)
{
	struct snd_card *card = hw->card;
	int err;

	mutex_lock(&usx2y(card)->pcm_mutex);
	err = usx2y_pcms_busy_check(card);
	if (!err)
		usx2y(hw->card)->chip_status &= ~USX2Y_STAT_CHIP_MMAP_PCM_URBS;
	mutex_unlock(&usx2y(card)->pcm_mutex);
	return err;
}

static void snd_usx2y_hwdep_pcm_vm_open(struct vm_area_struct *area)
{
}

static void snd_usx2y_hwdep_pcm_vm_close(struct vm_area_struct *area)
{
}

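/*
 * Fault handler for the user-space mapping: hand out the page of
 * hwdep_pcm_shm that backs the faulting offset.
 */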
static vm_fault_t snd_usx2y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	void *vaddr;

	offset = vmf->pgoff << PAGE_SHIFT;
	vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
	vmf->page = virt_to_page(vaddr);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_usx2y_hwdep_pcm_vm_ops = {
	.open = snd_usx2y_hwdep_pcm_vm_open,
	.close = snd_usx2y_hwdep_pcm_vm_close,
	.fault = snd_usx2y_hwdep_pcm_vm_fault,
};

static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area)
{
	unsigned long size = (unsigned long)(area->vm_end - area->vm_start);
	struct usx2ydev *usx2y = hw->private_data;

	if (!(usx2y->chip_status & USX2Y_STAT_CHIP_INIT))
		return -EBUSY;

	/* if userspace tries to mmap beyond end of our buffer, fail */
	if (size > USX2Y_HWDEP_PCM_PAGES) {
		dev_dbg(hw->card->dev, "%s: %lu > %lu\n", __func__,
			size, (unsigned long)USX2Y_HWDEP_PCM_PAGES);
		return -EINVAL;
	}

	if (!usx2y->hwdep_pcm_shm)
		return -ENODEV;

	area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
	area->vm_private_data = hw->private_data;
	return 0;
}

static void snd_usx2y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
{
	struct usx2ydev *usx2y = hwdep->private_data;

	if (usx2y->hwdep_pcm_shm)
		free_pages_exact(usx2y->hwdep_pcm_shm, USX2Y_HWDEP_PCM_PAGES);
}

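/*
 * Create the hwdep device that controls rawusb mode and owns the shared
 * memory, plus the pcm device (one playback, one capture substream) that
 * moves audio through it.  Only available when urbs carry a single packet.
 */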
int usx2y_hwdep_pcm_new(struct snd_card *card)
{
	int err;
	struct snd_hwdep *hw;
	struct snd_pcm *pcm;
	struct usb_device *dev = usx2y(card)->dev;

	if (nr_of_packs() != 1)
		return 0;

	err = snd_hwdep_new(card, SND_USX2Y_USBPCM_ID, 1, &hw);
	if (err < 0)
		return err;

	hw->iface = SNDRV_HWDEP_IFACE_USX2Y_PCM;
	hw->private_data = usx2y(card);
	hw->private_free = snd_usx2y_hwdep_pcm_private_free;
	hw->ops.open = snd_usx2y_hwdep_pcm_open;
	hw->ops.release = snd_usx2y_hwdep_pcm_release;
	hw->ops.mmap = snd_usx2y_hwdep_pcm_mmap;
	hw->exclusive = 1;
	sprintf(hw->name, "/dev/bus/usb/%03d/%03d/hwdeppcm", dev->bus->busnum, dev->devnum);

	err = snd_pcm_new(card, NAME_ALLCAPS" hwdep Audio", 2, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usx2y_usbpcm_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usx2y_usbpcm_ops);

	pcm->private_data = usx2y(card)->subs;
	pcm->info_flags = 0;

	sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
	snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
				   SNDRV_DMA_TYPE_CONTINUOUS,
				   NULL,
				   64*1024, 128*1024);
	snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
				   SNDRV_DMA_TYPE_CONTINUOUS,
				   NULL,
				   64*1024, 128*1024);

	return 0;
}

#else

int usx2y_hwdep_pcm_new(struct snd_card *card)
{
	return 0;
}

#endif