
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
4 * with Common Isochronous Packet (IEC 61883-1) headers
12 #include <linux/firewire-constants.h>
17 #include "amdtp-stream.h"
27 #include "amdtp-stream-trace.h"
83 * amdtp_stream_init - initialize an AMDTP stream structure
87 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
88 * @fmt: the value of the fmt field in the CIP header
89 * @process_ctx_payloads: callback handler to process the payloads of the isoc context
99 return -EINVAL;
101 s->protocol = kzalloc(protocol_size, GFP_KERNEL);
102 if (!s->protocol)
103 return -ENOMEM;
105 s->unit = unit;
106 s->direction = dir;
107 s->flags = flags;
108 s->context = ERR_PTR(-1);
109 mutex_init(&s->mutex);
110 INIT_WORK(&s->period_work, pcm_period_work);
111 s->packet_index = 0;
113 init_waitqueue_head(&s->ready_wait);
115 s->fmt = fmt;
116 s->process_ctx_payloads = process_ctx_payloads;
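For orientation, a hypothetical caller sketch of amdtp_stream_init(); the protocol struct, the callback body, and the fmt value (0x10, AM824) are illustrative, and the callback shape is assumed from the kernel-doc above:

struct proto_state { unsigned int midi_ports; }; // hypothetical per-protocol data

static void process_payloads(struct amdtp_stream *s, const struct pkt_desc *desc,
                             unsigned int count, struct snd_pcm_substream *pcm)
{
    // Walk `count` packet descriptors and move PCM frames for `pcm` here.
}

// One stream per isoc context; the protocol area is allocated by the call.
err = amdtp_stream_init(s, unit, AMDTP_OUT_STREAM, CIP_BLOCKING, 0x10,
                        process_payloads, sizeof(struct proto_state));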
123 * amdtp_stream_destroy - free stream resources
129 if (s->protocol == NULL)
133 kfree(s->protocol);
134 mutex_destroy(&s->mutex);
163 struct snd_interval *s = hw_param_interval(params, rule->var);
176 return -EINVAL;
178 t.min = roundup(s->min, step);
179 t.max = rounddown(s->max, step);
186 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
193 struct snd_pcm_hardware *hw = &runtime->hw;
198 hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
205 hw->periods_min = 2;
206 hw->periods_max = UINT_MAX;
209 hw->period_bytes_min = 4 * hw->channels_max;
212 hw->period_bytes_max = hw->period_bytes_min * 2048;
213 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
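A worked reading of this sizing, with an illustrative channel count of hw->channels_max = 2:

    period_bytes_min = 4 * 2     =     8 bytes
    period_bytes_max = 8 * 2048  = 16384 bytes
    buffer_bytes_max = 16384 * 2 = 32768 bytes   (periods_min = 2)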
215 // Linux driver for 1394 OHCI controller voluntarily flushes isoc
217 // PAGE_SIZE. This kicks work for the isoc context and brings
218 // callback in the middle of scheduled interrupts.
219 // Although AMDTP streams in the same domain use the same events per
221 // Here, the value of the context header in the IR context is used for both
223 if (!(s->flags & CIP_NO_HEADER))
230 // In IEC 61883-6, one isoc packet can transfer events up to the value
231 // of syt interval. This comes from the interval of isoc cycle. As 1394
232 // OHCI controller can generate hardware IRQ per isoc packet, the
234 // However, there are two ways of transmission in IEC 61883-6: blocking
235 // and non-blocking modes. In blocking mode, the sequence of isoc packet
236 // includes 'empty' or 'NODATA' packets which include no event. In
237 // non-blocking mode, the number of events per packet is variable up to
248 /* Non-Blocking stream has no more constraints */
249 if (!(s->flags & CIP_BLOCKING))
253 * One AMDTP packet can include some frames. In blocking mode, the
261 SNDRV_PCM_HW_PARAM_RATE, -1);
267 SNDRV_PCM_HW_PARAM_RATE, -1);
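The SYT_INTERVAL these rules align to is fixed per sampling frequency code. For reference, a sketch of the amdtp_syt_intervals[] table defined elsewhere in this file, indexed by enum cip_sfc (values per IEC 61883-6):

static const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
    [CIP_SFC_32000]  =  8,
    [CIP_SFC_44100]  =  8,
    [CIP_SFC_48000]  =  8,
    [CIP_SFC_88200]  = 16,
    [CIP_SFC_96000]  = 16,
    [CIP_SFC_176400] = 32,
    [CIP_SFC_192000] = 32,
};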
276 * amdtp_stream_set_parameters - set stream parameters
279 * @data_block_quadlets: the size of a data block in quadlet units
296 return -EINVAL;
298 s->sfc = sfc;
299 s->data_block_quadlets = data_block_quadlets;
300 s->syt_interval = amdtp_syt_intervals[sfc];
302 // default buffering in the device.
303 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
305 // additional buffering needed to adjust for no-data packets.
306 if (s->flags & CIP_BLOCKING)
307 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
309 s->pcm_frame_multiplier = pcm_frame_multiplier;
315 // The CIP header is processed in the context header, apart from the context payload.
320 if (s->flags & CIP_JUMBO_PAYLOAD)
325 return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
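A worked example of the return expression above, with an illustrative block size: at 48 kHz the syt interval is 8, so data_block_quadlets = 10 yields at most 8 * 10 * 4 = 320 bytes of payload per packet, scaled by the multiplier (greater than 1 only when CIP_JUMBO_PAYLOAD is set, per the branch above).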
329 * amdtp_stream_get_max_payload - get the stream's packet size
339 if (!(s->flags & CIP_NO_HEADER))
349 * amdtp_stream_pcm_prepare - prepare PCM device for running
356 cancel_work_sync(&s->period_work);
357 s->pcm_buffer_pointer = 0;
358 s->pcm_period_pointer = 0;
363 list_prev_entry_circular(desc, &s->packet_descs_list, link)
368 const unsigned int syt_interval = s->syt_interval;
374 if (desc->syt_offset != CIP_SYT_NO_INFO)
375 desc->data_blocks = syt_interval;
377 desc->data_blocks = 0;
387 const enum cip_sfc sfc = s->sfc;
388 unsigned int state = s->ctx_data.rx.data_block_state;
396 desc->data_blocks = state;
404 * 2) packets with a rounded-up number of blocks occur as early
405 * as possible in the sequence (to prevent underruns of the
410 desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
413 desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
422 s->ctx_data.rx.data_block_state = state;
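A standalone userspace sketch, copying the 44.1 kHz branch above and assuming 8000 isoc cycles per second, which verifies that the '6 6 5 6 5 ...' sequence carries exactly 441 data blocks per 80-cycle period, i.e. an average of 44100 events per second:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int phase = 0, total = 0;

    // 80 cycles at 44100 events per 8000 cycles must carry 441 blocks.
    for (unsigned int cycle = 0; cycle < 80; ++cycle) {
        unsigned int blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));

        total += blocks;
        if (++phase >= 80)
            phase = 0;
    }

    printf("blocks per 80 cycles: %u\n", total);
    assert(total == 441);
    return 0;
}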
435 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
439 * formula to the SYT precision results in a sequence of
455 syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
467 const enum cip_sfc sfc = s->sfc;
468 unsigned int last = s->ctx_data.rx.last_syt_offset;
469 unsigned int state = s->ctx_data.rx.syt_offset_state;
475 desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
480 s->ctx_data.rx.last_syt_offset = last;
481 s->ctx_data.rx.syt_offset_state = state;
494 syt_cycle_lo -= cycle_lo;
502 return syt_offset - transfer_delay;
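A standalone reconstruction of the SYT math, a sketch rather than a verbatim copy of the function above; it assumes TICKS_PER_CYCLE = 3072 (125 usec cycles at 24.576 MHz) and the IEC 61883-1 SYT layout of a 4-bit cycle count plus a 12-bit tick offset:

#include <stdio.h>

#define TICKS_PER_CYCLE       3072
#define CIP_SYT_CYCLE_MODULUS 16 // the SYT field carries the cycle count modulo 16

// Ticks from the start of `cycle` to the presentation time encoded in `syt`,
// less the transfer delay the sender folded into the timestamp.
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
                                       unsigned int transfer_delay)
{
    unsigned int cycle_lo = cycle & 0x0f;
    unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
    unsigned int syt_offset;

    if (syt_cycle_lo < cycle_lo)
        syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
    syt_cycle_lo -= cycle_lo;

    syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);

    return syt_offset - transfer_delay;
}

int main(void)
{
    // Presentation point 2 cycles and 0x123 ticks after cycle 5, no delay:
    printf("%u\n", compute_syt_offset(0x7123, 5, 0)); // 2 * 3072 + 291 = 6435
    return 0;
}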
505 // Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
507 // before filling entries in the queue. The calculation is safe even if it looks fragile by
511 const unsigned int cache_size = s->ctx_data.tx.cache.size;
512 unsigned int cycles = s->ctx_data.tx.cache.pos;
516 cycles -= head;
523 const unsigned int transfer_delay = s->transfer_delay;
524 const unsigned int cache_size = s->ctx_data.tx.cache.size;
525 struct seq_desc *cache = s->ctx_data.tx.cache.descs;
526 unsigned int cache_pos = s->ctx_data.tx.cache.pos;
527 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
533 if (aware_syt && src->syt != CIP_SYT_NO_INFO)
534 dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
536 dst->syt_offset = CIP_SYT_NO_INFO;
537 dst->data_blocks = src->data_blocks;
543 s->ctx_data.tx.cache.pos = cache_pos;
551 if (s->flags & CIP_BLOCKING)
560 struct amdtp_stream *target = s->ctx_data.rx.replay_target;
561 const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
562 const unsigned int cache_size = target->ctx_data.tx.cache.size;
563 unsigned int cache_pos = s->ctx_data.rx.cache_pos;
572 s->ctx_data.rx.cache_pos = cache_pos;
578 struct amdtp_domain *d = s->domain;
582 if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
585 if (!d->replay.on_the_fly) {
588 struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
589 const unsigned int cache_size = tx->ctx_data.tx.cache.size;
590 const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
609 ptr = s->pcm_buffer_pointer + frames;
610 if (ptr >= pcm->runtime->buffer_size)
611 ptr -= pcm->runtime->buffer_size;
612 WRITE_ONCE(s->pcm_buffer_pointer, ptr);
614 s->pcm_period_pointer += frames;
615 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
616 s->pcm_period_pointer -= pcm->runtime->period_size;
618 // The program in the user process should periodically check the status of the intermediate
619 // buffer associated with the PCM substream to process PCM frames in the buffer, instead
631 // v callbacks in snd_pcm_ops
637 if (!pcm->runtime->no_period_wakeup)
638 queue_work(system_highpri_wq, &s->period_work);
646 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
657 params->interrupt = sched_irq;
658 params->tag = s->tag;
659 params->sy = 0;
661 err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
662 s->buffer.packets[s->packet_index].offset);
664 dev_err(&s->unit->device, "queueing error: %d\n", err);
668 if (++s->packet_index >= s->queue_size)
669 s->packet_index = 0;
677 params->skip =
678 !!(params->header_length == 0 && params->payload_length == 0);
686 params->header_length = s->ctx_data.tx.ctx_header_size;
687 params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
688 params->skip = false;
695 cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
696 (s->data_block_quadlets << CIP_DBS_SHIFT) |
697 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
700 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
701 ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
714 payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
715 params->payload_length = payload_length;
718 cip_header = (__be32 *)params->header;
720 params->header_length = header_length;
726 data_block_counter, s->packet_index, index, curr_cycle_time);
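A standalone sketch of the two-quadlet CIP header packed above; the shift/mask values mirror this file's defines but are restated locally here, so treat them as illustrative:

#include <stdint.h>

#define CIP_EOH        (1u << 31)   // set in the second quadlet only
#define CIP_SID_SHIFT  24           // source node ID
#define CIP_DBS_SHIFT  16           // data block size, in quadlets
#define CIP_DBC_MASK   0x000000ff   // data block counter
#define CIP_FMT_SHIFT  24           // stream format (e.g. AM824)
#define CIP_FDF_SHIFT  16           // format dependent field (carries the SFC)
#define CIP_SYT_MASK   0x0000ffff   // presentation timestamp

// The driver additionally stores both quadlets big-endian via cpu_to_be32().
static void pack_cip_header(uint32_t header[2], unsigned int sid, unsigned int dbs,
                            unsigned int dbc, unsigned int fmt, unsigned int fdf,
                            unsigned int syt)
{
    header[0] = (sid << CIP_SID_SHIFT) | (dbs << CIP_DBS_SHIFT) | (dbc & CIP_DBC_MASK);
    header[1] = CIP_EOH | (fmt << CIP_FMT_SHIFT) | (fdf << CIP_FDF_SHIFT) |
                (syt & CIP_SYT_MASK);
}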
745 * This module supports 'Two-quadlet CIP header with SYT field'.
750 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
751 dev_info_ratelimited(&s->unit->device,
754 return -EAGAIN;
760 if (sph != s->sph || fmt != s->fmt) {
761 dev_info_ratelimited(&s->unit->device,
764 return -EAGAIN;
776 dev_err(&s->unit->device,
777 "Detect invalid value in dbs field: %08X\n",
779 return -EPROTO;
781 if (s->flags & CIP_WRONG_DBS)
782 data_block_quadlets = s->data_block_quadlets;
789 if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
793 if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
796 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
801 if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
802 if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
803 dbc_interval = s->ctx_data.tx.dbc_interval;
814 dev_err(&s->unit->device,
817 return -EIO;
822 if (!(s->flags & CIP_UNAWARE_SYT))
841 if (!(s->flags & CIP_NO_HEADER))
846 if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
847 dev_err(&s->unit->device,
849 payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
850 return -EIO;
858 err = check_cip_header(s, cip_header, payload_length - cip_header_size,
870 *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
883 // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
884 // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
901 cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
910 return minuend - subtrahend;
917 else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
918 return -1;
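These cycle counts wrap within an 8-second window (OHCI_SECOND_MODULUS * CYCLES_PER_SECOND = 64000 cycles); a standalone sketch of the wrap-aware increment used above:

#include <assert.h>

#define CYCLES_PER_SECOND   8000
#define OHCI_SECOND_MODULUS 8
#define OHCI_CYCLE_MODULUS  (OHCI_SECOND_MODULUS * CYCLES_PER_SECOND) // 64000

// Advance a cycle count, wrapping at the 8-second OHCI window. The addend is
// assumed to be smaller than the modulus, as it is for queue-sized steps.
static unsigned int increment_ohci_cycle_count(unsigned int cycle, unsigned int addend)
{
    cycle += addend;
    if (cycle >= OHCI_CYCLE_MODULUS)
        cycle -= OHCI_CYCLE_MODULUS;
    return cycle;
}

int main(void)
{
    assert(increment_ohci_cycle_count(100, 25) == 125); // plain advance
    assert(increment_ohci_cycle_count(63999, 1) == 0);  // wraps to zero
    return 0;
}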
938 unsigned int next_cycle = s->next_cycle;
939 unsigned int dbc = s->data_block_counter;
940 unsigned int packet_index = s->packet_index;
941 unsigned int queue_size = s->queue_size;
947 (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
959 if (s->flags & CIP_NO_HEADER) {
960 // Fireface skips transmission just for an isoc cycle corresponding
969 desc->cycle = prev_cycle;
970 desc->syt = 0;
971 desc->data_blocks = 0;
972 desc->data_block_counter = dbc;
973 desc->ctx_payload = NULL;
977 } else if (s->flags & CIP_JUMBO_PAYLOAD) {
978 // OXFW970 skips transmission for several isoc cycles during
986 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
988 return -EIO;
997 desc->cycle = cycle;
998 desc->syt = syt;
999 desc->data_blocks = data_blocks;
1000 desc->data_block_counter = dbc;
1001 desc->ctx_payload = s->buffer.packets[packet_index].buffer;
1003 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1004 dbc = (dbc + desc->data_blocks) & 0xff;
1009 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1013 s->next_cycle = next_cycle;
1014 s->data_block_counter = dbc;
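The data block counter is 8 bits wide and advances by the number of data blocks in each packet; a simplified standalone sketch of the continuity rule enforced when parsing tx packets (it ignores the CIP_DBC_IS_END_EVENT and dbc_interval quirks handled above):

#include <assert.h>
#include <stdbool.h>

// True if the dbc of the current packet continues from the previous packet.
static bool dbc_is_continuous(unsigned int prev_dbc, unsigned int prev_data_blocks,
                              unsigned int curr_dbc)
{
    return curr_dbc == ((prev_dbc + prev_data_blocks) & 0xff);
}

int main(void)
{
    assert(dbc_is_continuous(0xf8, 16, 0x08));  // wraps modulo 256
    assert(!dbc_is_continuous(0x10, 8, 0x20));  // a packet went missing
    return 0;
}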
1033 struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
1034 unsigned int seq_size = s->ctx_data.rx.seq.size;
1035 unsigned int seq_pos = s->ctx_data.rx.seq.pos;
1036 unsigned int dbc = s->data_block_counter;
1037 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
1043 unsigned int index = (s->packet_index + i) % s->queue_size;
1046 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
1048 if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
1049 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
1051 desc->syt = CIP_SYT_NO_INFO;
1053 desc->data_blocks = seq->data_blocks;
1055 if (s->flags & CIP_DBC_IS_END_EVENT)
1056 dbc = (dbc + desc->data_blocks) & 0xff;
1058 desc->data_block_counter = dbc;
1060 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1061 dbc = (dbc + desc->data_blocks) & 0xff;
1063 desc->ctx_payload = s->buffer.packets[index].buffer;
1071 s->data_block_counter = dbc;
1072 s->ctx_data.rx.seq.pos = seq_pos;
1079 s->packet_index = -1;
1085 if (work && work != &s->period_work)
1087 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
1104 for (i = 0; i < count - 1; ++i)
1106 latest_cycle = desc->cycle;
1108 err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
1116 if (s->direction == AMDTP_IN_STREAM) {
1124 // value expectedly corresponds to a few packets (0-2) since the packet arrived at
1128 data_block_count += desc->data_blocks;
1139 data_block_count += desc->data_blocks;
1144 return data_block_count * s->pcm_frame_multiplier;
1154 pcm = READ_ONCE(s->pcm);
1155 s->process_ctx_payloads(s, desc, count, pcm);
1160 pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);
1163 data_block_count += desc->data_blocks;
1167 update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
1175 const struct amdtp_domain *d = s->domain;
1177 const unsigned int events_per_period = d->events_per_period;
1178 unsigned int event_count = s->ctx_data.rx.event_count;
1179 struct pkt_desc *desc = s->packet_descs_cursor;
1186 if (s->packet_index < 0)
1189 // Calculate the number of packets in buffer and check XRUN.
1196 if (!(s->flags & CIP_NO_HEADER))
1201 if (s == d->irq_target) {
1205 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
1206 need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
1212 (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
1218 build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
1219 desc->data_blocks, desc->data_block_counter,
1220 desc->syt, i, curr_cycle_time);
1222 if (s == s->domain->irq_target) {
1223 event_count += desc->data_blocks;
1225 event_count -= events_per_period;
1238 s->ctx_data.rx.event_count = event_count;
1239 s->packet_descs_cursor = desc;
1246 struct amdtp_domain *d = s->domain;
1252 if (s->packet_index < 0)
1257 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1258 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1265 bool sched_irq = (s == d->irq_target && i == packets - 1);
1281 struct amdtp_domain *d = s->domain;
1283 const unsigned int queue_size = s->queue_size;
1287 if (s->packet_index < 0)
1296 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1310 header_length -= length;
1314 s->ready_processing = true;
1315 wake_up(&s->ready_wait);
1317 if (d->replay.enable)
1318 s->ctx_data.rx.cache_pos = 0;
1324 if (s == d->irq_target)
1325 s->context->callback.sc = irq_target_callback;
1327 s->context->callback.sc = process_rx_packets;
1336 struct pkt_desc *desc = s->packet_descs_cursor;
1342 if (s->packet_index < 0)
1345 // Calculate the number of packets in buffer and check XRUN.
1346 packet_count = header_length / s->ctx_data.tx.ctx_header_size;
1351 if (err != -EAGAIN) {
1356 struct amdtp_domain *d = s->domain;
1360 if (d->replay.enable)
1365 s->packet_descs_cursor = desc;
1387 if (s->packet_index < 0)
1390 packets = header_length / s->ctx_data.tx.ctx_header_size;
1392 ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1394 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1410 struct amdtp_domain *d = s->domain;
1415 if (s->packet_index < 0)
1418 packets = header_length / s->ctx_data.tx.ctx_header_size;
1425 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1428 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1435 size_t length = s->ctx_data.tx.ctx_header_size * offset;
1442 header_length -= length;
1446 s->ready_processing = true;
1447 wake_up(&s->ready_wait);
1453 context->callback.sc = process_tx_packets;
1461 struct amdtp_domain *d = s->domain;
1467 if (s->packet_index < 0)
1470 count = header_length / s->ctx_data.tx.ctx_header_size;
1472 // Attempt to detect any event in the batch of packets.
1480 if (s->flags & CIP_NO_HEADER) {
1481 data_blocks = payload_quads / s->data_block_quadlets;
1488 payload_quads -= CIP_HEADER_QUADLETS;
1490 if (s->flags & CIP_UNAWARE_SYT) {
1491 data_blocks = payload_quads / s->data_block_quadlets;
1500 data_blocks = payload_quads / s->data_block_quadlets;
1507 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1513 s->ctx_data.tx.event_starts = true;
1515 // Decide the cycle count to begin processing the content of packets in IR contexts.
1521 list_for_each_entry(s, &d->streams, list) {
1522 if (s->direction == AMDTP_IN_STREAM) {
1524 if (s->ctx_data.tx.event_starts)
1532 list_for_each_entry(s, &d->streams, list) {
1533 if (s->direction != AMDTP_IN_STREAM)
1536 next_cycle = increment_ohci_cycle_count(s->next_cycle,
1537 d->processing_cycle.tx_init_skip);
1542 s->context->callback.sc = process_tx_packets_intermediately;
1545 d->processing_cycle.tx_start = cycle;
1554 list_for_each_entry(s, &d->streams, list) {
1555 if (s != d->irq_target && amdtp_stream_running(s))
1556 fw_iso_context_flush_completions(s->context);
1564 if (amdtp_stream_running(d->irq_target))
1565 cancel_stream(d->irq_target);
1567 list_for_each_entry(s, &d->streams, list) {
1577 struct amdtp_domain *d = s->domain;
1587 struct amdtp_domain *d = s->domain;
1597 struct amdtp_domain *d = s->domain;
1603 if (d->replay.enable && !d->replay.on_the_fly) {
1608 list_for_each_entry(rx, &d->streams, list) {
1612 if (rx->direction != AMDTP_OUT_STREAM)
1616 tx = rx->ctx_data.rx.replay_target;
1618 if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
1627 // Decide the cycle count to begin processing the content of packets in IT contexts. All of the IT
1630 unsigned int cycle = s->next_cycle;
1631 list_for_each_entry(s, &d->streams, list) {
1632 if (s->direction != AMDTP_OUT_STREAM)
1635 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1636 cycle = s->next_cycle;
1638 if (s == d->irq_target)
1639 s->context->callback.sc = irq_target_callback_intermediately;
1641 s->context->callback.sc = process_rx_packets_intermediately;
1644 d->processing_cycle.rx_start = cycle;
1648 // This is executed one time. For an in-stream, the first packet has come. For an out-stream, it is prepared to
1655 struct amdtp_domain *d = s->domain;
1657 if (s->direction == AMDTP_IN_STREAM) {
1658 context->callback.sc = drop_tx_packets_initially;
1660 if (s == d->irq_target)
1661 context->callback.sc = irq_target_callback_skip;
1663 context->callback.sc = skip_rx_packets;
1666 context->callback.sc(context, tstamp, header_length, header, s);
1670 * amdtp_stream_start - start transferring packets
1674 * @queue_size: The number of packets in the queue.
1684 bool is_irq_target = (s == s->domain->irq_target);
1691 guard(mutex)(&s->mutex);
1694 (s->data_block_quadlets < 1)))
1695 return -EBADFD;
1697 if (s->direction == AMDTP_IN_STREAM) {
1700 return -EINVAL;
1702 s->data_block_counter = UINT_MAX;
1704 s->data_block_counter = 0;
1708 if (s->direction == AMDTP_IN_STREAM) {
1711 if (!(s->flags & CIP_NO_HEADER))
1722 err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
1725 s->queue_size = queue_size;
1727 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1730 if (IS_ERR(s->context)) {
1731 err = PTR_ERR(s->context);
1732 if (err == -EBUSY)
1733 dev_err(&s->unit->device,
1740 if (s->direction == AMDTP_IN_STREAM) {
1741 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1742 s->ctx_data.tx.ctx_header_size = ctx_header_size;
1743 s->ctx_data.tx.event_starts = false;
1745 if (s->domain->replay.enable) {
1748 s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1750 s->ctx_data.tx.cache.pos = 0;
1751 s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
1752 sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
1753 if (!s->ctx_data.tx.cache.descs) {
1754 err = -ENOMEM;
1772 s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
1773 if (!s->ctx_data.rx.seq.descs) {
1774 err = -ENOMEM;
1777 s->ctx_data.rx.seq.size = queue_size;
1778 s->ctx_data.rx.seq.pos = 0;
1780 entry = &initial_state[s->sfc];
1781 s->ctx_data.rx.data_block_state = entry->data_block;
1782 s->ctx_data.rx.syt_offset_state = entry->syt_offset;
1783 s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
1785 s->ctx_data.rx.event_count = 0;
1788 if (s->flags & CIP_NO_HEADER)
1789 s->tag = TAG_NO_CIP_HEADER;
1791 s->tag = TAG_CIP;
1794 // for runtime of PCM substream in the interval equivalent to the size of PCM buffer. It
1797 descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
1799 err = -ENOMEM;
1802 s->packet_descs = descs;
1804 INIT_LIST_HEAD(&s->packet_descs_list);
1805 for (i = 0; i < s->queue_size; ++i) {
1806 INIT_LIST_HEAD(&descs->link);
1807 list_add_tail(&descs->link, &s->packet_descs_list);
1810 s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
1812 s->packet_index = 0;
1816 if (s->direction == AMDTP_IN_STREAM) {
1825 sched_irq = !((s->packet_index + 1) %
1833 } while (s->packet_index > 0);
1835 /* NOTE: TAG1 matches CIP. This just affects the in-stream. */
1837 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
1840 s->ready_processing = false;
1841 err = fw_iso_context_start(s->context, -1, 0, tag);
1847 kfree(s->packet_descs);
1848 s->packet_descs = NULL;
1850 if (s->direction == AMDTP_OUT_STREAM) {
1851 kfree(s->ctx_data.rx.seq.descs);
1853 if (s->domain->replay.enable)
1854 kfree(s->ctx_data.tx.cache.descs);
1856 fw_iso_context_destroy(s->context);
1857 s->context = ERR_PTR(-1);
1859 iso_packets_buffer_destroy(&s->buffer, s->unit);
1865 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1869 * Returns the current buffer position, in frames.
1874 struct amdtp_stream *irq_target = d->irq_target;
1880 if (current_work() != &s->period_work)
1881 fw_iso_context_flush_completions(irq_target->context);
1884 return READ_ONCE(s->pcm_buffer_pointer);
1889 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1897 struct amdtp_stream *irq_target = d->irq_target;
1902 fw_iso_context_flush_completions(irq_target->context);
1909 * amdtp_stream_update - update the stream after a bus reset
1915 WRITE_ONCE(s->source_node_id_field,
1916 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
1921 * amdtp_stream_stop - stop sending packets
1929 guard(mutex)(&s->mutex);
1934 cancel_work_sync(&s->period_work);
1935 fw_iso_context_stop(s->context);
1936 fw_iso_context_destroy(s->context);
1937 s->context = ERR_PTR(-1);
1938 iso_packets_buffer_destroy(&s->buffer, s->unit);
1939 kfree(s->packet_descs);
1940 s->packet_descs = NULL;
1942 if (s->direction == AMDTP_OUT_STREAM) {
1943 kfree(s->ctx_data.rx.seq.descs);
1945 if (s->domain->replay.enable)
1946 kfree(s->ctx_data.tx.cache.descs);
1951 * amdtp_stream_pcm_abort - abort the running PCM device
1961 pcm = READ_ONCE(s->pcm);
1968 * amdtp_domain_init - initialize an AMDTP domain structure
1973 INIT_LIST_HEAD(&d->streams);
1975 d->events_per_period = 0;
1982 * amdtp_domain_destroy - destroy an AMDTP domain structure
1993 * amdtp_domain_add_stream - register an isoc context in the domain.
2004 list_for_each_entry(tmp, &d->streams, list) {
2006 return -EBUSY;
2009 list_add(&s->list, &d->streams);
2011 s->channel = channel;
2012 s->speed = speed;
2013 s->domain = d;
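A hypothetical driver-side sketch of the domain lifecycle around this registration; error handling is elided, and the channel numbers, speed, and start parameters are illustrative:

err = amdtp_domain_init(&d);
err = amdtp_domain_add_stream(&d, &tx_stream, tx_channel, SCODE_400);
err = amdtp_domain_add_stream(&d, &rx_stream, rx_channel, SCODE_400);

// tx_init_skip_cycles = 0; replay the tx sequence into rx on the fly.
err = amdtp_domain_start(&d, 0, true, true);

// ... streaming runs; PCM callbacks flush completions as needed ...

amdtp_domain_stop(&d);
amdtp_domain_destroy(&d);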
2027 list_for_each_entry(rx, &d->streams, list) {
2028 if (rx->direction == AMDTP_OUT_STREAM) {
2033 list_for_each_entry(s, &d->streams, list) {
2034 if (s->direction == AMDTP_IN_STREAM) {
2045 list_for_each_entry(s, &d->streams, list) {
2046 if (s->direction == AMDTP_IN_STREAM) {
2053 return -EINVAL;
2056 rx->ctx_data.rx.replay_target = tx;
2066 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
2070 * @replay_seq: whether to replay the sequence of packets in the IR context for the sequence of packets in
2073 * according to the arrival of events in tx packets.
2078 unsigned int events_per_buffer = d->events_per_buffer;
2079 unsigned int events_per_period = d->events_per_period;
2090 d->replay.enable = replay_seq;
2091 d->replay.on_the_fly = replay_on_the_fly;
2094 list_for_each_entry(s, &d->streams, list) {
2095 if (s->direction == AMDTP_OUT_STREAM) {
2101 return -ENXIO;
2102 d->irq_target = s;
2104 d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
2106 // This is the case in which AMDTP streams in the domain run just for MIDI
2110 events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
2115 amdtp_rate_table[d->irq_target->sfc]);
2117 list_for_each_entry(s, &d->streams, list) {
2120 if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
2122 amdtp_rate_table[d->irq_target->sfc]);
2126 err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
2133 list_for_each_entry(s, &d->streams, list)
2140 * amdtp_domain_stop - stop sending packets for isoc contexts in the domain.
2141 * @d: the AMDTP domain to which the isoc contexts belong.
2147 if (d->irq_target)
2148 amdtp_stream_stop(d->irq_target);
2150 list_for_each_entry_safe(s, next, &d->streams, list) {
2151 list_del(&s->list);
2153 if (s != d->irq_target)
2157 d->events_per_period = 0;
2158 d->irq_target = NULL;