// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

// Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
// Add nothing.
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add two quadlets CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0	// Nothing.

// The initial firmware of OXFW970 can postpone packet transmission while finishing an
// asynchronous transaction. This module accepts up to 5 skipped cycles to avoid buffer
// overrun. When the actual device skips more than that, this module stops the packet
// streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5

static void pcm_period_work(struct work_struct *work);
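/*
 * Illustrative sketch only (not called by the driver; the function name is ours): how the
 * mask/shift macros above compose the second CIP quadlet. generate_cip_header() below does
 * the equivalent for real packets. Note that fdf = CIP_FDF_NO_DATA with syt = CIP_SYT_NO_INFO
 * yields exactly the CIP_NO_DATA pattern.
 */
static inline u32 example_compose_cip_quadlet1(unsigned int fmt, unsigned int fdf,
					       unsigned int syt)
{
	return CIP_EOH | ((fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
	       ((fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) | (syt & CIP_SYT_MASK);
}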
/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
 * @fmt: the value of the fmt field in the CIP header
 * @process_ctx_payloads: callback handler to process payloads of the isoc context
 * @protocol_size: the size of protocol-specific data to allocate
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]	 =  8,
	[CIP_SFC_44100]	 =  8,
	[CIP_SFC_48000]	 =  8,
	[CIP_SFC_88200]	 = 16,
	[CIP_SFC_96000]	 = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]	 =  32000,
	[CIP_SFC_44100]	 =  44100,
	[CIP_SFC_48000]	 =  48000,
	[CIP_SFC_88200]	 =  88200,
	[CIP_SFC_96000]	 =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	if (step == 0)
		return -EINVAL;

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
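// Worked example for apply_constraint_to_size() (illustrative figures): if the rate interval
// still admits 176.4 or 192.0 kHz, the step becomes 32 frames, so a requested size interval
// of [100, 1000] frames is refined to [128, 992].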
/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;

	hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;

	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of the isoc cycle. As
	// the 1394 OHCI controller can generate a hardware IRQ per isoc packet,
	// the interval is 125 usec.
	// However, there are two modes of transmission in IEC 61883-6: blocking
	// and non-blocking. In blocking mode, the sequence of isoc packets
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it is
	// 250 usec.
	// The upper limit of 250 msec has no protocol reason; it just avoids
	// consuming too many resources.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, USEC_PER_SEC / 4);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;

	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet units
 * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames from the number of
 *			  AMDTP events.
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	s->pcm_frame_multiplier = pcm_frame_multiplier;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
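// Worked example for the transfer delay above (illustrative): at 48.0 kHz in blocking mode,
// syt_interval is 8, so the additional term is 24576000 * 8 / 48000 = 4096 ticks. The
// resulting transfer_delay is 0x2e00 - 3072 + 4096 = 12800 ticks, roughly 520.8 microseconds.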
// The CIP header is processed as part of the context header, apart from the context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

#define prev_packet_desc(s, desc) \
	list_prev_entry_circular(desc, &s->packet_descs_list, link)

static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      unsigned int size, unsigned int pos, unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		pos = (pos + 1) % size;
	}
}

static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       unsigned int size, unsigned int pos,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.data_block_state = state;
}
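/*
 * Illustrative sketch only (not called by the driver; the function name is ours): at 44.1 kHz
 * the pattern above repeats every 80 packets and carries exactly 441 data blocks, matching
 * the exact average of 44100 / 8000 = 5.5125 events per isoc cycle.
 */
static inline unsigned int example_blocks_per_80_packets_at_44100(void)
{
	unsigned int phase, total = 0;

	for (phase = 0; phase < 80; ++phase)
		total += 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));

	return total;	// Always 441.
}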
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}

static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   unsigned int size, unsigned int pos, unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}

static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	// Round up.
	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract transfer delay so that the synchronization offset is not so large
	// at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}
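// Worked example for calculate_syt_offset() above (illustrative): at the 44.1 kHz family the
// exact per-cycle increment is 1386 + 34/147 ticks, so 34 of every 147 increments must be
// 1387. The index test supplies three of them in each of the eleven full runs of 13 phases
// (33 in total), and the phase == 146 test supplies the final one.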
// Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
// Additionally, the sequence of tx packets is strictly checked for any discontinuity before
// entries are filled into the queue. The calculation is therefore safe even though it looks
// fragile with respect to overrun.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.pos;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}

static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_pos = s->ctx_data.tx.cache.pos;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_pos;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_pos = (cache_pos + 1) % cache_size;
		src = amdtp_stream_next_packet_desc(s, src);
	}

	s->ctx_data.tx.cache.pos = cache_pos;
}

static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
				 unsigned int pos, unsigned int count)
{
	pool_ideal_syt_offsets(s, descs, size, pos, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, size, pos, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}

static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			      unsigned int pos, unsigned int count)
{
	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
	const unsigned int cache_size = target->ctx_data.tx.cache.size;
	unsigned int cache_pos = s->ctx_data.rx.cache_pos;
	int i;

	for (i = 0; i < count; ++i) {
		descs[pos] = cache[cache_pos];
		cache_pos = (cache_pos + 1) % cache_size;
		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.cache_pos = cache_pos;
}

static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			   unsigned int pos, unsigned int count)
{
	struct amdtp_domain *d = s->domain;
	void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			       unsigned int pos, unsigned int count);

	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_seq_descs = pool_ideal_seq_descs;
	} else {
		if (!d->replay.on_the_fly) {
			pool_seq_descs = pool_replayed_seq;
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
			const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);

			if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_seq_descs = pool_replayed_seq;
			else
				pool_seq_descs = pool_ideal_seq_descs;
		}
	}

	pool_seq_descs(s, descs, size, pos, count);
}
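// For example (illustrative figures): with a tx cache of 48 descriptors, the on-the-fly
// replay above is chosen only once more than 24 cycles are cached and the cached amount also
// exceeds the batch to schedule; otherwise the ideal sequence is pooled for this batch.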
static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;

		// The program in the user process should periodically check the status of the
		// intermediate buffer associated with the PCM substream to process PCM frames
		// in the buffer, instead of receiving notification of period elapsed by poll
		// wait.
		//
		// Use another work item for the period elapsed event to prevent the following
		// AB/BA deadlock:
		//
		//       thread 1                            thread 2
		//  =================================  =================================
		//  A.work item (process)              pcm ioctl (process)
		//    v                                  v
		//  process_rx_packets()               B.PCM stream lock
		//  process_tx_packets()                 v
		//    v                                callbacks in snd_pcm_ops
		//  update_pcm_pointers()                v
		//    snd_pcm_elapsed()                fw_iso_context_flush_completions()
		//      snd_pcm_stream_lock_irqsave()    disable_work_sync()
		//        v                                v
		//  wait until release of B            wait until A exits
		if (!pcm->runtime->no_period_wakeup)
			queue_work(system_highpri_wq, &s->period_work);
	}
}

static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				    (s->data_block_quadlets << CIP_DBS_SHIFT) |
				    ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				    data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
				    ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				    ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				    (syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index, u32 curr_cycle_time)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index, curr_cycle_time);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
			if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
				dbc_interval = s->ctx_data.tx.dbc_interval;
			else
				dbc_interval = *data_blocks;
		} else {
			dbc_interval = payload_length / sizeof(__be32);
		}

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index,
			       u32 curr_cycle_time)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle as if an empty packet arrived.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index, curr_cycle_time);

	return 0;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
// the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent it.
// Thus, via the Linux FireWire subsystem, we can get the 3 bits for the second.
static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}

static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return compute_ohci_iso_ctx_cycle_count(tstamp);
}

static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}

static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
{
	if (minuend < subtrahend)
		minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return minuend - subtrahend;
}

static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}

// Align to the actual cycle count for the packet which is going to be scheduled. This module
// queues the same number of isochronous packets as the size of the queue, one per isochronous
// cycle, therefore it's OK to just increment the cycle by the size of the queue for the
// scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
	return increment_ohci_cycle_count(cycle, queue_size);
}
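/*
 * Illustrative sketch only (not called by the driver; the function name is ours): the cycle
 * count above wraps at OHCI_SECOND_MODULUS * CYCLES_PER_SECOND = 64000 cycles, and
 * compare_ohci_cycle_count() is only meaningful while the distance between the two values is
 * less than half of that modulus.
 */
static inline void example_ohci_cycle_wraparound(void)
{
	// 63999 + 2 wraps around to 1, and 1 is evaluated as 'after' 63999.
	WARN_ON(increment_ohci_cycle_count(63999, 2) != 1);
	WARN_ON(compare_ohci_cycle_count(1, 63999) <= 0);
}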
static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				    const __be32 *ctx_header, unsigned int packet_count,
				    unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	u32 curr_cycle_time = 0;
	int i;
	int err;

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	*desc_count = 0;
	for (i = 0; i < packet_count; ++i) {
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle
				// corresponding to an empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description of the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					desc = amdtp_stream_next_packet_desc(s, desc);
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// an asynchronous transaction. For that reason, sequence
				// replay is impossible.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i, curr_cycle_time);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		desc = amdtp_stream_next_packet_desc(s, desc);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}
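// Worked example for compute_syt() above (illustrative figures): with transfer_delay = 12800
// ticks, a sequence entry with syt_offset = 1386 scheduled for cycle 100 gives 14186 ticks,
// i.e. 4 cycles plus 1898 ticks. The SYT field therefore carries (104 & 0x0f) = 8 in its
// cycle part and 1898 (0x76a) in its offset part, yielding 0x876a.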
static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				     const __be32 *ctx_header, unsigned int packet_count)
{
	struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int seq_pos = s->ctx_data.rx.seq.pos;
	unsigned int dbc = s->data_block_counter;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);

	for (i = 0; i < packet_count; ++i) {
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_pos;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_pos = (seq_pos + 1) % seq_size;
		desc = amdtp_stream_next_packet_desc(s, desc);

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.pos = seq_pos;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	struct work_struct *work = current_work();

	s->packet_index = -1;

	// Detect work items for any isochronous context. The work item for pcm_period_work()
	// should be avoided since snd_pcm_period_elapsed() can be reached via
	// snd_pcm_ops.pointer() while the PCM stream (group) lock is acquired, which causes a
	// deadlock at snd_pcm_stop_xrun().
	if (work && work != &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
						 const struct pkt_desc *desc, unsigned int count)
{
	unsigned int data_block_count = 0;
	u32 latest_cycle;
	u32 cycle_time;
	u32 curr_cycle;
	u32 cycle_gap;
	int i, err;

	if (count == 0)
		goto end;

	// Forward to the latest record.
	for (i = 0; i < count - 1; ++i)
		desc = amdtp_stream_next_packet_desc(s, desc);
	latest_cycle = desc->cycle;

	err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
	if (err < 0)
		goto end;

	// Compute the cycle count with the lower 3 bits of the second field and the cycle
	// field, like the timestamp format of a 1394 OHCI isochronous context.
	curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: The AMDTP packet descriptor should be for a past isochronous cycle
		// since it corresponds to an arrived isochronous packet.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);

		// NOTE: estimate the delay by the recent history of arrived AMDTP packets.
		// The estimated value expectedly corresponds to a few packets (0-2) since the
		// packet which arrived at the most recent isochronous cycle has already been
		// processed.
		for (i = 0; i < cycle_gap; ++i) {
			desc = amdtp_stream_next_packet_desc(s, desc);
			data_block_count += desc->data_blocks;
		}
	} else {
		// NOTE: The AMDTP packet descriptor should be for a future isochronous cycle
		// since it was already scheduled.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);

		// NOTE: use the history of scheduled packets.
		for (i = 0; i < cycle_gap; ++i) {
			data_block_count += desc->data_blocks;
			desc = prev_packet_desc(s, desc);
		}
	}
end:
	return data_block_count * s->pcm_frame_multiplier;
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *desc,
				 unsigned int count)
{
	struct snd_pcm_substream *pcm;
	int i;

	pcm = READ_ONCE(s->pcm);
	s->process_ctx_payloads(s, desc, count, pcm);

	if (pcm) {
		unsigned int data_block_count = 0;

		pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);

		for (i = 0; i < count; ++i) {
			data_block_count += desc->data_blocks;
			desc = amdtp_stream_next_packet_desc(s, desc);
		}

		update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
	}
}

static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int pkt_header_length;
	unsigned int packets;
	u32 curr_cycle_time;
	bool need_hw_irq;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_rx_packet_descs(s, desc, ctx_header, packets);

	process_ctx_payloads(s, desc, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	if (s == d->irq_target) {
		// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed
		// by tasks of the user process operating the ALSA PCM character device via
		// ioctl(2) requests, instead of by the scheduled hardware IRQ of an IT context.
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
		need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
	} else {
		need_hw_irq = false;
	}

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	for (i = 0; i < packets; ++i) {
		DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS);
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i, curr_cycle_time);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = need_hw_irq;
			}
		}

		if (queue_out_packet(s, template, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}

		desc = amdtp_stream_next_packet_desc(s, desc);
	}

	s->ctx_data.rx.event_count = event_count;
	s->packet_descs_cursor = desc;
}

static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);

static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		if (d->replay.enable)
			s->ctx_data.rx.cache_pos = 0;

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}

static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int packet_count;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
	packet_count = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, desc, desc_count);

		if (d->replay.enable)
			cache_seq(s, desc, desc_count);

		for (i = 0; i < desc_count; ++i)
			desc = amdtp_stream_next_packet_desc(s, desc);
		s->packet_descs_cursor = desc;
	}

	for (i = 0; i < packet_count; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}

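/*
 * Illustrative sketch only (not called by the driver; the function name is ours): the event
 * detection in drop_tx_packets_initially() below treats a packet as carrying no event when
 * its second CIP quadlet matches the NO_DATA pattern, i.e. FDF = 0xff and SYT = 0xffff.
 */
static inline bool example_cip_is_no_data(u32 cip1)
{
	return (cip1 & CIP_NO_DATA) == CIP_NO_DATA;
}
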
static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they
					// are not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing the content of packets in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}

static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count to begin processing the content of packets in IT contexts.
	// All of the IT contexts are expected to have started and to get callbacks by the time
	// processing reaches here.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;
		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}

// This is executed one time only. For an in-stream, the first packet has arrived. For an
// out-stream, the driver is prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval at which an IRQ is scheduled while queueing packets in the
 *		       initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	struct pkt_desc *descs;
	int i, type, tag, err;

	guard(mutex)(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1)))
		return -EBADFD;

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target)
			return -EINVAL;

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		// Although it has no effect for an IT context, this value is required to
		// compute the size of the header storage correctly.
		ctx_header_size = sizeof(__be32);
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		return err;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create_with_header_storage_size(
		fw_parent_device(s->unit)->card, type, channel, speed, ctx_header_size,
		ctx_header_size * queue_size, amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
			// possible to cache unexpectedly many entries.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.pos = 0;
			s->ctx_data.tx.cache.descs = kzalloc_objs(*s->ctx_data.tx.cache.descs,
								  s->ctx_data.tx.cache.size);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]	 = {  4, 3072 },
			[CIP_SFC_48000]	 = {  6, 1024 },
			[CIP_SFC_96000]	 = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]	 = {  0,   67 },
			[CIP_SFC_88200]	 = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kzalloc_objs(*s->ctx_data.rx.seq.descs,
							queue_size);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.pos = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	// NOTE: When operating without hardIRQ/softIRQ, applications tend to issue ioctl
	// requests for the runtime of the PCM substream at an interval equivalent to the size
	// of the PCM buffer. This could wrap around the queue of AMDTP packet descriptors with
	// a small loss of history. To be safe, keep 8 more elements in the queue, equivalent
	// to 1 ms.
	descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
	if (!descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_descs = descs;

	INIT_LIST_HEAD(&s->packet_descs_list);
	for (i = 0; i < s->queue_size; ++i) {
		INIT_LIST_HEAD(&descs->link);
		list_add_tail(&descs->link, &s->packet_descs_list);
		++descs;
	}
	s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	return 0;
err_pkt_descs:
	kfree(s->packet_descs);
	s->packet_descs = NULL;
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// The work item to call snd_pcm_period_elapsed() can reach here by the call of
		// snd_pcm_ops.pointer(), however fewer packets would be available then. Therefore
		// the following call is just for user process contexts.
		if (current_work() != &s->period_work)
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
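// A minimal sketch (not taken from any particular unit driver) of how the helper above is
// typically used from snd_pcm_ops.pointer(). The type 'struct example' and its members are
// hypothetical; a real unit driver keeps the domain and streams in its own private structure:
//
//	static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *substream)
//	{
//		struct example *ex = substream->private_data;
//
//		// For capture, the PCM data is transported by the tx stream of the unit.
//		return amdtp_domain_stream_pcm_pointer(&ex->domain, &ex->tx_stream);
//	}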
/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Always returns zero.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for the recent isochronous cycle to handle the queued PCM
	// frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	guard(mutex)(&s->mutex);

	if (!amdtp_stream_running(s))
		return;

	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->packet_descs);
	s->packet_descs = NULL;

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present there is nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register an isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: the firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

// Make the reference from an rx stream to a tx stream for sequence replay. When the number of tx
// streams is fewer than the number of rx streams, the first tx stream is selected.
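// For example, with two tx streams and three rx streams, the associations become
// rx0 -> tx0, rx1 -> tx1, and rx2 -> tx0 (falling back to the first tx entry).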
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make the association to the replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}
				// No target is available to replay the sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;

			++dst_index;
		}
	}

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for the isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of
 *			 IR contexts.
 * @replay_seq: whether to replay the sequence of packets in the IR context for the sequence of
 *		packets in the IT context.
 * @replay_on_the_fly: transfer rx packets according to the nominal frequency at first, then begin
 *		       to replay according to the arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as the IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is the case in which the AMDTP streams in the domain run just for a MIDI
	// substream. Use the number of events equivalent to 10 msec as the interval of
	// hardware IRQs.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);
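	// For example, at 48000 Hz with no PCM substream: events_per_period becomes
	// 48000 / 100 = 480, events_per_buffer becomes 1440, and queue_size is
	// DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets. In the loop below, the IRQ target
	// then gets idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000) = 80.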
	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Start immediately, though the DMA context actually starts several hundred
		// cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for the isoc contexts in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
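// A minimal sketch of the domain lifecycle from the perspective of a unit driver, assuming a
// hypothetical 'struct example' which embeds the domain and two already-configured streams.
// The channel numbers would come from the device's connection management; the zero/false
// arguments disable the initial cycle skip and sequence replay:
//
//	static int example_start_duplex(struct example *ex)
//	{
//		int err;
//
//		err = amdtp_domain_init(&ex->domain);
//		if (err < 0)
//			return err;
//
//		err = amdtp_domain_add_stream(&ex->domain, &ex->tx_stream,
//					      ex->tx_channel, SCODE_400);
//		if (err < 0)
//			return err;
//
//		err = amdtp_domain_add_stream(&ex->domain, &ex->rx_stream,
//					      ex->rx_channel, SCODE_400);
//		if (err < 0)
//			return err;
//
//		// An IT context is selected as the IRQ target internally.
//		return amdtp_domain_start(&ex->domain, 0, false, false);
//	}
//
//	static void example_stop_duplex(struct example *ex)
//	{
//		amdtp_domain_stop(&ex->domain);
//		amdtp_domain_destroy(&ex->domain);
//	}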