// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header, tstamp and 2 CIP headers.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		8 // For 2 CIP headers.
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.
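
// The layout of the two CIP quadlets, as encoded/decoded with the masks and
// shifts above (a summary for reference only; IEC 61883-1/6 are the
// authoritative definitions):
//
//	quadlet 0:	bit 31		EOH0 (zero)
//			bits 29-24	SID (source node ID)
//			bits 23-16	DBS (data block size, in quadlets)
//			bit 10		SPH (source packet header)
//			bits 7-0	DBC (data block counter)
//	quadlet 1:	bit 31		EOH1 (one)
//			bits 29-24	FMT (format ID, 0x10 for AM824)
//			bits 23-16	FDF (format dependent field)
//			bits 15-0	SYT (presentation timestamp)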

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_ctx_payloads: callback handler to process payloads of the isoc
 *			  context
 * @protocol_size: the size to allocate newly for protocol-specific data
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	if (dir == AMDTP_OUT_STREAM)
		s->ctx_data.rx.syt_override = -1;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
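
// For example (illustrative numbers only): if the rate interval still allows
// both 48000 and 96000, the step becomes max(8, 16) = 16 frames, so a
// requested size interval of [100, 1000] is refined to [112, 992].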

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for 1394 OHCI controllers voluntarily flushes the
	// isoc context when the total size of accumulated context headers
	// reaches PAGE_SIZE. This kicks the tasklet for the isoc context and
	// brings the callback in the middle of the scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of the context header for IR contexts is used for
	// both types of context.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
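	// As a worked example (assuming the common 4 KiB PAGE_SIZE, which is
	// architecture dependent): with 16 byte IR context headers, the OHCI
	// driver flushes after 4096 / 16 = 256 packets, i.e. 256 isoc cycles,
	// so the period time is capped at 1000000 * 4096 / 8000 / 16 =
	// 32000 usec.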

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of isoc cycle. As
	// a 1394 OHCI controller can generate a hardware IRQ per isoc packet,
	// the interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6: blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc
	// packets includes 'empty' or 'NODATA' packets which include no event.
	// In non-blocking mode, the number of events per packet is variable up
	// to the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
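
// A worked example for amdtp_stream_get_max_payload() (hypothetical
// parameters): a 48.0 kHz stream with 10 quadlets per data block has a syt
// interval of 8, so the maximum payload is 8 (CIP header) + 8 * 10 * 4 = 328
// bytes, with the data portion multiplied by 5 when CIP_JUMBO_PAYLOAD is set.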

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = s->ctx_data.rx.data_block_state;
		} else {
			phase = s->ctx_data.rx.data_block_state;

			/*
			 * This calculates the number of data blocks per
			 * packet so that
			 * 1) the overall rate is correct and exactly
			 *    synchronized to the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur
			 *    as early as possible in the sequence (to prevent
			 *    underruns of the device's buffer).
			 */
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->ctx_data.rx.data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->ctx_data.rx.last_syt_offset +
				     s->ctx_data.rx.syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample
			 * is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between
			 * successive elements is about 1386.23. Rounding the
			 * results of this formula to the SYT precision results
			 * in a sequence of differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 ...
			 * This code generates _exactly_ the same sequence.
			 */
			phase = s->ctx_data.rx.syt_offset_state;
			index = phase % 13;
			syt_offset = s->ctx_data.rx.last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->ctx_data.rx.syt_offset_state = phase;
		}
	} else
		syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
	s->ctx_data.rx.last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->ctx_data.rx.transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}
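
// The SYT value returned by calculate_syt() thus encodes the low 4 bits of
// the target cycle in bits 15-12 and the offset within that cycle (0..3071
// ticks) in bits 11-0. For example (illustrative values): cycle = 100 with an
// offset of 5000 ticks after adding the transfer delay lands in cycle 101 at
// tick 1928, giving ((101 & 15) << 12) | 1928 = 0x5788.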

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params,
				   bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter,
				unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = 2 * sizeof(__be32);
		payload_length += params->header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   data_block_counter, index);
}
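
// For instance, generate_cip_header() with hypothetical values (source node
// ID 1, 10 quadlets per data block, sph = 0, dbc = 0x40, fmt = CIP_FMT_AM,
// fdf = 0x02, syt = 0x1234) yields the header pair 0x010a0040 0x90021234.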

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter,
			    unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int index)
{
	const __be32 *cip_header;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (*payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, data_block_counter, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		err = 0;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   *data_block_counter, index);

	return err;
}
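
// The IR context header parsed above is laid out as follows (for streams
// with CIP headers; this restates how the code reads it):
//	quadlet 0:	isochronous packet header, with the data length in
//			the upper 16 bits (ISO_DATA_LENGTH_SHIFT)
//	quadlet 1:	timestamp for the cycle in which the packet arrived
//	quadlets 2-3:	the two CIP quadlets, handled by check_cip_header()
// which is why IR_CTX_HEADER_SIZE_CIP is 16 bytes and the CIP quadlets are
// read at ctx_header + 2.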

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second field. On the other hand, in the DMA descriptors of 1394 OHCI, 3
// bits are used to represent it. Thus, via the Linux firewire subsystem, we
// can get the 3 bits for the second field.
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled. This module queues packets for the same number of isochronous
// cycles as the size of the queue, therefore it's OK to just increment the
// cycle by the queue size to get the scheduled cycle.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
				   unsigned int queue_size)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, queue_size);
}
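
// For example: a tstamp whose low 16 bits are 0x6123 decodes to second bits
// 3 and cycle 0x123 (291), i.e. cycle count 3 * 8000 + 291 = 24291; the
// counts wrap modulo 8 * CYCLES_PER_SECOND = 64000.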

static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;
	int err;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		unsigned int cycle;
		unsigned int payload_length;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_cycle_count(ctx_header[1]);

		err = parse_ir_ctx_header(s, cycle, ctx_header,
					  &payload_length, &data_blocks, &dbc,
					  &syt, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		ctx_header +=
			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	s->data_block_counter = dbc;

	return 0;
}

static void generate_ideal_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;

		desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
		desc->syt = calculate_syt(s, desc->cycle);
		desc->data_blocks = calculate_data_blocks(s, desc->syt);

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		++ctx_header;
	}

	s->data_block_counter = dbc;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_interrupt())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}

static void amdtp_stream_master_callback(struct fw_iso_context *context,
					 u32 tstamp, size_t header_length,
					 void *header, void *private_data);

static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
						u32 tstamp,
						size_t header_length,
						void *header,
						void *private_data);

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	bool is_irq_target;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);

	process_ctx_payloads(s, s->pkt_descs, packets);

	is_irq_target =
		!!(context->callback.sc == amdtp_stream_master_callback ||
		   context->callback.sc == amdtp_stream_master_first_callback);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks,
				    desc->data_block_counter, syt, i);

		if (is_irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, packets);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void amdtp_stream_master_callback(struct fw_iso_context *context,
					 u32 tstamp, size_t header_length,
					 void *header, void *private_data)
{
	struct amdtp_domain *d = private_data;
	struct amdtp_stream *irq_target = d->irq_target;
	struct amdtp_stream *s;

	out_stream_callback(context, tstamp, header_length, header,
			    irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

// This is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header, s->queue_size);

		context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
					       u32 tstamp,
					       size_t header_length,
					       void *header,
					       void *private_data)
{
	struct amdtp_domain *d = private_data;
	struct amdtp_stream *s = d->irq_target;
	const __be32 *ctx_header = header;

	s->callbacked = true;
	wake_up(&s->callback_wait);

	s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);

	context->callback.sc = amdtp_stream_master_callback;

	context->callback.sc(context, tstamp, header_length, header, d);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @d: the AMDTP domain to which the AMDTP stream belongs
 * @is_irq_target: whether the isoc context for the AMDTP stream is used to
 *		   generate hardware IRQs.
 * @start_cycle: the isochronous cycle at which to start the context. Start
 *		 immediately if a negative value is given.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      struct amdtp_domain *d, bool is_irq_target,
			      int start_cycle)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int idle_irq_interval;
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;
	fw_iso_callback_t ctx_cb;
	void *ctx_data;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: An IT context should be used for constant IRQs.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		entry = &initial_state[s->sfc];

		s->data_block_counter = 0;
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
				       ctx_header_size;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	// This is the case that the AMDTP streams in the domain run just for
	// a MIDI substream. Use the number of events equivalent to 10 msec as
	// the interval of hardware IRQs.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[s->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
					 amdtp_rate_table[s->sfc]);
	s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				     amdtp_rate_table[s->sfc]);

	err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;

	if (is_irq_target) {
		s->ctx_data.rx.events_per_period = events_per_period;
		s->ctx_data.rx.event_count = 0;
		ctx_cb = amdtp_stream_master_first_callback;
		ctx_data = d;
	} else {
		ctx_cb = amdtp_stream_first_callback;
		ctx_data = s;
	}

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed,
					   ctx_header_size, ctx_cb, ctx_data);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the IR context. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
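
// A worked example for the sizing logic in amdtp_stream_start()
// (hypothetical parameters): at 48.0 kHz with events_per_period and
// events_per_buffer left at zero, the defaults become 480 and 1440 events,
// so idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000) = 80 cycles
// (10 msec) and queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets.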

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in the software IRQ context of the
		// period_tasklet or in process context.
		//
		// When the software IRQ context was scheduled by the software
		// IRQ context of the IT contexts, queued packets were already
		// handled. Therefore there is no need to flush the queue in
		// the buffer any further.
		//
		// When process context reaches here, some packets will
		// already be queued in the buffer. These packets should be
		// handled immediately to keep better granularity of the PCM
		// pointer.
		//
		// Later, the process context sometimes schedules the software
		// IRQ context of the period_tasklet. Then there is no need to
		// flush the queue, for the same reason as described above.
		if (!in_interrupt()) {
			// Queued packets should be processed without any
			// kernel preemption to keep the latency against the
			// bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for the recent isochronous cycle to
	// handle queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packets should be processed without any kernel
		// preemption to keep the latency against the bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
								CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
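
// A minimal sketch of how a unit driver is expected to drive the domain API
// below (error handling omitted; the stream setup calls are the ones defined
// earlier in this file):
//
//	amdtp_domain_init(&d);
//	amdtp_stream_init(&s, unit, AMDTP_OUT_STREAM, flags, fmt,
//			  process_ctx_payloads, protocol_size);
//	amdtp_stream_set_parameters(&s, rate, data_block_quadlets);
//	amdtp_domain_add_stream(&d, &s, channel, speed);
//	amdtp_domain_start(&d, 0);
//	...
//	amdtp_domain_stop(&d);
//	amdtp_domain_destroy(&d);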

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present there is nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register an isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
{
	int generation;
	int rcode;
	__be32 reg;
	u32 data;

	// This is a request to the local 1394 OHCI controller and is expected
	// to complete without any event waiting.
	generation = fw_card->generation;
	smp_rmb();	// node_id vs. generation.
	rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
				   fw_card->node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				   &reg, sizeof(reg));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	data = be32_to_cpu(reg);
	*cur_cycle = data >> 12;

	return 0;
}
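
// The CYCLE_TIME register packs the second field into bits 31-25, the cycle
// field into bits 24-12 and the cycle offset into bits 11-0, so the shift
// above drops the offset and leaves (second << 13) | cycle in *cur_cycle.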

/**
 * amdtp_domain_start - start sending packets for the isoc contexts in the
 *			domain.
 * @d: the AMDTP domain.
 * @ir_delay_cycle: the cycle delay to start all IR contexts.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
	struct amdtp_stream *s;
	int cycle;
	int err;

	// Select an IT context as the IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM)
			break;
	}
	// list_for_each_entry() leaves the cursor pointing at the list head
	// when nothing matches, so detect the 'not found' case explicitly
	// instead of checking for NULL.
	if (&s->list == &d->streams)
		return -ENXIO;
	d->irq_target = s;

	if (ir_delay_cycle > 0) {
		struct fw_card *fw_card = fw_parent_device(s->unit)->card;

		err = get_current_cycle_time(fw_card, &cycle);
		if (err < 0)
			return err;

		// No need to care about overflow in the cycle field because
		// of its enough width.
		cycle += ir_delay_cycle;

		// Round up to the sec field, keeping the remainder as the
		// cycle field.
		if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
			unsigned int sec;

			// The sec field can overflow.
			sec = (cycle & 0xffffe000) >> 13;
			cycle = (++sec << 13) |
				((cycle & 0x00001fff) % CYCLES_PER_SECOND);
		}

		// In the 1394 OHCI specification, the lower 2 bits are
		// available for the sec field.
		cycle &= 0x00007fff;
	} else {
		cycle = -1;
	}

	list_for_each_entry(s, &d->streams, list) {
		int cycle_match;

		if (s->direction == AMDTP_IN_STREAM) {
			cycle_match = cycle;
		} else {
			// An IT context starts immediately.
			cycle_match = -1;
		}

		if (s != d->irq_target) {
			err = amdtp_stream_start(s, s->channel, s->speed, d,
						 false, cycle_match);
			if (err < 0)
				goto error;
		}
	}

	s = d->irq_target;
	err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
	if (err < 0)
		goto error;

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for the isoc contexts in the same
 *		       domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);