// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Isochronous I/O functionality:
 * - Isochronous DMA context management
 * - Isochronous bus resource management (channels, bandwidth), client side
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/byteorder.h>

#include "core.h"

#include <trace/events/firewire.h>

/*
 * Isochronous DMA context management
 */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
        struct page **page_array __free(kfree) = kzalloc_objs(page_array[0], page_count);

        if (!page_array)
                return -ENOMEM;

        // Retrieve noncontiguous pages. Each descriptor of a 1394 OHCI isochronous DMA context
        // carries its own address and length, and pages are convenient to map into the virtual
        // address space of a user process.
        unsigned long nr_populated = alloc_pages_bulk(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO,
                                                      page_count, page_array);
        if (nr_populated != page_count) {
                // Assuming the above call fills page_array sequentially from the beginning.
                release_pages(page_array, nr_populated);
                return -ENOMEM;
        }

        buffer->page_count = page_count;
        buffer->pages = no_free_ptr(page_array);

        return 0;
}
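
/*
 * Illustrative only: with this page-oriented layout, a byte offset into the
 * virtually contiguous buffer seen by a user process maps to a page and an
 * in-page offset as follows (a sketch, not an API):
 *
 *	struct page *page = buffer->pages[offset >> PAGE_SHIFT];
 *	size_t in_page = offset & ~PAGE_MASK;
 *
 * Each 1394 OHCI DMA descriptor then refers to at most one such page by its
 * DMA address and length.
 */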

int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction)
{
        dma_addr_t *dma_addrs __free(kfree) = kzalloc_objs(dma_addrs[0], buffer->page_count);
        int i;

        if (!dma_addrs)
                return -ENOMEM;

        // Retrieve DMA mapping addresses for the pages. They are not contiguous. Cache coherency
        // for the pages is maintained by hand.
        for (i = 0; i < buffer->page_count; i++) {
                // dma_map_phys() with the physical address of each page could be used here instead.
                dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
                                                   direction);
                if (dma_mapping_error(card->device, dma_addr))
                        break;

                dma_addrs[i] = dma_addr;
        }
        if (i < buffer->page_count) {
                while (i-- > 0)
                        dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, direction);
                return -ENOMEM;
        }

        buffer->direction = direction;
        buffer->dma_addrs = no_free_ptr(dma_addrs);

        return 0;
}
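
/*
 * The cache maintenance mentioned above is left to the user of the mapping,
 * not done here. A minimal sketch, assuming page i was just filled by the
 * device (IR context) or is about to be handed to it (IT context):
 *
 *	dma_sync_single_for_cpu(card->device, buffer->dma_addrs[i], PAGE_SIZE,
 *				DMA_FROM_DEVICE);
 *	dma_sync_single_for_device(card->device, buffer->dma_addrs[i], PAGE_SIZE,
 *				   DMA_TO_DEVICE);
 *
 * The actual synchronization points live with the context user (e.g. the
 * 1394 OHCI driver), not in this file.
 */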

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
                       int page_count, enum dma_data_direction direction)
{
        int ret;

        ret = fw_iso_buffer_alloc(buffer, page_count);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map_dma(buffer, card, direction);
        if (ret < 0)
                fw_iso_buffer_destroy(buffer, card);

        return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
                           struct fw_card *card)
{
        if (buffer->dma_addrs) {
                for (int i = 0; i < buffer->page_count; ++i) {
                        dma_addr_t dma_addr = buffer->dma_addrs[i];
                        dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
                }
                kfree(buffer->dma_addrs);
                buffer->dma_addrs = NULL;
        }

        if (buffer->pages) {
                release_pages(buffer->pages, buffer->page_count);
                kfree(buffer->pages);
                buffer->pages = NULL;
        }

        buffer->page_count = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);

/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
        for (int i = 0; i < buffer->page_count; i++) {
                dma_addr_t dma_addr = buffer->dma_addrs[i];
                ssize_t offset = (ssize_t)completed - (ssize_t)dma_addr;
                if (offset > 0 && offset <= PAGE_SIZE)
                        return (i << PAGE_SHIFT) + offset;
        }

        return 0;
}

struct fw_iso_context *__fw_iso_context_create(struct fw_card *card, int type, int channel,
                                               int speed, size_t header_size,
                                               size_t header_storage_size,
                                               union fw_iso_callback callback, void *callback_data)
{
        struct fw_iso_context *ctx;

        ctx = card->driver->allocate_iso_context(card, type, channel, header_size,
                                                 header_storage_size);
        if (IS_ERR(ctx))
                return ctx;

        ctx->card = card;
        ctx->type = type;
        ctx->channel = channel;
        ctx->speed = speed;
        ctx->flags = 0;
        ctx->header_size = header_size;
        ctx->header_storage_size = header_storage_size;
        ctx->callback = callback;
        ctx->callback_data = callback_data;

        trace_isoc_outbound_allocate(ctx, channel, speed);
        trace_isoc_inbound_single_allocate(ctx, channel, header_size);
        trace_isoc_inbound_multiple_allocate(ctx);

        return ctx;
}
EXPORT_SYMBOL(__fw_iso_context_create);

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
        trace_isoc_outbound_destroy(ctx);
        trace_isoc_inbound_single_destroy(ctx);
        trace_isoc_inbound_multiple_destroy(ctx);

        ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);

int fw_iso_context_start(struct fw_iso_context *ctx,
                         int cycle, int sync, int tags)
{
        trace_isoc_outbound_start(ctx, cycle);
        trace_isoc_inbound_single_start(ctx, cycle, sync, tags);
        trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags);

        return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);

int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
        trace_isoc_inbound_multiple_channels(ctx, *channels);

        return ctx->card->driver->set_iso_channels(ctx, channels);
}
EXPORT_SYMBOL(fw_iso_context_set_channels);

int fw_iso_context_queue(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload)
{
        trace_isoc_outbound_queue(ctx, payload, packet);
        trace_isoc_inbound_single_queue(ctx, payload, packet);
        trace_isoc_inbound_multiple_queue(ctx, payload, packet);

        return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);

void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
        trace_isoc_outbound_flush(ctx);
        trace_isoc_inbound_single_flush(ctx);
        trace_isoc_inbound_multiple_flush(ctx);

        ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);

/**
 * fw_iso_context_flush_completions() - process isochronous context in current process context.
 * @ctx: the isochronous context
 *
 * Process the isochronous context in the current process context. The registered callback function
 * is called when a queued packet buffer with the interrupt flag is completed, either after
 * transmission in the IT context or after being filled in the IR context. Additionally, the
 * callback function is also called for the packet buffer completed last. Furthermore, the callback
 * function is called as well when the header buffer in the context becomes full. If it is required
 * to process the context asynchronously, fw_iso_context_schedule_flush_completions() is available
 * instead.
 *
 * Context: Process context. May sleep due to disable_work_sync().
 */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
        int err;

        trace_isoc_outbound_flush_completions(ctx);
        trace_isoc_inbound_single_flush_completions(ctx);
        trace_isoc_inbound_multiple_flush_completions(ctx);

        might_sleep();

        // Avoid deadlock due to a programming mistake.
        if (WARN_ON_ONCE(current_work() == &ctx->work))
                return 0;

        disable_work_sync(&ctx->work);

        err = ctx->card->driver->flush_iso_completions(ctx);

        enable_work(&ctx->work);

        return err;
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
        int err;

        trace_isoc_outbound_stop(ctx);
        trace_isoc_inbound_single_stop(ctx);
        trace_isoc_inbound_multiple_stop(ctx);

        might_sleep();

        // Avoid deadlock due to a programming mistake.
        if (WARN_ON_ONCE(current_work() == &ctx->work))
                return 0;

        err = ctx->card->driver->stop_iso(ctx);

        cancel_work_sync(&ctx->work);

        return err;
}
EXPORT_SYMBOL(fw_iso_context_stop);
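
/*
 * Rough call order for the context API above, as a non-normative sketch; the
 * in-tree users (e.g. the firewire-cdev layer and the ALSA firewire drivers)
 * are the reference:
 *
 *	ctx = __fw_iso_context_create(card, type, channel, speed, header_size,
 *				      header_storage_size, callback, callback_data);
 *	fw_iso_buffer_init(&buffer, card, page_count, direction);
 *	fw_iso_context_queue(ctx, &packet, &buffer, payload);	// repeated per packet
 *	fw_iso_context_queue_flush(ctx);
 *	fw_iso_context_start(ctx, cycle, sync, tags);
 *	...
 *	fw_iso_context_flush_completions(ctx);	// process context only
 *	fw_iso_context_stop(ctx);
 *	fw_iso_context_destroy(ctx);
 *	fw_iso_buffer_destroy(&buffer, card);
 */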

/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
                            int bandwidth, bool allocate)
{
        int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
        __be32 data[2];

        /*
         * On a 1394a IRM with low contention, try < 1 is enough.
         * On a 1394-1995 IRM, we need at least try < 2.
         * Let's just do try < 5.
         */
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
                        return -EBUSY;

                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
                                           CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
                                           data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all bandwidth. */
                        return allocate ? -EAGAIN : bandwidth;

                case RCODE_COMPLETE:
                        if (be32_to_cpup(data) == old)
                                return bandwidth;

                        old = be32_to_cpup(data);
                        /* Fall through. */
                }
        }

        return -EIO;
}

static int manage_channel(struct fw_card *card, int irm_id, int generation,
                          u32 channels_mask, u64 offset, bool allocate)
{
        __be32 bit, all, old;
        __be32 data[2];
        int channel, ret = -EIO, retry = 5;

        old = all = allocate ? cpu_to_be32(~0) : 0;

        for (channel = 0; channel < 32; channel++) {
                if (!(channels_mask & 1 << channel))
                        continue;

                ret = -EBUSY;

                bit = cpu_to_be32(1 << (31 - channel));
                if ((old & bit) != (all & bit))
                        continue;

                data[0] = old;
                data[1] = old ^ bit;
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
                                           offset, data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all channels. */
                        return allocate ? -EAGAIN : channel;

                case RCODE_COMPLETE:
                        if (data[0] == old)
                                return channel;

                        old = data[0];

                        /* Is the IRM 1394a-2000 compliant? */
                        if ((data[0] & bit) == (data[1] & bit))
                                continue;

                        fallthrough;    /* It's a 1394-1995 IRM, retry. */
                default:
                        if (retry) {
                                retry--;
                                channel--;
                        } else {
                                ret = -EIO;
                        }
                }
        }

        return ret;
}

static void deallocate_channel(struct fw_card *card, int irm_id,
                               int generation, int channel)
{
        u32 mask;
        u64 offset;

        mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
        offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

        manage_channel(card, irm_id, generation, mask, offset, false);
}
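
/*
 * Worked example for the bit ordering handled by manage_channel():
 * CHANNELS_AVAILABLE is big-endian with channel 0 at the MSB, so channel 3 of
 * the _HI register corresponds to host bit 31 - 3 = 28, i.e.
 * cpu_to_be32(1 << 28). An allocation attempt sends data[0] = old (the value
 * the IRM is expected to hold) and data[1] = old ^ bit (the bit cleared); the
 * IRM stores data[1] only if its register still equals data[0], and returns
 * the previous register value, which becomes the new expected value for the
 * next attempt.
 */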

/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 * @card: card interface for this action
 * @generation: bus generation
 * @channels_mask: bitmask for channel allocation
 * @channel: pointer for returning channel allocation result
 * @bandwidth: pointer for returning bandwidth allocation result
 * @allocate: whether to allocate (true) or deallocate (false)
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 *
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
                            bool allocate)
{
        u32 channels_hi = channels_mask;        /* channels 31...0 */
        u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
        int irm_id, ret, c = -EINVAL;

        scoped_guard(spinlock_irq, &card->lock)
                irm_id = card->irm_node->node_id;

        if (channels_hi)
                c = manage_channel(card, irm_id, generation, channels_hi,
                                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
                                   allocate);
        if (channels_lo && c < 0) {
                c = manage_channel(card, irm_id, generation, channels_lo,
                                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
                                   allocate);
                if (c >= 0)
                        c += 32;
        }
        *channel = c;

        if (allocate && channels_mask != 0 && c < 0)
                *bandwidth = 0;

        if (*bandwidth == 0)
                return;

        ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
        if (ret < 0)
                *bandwidth = 0;

        if (allocate && ret < 0) {
                if (c >= 0)
                        deallocate_channel(card, irm_id, generation, c);
                *channel = ret;
        }
}
EXPORT_SYMBOL(fw_iso_resource_manage);
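
/*
 * A minimal usage sketch for fw_iso_resource_manage(); the bandwidth value and
 * channel mask are arbitrary examples, and the generation must be re-read and
 * the call retried when an allocation fails with channel = -EAGAIN after a bus
 * reset:
 *
 *	int channel, bandwidth = 512;	// in bandwidth allocation units
 *
 *	fw_iso_resource_manage(card, generation, 1ULL << 35, &channel,
 *			       &bandwidth, true);
 *	if (channel < 0)
 *		;	// nothing was allocated, handle the error in channel
 *
 *	// Later, release the same resources:
 *	bandwidth = 512;
 *	fw_iso_resource_manage(card, generation, 1ULL << channel, &channel,
 *			       &bandwidth, false);
 */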