/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@suse.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
		return func(private_data, event->data.ext.ptr, len);
	}

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
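
/*
 * Illustrative sketch (not part of this file; the names 'count_bytes',
 * 'ev' and 'total' are hypothetical): a minimal dump callback that
 * accumulates the number of bytes visited.  Any function matching
 * snd_seq_dump_func_t can be passed to snd_seq_dump_var_event() above,
 * regardless of which of the three storage formats the event uses.
 *
 *	static int count_bytes(void *private_data, void *buf, int size)
 *	{
 *		int *total = private_data;
 *		*total += size;
 *		return 0;	// a negative return would abort the dump
 *	}
 *
 *	int total = 0;
 *	int err = snd_seq_dump_var_event(ev, count_bytes, &total);
 *	// on success, total equals the length of the external data
 */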

/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = ((len + size_aligned - 1) / size_aligned) * size_aligned;
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
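
/*
 * Illustrative sketch (assuming a kernel-space caller; 'ev' and 'buf'
 * are hypothetical): expand the external data of a variable-length
 * event into a flat kernel buffer.  No alignment padding is requested,
 * so size_aligned is 0.
 *
 *	int len = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (buf) {
 *		int err = snd_seq_expand_var_event(ev, len, buf, 1, 0);
 *		// err < 0 on failure, otherwise the expanded length;
 *		// -EAGAIN means the destination buffer is too small
 *	}
 */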

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	snd_assert(cell != NULL, return);
	pool = cell->pool;
	snd_assert(pool != NULL, return);

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			/* release the chained cells as well */
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {
		/* sleep until a cell is returned or the pool is closed */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* the pool is being closed */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

      __error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

      __error:
	snd_seq_cell_free(cell);
	return err;
}
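
/*
 * Worked example of the decomposition above: with a 28-byte
 * struct snd_seq_event (the usual size; the exact sizeof is
 * architecture-dependent, so 28 is illustrative), a 100-byte sysex
 * payload needs ncells = (100 + 28 - 1) / 28 = 4 chained cells, plus
 * the head cell holding the event itself -- five cells in total, all
 * of which must fit below pool->total_elements.
 */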

/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	snd_assert(pool != NULL, return -EINVAL);
	if (pool->ptr)		/* should be atomic? */
		return 0;

	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (pool->ptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* release the pool and all of its cells */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;	/* wait up to five seconds */

	snd_assert(pool != NULL, return -EINVAL);

	/* mark the pool closing and wake up any sleeping allocators */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
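
/*
 * Typical pool lifecycle, as a sketch (the 'events' count is a
 * caller-chosen value; error handling abbreviated):
 *
 *	struct snd_seq_pool *pool = snd_seq_pool_new(events);
 *	if (pool) {
 *		snd_seq_pool_init(pool);	// vmalloc the cell array
 *		...				// snd_seq_event_dup() /
 *						// snd_seq_cell_free()
 *		snd_seq_pool_delete(&pool);	// done + kfree, NULLs *ppool
 *	}
 */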