/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use.
 *
 * Locking: none
 */

void tty_buffer_free_all(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                kfree(thead);
        }
        while ((thead = tty->buf.free) != NULL) {
                tty->buf.free = thead->next;
                kfree(thead);
        }
        tty->buf.tail = NULL;
        tty->buf.memory_used = 0;
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue.
 *
 * Locking: Caller must hold tty->buf.lock
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *p;

        if (tty->buf.memory_used + size > 65536)
                return NULL;
        p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
        if (p == NULL)
                return NULL;
        p->used = 0;
        p->size = size;
        p->next = NULL;
        p->commit = 0;
        p->read = 0;
        p->char_buf_ptr = (char *)(p->data);
        p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
        tty->buf.memory_used += size;
        return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy.
 *
 * Locking: Caller must hold tty->buf.lock
 */

static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
        /* Dumb strategy for now - should keep some stats */
        tty->buf.memory_used -= b->size;
        WARN_ON(tty->buf.memory_used < 0);

        if (b->size >= 512)
                kfree(b);
        else {
                b->next = tty->buf.free;
                tty->buf.free = b;
        }
}

/**
 * __tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. Caller must
 * hold the buffer lock and must have ensured no parallel flush to
 * ldisc is running.
 *
 * Locking: Caller must hold tty->buf.lock
 */

static void __tty_buffer_flush(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                tty_buffer_free(tty, thead);
        }
        tty->buf.tail = NULL;
}
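/*
 * Buffer layout (illustrative sketch, not additional driver logic): the
 * single kmalloc() in tty_buffer_alloc() above carves one allocation into
 * the buffer header, the character data and the per-character flag bytes:
 *
 *	p                   p->char_buf_ptr      p->flag_buf_ptr
 *	|                   |                    |
 *	+-------------------+--------------------+--------------------+
 *	| struct tty_buffer | size char bytes    | size flag bytes    |
 *	+-------------------+--------------------+--------------------+
 *
 * so char_buf_ptr[i] and flag_buf_ptr[i] always describe the same received
 * character.  Only @size (not 2 * size) is charged to tty->buf.memory_used,
 * and tty_buffer_free() hands buffers of 512 bytes or more straight back to
 * kfree() rather than caching them on the free list.
 */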
/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. If the buffer is
 * being processed by flush_to_ldisc then we defer the processing
 * to that function.
 *
 * Locking: none
 */

void tty_buffer_flush(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* If the data is being pushed to the tty layer then we can't
           process it here. Instead set a flag and the flush_to_ldisc
           path will process the flush request before it exits */
        if (test_bit(TTY_FLUSHING, &tty->flags)) {
                set_bit(TTY_FLUSHPENDING, &tty->flags);
                spin_unlock_irqrestore(&tty->buf.lock, flags);
                wait_event(tty->read_wait,
                                test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
                return;
        } else
                __tty_buffer_flush(tty);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
}

/**
 * tty_buffer_find - find a free tty buffer
 * @tty: tty owning the buffer
 * @size: characters wanted
 *
 * Locate an existing suitable tty buffer or if we are lacking one then
 * allocate a new one. We round our buffers off in 256 character chunks
 * to get better allocation behaviour.
 *
 * Locking: Caller must hold tty->buf.lock
 */

static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
        struct tty_buffer **tbh = &tty->buf.free;

        while ((*tbh) != NULL) {
                struct tty_buffer *t = *tbh;
                if (t->size >= size) {
                        *tbh = t->next;
                        t->next = NULL;
                        t->used = 0;
                        t->commit = 0;
                        t->read = 0;
                        tty->buf.memory_used += t->size;
                        return t;
                }
                tbh = &((*tbh)->next);
        }
        /* Round the buffer size out */
        size = (size + 0xFF) & ~0xFF;
        return tty_buffer_alloc(tty, size);
        /* Should possibly check if this fails for the largest buffer we
           have queued and recycle that? */
}

/**
 * __tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Caller must hold tty->buf.lock
 */

static int __tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *b, *n;
        int left;

        /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
           remove this conditional if it's worth it. This would be invisible
           to the callers */
        if ((b = tty->buf.tail) != NULL)
                left = b->size - b->used;
        else
                left = 0;

        if (left < size) {
                /* This is the slow path - looking for new buffers to use */
                if ((n = tty_buffer_find(tty, size)) != NULL) {
                        if (b != NULL) {
                                b->next = n;
                                b->commit = b->used;
                        } else
                                tty->buf.head = n;
                        tty->buf.tail = n;
                } else
                        size = left;
        }

        return size;
}
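/*
 * Worked example for the two helpers above (illustrative numbers): a
 * request for 300 bytes when the tail buffer has only 40 bytes left makes
 * tty_buffer_find() round the request up to 512 bytes
 * ((300 + 0xFF) & ~0xFF) and chain in a new buffer, so
 * __tty_buffer_request_room() returns the full 300.  If no buffer can be
 * found or allocated (free list empty, kmalloc() failure, or the 64kB
 * memory_used ceiling reached) the return value drops back to the 40
 * bytes that were already available in the tail.
 */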
/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
        unsigned long flags;
        int length;

        spin_lock_irqsave(&tty->buf.lock, flags);
        length = __tty_buffer_request_room(tty, size);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        return length;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */

int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
                const unsigned char *chars, char flag, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space;
                unsigned long flags;
                struct tty_buffer *tb;

                spin_lock_irqsave(&tty->buf.lock, flags);
                space = __tty_buffer_request_room(tty, goal);
                tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0)) {
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
                        break;
                }
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memset(tb->flag_buf_ptr + tb->used, flag, space);
                tb->used += space;
                spin_unlock_irqrestore(&tty->buf.lock, flags);
                copied += space;
                chars += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */

int tty_insert_flip_string_flags(struct tty_struct *tty,
                const unsigned char *chars, const char *flags, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space;
                unsigned long __flags;
                struct tty_buffer *tb;

                spin_lock_irqsave(&tty->buf.lock, __flags);
                space = __tty_buffer_request_room(tty, goal);
                tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0)) {
                        spin_unlock_irqrestore(&tty->buf.lock, __flags);
                        break;
                }
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memcpy(tb->flag_buf_ptr + tb->used, flags, space);
                tb->used += space;
                spin_unlock_irqrestore(&tty->buf.lock, __flags);
                copied += space;
                chars += space;
                flags += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
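/*
 * Typical receive path (sketch only; example_uart_rx() is a hypothetical
 * driver hook used purely for illustration): an interrupt handler queues
 * the received bytes with one of the insert helpers above and then asks
 * for a flip to the line discipline:
 *
 *	static void example_uart_rx(struct tty_struct *tty,
 *				    const unsigned char *buf, int len)
 *	{
 *		tty_insert_flip_string_fixed_flag(tty, buf, TTY_NORMAL, len);
 *		tty_flip_buffer_push(tty);
 *	}
 *
 * The insert helpers only stage data; nothing reaches the line discipline
 * until it is committed by tty_flip_buffer_push() or tty_schedule_flip().
 * A driver that cares about overruns should check the return value, which
 * is the number of characters actually queued.
 */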
/**
 * tty_schedule_flip - push characters to ldisc
 * @tty: tty to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 *
 * Locking: Takes tty->buf.lock
 */

void tty_schedule_flip(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 * tty_prepare_flip_string - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */

int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
                                size_t size)
{
        int space;
        unsigned long flags;
        struct tty_buffer *tb;

        spin_lock_irqsave(&tty->buf.lock, flags);
        space = __tty_buffer_request_room(tty, size);

        tb = tty->buf.tail;
        if (likely(space)) {
                *chars = tb->char_buf_ptr + tb->used;
                memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
                tb->used += space;
        }
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);

/**
 * tty_prepare_flip_string_flags - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @flags: return pointer for status flag write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */

int tty_prepare_flip_string_flags(struct tty_struct *tty,
                unsigned char **chars, char **flags, size_t size)
{
        int space;
        unsigned long __flags;
        struct tty_buffer *tb;

        spin_lock_irqsave(&tty->buf.lock, __flags);
        space = __tty_buffer_request_room(tty, size);

        tb = tty->buf.tail;
        if (likely(space)) {
                *chars = tb->char_buf_ptr + tb->used;
                *flags = tb->flag_buf_ptr + tb->used;
                tb->used += space;
        }
        spin_unlock_irqrestore(&tty->buf.lock, __flags);
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
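/*
 * Typical use of the prepare helpers (sketch only; example_hw_read() is a
 * hypothetical block copy from device memory or a FIFO): a driver that
 * wants to copy straight into the flip buffer rather than through an
 * intermediate array can do:
 *
 *	unsigned char *data;
 *	int len = tty_prepare_flip_string(tty, &data, wanted);
 *
 *	if (len > 0) {
 *		example_hw_read(port, data, len);
 *		tty_flip_buffer_push(tty);
 *	}
 *
 * The space returned is already accounted as used and pre-flagged as
 * TTY_NORMAL, so the driver should fill all @len bytes before pushing or
 * stale bytes will reach the line discipline.
 */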
/**
 * flush_to_ldisc - flush buffered data to the line discipline
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 * while invoking the line discipline receive_buf method. The
 * receive_buf method is single threaded for each tty instance.
 */

static void flush_to_ldisc(struct work_struct *work)
{
        struct tty_struct *tty =
                container_of(work, struct tty_struct, buf.work);
        unsigned long flags;
        struct tty_ldisc *disc;

        disc = tty_ldisc_ref(tty);
        if (disc == NULL)       /*  !TTY_LDISC */
                return;

        spin_lock_irqsave(&tty->buf.lock, flags);

        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
                struct tty_buffer *head;
                while ((head = tty->buf.head) != NULL) {
                        int count;
                        char *char_buf;
                        unsigned char *flag_buf;

                        count = head->commit - head->read;
                        if (!count) {
                                if (head->next == NULL)
                                        break;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
                        }
                        /* Ldisc or user is trying to flush the buffers
                           we are feeding to the ldisc, stop feeding the
                           line discipline as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
                        if (!tty->receive_room)
                                break;
                        if (count > tty->receive_room)
                                count = tty->receive_room;
                        char_buf = head->char_buf_ptr + head->read;
                        flag_buf = head->flag_buf_ptr + head->read;
                        head->read += count;
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
                        disc->ops->receive_buf(tty, char_buf,
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);
                }
                clear_bit(TTY_FLUSHING, &tty->flags);
        }

        /* We may have a deferred request to flush the input buffer,
           if so pull the chain under the lock and empty the queue */
        if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
                __tty_buffer_flush(tty);
                clear_bit(TTY_FLUSHPENDING, &tty->flags);
                wake_up(&tty->read_wait);
        }
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
        flush_work(&tty->buf.work);
}

/**
 * tty_flip_buffer_push - push terminal flip buffers
 * @tty: tty to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. This
 * function must not be called from IRQ context if tty->low_latency is set.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 *
 * Locking: tty buffer lock. Driver locks in low latency mode.
 */

void tty_flip_buffer_push(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        if (tty->low_latency)
                flush_to_ldisc(&tty->buf.work);
        else
                schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

/**
 * tty_buffer_init - prepare a tty buffer structure
 * @tty: tty to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 *
 * Locking: none
 */

void tty_buffer_init(struct tty_struct *tty)
{
        spin_lock_init(&tty->buf.lock);
        tty->buf.head = NULL;
        tty->buf.tail = NULL;
        tty->buf.free = NULL;
        tty->buf.memory_used = 0;
        INIT_WORK(&tty->buf.work, flush_to_ldisc);
}
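/*
 * Ordering sketch (illustrative only): tty_buffer_init() must run before
 * any other helper in this file touches tty->buf, the insert/prepare and
 * push/flip calls are then used while the tty is live, and
 * tty_buffer_free_all() is only safe once the tty is no longer in use:
 *
 *	tty_buffer_init(tty);
 *	...
 *	tty_insert_flip_string_fixed_flag(tty, buf, TTY_NORMAL, len);
 *	tty_flip_buffer_push(tty);
 *	...
 *	tty_buffer_free_all(tty);
 *
 * With tty->low_latency set, tty_flip_buffer_push() runs flush_to_ldisc()
 * synchronously in the caller's context; otherwise the push is deferred to
 * the tty->buf.work item wired up by tty_buffer_init().
 */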