	p->used = 0;
	p->size = size;
	p->next = NULL;
-	p->active = 0;
	p->commit = 0;
	p->read = 0;
	p->char_buf_ptr = (char *)(p->data);
	/* OPTIMISATION: We could keep a per-tty "zero" sized buffer to
	   remove this conditional if it's worth it. This would be
	   invisible to the callers (a sketch follows this hunk) */
-	if ((b = tty->buf.tail) != NULL) {
+	if ((b = tty->buf.tail) != NULL)
		left = b->size - b->used;
-		b->active = 1;
-	} else
+	else
		left = 0;
	if (left < size) {
		if ((n = tty_buffer_find(tty, size)) != NULL) {
			if (b != NULL) {
				b->next = n;
-				b->active = 0;
				b->commit = b->used;
			} else
				tty->buf.head = n;
			tty->buf.tail = n;
-			n->active = 1;
		} else
			size = left;
	}
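/*
 * Editorial sketch, not part of the patch: the OPTIMISATION note in the
 * hunk above imagines a per-tty "zero" sized sentinel buffer so that
 * buf.tail is never NULL and the conditional disappears.  Roughly, and
 * assuming a hypothetical "zero" member added to the tty buffer head for
 * this illustration (used, size, next, commit, read, head and tail are
 * the fields already visible above; the function name is made up too):
 */
static void tty_buffer_init_sentinel(struct tty_struct *tty)
{
	tty->buf.zero.used = 0;		/* permanently empty ...	  */
	tty->buf.zero.size = 0;		/* ... so size - used is always 0 */
	tty->buf.zero.commit = 0;
	tty->buf.zero.read = 0;
	tty->buf.zero.next = NULL;
	tty->buf.head = tty->buf.tail = &tty->buf.zero;
}
/*
 * With the sentinel installed, the test above would collapse to an
 * unconditional
 *
 *	b = tty->buf.tail;
 *	left = b->size - b->used;
 *
 * which evaluates to 0 for the sentinel, so the slow path is taken just
 * as in the NULL case today and, as the comment says, callers never
 * notice the difference.
 */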
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);
-	if (tty->buf.tail != NULL) {
-		tty->buf.tail->active = 0;
+	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
-	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	schedule_delayed_work(&tty->buf.work, 1);
}
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);
-	if (tty->buf.tail != NULL) {
-		tty->buf.tail->active = 0;
+	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
-	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	if (tty->low_latency)
{
	unsigned long flags;
	spin_lock_irqsave(&t->buf.lock, flags);
-	if (t->buf.tail != NULL) {
-		t->buf.tail->active = 0;
+	if (t->buf.tail != NULL)
		t->buf.tail->commit = t->buf.tail->used;
-	}
	spin_unlock_irqrestore(&t->buf.lock, flags);
	schedule_work(&t->buf.work);
}
					unsigned char ch, char flag)
{
	struct tty_buffer *tb = tty->buf.tail;
-	if (tb && tb->active && tb->used < tb->size) {
+	if (tb && tb->used < tb->size) {
		tb->flag_buf_ptr[tb->used] = flag;
		tb->char_buf_ptr[tb->used++] = ch;
		return 1;