Merge branch 'tty-fixes'
* branch 'tty-fixes':
  tty: use the new 'flush_delayed_work()' helper to do ldisc flush
  workqueue: add 'flush_delayed_work()' to run and wait for delayed work
  Make flush_to_ldisc properly handle parallel calls

commit d6047d79b9

3 changed files with 33 additions and 17 deletions
drivers/char/tty_buffer.c
@@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
 		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long flags;
 	struct tty_ldisc *disc;
-	struct tty_buffer *tbuf, *head;
-	char *char_buf;
-	unsigned char *flag_buf;
 
 	disc = tty_ldisc_ref(tty);
 	if (disc == NULL)	/*  !TTY_LDISC */
 		return;
 
 	spin_lock_irqsave(&tty->buf.lock, flags);
-	/* So we know a flush is running */
-	set_bit(TTY_FLUSHING, &tty->flags);
-	head = tty->buf.head;
-	if (head != NULL) {
-		tty->buf.head = NULL;
-		for (;;) {
-			int count = head->commit - head->read;
+
+	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
+		struct tty_buffer *head;
+		while ((head = tty->buf.head) != NULL) {
+			int count;
+			char *char_buf;
+			unsigned char *flag_buf;
+
+			count = head->commit - head->read;
 			if (!count) {
 				if (head->next == NULL)
 					break;
-				tbuf = head;
-				head = head->next;
-				tty_buffer_free(tty, tbuf);
+				tty->buf.head = head->next;
+				tty_buffer_free(tty, head);
 				continue;
 			}
 			/* Ldisc or user is trying to flush the buffers
@@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
 						flag_buf, count);
 			spin_lock_irqsave(&tty->buf.lock, flags);
 		}
-		/* Restore the queue head */
-		tty->buf.head = head;
+		clear_bit(TTY_FLUSHING, &tty->flags);
 	}
 
 	/* We may have a deferred request to flush the input buffer,
 	   if so pull the chain under the lock and empty the queue */
 	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
 		clear_bit(TTY_FLUSHPENDING, &tty->flags);
 		wake_up(&tty->read_wait);
 	}
-	clear_bit(TTY_FLUSHING, &tty->flags);
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	tty_ldisc_deref(disc);
@@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
-	flush_to_ldisc(&tty->buf.work.work);
+	flush_delayed_work(&tty->buf.work);
 }
 
 /**
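Note on the change above: the fix for parallel calls replaces an unconditional set_bit() with test_and_set_bit(), so only the caller that wins the TTY_FLUSHING flag walks the buffer list; a concurrent caller skips the drain instead of grabbing and resetting tty->buf.head underneath the first one. A minimal userspace sketch of that exclusion pattern, using C11 atomics in place of the kernel's bitops (names here are illustrative, not from the kernel source; which thread prints what depends on scheduling):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag flushing = ATOMIC_FLAG_INIT;
static atomic_int queued = 100000;   /* stand-in for the tty->buf.head chain */

static void *flush(void *name)
{
	/* Analogue of test_and_set_bit(TTY_FLUSHING, &tty->flags): only the
	 * caller that flips the flag from clear to set may drain the queue. */
	if (!atomic_flag_test_and_set(&flushing)) {
		while (atomic_load(&queued) > 0)   /* the while-loop drain */
			atomic_fetch_sub(&queued, 1);
		atomic_flag_clear(&flushing);      /* clear_bit(TTY_FLUSHING) */
		printf("%s: drained the queue\n", (const char *)name);
	} else {
		/* Before the fix, a second caller would also reset the list
		 * head here, corrupting it; now it simply backs off. */
		printf("%s: flush already running, backing off\n",
		       (const char *)name);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, flush, "caller A");
	pthread_create(&b, NULL, flush, "caller B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;   /* build: cc -std=c11 -pthread sketch.c */
}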
include/linux/workqueue.h
@@ -207,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
+extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
kernel/workqueue.c
@@ -639,6 +639,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
+/**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
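For context on what the new helper does: if the delay timer is still pending, flush_delayed_work() cancels it and queues the work to run immediately, then flush_work() blocks until the callback has finished. This is what lets tty_flush_to_ldisc() above stop calling flush_to_ldisc() directly. A rough userspace analogue of that run-now-and-wait behaviour, built from toy types that are not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Toy stand-in for struct delayed_work: a worker thread that waits out a
 * delay, runs a callback once, and records completion. */
struct toy_delayed_work {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool timer_pending;          /* delay has not expired yet */
	bool done;                   /* callback has finished */
	void (*fn)(void);
};

static void *toy_worker(void *arg)
{
	struct toy_delayed_work *dw = arg;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;        /* the "timer": a 5 second delay */

	pthread_mutex_lock(&dw->lock);
	while (dw->timer_pending &&
	       pthread_cond_timedwait(&dw->cond, &dw->lock, &deadline) == 0)
		;                    /* woken early by a flush, or timed out */
	dw->timer_pending = false;
	pthread_mutex_unlock(&dw->lock);

	dw->fn();                    /* run the deferred work */

	pthread_mutex_lock(&dw->lock);
	dw->done = true;
	pthread_cond_broadcast(&dw->cond);
	pthread_mutex_unlock(&dw->lock);
	return NULL;
}

/* Analogue of flush_delayed_work(): cancel the remaining delay (del_timer),
 * let the work run right away (__queue_work), then wait for it (flush_work). */
static void toy_flush_delayed_work(struct toy_delayed_work *dw)
{
	pthread_mutex_lock(&dw->lock);
	dw->timer_pending = false;
	pthread_cond_broadcast(&dw->cond);
	while (!dw->done)
		pthread_cond_wait(&dw->cond, &dw->lock);
	pthread_mutex_unlock(&dw->lock);
}

static void hello(void) { printf("deferred work ran\n"); }

int main(void)
{
	struct toy_delayed_work dw = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.timer_pending = true,
		.fn = hello,
	};
	pthread_t t;

	pthread_create(&t, NULL, toy_worker, &dw);
	toy_flush_delayed_work(&dw); /* returns after hello(), well before 5s */
	pthread_join(t, NULL);
	return 0;
}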