fuse: iqueue locking
Use fiq->waitq.lock for protecting members of struct fuse_iqueue, as well as the FR_PENDING request flag, which were previously protected by fc->lock. Following patches will remove fc->lock protection from these members.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
parent ef75925886
commit 4ce6081260
1 changed file with 45 additions and 6 deletions
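Before the diff, a note on the locking scheme it introduces: fc->lock stays the outer lock, fiq->waitq.lock becomes the inner lock guarding the input queue, and waiters are woken while the inner lock is still held. That is why the plain wake_up() calls become wake_up_locked(): wake_up() takes the waitqueue's own lock internally and would self-deadlock once that lock doubles as the iqueue lock. Below is a minimal userspace sketch of the pattern, with pthread mutexes and a condition variable standing in for the spinlocks and wait queue; all names are illustrative, not kernel API.

#include <pthread.h>

/* Illustrative stand-ins: conn_lock plays fc->lock, iq.lock plays
 * fiq->waitq.lock, iq.cond plays the fiq->waitq wait queue. */
static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

static struct {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int npending;                   /* models the fiq->pending list */
} iq = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
};

/* Producer: outer lock, then inner lock, queue the work, and wake
 * while still holding the inner lock -- the wake_up_locked() analogue. */
static void queue_request_sketch(void)
{
        pthread_mutex_lock(&conn_lock);
        pthread_mutex_lock(&iq.lock);
        iq.npending++;
        pthread_cond_signal(&iq.cond);  /* like wake_up_locked() */
        pthread_mutex_unlock(&iq.lock);
        pthread_mutex_unlock(&conn_lock);
}

/* Consumer: pthread_cond_wait() drops and re-takes the queue lock
 * around the sleep, much as request_wait() in the patch drops and
 * re-takes both locks around schedule(). */
static void request_wait_sketch(void)
{
        pthread_mutex_lock(&iq.lock);
        while (iq.npending == 0)
                pthread_cond_wait(&iq.cond, &iq.lock);
        iq.npending--;
        pthread_mutex_unlock(&iq.lock);
}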
fs/fuse/dev.c (+45, -6)

@@ -328,7 +328,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
 	list_add_tail(&req->list, &fiq->pending);
-	wake_up(&fiq->waitq);
+	wake_up_locked(&fiq->waitq);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
@@ -341,14 +341,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	forget->forget_one.nlookup = nlookup;
 
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
 		fiq->forget_list_tail = forget;
-		wake_up(&fiq->waitq);
+		wake_up_locked(&fiq->waitq);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	} else {
 		kfree(forget);
 	}
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 }
 
@@ -362,8 +364,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
+		spin_lock(&fiq->waitq.lock);
 		req->in.h.unique = fuse_get_unique(fiq);
 		queue_request(fiq, req);
+		spin_unlock(&fiq->waitq.lock);
 	}
 }
 
@@ -380,10 +384,13 @@ static void flush_bg_queue(struct fuse_conn *fc)
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 __releases(fc->lock)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
 	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 	req->end = NULL;
 	list_del_init(&req->list);
+	spin_lock(&fiq->waitq.lock);
 	list_del_init(&req->intr_entry);
+	spin_unlock(&fiq->waitq.lock);
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	smp_wmb();
@@ -415,13 +422,16 @@ __releases(fc->lock)
 
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
+	spin_lock(&fiq->waitq.lock);
 	list_add_tail(&req->intr_entry, &fiq->interrupts);
-	wake_up(&fiq->waitq);
+	wake_up_locked(&fiq->waitq);
+	spin_unlock(&fiq->waitq.lock);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
 	int err;
 
 	if (!fc->no_interrupt) {
@@ -434,7 +444,7 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
 		if (test_bit(FR_SENT, &req->flags))
-			queue_interrupt(&fc->iq, req);
+			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);
 	}
 
@@ -451,14 +461,17 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		return;
 
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	/* Request is not yet in userspace, bail out */
 	if (test_bit(FR_PENDING, &req->flags)) {
 		list_del(&req->list);
+		spin_unlock(&fiq->waitq.lock);
 		spin_unlock(&fc->lock);
 		__fuse_put_request(req);
 		req->out.h.error = -EINTR;
 		return;
 	}
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 }
 
@@ -475,8 +488,10 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 
 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	if (!fiq->connected) {
 		spin_unlock(&fc->lock);
+		spin_unlock(&fiq->waitq.lock);
 		req->out.h.error = -ENOTCONN;
 	} else {
 		req->in.h.unique = fuse_get_unique(fiq);
@@ -484,6 +499,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
 		__fuse_get_request(req);
+		spin_unlock(&fiq->waitq.lock);
 		spin_unlock(&fc->lock);
 
 		request_wait_answer(fc, req);
@@ -619,10 +635,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	if (fiq->connected) {
 		queue_request(fiq, req);
 		err = 0;
 	}
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 
 	return err;
@@ -1064,8 +1082,10 @@ static int request_pending(struct fuse_iqueue *fiq)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
+__releases(fc->iq.waitq.lock)
 __releases(fc->lock)
 __acquires(fc->lock)
+__acquires(fc->iq.waitq.lock)
 {
 	struct fuse_iqueue *fiq = &fc->iq;
 	DECLARE_WAITQUEUE(wait, current);
@@ -1076,9 +1096,11 @@ __acquires(fc->lock)
 		if (signal_pending(current))
 			break;
 
+		spin_unlock(&fiq->waitq.lock);
 		spin_unlock(&fc->lock);
 		schedule();
 		spin_lock(&fc->lock);
+		spin_lock(&fiq->waitq.lock);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&fiq->waitq, &wait);
@@ -1094,15 +1116,17 @@ __acquires(fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
+__releases(fc->iq.waitq.lock)
 __releases(fc->lock)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
 	unsigned reqsize = sizeof(ih) + sizeof(arg);
 	int err;
 
 	list_del_init(&req->intr_entry);
-	req->intr_unique = fuse_get_unique(&fc->iq);
+	req->intr_unique = fuse_get_unique(fiq);
 	memset(&ih, 0, sizeof(ih));
 	memset(&arg, 0, sizeof(arg));
 	ih.len = reqsize;
@@ -1110,6 +1134,7 @@ __releases(fc->lock)
 	ih.unique = req->intr_unique;
 	arg.unique = req->in.h.unique;
 
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 	if (nbytes < reqsize)
 		return -EINVAL;
@@ -1147,6 +1172,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 static int fuse_read_single_forget(struct fuse_conn *fc,
 				   struct fuse_copy_state *cs,
 				   size_t nbytes)
+__releases(fc->iq.waitq.lock)
 __releases(fc->lock)
 {
 	int err;
@@ -1162,6 +1188,7 @@ __releases(fc->lock)
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 	kfree(forget);
 	if (nbytes < ih.len)
@@ -1180,6 +1207,7 @@ __releases(fc->lock)
 
 static int fuse_read_batch_forget(struct fuse_conn *fc,
 				  struct fuse_copy_state *cs, size_t nbytes)
+__releases(fc->iq.waitq.lock)
 __releases(fc->lock)
 {
 	int err;
@@ -1195,12 +1223,14 @@ __releases(fc->lock)
 	};
 
 	if (nbytes < ih.len) {
+		spin_unlock(&fiq->waitq.lock);
 		spin_unlock(&fc->lock);
 		return -EINVAL;
 	}
 
 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
 	head = dequeue_forget(fiq, max_forgets, &count);
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 
 	arg.count = count;
@@ -1230,6 +1260,7 @@ __releases(fc->lock)
 
 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
 			    size_t nbytes)
+__releases(fc->iq.waitq.lock)
 __releases(fc->lock)
 {
 	struct fuse_iqueue *fiq = &fc->iq;
@@ -1260,6 +1291,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 
  restart:
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	err = -EAGAIN;
 	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
 	    !request_pending(fiq))
@@ -1290,6 +1322,8 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	req = list_entry(fiq->pending.next, struct fuse_req, list);
 	clear_bit(FR_PENDING, &req->flags);
 	list_del_init(&req->list);
+	spin_unlock(&fiq->waitq.lock);
+
 	list_add(&req->list, &fc->io);
 
 	in = &req->in;
@@ -1333,6 +1367,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	return reqsize;
 
  err_unlock:
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 	return err;
 }
@@ -2055,10 +2090,12 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &fiq->waitq, wait);
 
 	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	if (!fiq->connected)
 		mask = POLLERR;
 	else if (request_pending(fiq))
 		mask |= POLLIN | POLLRDNORM;
+	spin_unlock(&fiq->waitq.lock);
 	spin_unlock(&fc->lock);
 
 	return mask;
@@ -2141,11 +2178,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);
 
+		spin_lock(&fiq->waitq.lock);
 		fiq->connected = 0;
 		list_splice_init(&fiq->pending, &to_end2);
 		while (forget_pending(fiq))
 			kfree(dequeue_forget(fiq, 1, NULL));
-		wake_up_all(&fiq->waitq);
+		wake_up_all_locked(&fiq->waitq);
+		spin_unlock(&fiq->waitq.lock);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 
 		list_splice_init(&fc->processing, &to_end2);
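A note on the __releases()/__acquires() lines the patch adds to request_wait(), fuse_read_interrupt() and the forget readers: these are annotations for the sparse static checker, not executable code. They record that a function is entered with fc->iq.waitq.lock held and returns without it, or, for request_wait(), that it temporarily drops and re-takes the lock around schedule(). A simplified sketch of how the kernel defines them, modeled on include/linux/compiler.h (the exact form varies by kernel version):

#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

With these in place, a sparse run (make C=1) can warn when a code path exits with the lock context unbalanced.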