summaryrefslogtreecommitdiffstats
path: root/tools/virtiofsd/fuse_virtio.c
diff options
context:
space:
mode:
authorPeter Maydell2021-02-17 15:44:18 +0100
committerPeter Maydell2021-02-17 15:44:18 +0100
commit1af5629673bb5c1592d993f9fb6119a62845f576 (patch)
tree24faff5cfb18f2ab4b95c2cb77e8e4244c62ad6e /tools/virtiofsd/fuse_virtio.c
parentMerge remote-tracking branch 'remotes/bonzini-gitlab/tags/for-upstream' into ... (diff)
parentvirtiofsd: Do not use a thread pool by default (diff)
downloadqemu-1af5629673bb5c1592d993f9fb6119a62845f576.tar.gz
qemu-1af5629673bb5c1592d993f9fb6119a62845f576.tar.xz
qemu-1af5629673bb5c1592d993f9fb6119a62845f576.zip
Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-virtiofs-20210216' into staging
virtiofsd pull 2021-02-16 Vivek's support for new FUSE KILLPRIV_V2 and some smaller cleanups. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> # gpg: Signature made Tue 16 Feb 2021 18:34:32 GMT # gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7 # gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full] # Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7 * remotes/dgilbert-gitlab/tags/pull-virtiofs-20210216: virtiofsd: Do not use a thread pool by default viriofsd: Add support for FUSE_HANDLE_KILLPRIV_V2 virtiofsd: Save error code early at the failure callsite tools/virtiofsd: Replace the word 'whitelist' virtiofsd: vu_dispatch locking should never fail virtiofsd: Allow to build it without the tools Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'tools/virtiofsd/fuse_virtio.c')
-rw-r--r--tools/virtiofsd/fuse_virtio.c49
1 files changed, 35 insertions, 14 deletions
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index ddcefee427..523ee64fb7 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -188,6 +188,31 @@ static void copy_iov(struct iovec *src_iov, int src_count,
}
/*
+ * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
+ * a deadlock condition is detected or the current thread already
+ * owns the lock. They can also fail, like pthread_rwlock_unlock(),
+ * if the mutex wasn't properly initialized. None of these are ever
+ * expected to happen.
+ */
+/*
+ * Acquire vud->vu_dispatch_rwlock for shared (read) access.
+ * Per the comment above, failure is never expected here, so any
+ * non-zero return aborts via assert().
+ * NOTE(review): 'ret' would be unused if asserts were compiled out
+ * (NDEBUG) — presumably this build always keeps asserts enabled; verify.
+ */
+static void vu_dispatch_rdlock(struct fv_VuDev *vud)
+{
+ int ret = pthread_rwlock_rdlock(&vud->vu_dispatch_rwlock);
+ assert(ret == 0);
+}
+
+/*
+ * Acquire vud->vu_dispatch_rwlock for exclusive (write) access.
+ * Failure is never expected (see comment above), so any non-zero
+ * return aborts via assert().
+ */
+static void vu_dispatch_wrlock(struct fv_VuDev *vud)
+{
+ int ret = pthread_rwlock_wrlock(&vud->vu_dispatch_rwlock);
+ assert(ret == 0);
+}
+
+/*
+ * Release vud->vu_dispatch_rwlock (taken by either of the two helpers
+ * above). Can only fail on an uninitialized lock, which is never
+ * expected, so any non-zero return aborts via assert().
+ */
+static void vu_dispatch_unlock(struct fv_VuDev *vud)
+{
+ int ret = pthread_rwlock_unlock(&vud->vu_dispatch_rwlock);
+ assert(ret == 0);
+}
+
+/*
* Called back by ll whenever it wants to send a reply/message back
* The 1st element of the iov starts with the fuse_out_header
* 'unique'==0 means it's a notify message.
@@ -240,12 +265,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
copy_iov(iov, count, in_sg, in_num, tosend_len);
- pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_rdlock(qi->virtio_dev);
pthread_mutex_lock(&qi->vq_lock);
vu_queue_push(dev, q, elem, tosend_len);
vu_queue_notify(dev, q);
pthread_mutex_unlock(&qi->vq_lock);
- pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_unlock(qi->virtio_dev);
req->reply_sent = true;
@@ -403,12 +428,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
ret = 0;
- pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_rdlock(qi->virtio_dev);
pthread_mutex_lock(&qi->vq_lock);
vu_queue_push(dev, q, elem, tosend_len);
vu_queue_notify(dev, q);
pthread_mutex_unlock(&qi->vq_lock);
- pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_unlock(qi->virtio_dev);
err:
if (ret == 0) {
@@ -558,12 +583,12 @@ out:
fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
elem->index);
- pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_rdlock(qi->virtio_dev);
pthread_mutex_lock(&qi->vq_lock);
vu_queue_push(dev, q, elem, 0);
vu_queue_notify(dev, q);
pthread_mutex_unlock(&qi->vq_lock);
- pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_unlock(qi->virtio_dev);
}
pthread_mutex_destroy(&req->ch.lock);
@@ -596,7 +621,6 @@ static void *fv_queue_thread(void *opaque)
qi->qidx, qi->kick_fd);
while (1) {
struct pollfd pf[2];
- int ret;
pf[0].fd = qi->kick_fd;
pf[0].events = POLLIN;
@@ -645,8 +669,7 @@ static void *fv_queue_thread(void *opaque)
break;
}
/* Mutual exclusion with virtio_loop() */
- ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
- assert(ret == 0); /* there is no possible error case */
+ vu_dispatch_rdlock(qi->virtio_dev);
pthread_mutex_lock(&qi->vq_lock);
/* out is from guest, in is too guest */
unsigned int in_bytes, out_bytes;
@@ -672,7 +695,7 @@ static void *fv_queue_thread(void *opaque)
}
pthread_mutex_unlock(&qi->vq_lock);
- pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_unlock(qi->virtio_dev);
/* Process all the requests. */
if (!se->thread_pool_size && req_list != NULL) {
@@ -799,7 +822,6 @@ int virtio_loop(struct fuse_session *se)
while (!fuse_session_exited(se)) {
struct pollfd pf[1];
bool ok;
- int ret;
pf[0].fd = se->vu_socketfd;
pf[0].events = POLLIN;
pf[0].revents = 0;
@@ -825,12 +847,11 @@ int virtio_loop(struct fuse_session *se)
assert(pf[0].revents & POLLIN);
fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
/* Mutual exclusion with fv_queue_thread() */
- ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
- assert(ret == 0); /* there is no possible error case */
+ vu_dispatch_wrlock(se->virtio_dev);
ok = vu_dispatch(&se->virtio_dev->dev);
- pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
+ vu_dispatch_unlock(se->virtio_dev);
if (!ok) {
fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);