author	Richard Henderson	2021-10-26 16:38:41 +0200
committer	Richard Henderson	2021-10-26 16:38:41 +0200
commit	931ce30859176f0f7daac6bac255dae5eb21284e (patch)
tree	a50f7407af869380f76876606ba80cb41d95a5ee /tools/virtiofsd/fuse_virtio.c
parent	Merge remote-tracking branch 'remotes/vivier/tags/trivial-branch-for-6.2-pull... (diff)
parent	virtiofsd: Error on bad socket group name (diff)
Merge remote-tracking branch 'remotes/dagrh/tags/pull-virtiofs-20211026' into staging
Virtiofsd pull 2021-10-26

New 'unsupported' feature for xattr mapping
Good for hiding selinux
Plus some tidy ups and error handling.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Tue 26 Oct 2021 03:28:44 AM PDT
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]

* remotes/dagrh/tags/pull-virtiofs-20211026:
  virtiofsd: Error on bad socket group name
  virtiofsd: Add a helper to stop all queues
  virtiofsd: Add a helper to send element on virtqueue
  virtiofsd: Remove unused virtio_fs_config definition
  virtiofsd: xattr mapping add a new type "unsupported"

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
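Of the changes listed above, "virtiofsd: Error on bad socket group name" is the one visible at the very end of the diff below: a getgrnam() miss on the --socket-group name used to be silently ignored, and now makes socket creation fail. The following standalone sketch illustrates that pattern with plain POSIX calls only; the function name is hypothetical and it omits virtiofsd's fuse_log()/umask() handling, so see the last hunk for the real code.

#include <grp.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical helper (not virtiofsd code): hand a freshly created UNIX
 * socket to a named group, and fail loudly if the group does not exist
 * instead of silently leaving the default ownership in place.
 */
static int set_socket_group(const char *sock_path, const char *group_name)
{
    struct group *g = getgrnam(group_name);

    if (!g) {
        fprintf(stderr, "unable to find group '%s'\n", group_name);
        return -1;
    }

    /* (uid_t)-1 leaves the owner unchanged; only the group is updated */
    if (chown(sock_path, (uid_t)-1, g->gr_gid) == -1) {
        perror("chown");
        return -1;
    }

    return 0;
}

int main(int argc, char *argv[])
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <socket-path> <group>\n", argv[0]);
        return 1;
    }
    return set_socket_group(argv[1], argv[2]) == 0 ? 0 : 1;
}

Failing hard here matters because --socket-group exists to restrict who may connect to the vhost-user socket; creating the socket anyway with default ownership would quietly drop that restriction.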
Diffstat (limited to 'tools/virtiofsd/fuse_virtio.c')
-rw-r--r--	tools/virtiofsd/fuse_virtio.c	80
1 file changed, 38 insertions(+), 42 deletions(-)
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index 8f4fd165b9..60b96470c5 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -82,12 +82,6 @@ struct fv_VuDev {
struct fv_QueueInfo **qi;
};
-/* From spec */
-struct virtio_fs_config {
- char tag[36];
- uint32_t num_queues;
-};
-
/* Callback from libvhost-user */
static uint64_t fv_get_features(VuDev *dev)
{
@@ -249,6 +243,21 @@ static void vu_dispatch_unlock(struct fv_VuDev *vud)
assert(ret == 0);
}
+static void vq_send_element(struct fv_QueueInfo *qi, VuVirtqElement *elem,
+ ssize_t len)
+{
+ struct fuse_session *se = qi->virtio_dev->se;
+ VuDev *dev = &se->virtio_dev->dev;
+ VuVirtq *q = vu_get_queue(dev, qi->qidx);
+
+ vu_dispatch_rdlock(qi->virtio_dev);
+ pthread_mutex_lock(&qi->vq_lock);
+ vu_queue_push(dev, q, elem, len);
+ vu_queue_notify(dev, q);
+ pthread_mutex_unlock(&qi->vq_lock);
+ vu_dispatch_unlock(qi->virtio_dev);
+}
+
/*
* Called back by ll whenever it wants to send a reply/message back
* The 1st element of the iov starts with the fuse_out_header
@@ -259,8 +268,6 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
{
FVRequest *req = container_of(ch, FVRequest, ch);
struct fv_QueueInfo *qi = ch->qi;
- VuDev *dev = &se->virtio_dev->dev;
- VuVirtq *q = vu_get_queue(dev, qi->qidx);
VuVirtqElement *elem = &req->elem;
int ret = 0;
@@ -302,13 +309,7 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
copy_iov(iov, count, in_sg, in_num, tosend_len);
- vu_dispatch_rdlock(qi->virtio_dev);
- pthread_mutex_lock(&qi->vq_lock);
- vu_queue_push(dev, q, elem, tosend_len);
- vu_queue_notify(dev, q);
- pthread_mutex_unlock(&qi->vq_lock);
- vu_dispatch_unlock(qi->virtio_dev);
-
+ vq_send_element(qi, elem, tosend_len);
req->reply_sent = true;
err:
@@ -327,8 +328,6 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
{
FVRequest *req = container_of(ch, FVRequest, ch);
struct fv_QueueInfo *qi = ch->qi;
- VuDev *dev = &se->virtio_dev->dev;
- VuVirtq *q = vu_get_queue(dev, qi->qidx);
VuVirtqElement *elem = &req->elem;
int ret = 0;
g_autofree struct iovec *in_sg_cpy = NULL;
@@ -436,12 +435,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
out_sg->len = tosend_len;
}
- vu_dispatch_rdlock(qi->virtio_dev);
- pthread_mutex_lock(&qi->vq_lock);
- vu_queue_push(dev, q, elem, tosend_len);
- vu_queue_notify(dev, q);
- pthread_mutex_unlock(&qi->vq_lock);
- vu_dispatch_unlock(qi->virtio_dev);
+ vq_send_element(qi, elem, tosend_len);
req->reply_sent = true;
return 0;
}
@@ -453,7 +447,6 @@ static void fv_queue_worker(gpointer data, gpointer user_data)
{
struct fv_QueueInfo *qi = user_data;
struct fuse_session *se = qi->virtio_dev->se;
- struct VuDev *dev = &qi->virtio_dev->dev;
FVRequest *req = data;
VuVirtqElement *elem = &req->elem;
struct fuse_buf fbuf = {};
@@ -595,17 +588,9 @@ out:
/* If the request has no reply, still recycle the virtqueue element */
if (!req->reply_sent) {
- struct VuVirtq *q = vu_get_queue(dev, qi->qidx);
-
fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
elem->index);
-
- vu_dispatch_rdlock(qi->virtio_dev);
- pthread_mutex_lock(&qi->vq_lock);
- vu_queue_push(dev, q, elem, 0);
- vu_queue_notify(dev, q);
- pthread_mutex_unlock(&qi->vq_lock);
- vu_dispatch_unlock(qi->virtio_dev);
+ vq_send_element(qi, elem, 0);
}
pthread_mutex_destroy(&req->ch.lock);
@@ -755,6 +740,18 @@ static void fv_queue_cleanup_thread(struct fv_VuDev *vud, int qidx)
vud->qi[qidx] = NULL;
}
+static void stop_all_queues(struct fv_VuDev *vud)
+{
+ for (int i = 0; i < vud->nqueues; i++) {
+ if (!vud->qi[i]) {
+ continue;
+ }
+
+ fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i);
+ fv_queue_cleanup_thread(vud, i);
+ }
+}
+
/* Callback from libvhost-user on start or stop of a queue */
static void fv_queue_set_started(VuDev *dev, int qidx, bool started)
{
@@ -885,15 +882,7 @@ int virtio_loop(struct fuse_session *se)
* Make sure all fv_queue_thread()s quit on exit, as we're about to
* free virtio dev and fuse session, no one should access them anymore.
*/
- for (int i = 0; i < se->virtio_dev->nqueues; i++) {
- if (!se->virtio_dev->qi[i]) {
- continue;
- }
-
- fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i);
- fv_queue_cleanup_thread(se->virtio_dev, i);
- }
-
+ stop_all_queues(se->virtio_dev);
fuse_log(FUSE_LOG_INFO, "%s: Exit\n", __func__);
return 0;
@@ -999,6 +988,13 @@ static int fv_create_listen_socket(struct fuse_session *se)
"vhost socket failed to set group to %s (%d): %m\n",
se->vu_socket_group, g->gr_gid);
}
+ } else {
+ fuse_log(FUSE_LOG_ERR,
+ "vhost socket: unable to find group '%s'\n",
+ se->vu_socket_group);
+ close(listen_sock);
+ umask(old_umask);
+ return -1;
}
}
umask(old_umask);