author     Simon Rettberg   2020-11-20 14:19:21 +0100
committer  Simon Rettberg   2020-11-20 14:19:21 +0100
commit     b4d8353913a2f54a9bce314e84415e868dc096cb (patch)
tree       8712205f0b6b9970b1c197e8b9b4f86c52884798 /src
parent     [KERNEL] Cleanup thread cleanup, fix closing of device when busy (diff)
[KERNEL] Fix race condition for request_queue_receive in receive thread
Formerly, the request about to be received was looked up in the receive queue without being removed from it; the request payload was then read from the socket while the lock was not held, and only afterwards was the lock acquired again and the request removed from the queue. This is dangerous, as another thread can concurrently take the request from the queue while the receive thread reads the payload from the socket, leading to a double free by calling blk_mq_end_request twice.
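The fix amounts to transferring ownership of the request inside a single critical section: look it up and unlink it from the receive queue while the lock is still held, so the socket read and the final blk_mq_end_request operate on a request no other thread can reach. Below is a minimal userspace sketch of that pattern (assumptions: a pthread mutex stands in for dev->blk_lock, a hand-rolled singly linked list and struct pending_request stand in for the kernel's queuelist and struct request, and take_request is a hypothetical helper, not part of dnbd3):

#include <pthread.h>
#include <stddef.h>

struct pending_request {
	struct pending_request *next;
	unsigned long long handle;
	/* payload fields omitted */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_request *recv_queue; /* shared receive queue */

/* Look up a request by handle and unlink it in one critical section,
 * so no other thread can complete (and free) it while the payload is
 * still being read from the socket. */
static struct pending_request *take_request(unsigned long long handle)
{
	struct pending_request **pp;
	struct pending_request *req = NULL;

	pthread_mutex_lock(&queue_lock);
	for (pp = &recv_queue; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->handle == handle) {
			req = *pp;
			*pp = req->next; /* unlink while the lock is held */
			req->next = NULL;
			break;
		}
	}
	pthread_mutex_unlock(&queue_lock);
	return req; /* caller now exclusively owns req (or gets NULL) */
}

In the same spirit, the patch moves list_del_init under the existing lookup lock and, on a receive error, re-adds the request to the send queue under the lock before bailing out.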
Diffstat (limited to 'src')
-rw-r--r--   src/kernel/net.c   15
1 file changed, 7 insertions, 8 deletions
diff --git a/src/kernel/net.c b/src/kernel/net.c
index d846b64..70695d9 100644
--- a/src/kernel/net.c
+++ b/src/kernel/net.c
@@ -795,6 +795,7 @@ static int dnbd3_net_receive(void *data)
if ((uint64_t)(uintptr_t)received_request == dnbd3_reply.handle) // Double cast to prevent warning on 32bit
{
blk_request = received_request;
+ list_del_init(&blk_request->queuelist);
break;
}
}
@@ -813,30 +814,28 @@ static int dnbd3_net_receive(void *data)
iov.iov_base = kaddr;
iov.iov_len = bvec->bv_len;
ret = kernel_recvmsg(dev->sock, &msg, &iov, 1, bvec->bv_len, msg.msg_flags);
+ kunmap(bvec->bv_page);
if (ret != bvec->bv_len)
{
- kunmap(bvec->bv_page);
-
if (ret == 0)
{
/* have not received any data, but remote peer is shutdown properly */
dnbd3_dev_dbg_host_cur(dev, "remote peer has performed an orderly shutdown\n");
ret = 0;
- goto cleanup;
}
else
{
if (!atomic_read(&dev->connection_lock))
dnbd3_dev_err_host_cur(dev, "receiving from net to block layer\n");
ret = -EINVAL;
- goto cleanup;
}
+ // Requeue request
+ spin_lock_irqsave(&dev->blk_lock, irqflags);
+ list_add(&blk_request->queuelist, &dev->request_queue_send);
+ spin_unlock_irqrestore(&dev->blk_lock, irqflags);
+ goto cleanup;
}
- kunmap(bvec->bv_page);
}
- spin_lock_irqsave(&dev->blk_lock, irqflags);
- list_del_init(&blk_request->queuelist);
- spin_unlock_irqrestore(&dev->blk_lock, irqflags);
blk_mq_end_request(blk_request, BLK_STS_OK);
continue;