/*
* This file is part of the Distributed Network Block Device 3
*
* Copyright(c) 2019 Frederic Robra <frederic@robra.org>
*
 * This file may be licensed under the terms of the
* GNU General Public License Version 2 (the ``GPL'').
*
* Software distributed under the License is distributed
* on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
* express or implied. See the GPL for the specific language
* governing rights and limitations.
*
* You should have received a copy of the GPL along with this
* program. If not, go to http://www.gnu.org/licenses/gpl.html
* or write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "mq.h"
#include "net-txrx.h"
#include <linux/wait.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
/**
 * dnbd3_busy_iter - tag iterator marking sockets that own a busy request
 * @req: the in-flight request
 * @priv: pointer to the busy bitmask being built by the caller
 * @arg: reserved by the blk_mq_tagset_busy_iter interface, unused here
 */
static void dnbd3_busy_iter(struct request *req, void *priv, bool arg)
{
	unsigned long *busy_mask = priv;
	struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(req);

	set_bit(cmd->index, busy_mask);
}
/**
 * dnbd3_is_mq_busy - build a bitmask of sockets with in-flight requests
 * @dev: the device
 *
 * Walks all busy tags of the device's tag set; for every busy command the
 * bit of the socket it was queued on is set via dnbd3_busy_iter().
 *
 * Return: bitmask where bit i is set when socket i has a busy request
 */
unsigned long dnbd3_is_mq_busy(struct dnbd3_device *dev)
{
	struct blk_mq_tag_set *set = &dev->tag_set;
	unsigned long busy = 0;

	blk_mq_tagset_busy_iter(set, (busy_tag_iter_fn *)dnbd3_busy_iter, &busy);
	return busy;
}
/**
 * dnbd3_busy_iter_requeue - iterator to requeue inflight commands
 * @req: the request
 * @priv: the struct dnbd3_sock * forwarded by blk_mq_tagset_busy_iter
 * @arg: reserved by the iterator interface, unused here
 *
 * Commands belonging to other sockets are skipped. A command whose lock
 * cannot be taken is currently being transmitted and is left alone. When
 * the device is no longer connected the request is failed instead of
 * requeued.
 */
static void dnbd3_busy_iter_requeue(struct request *req, void *priv, bool arg)
{
struct dnbd3_sock *sock = priv;
struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(req);
/* only touch commands that were queued on this socket */
if (&cmd->dnbd3->socks[cmd->index] != sock) {
return;
}
if (!mutex_trylock(&cmd->lock)) {
/* request is in sending, so we will not requeue */
return;
}
if (!cmd->dnbd3->connected) {
/* no connection left to retry on: fail the request */
dnbd3_end_cmd(cmd, BLK_STS_IOERR);
mutex_unlock(&cmd->lock);
return;
}
debug_sock(sock, "requeue busy request %p", req);
dnbd3_requeue_cmd(cmd);
mutex_unlock(&cmd->lock);
}
/**
 * dndb3_reque_busy_requests - requeue all busy (inflight) requests of a socket
 * @sock: the socket whose in-flight commands are requeued
 *
 * NOTE(review): the function name is misspelled ("dndb3"/"reque") but is
 * part of the public interface declared elsewhere, so it is kept as-is.
 */
void dndb3_reque_busy_requests(struct dnbd3_sock *sock)
{
	blk_mq_tagset_busy_iter(&sock->device->tag_set,
			(busy_tag_iter_fn *)dnbd3_busy_iter_requeue, sock);
}
/**
* dnbd3_wait_for_sockets - wait for any socket to be alive or a disconnect
* @dev: the device
*/
static void dnbd3_wait_for_sockets(struct dnbd3_device *dev) {
int i;
bool wait = true;
while (wait && dev->connected) {
for (i = 0; i < dev->number_connections; i++) {
if (dnbd3_is_sock_alive(dev->socks[i])) {
wait = false;
}
}
if (wait && dev->connected) {
debug("no socket alive sleeping");
msleep(100);
}
}
}
/**
 * dnbd3_requeue_cmd - requeue a command once
 * @cmd: the command to requeue
 *
 * Blocks until a socket is usable (or the device disconnects), then hands
 * the request back to blk-mq. The requed flag guarantees a command is only
 * requeued a single time.
 */
void dnbd3_requeue_cmd(struct dnbd3_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	dnbd3_wait_for_sockets(cmd->dnbd3);
	if (cmd->requed)
		return;
	cmd->requed = true;
	blk_mq_requeue_request(req, true);
}
/**
 * dnbd3_end_cmd - complete the blk-mq request backing a command
 * @cmd: the command whose request is completed
 * @error: the block layer status to complete the request with
 */
void dnbd3_end_cmd(struct dnbd3_cmd *cmd, blk_status_t error)
{
	blk_mq_end_request(blk_mq_rq_from_pdu(cmd), error);
}
/**
 * dnbd3_handle_cmd - dispatch a mq command to its socket
 * @cmd: the cmd to send
 * @index: the index of the hardware queue / socket
 *
 * When the chosen socket is dead but the device is still connected the
 * command is requeued to be retried on a new socket; when the device is
 * disconnected the request is failed with an I/O error.
 *
 * Return: 0 on success or when the command was requeued, negative errno
 * on failure
 */
static int dnbd3_handle_cmd(struct dnbd3_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct dnbd3_device *dev = cmd->dnbd3;
	struct dnbd3_sock *sock = &dev->socks[index];
	int ret = -1;

	if (!dev->socks || !dnbd3_is_sock_alive(*sock)) {
		if (dev->connected) {
			info_dev(dev, "reset request to new socket");
			dnbd3_requeue_cmd(cmd);
			return 0;
		}
		error_dev(dev, "ending request device not connected");
		dnbd3_end_cmd(cmd, BLK_STS_IOERR);
		return -EIO;
	}

	mutex_lock(&sock->tx_lock);
	if (unlikely(!sock->sock)) {
		mutex_unlock(&sock->tx_lock);
		warn_sock(sock, "not connected");
		return -EIO;
	}

	blk_mq_start_request(req);
	if (unlikely(sock->pending && sock->pending != req)) {
		/* another request is already pending on this socket */
		dnbd3_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}

	/* use the req computed above instead of recomputing it from cmd */
	ret = dnbd3_send_request(sock, req, cmd);
	if (ret == -EAGAIN) {
		error_dev(dev, "request send failed, requeueing");
		dnbd3_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&sock->tx_lock);
	return ret;
}
/**
 * dnbd3_queue_rq - queue request
 * @hctx: state for a hardware queue facing the hardware block device
 * @bd: the queue data including the request
 *
 * blk-mq entry point; forwards the request to the socket matching the
 * hardware queue number.
 *
 * Return: BLK_STS_OK when the command was handled or requeued,
 * BLK_STS_IOERR on failure
 */
static blk_status_t dnbd3_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	blk_status_t status;
	int ret;

	mutex_lock(&cmd->lock);
	cmd->requed = false;
	cmd->index = hctx->queue_num;
	ret = dnbd3_handle_cmd(cmd, hctx->queue_num);
	/* keep blk_status_t separate from the errno-style int return */
	status = ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
	mutex_unlock(&cmd->lock);
	return status;
}
/**
 * dnbd3_init_request - initialize the per-request pdu of a mq request
 * @set: the mq tag set
 * @rq: the request
 * @hctx_idx: index of the hardware queue (unused)
 * @numa_node: numa node of the queue (unused)
 *
 * Return: always 0
 */
static int dnbd3_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(rq);

	mutex_init(&cmd->lock);
	cmd->dnbd3 = set->driver_data;
	cmd->requed = false;
	return 0;
}
/**
 * dnbd3_xmit_timeout - timeout handler for mq requests
 * @req: the timed out request
 * @reserved: reserved by the blk-mq timeout interface
 *
 * A command that is currently being sent (lock held) gets its timer reset.
 * Otherwise the command is requeued while the device is connected, or
 * failed with a timeout status when it is not.
 *
 * Return: BLK_EH_RESET_TIMER or BLK_EH_DONE
 */
static enum blk_eh_timer_return dnbd3_xmit_timeout(struct request *req,
		bool reserved)
{
	struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct dnbd3_device *dev = cmd->dnbd3;

	warn_dev(dev, "request timed out");
	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!dev->connected) {
		error_dev(dev, "connection timed out");
		dnbd3_end_cmd(cmd, BLK_STS_TIMEOUT);
	} else {
		info_dev(dev, "reset request to new socket");
		dnbd3_requeue_cmd(cmd);
	}
	mutex_unlock(&cmd->lock);
	return BLK_EH_DONE;
}
/**
 * struct blk_mq_ops - dnbd3_mq_ops
 * multiqueue operations registered with the block layer
 */
struct blk_mq_ops dnbd3_mq_ops = {
	.queue_rq = dnbd3_queue_rq,		/* dispatch a request to its socket */
	.init_request = dnbd3_init_request,	/* one-time pdu/lock setup per request */
	.timeout = dnbd3_xmit_timeout,		/* requeue or fail timed out requests */
};