summaryrefslogtreecommitdiffstats
path: root/src/kernel/mq.c
blob: ee6057441904228ecdbf256e9c7d9a8d6ff79be3 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/*
 * mq.c
 *
 *  Created on: Jun 26, 2019
 *      Author: fred
 */


#include "mq.h"

#define DNBD3_CMD_REQUEUED 1


/*
 * Handle a single block-layer command on hardware queue @index.
 *
 * Currently a stub: it only logs the command cookie and device minor and
 * reports failure. The request itself is not yet touched here; the caller
 * (dnbd3_queue_rq) maps the return value onto a blk_status_t.
 *
 * Returns 0 on success, a negative value on error (always -1 for now).
 */
static int dnbd3_handle_cmd(struct dnbd3_cmd *cmd, int index)
{
	struct dnbd3_device_t *dev = cmd->dnbd3;
	int ret = -1;

	/* NOTE(review): 'index' (the hw queue number) is unused until the
	 * per-socket dispatch is implemented. The previous unused
	 * blk_mq_rq_from_pdu() local was removed to silence
	 * -Wunused-variable; reintroduce it when the request is processed. */
	printk(KERN_DEBUG "dnbd3: handle command %i device %i\n", cmd->cmd_cookie, dev->minor);

	return ret;
}

/*
 * blk-mq .queue_rq callback: dispatch one request from hardware context
 * @hctx.
 *
 * Serializes per-command state under cmd->lock, clears any stale requeue
 * flag, and delegates the actual work to dnbd3_handle_cmd().
 *
 * Returns BLK_STS_OK on success or BLK_STS_IOERR if the handler failed.
 */
static blk_status_t dnbd3_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
{
	struct dnbd3_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct dnbd3_device_t *dev = cmd->dnbd3;
	blk_status_t sts;
	int ret;

	printk(KERN_DEBUG "dnbd3: queue request device %i\n", dev->minor);

	mutex_lock(&cmd->lock);
	clear_bit(DNBD3_CMD_REQUEUED, &cmd->flags);

	ret = dnbd3_handle_cmd(cmd, hctx->queue_num);
	/* Map the int error convention (<0 = failure) onto blk_status_t
	 * explicitly instead of reusing one variable for both domains. */
	sts = ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return sts;
}

/*
 * blk-mq .complete callback, invoked when a request is marked complete.
 *
 * Currently a stub that only logs the call.
 * NOTE(review): a .complete handler is normally expected to finish the
 * request (e.g. via blk_mq_end_request()); as written, requests routed
 * here are never ended — confirm once the data path is implemented.
 */
static void dnbd3_complete_rq(struct request *req)
{
	printk(KERN_DEBUG "dnbd3: dnbd3_complete_rq\n");

}

/*
 * blk-mq .init_request callback: initialize the per-request PDU
 * (struct dnbd3_cmd) that the block layer allocates alongside each
 * request. Stores the owning device (from the tag set's driver_data),
 * clears the flag word and sets up the per-command mutex.
 *
 * Always returns 0 (cannot fail).
 */
static int dnbd3_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node)
{
	struct dnbd3_cmd *pdu = blk_mq_rq_to_pdu(rq);

	pdu->dnbd3 = set->driver_data;
	pdu->flags = 0;
	mutex_init(&pdu->lock);

	return 0;
}
/*
 * blk-mq .timeout callback, invoked when a request exceeds its deadline.
 *
 * Currently a stub that logs the event and returns BLK_EH_DONE, telling
 * the block layer the timeout has been handled by the driver.
 * NOTE(review): returning BLK_EH_DONE without completing or requeuing the
 * request will leave it hanging — confirm the intended recovery path.
 */
static enum blk_eh_timer_return dnbd3_xmit_timeout(struct request *req, bool reserved)
{
	printk(KERN_DEBUG "dnbd3: dnbd3_xmit_timeout\n");
	return BLK_EH_DONE;
}


/*
 * blk-mq operations table for the dnbd3 driver; non-static because it is
 * referenced from outside this file (declared in mq.h) when the tag set
 * is configured.
 */
struct blk_mq_ops dnbd3_mq_ops = {
	.queue_rq = dnbd3_queue_rq,		/* dispatch one request */
	.complete = dnbd3_complete_rq,	/* request completion (stub) */
	.init_request = dnbd3_init_request,	/* init per-request PDU */
	.timeout = dnbd3_xmit_timeout,	/* request timeout (stub) */
};