summaryrefslogtreecommitdiffstats
path: root/kernel/queue.c
blob: 30a0112481cffa3df2819df1b52332c7aea4ad70 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
/*
 * queue.c - queues for requests to be submitted (tx_queue) 
 *           and outstanding requests (rx_queue)
 * Copyright (C) 2006 Thorsten Zitterell <thorsten@zitterell.de>
 */

#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <linux/spinlock.h>

#include <linux/in.h>

#include "dnbd.h"
#include "queue.h"

/* enqueue to a queue */
/*
 * dnbd_enq_request - insert a request at the head of a queue
 * @q:      target queue
 * @req:    block request to insert (linked via req->queuelist)
 * @wakeup: if non-zero, wake tasks sleeping on q->waiters afterwards
 *
 * The queue lock is IRQ-safe; the wakeup is issued outside the
 * critical section.
 */
void dnbd_enq_request(dnbd_queue_t * q, struct request *req, int wakeup)
{
	unsigned long irqflags;

	spin_lock_irqsave(&q->lock, irqflags);
	list_add(&req->queuelist, &q->head);
	spin_unlock_irqrestore(&q->lock, irqflags);

	if (wakeup)
		wake_up(&q->waiters);
}

/* dequeue from a queue with position */
/*
 * dnbd_deq_request_handle - remove the request matching a byte position
 * @q:   queue to search
 * @pos: byte offset to match (compared against req->sector << 9)
 *
 * Walks the queue under the IRQ-safe lock; the first entry whose start
 * offset equals @pos is unlinked and returned.  Returns NULL when no
 * entry matches.
 */
struct request *dnbd_deq_request_handle(dnbd_queue_t * q, uint64_t pos)
{
	struct list_head *iter;
	struct request *found = NULL;
	unsigned long irqflags;

	spin_lock_irqsave(&q->lock, irqflags);
	list_for_each(iter, &q->head) {
		struct request *req = blkdev_entry_to_request(iter);

		if ((((u64) req->sector) << 9) == pos) {
			list_del_init(&req->queuelist);
			found = req;
			break;
		}
	}
	spin_unlock_irqrestore(&q->lock, irqflags);

	return found;
}

/* dequeue from queue */
/*
 * dnbd_deq_request - pop the oldest request from a queue
 * @q: queue to take from
 *
 * Enqueuing adds at the list head, so the oldest entry sits at the
 * tail (q->head.prev); that one is unlinked and returned.  Returns
 * NULL when the queue is empty.  Never blocks.
 */
struct request *dnbd_deq_request(dnbd_queue_t * q)
{
	unsigned long irqflags;
	struct request *oldest = NULL;

	spin_lock_irqsave(&q->lock, irqflags);
	if (!list_empty(&q->head)) {
		oldest = blkdev_entry_to_request(q->head.prev);
		list_del_init(&oldest->queuelist);
	}
	spin_unlock_irqrestore(&q->lock, irqflags);

	return oldest;
}

/* sleep until request can be dequeued */
/* sleep until request can be dequeued */
/*
 * dnbd_try_deq_request - dequeue a request, sleeping until one arrives
 * @q: queue to take from
 *
 * First tries a non-blocking dequeue; if the queue is empty the task
 * registers itself on q->waiters and sleeps interruptibly until either
 * a request becomes available or a signal is pending.  May therefore
 * return NULL — callers must handle the signal-interrupted case.
 *
 * NOTE: the order below (set state to TASK_INTERRUPTIBLE, then re-check
 * the queue, then schedule) is what makes this race-free against a
 * concurrent dnbd_enq_request()/wake_up(): a wakeup arriving after the
 * re-check simply sets the task runnable again so schedule() returns
 * promptly instead of the wakeup being lost.
 */
struct request *dnbd_try_deq_request(dnbd_queue_t * q)
{
	struct request *req;


	req = dnbd_try_deq_request ? dnbd_deq_request(q) : NULL;
	if (!req) {
		struct task_struct *tsk = current;

		DECLARE_WAITQUEUE(wait, tsk);
		add_wait_queue(&q->waiters, &wait);

		for (;;) {

			/* must mark ourselves sleeping BEFORE re-checking,
			 * or a wakeup between check and sleep is lost */
			set_current_state(TASK_INTERRUPTIBLE);
			req = dnbd_deq_request(q);

			/* leave with a request, or bail out on a signal */
			if (req || signal_pending(current))
				break;

			schedule();
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&q->waiters, &wait);
	}

	return req;
}

/* requeue requests with timeout */
/*
 * dnbd_requeue_requests - move stale requests from one queue to another
 * @to:      destination queue (typically the tx_queue, for resubmission)
 * @from:    source queue (typically the rx_queue of outstanding requests)
 * @timeout: cutoff; entries with start_time < timeout are considered stale
 *
 * Every request in @from whose start_time lies before @timeout is
 * unlinked and added to @to.  Waiters on @to are woken once at the end.
 * Returns the number of requests moved.
 *
 * Lock order is always from->lock then to->lock, so callers must never
 * nest these queues' locks the other way around.
 */
int dnbd_requeue_requests(dnbd_queue_t * to, dnbd_queue_t * from,
			  unsigned long timeout)
{
	struct request *req = NULL;
	struct list_head *tmp, *keep;
	int requeued = 0;
	unsigned long flags;

	spin_lock_irqsave(&from->lock, flags);

	list_for_each_safe(tmp, keep, &from->head) {
		req = blkdev_entry_to_request(tmp);
		if (req->start_time < timeout) {
			requeued++;
			list_del_init(&req->queuelist);

			/*
			 * BUGFIX: interrupts are already disabled by the
			 * outer spin_lock_irqsave(), so take the inner lock
			 * with plain spin_lock().  The old code reused
			 * 'flags' in a nested spin_lock_irqsave(), which
			 * clobbered the saved IRQ state and left interrupts
			 * disabled after the outer unlock below.
			 */
			spin_lock(&to->lock);
			list_add(&req->queuelist, &to->head);
			spin_unlock(&to->lock);
		}
	}

	spin_unlock_irqrestore(&from->lock, flags);

	wake_up(&to->waiters);

	return requeued;
}