author    Philipp Reisner <philipp.reisner@linbit.com>  2010-06-23 17:18:51 +0200
committer Philipp Reisner <philipp.reisner@linbit.com>  2010-10-14 15:05:08 +0200
commit    cfa03415a14dd0055f2ff8c3d348d4c1452acba6 (patch)
tree      760aa04f76edd0333324705e86b7a7a306f457c4 /drivers/block/drbd/drbd_req.c
parent    drbd: Fixed a deadlock, probably only affected UP machines (diff)
drbd: Allow tl_restart() to do IO completion while IO is suspended
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--  drivers/block/drbd/drbd_req.c  34
1 file changed, 20 insertions(+), 14 deletions(-)
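What this commit boils down to, ahead of the full diff: _req_may_be_done() loses its early return on mdev->state.susp, and the new wrapper _req_may_be_done_not_susp() takes over that check at the call sites that must not complete requests while IO is suspended. A minimal stand-alone sketch of the wrapper; the stub structs are illustrative reductions, not the real drbd_conf and drbd_request:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs: only the one field the wrapper reads. */
struct drbd_conf { struct { bool susp; } state; };
struct drbd_request { struct drbd_conf *mdev; };
struct bio_and_error { int error; };

/* Stand-in for the pre-existing completion routine; after this
 * commit it no longer checks state.susp itself. */
static void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	(void)req;
	printf("request completed, error=%d\n", m->error);
}

/* The new wrapper: complete the request only while IO is not
 * suspended. */
static void _req_may_be_done_not_susp(struct drbd_request *req,
				      struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;

	if (!mdev->state.susp)
		_req_may_be_done(req, m);
}

int main(void)
{
	struct drbd_conf mdev = { .state = { .susp = true } };
	struct drbd_request req = { .mdev = &mdev };
	struct bio_and_error m = { .error = 0 };

	_req_may_be_done_not_susp(&req, &m);	/* no-op: IO is suspended */
	mdev.state.susp = false;
	_req_may_be_done_not_susp(&req, &m);	/* completes normally */
	return 0;
}

Call sites that must still make progress during suspension, so that tl_restart() can complete in-flight IO, bypass the wrapper and keep calling _req_may_be_done() directly; those are the spots marked /* Allowed while state.susp */ in the diff below.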
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 76b668245612..4e1e10d67c4b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -226,8 +226,6 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
return;
if (s & RQ_LOCAL_PENDING)
return;
- if (mdev->state.susp)
- return;
if (req->master_bio) {
/* this is data_received (remote read)
@@ -284,6 +282,14 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
* protocol A or B, barrier ack still pending... */
}
+static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+{
+ struct drbd_conf *mdev = req->mdev;
+
+ if (!mdev->state.susp)
+ _req_may_be_done(req, m);
+}
+
/*
* checks whether there was an overlapping request
* or ee already registered.
@@ -425,7 +431,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
req->rq_state &= ~RQ_LOCAL_PENDING;
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
put_ldev(mdev);
break;
@@ -434,7 +440,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~RQ_LOCAL_PENDING;
__drbd_chk_io_error(mdev, FALSE);
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
put_ldev(mdev);
break;
@@ -442,7 +448,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* it is legal to fail READA */
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
put_ldev(mdev);
break;
@@ -460,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* no point in retrying if there is no good remote data,
* or we have no connection. */
if (mdev->state.pdsk != D_UP_TO_DATE) {
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
break;
}
@@ -546,7 +552,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~RQ_NET_QUEUED;
/* if we did it right, tl_clear should be scheduled only after
* this, so this should not be necessary! */
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
break;
case handed_over_to_network:
@@ -571,7 +577,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* "completed_ok" events came in, once we return from
* _drbd_send_zc_bio (drbd_send_dblock), we have to check
* whether it is done already, and end it. */
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
break;
case read_retry_remote_canceled:
@@ -587,7 +593,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* if it is still queued, we may not complete it here.
* it will be canceled soon. */
if (!(req->rq_state & RQ_NET_QUEUED))
- _req_may_be_done(req, m);
+ _req_may_be_done(req, m); /* Allowed while state.susp */
break;
case write_acked_by_peer_and_sis:
@@ -622,7 +628,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
break;
case neg_acked:
@@ -632,7 +638,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
/* else: done by handed_over_to_network */
break;
@@ -640,7 +646,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
- _req_may_be_done(req, m);
+ _req_may_be_done(req, m); /* Allowed while state.susp */
break;
case restart_frozen_disk_io:
@@ -685,7 +691,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
}
D_ASSERT(req->rq_state & RQ_NET_SENT);
req->rq_state |= RQ_NET_DONE;
- _req_may_be_done(req, m);
+ _req_may_be_done(req, m); /* Allowed while state.susp */
break;
case data_received:
@@ -693,7 +699,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
- _req_may_be_done(req, m);
+ _req_may_be_done_not_susp(req, m);
break;
};
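Summing up the diff above: the ordinary completion events (completed_ok, the error variants, the send and ack events, data_received) now funnel through the suspend-aware wrapper, while the three direct calls marked /* Allowed while state.susp */ sit on paths driven by tl_restart()/tl_clear(), which must be able to finish requests during suspension. A reduced, self-contained sketch of that split in the spirit of __req_mod(); the stub types and the three-event enum are illustrative simplifications of the kernel's definitions:

#include <stdbool.h>

struct drbd_conf { struct { bool susp; } state; };
struct drbd_request { struct drbd_conf *mdev; };
struct bio_and_error { int error; };

static void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	(void)req;
	(void)m;	/* actual completion elided in this sketch */
}

static void _req_may_be_done_not_susp(struct drbd_request *req,
				      struct bio_and_error *m)
{
	if (!req->mdev->state.susp)
		_req_may_be_done(req, m);
}

/* Hypothetical three-event subset of the real enum drbd_req_event. */
enum drbd_req_event {
	completed_ok,
	neg_acked,
	barrier_acked,
};

static void req_mod_sketch(struct drbd_request *req, enum drbd_req_event what,
			   struct bio_and_error *m)
{
	switch (what) {
	case completed_ok:
	case neg_acked:
		/* Ordinary completion paths: deferred while state.susp
		 * is set, so the request is still around when IO resumes. */
		_req_may_be_done_not_susp(req, m);
		break;
	case barrier_acked:
		/* Driven by tl_restart()/tl_clear(): must be able to
		 * complete requests even while IO is suspended. */
		_req_may_be_done(req, m);	/* Allowed while state.susp */
		break;
	}
}

int main(void)
{
	struct drbd_conf mdev = { .state = { .susp = true } };
	struct drbd_request req = { .mdev = &mdev };
	struct bio_and_error m = { .error = 0 };

	req_mod_sketch(&req, completed_ok, &m);		/* deferred */
	req_mod_sketch(&req, barrier_acked, &m);	/* completes anyway */
	return 0;
}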