Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c  13
-rw-r--r--  block/blk-core.c       10
-rw-r--r--  block/blk-mq-tag.c     28
-rw-r--r--  block/blk-mq.c          2
-rw-r--r--  block/elevator.c        2
-rw-r--r--  block/scsi_ioctl.c      2
6 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_iter iter;
- struct bio_vec *bv;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned int i, ret = 0;
+ unsigned int ret = 0;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
- iter.data_buf = kaddr + bv->bv_offset;
- iter.data_size = bv->bv_len;
+ iter.data_buf = kaddr + bv.bv_offset;
+ iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
diff --git a/block/blk-core.c b/block/blk-core.c
index 0421b53e6431..ea1c4d0d7a44 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1266,7 +1266,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- if (blk_rq_tagged(rq))
+ if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
@@ -1325,7 +1325,7 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
@@ -2134,7 +2134,7 @@ void blk_account_io_done(struct request *req)
}
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/*
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
@@ -2554,7 +2554,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
- if (blk_rq_tagged(req))
+ if (req->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(req->q, req);
BUG_ON(blk_queued_rq(req));
@@ -3159,7 +3159,7 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/**
* blk_pm_runtime_init - Block layer runtime PM initialization routine
* @q: the queue of the device
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 8317175a3009..728b9a4d5f56 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -584,6 +584,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
return 0;
}
+/**
+ * blk_mq_unique_tag() - return a tag that is unique queue-wide
+ * @rq: request for which to compute a unique tag
+ *
+ * The tag field in struct request is unique per hardware queue but not over
+ * all hardware queues. Hence this function that returns a tag with the
+ * hardware context index in the upper bits and the per hardware queue tag in
+ * the lower bits.
+ *
+ * Note: When called for a request that is queued on a non-multiqueue request
+ * queue, the hardware context index is set to zero.
+ */
+u32 blk_mq_unique_tag(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ int hwq = 0;
+
+ if (q->mq_ops) {
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hwq = hctx->queue_num;
+ }
+
+ return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+ (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
+}
+EXPORT_SYMBOL(blk_mq_unique_tag);
+
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
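
A minimal sketch (not part of this patch) of how a driver might consume the queue-wide tag returned by blk_mq_unique_tag() above. It assumes the companion decode helpers blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag(), together with a 16-bit BLK_MQ_UNIQUE_TAG_BITS/BLK_MQ_UNIQUE_TAG_MASK split, are provided in include/linux/blk-mq.h alongside this change:

#include <linux/blk-mq.h>

/* Sketch only: log the queue-wide identity of a request.  The decode
 * helpers are assumed to mirror the encoding in blk_mq_unique_tag():
 * hardware context index in the upper bits, per-hwq tag in the lower bits.
 */
static void example_log_unique_tag(struct request *rq)
{
	u32 unique_tag = blk_mq_unique_tag(rq);

	pr_debug("request on hwq %u, tag %u\n",
		 blk_mq_unique_tag_to_hwq(unique_tag),
		 blk_mq_unique_tag_to_tag(unique_tag));
}
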
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1d016fc9a8b6..92ceef0d2ab9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2049,6 +2049,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
+
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
diff --git a/block/elevator.c b/block/elevator.c
index afa3b037a17c..59794d0d38e3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -539,7 +539,7 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b0c2a616c8f9..28163fad3c5d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -142,7 +142,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
__set_bit(VERIFY_16, filter->read_ok);
__set_bit(REPORT_LUNS, filter->read_ok);
- __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
__set_bit(MAINTENANCE_IN, filter->read_ok);
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);