Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--  drivers/mmc/card/queue.c  |  34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index d6ded247d941..4e42d030e097 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -30,9 +30,9 @@
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
/*
- * We only like normal block requests.
+ * We only like normal block requests and discards.
*/
- if (!blk_fs_request(req)) {
+ if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
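
For context: after this change, mmc_prep_request() passes normal filesystem I/O and discards, and kills everything else. A minimal sketch of the resulting predicate (the helper name is hypothetical, assuming the 2.6.36-era REQ_TYPE_FS and REQ_DISCARD definitions from the block layer):

/* Sketch, not part of the patch: what the check above accepts. */
static bool mmc_req_acceptable(struct request *req)
{
	if (req->cmd_type == REQ_TYPE_FS)	/* normal read/write I/O */
		return true;
	if (req->cmd_flags & REQ_DISCARD)	/* discards carry a flag, not a cmd_type */
		return true;
	return false;				/* everything else -> BLKPREP_KILL */
}
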
@@ -128,11 +128,25 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ if (mmc_can_erase(card)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
+ mq->queue->limits.max_discard_sectors = UINT_MAX;
+ if (card->erased_byte == 0)
+ mq->queue->limits.discard_zeroes_data = 1;
+ if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
+ mq->queue->limits.discard_granularity =
+ card->erase_size << 9;
+ mq->queue->limits.discard_alignment =
+ card->erase_size << 9;
+ }
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
+ mq->queue);
+ }
#ifdef CONFIG_MMC_BLOCK_BOUNCE
- if (host->max_hw_segs == 1) {
+ if (host->max_segs == 1) {
unsigned int bouncesz;
bouncesz = MMC_QUEUE_BOUNCESZ;
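
The << 9 above converts card->erase_size, which the mmc core keeps in 512-byte sectors, into the byte units that queue_limits.discard_granularity and discard_alignment expect. The limits are only advertised when the card cannot trim (trim can erase at fine granularity, so no coarser alignment needs to be exposed) and the erase group size is a power of two. A standalone sketch of just the unit conversion (names are mine, not kernel API):

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the block layer */

/* Sketch: sectors -> bytes, the same conversion as erase_size << 9. */
static unsigned int sectors_to_bytes(unsigned int sectors)
{
	return sectors << SECTOR_SHIFT;
}

int main(void)
{
	unsigned int erase_size = 1024;	/* hypothetical 512 KiB erase group */
	printf("discard granularity: %u bytes\n", sectors_to_bytes(erase_size));
	return 0;
}
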
@@ -182,21 +196,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_hw_segs);
+ blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_phys_segs, GFP_KERNEL);
+ host->max_segs, GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
- sg_init_table(mq->sg, host->max_phys_segs);
+ sg_init_table(mq->sg, host->max_segs);
}
- init_MUTEX(&mq->thread_sem);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
+ sema_init(&mq->thread_sem, 1);
+
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
+		host->index);
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
goto free_bounce_sg;
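
Two independent cleanups end the hunk: init_MUTEX(), which was removed from the kernel, is replaced by its literal definition sema_init(&sem, 1), a binary semaphore used as a mutex; and the queue thread is named per host ("mmcqd/0", "mmcqd/1", ...) via host->index instead of the shared bare "mmcqd", so multiple controllers are distinguishable in ps/top. A kernel-style sketch of the semaphore equivalence (assuming <linux/semaphore.h>):

#include <linux/semaphore.h>

static struct semaphore thread_sem;

static void thread_sem_example(void)
{
	/* init_MUTEX(&thread_sem) was #defined as exactly this call: */
	sema_init(&thread_sem, 1);	/* count 1: the first down() succeeds */

	down(&thread_sem);		/* acquire (may sleep) */
	up(&thread_sem);		/* release */
}
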