author     Matthew Wilcox    2011-02-06 13:28:06 +0100
committer  Matthew Wilcox    2011-11-04 20:52:55 +0100
commit     58ffacb545f76fc2c65d1fbfa5acf5184a2a09e6 (patch)
tree       382cf8004b88f9d2bd2b86d78121938afa863e42 /drivers/block/nvme.c
parent     NVMe: Call put_nvmeq() before calling nvme_submit_sync_cmd() (diff)
NVMe: Add a module parameter to use a threaded interrupt
We're currently calling bio_endio from hard interrupt context. This is
not a good idea for preemptible kernels as it will cause longer latencies.
Using a threaded interrupt will run the entire queue processing mechanism
(including bio_endio) in a thread, which can be preempted. Unfortunately,
it also adds about 7us of latency to the single-I/O case, so make it a
module parameter for the moment.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
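Because use_threaded_interrupts is registered with module_param permissions
of 0, it gets no sysfs entry and can only be set when the driver is loaded.
Assuming the driver is built as a module named nvme (inferred from the file
name, not stated in this patch), enabling the threaded path would look like:

	modprobe nvme use_threaded_interrupts=1

or, if the driver is built in, nvme.use_threaded_interrupts=1 on the kernel
command line.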
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--  drivers/block/nvme.c  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 1c3cd6cc0ad9..60c3786bc787 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -44,6 +44,9 @@
static int nvme_major;
module_param(nvme_major, int, 0);
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
@@ -455,6 +458,25 @@ static irqreturn_t nvme_irq(int irq, void *data)
return nvme_process_cq(data);
}
+static irqreturn_t nvme_irq_thread(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
spin_lock_irq(&nvmeq->q_lock);
@@ -630,6 +652,11 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
const char *name)
{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq_thread,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
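A note on how the two handlers cooperate: request_threaded_irq() registers
nvme_irq_check() as the quick handler that runs in hard interrupt context;
when it returns IRQ_WAKE_THREAD, the kernel wakes nvme_irq_thread(), which
does the actual completion-queue processing in a preemptible kernel thread.
The check itself relies on the NVMe phase tag: bit 0 of each completion
entry's status field, which the controller inverts on every pass around the
ring, so an entry is new exactly when its phase bit matches the queue's
expected cq_phase. Below is a minimal standalone sketch of that phase-tag
test, using simplified stand-in types (struct cq_entry, struct cq,
cq_entry_pending and cq_advance are illustrative names, not the driver's
own; the driver's le16_to_cpu conversion is elided for brevity):

	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified stand-ins for the driver's completion-queue state. */
	struct cq_entry {
		uint16_t status;		/* bit 0 is the phase tag */
	};

	struct cq {
		struct cq_entry *entries;	/* ring of completion entries */
		unsigned int head;		/* next entry to consume */
		unsigned int size;		/* number of entries in the ring */
		uint8_t phase;			/* phase expected for new entries */
	};

	/* True when the controller has posted a fresh entry at the head;
	 * this is the same test nvme_irq_check() makes before deciding
	 * between IRQ_NONE and IRQ_WAKE_THREAD. */
	static bool cq_entry_pending(const struct cq *q)
	{
		return (q->entries[q->head].status & 1) == q->phase;
	}

	/* Consume one entry; on wrap-around the controller starts writing
	 * the opposite phase value, so flip the expected phase too. */
	static void cq_advance(struct cq *q)
	{
		if (++q->head == q->size) {
			q->head = 0;
			q->phase = !q->phase;
		}
	}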