@@ -1613,7 +1613,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, i, vecs, nr_io_queues, size;
- nr_io_queues = dev->max_qid + 1;
+ nr_io_queues = dev->max_qid;
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -1653,7 +1653,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
adminq->q_db = dev->dbs;
}
- dev->max_qid = nr_io_queues - 1;
+ dev->max_qid = nr_io_queues;
/* Free previously allocated queues that are no longer usable */
nvme_free_queues(dev, nr_io_queues + 1);
@@ -1839,13 +1839,13 @@ static int nvme_pci_enable(struct nvme_dev *dev)
}
}
- if (vecs < 2) {
- dev_err(dev->ctrl.device, "Failed to get enough MSI/MSIX interrupts\n");
+ if (vecs < 1) {
+ dev_err(dev->ctrl.device, "Failed to get any MSI/MSIX interrupts\n");
result = -ENOSPC;
goto disable;
}
- dev->max_qid = vecs - 1;
+ dev->max_qid = vecs;
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
BugLink: http://bugs.launchpad.net/bugs/1651602 Change the required minimum number of MSI/MSIX interrupt vectors from 2 to 1; the admin queue and first I/O queue actually share vector 0. Also change the max_qid assignment to equal the number of vectors. Since the number of vectors is set to the number of cpus, on a system with only 1 cpu the previous code is guaranteed to fail, since there will only be 1 vector configured. Since max_qid is 0-based, the total number of queues (max_qid + 1) needs to be 1 larger than the (1-based) number of vectors, because the admin queue and the first I/O queue share the first vector. So max_qid is set equal to the number of vectors, resulting in the correct total number of queues: 1 more than the total number of vectors, due to the admin queue and first I/O queue sharing vector 0. Fixes: 96fce9e4025b ("NVMe: only setup MSIX once") Signed-off-by: Dan Streetman <dan.streetman@canonical.com> --- drivers/nvme/host/pci.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)