block/nvme: Use host PCI MMIO API
Use the host PCI MMIO functions to read/write to NVMe registers, rather than directly accessing them. Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Thomas Huth <thuth@redhat.com> Signed-off-by: Farhan Ali <alifm@linux.ibm.com> Message-id: 20250430185012.2303-4-alifm@linux.ibm.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
40f940923f
commit
624379be3a
1 changed file with 23 additions and 18 deletions
41
block/nvme.c
41
block/nvme.c
|
|
@@ -18,6 +18,7 @@
|
|||
#include "qobject/qstring.h"
|
||||
#include "qemu/defer-call.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/host-pci-mmio.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/cutils.h"
|
||||
|
|
@@ -60,7 +61,7 @@ typedef struct {
|
|||
uint8_t *queue;
|
||||
uint64_t iova;
|
||||
/* Hardware MMIO register */
|
||||
volatile uint32_t *doorbell;
|
||||
uint32_t *doorbell;
|
||||
} NVMeQueue;
|
||||
|
||||
typedef struct {
|
||||
|
|
@@ -100,7 +101,7 @@ struct BDRVNVMeState {
|
|||
QEMUVFIOState *vfio;
|
||||
void *bar0_wo_map;
|
||||
/* Memory mapped registers */
|
||||
volatile struct {
|
||||
struct {
|
||||
uint32_t sq_tail;
|
||||
uint32_t cq_head;
|
||||
} *doorbells;
|
||||
|
|
@@ -292,7 +293,7 @@ static void nvme_kick(NVMeQueuePair *q)
|
|||
assert(!(q->sq.tail & 0xFF00));
|
||||
/* Fence the write to submission queue entry before notifying the device. */
|
||||
smp_wmb();
|
||||
*q->sq.doorbell = cpu_to_le32(q->sq.tail);
|
||||
host_pci_stl_le_p(q->sq.doorbell, q->sq.tail);
|
||||
q->inflight += q->need_kick;
|
||||
q->need_kick = 0;
|
||||
}
|
||||
|
|
@@ -441,7 +442,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
|
|||
if (progress) {
|
||||
/* Notify the device so it can post more completions. */
|
||||
smp_mb_release();
|
||||
*q->cq.doorbell = cpu_to_le32(q->cq.head);
|
||||
host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
|
||||
nvme_wake_free_req_locked(q);
|
||||
}
|
||||
|
||||
|
|
@@ -460,7 +461,7 @@ static void nvme_process_completion_bh(void *opaque)
|
|||
* so notify the device that it has space to fill in more completions now.
|
||||
*/
|
||||
smp_mb_release();
|
||||
*q->cq.doorbell = cpu_to_le32(q->cq.head);
|
||||
host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
|
||||
nvme_wake_free_req_locked(q);
|
||||
|
||||
nvme_process_completion(q);
|
||||
|
|
@@ -749,9 +750,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
int ret;
|
||||
uint64_t cap;
|
||||
uint32_t ver;
|
||||
uint32_t cc;
|
||||
uint64_t timeout_ms;
|
||||
uint64_t deadline, now;
|
||||
volatile NvmeBar *regs = NULL;
|
||||
NvmeBar *regs = NULL;
|
||||
|
||||
qemu_co_mutex_init(&s->dma_map_lock);
|
||||
qemu_co_queue_init(&s->dma_flush_queue);
|
||||
|
|
@@ -779,7 +781,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
/* Perform initialize sequence as described in NVMe spec "7.6.1
|
||||
* Initialization". */
|
||||
|
||||
cap = le64_to_cpu(regs->cap);
|
||||
cap = host_pci_ldq_le_p(&regs->cap);
|
||||
trace_nvme_controller_capability_raw(cap);
|
||||
trace_nvme_controller_capability("Maximum Queue Entries Supported",
|
||||
1 + NVME_CAP_MQES(cap));
|
||||
|
|
@@ -805,16 +807,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
bs->bl.request_alignment = s->page_size;
|
||||
timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
|
||||
|
||||
ver = le32_to_cpu(regs->vs);
|
||||
ver = host_pci_ldl_le_p(&regs->vs);
|
||||
trace_nvme_controller_spec_version(extract32(ver, 16, 16),
|
||||
extract32(ver, 8, 8),
|
||||
extract32(ver, 0, 8));
|
||||
|
||||
/* Reset device to get a clean state. */
|
||||
regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
|
||||
cc = host_pci_ldl_le_p(&regs->cc);
|
||||
host_pci_stl_le_p(&regs->cc, cc & 0xFE);
|
||||
/* Wait for CSTS.RDY = 0. */
|
||||
deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
|
||||
while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
|
||||
while (NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
|
||||
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
|
||||
error_setg(errp, "Timeout while waiting for device to reset (%"
|
||||
PRId64 " ms)",
|
||||
|
|
@@ -843,19 +846,21 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
s->queues[INDEX_ADMIN] = q;
|
||||
s->queue_count = 1;
|
||||
QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
|
||||
regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
|
||||
((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
|
||||
regs->asq = cpu_to_le64(q->sq.iova);
|
||||
regs->acq = cpu_to_le64(q->cq.iova);
|
||||
host_pci_stl_le_p(&regs->aqa,
|
||||
((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
|
||||
((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
|
||||
host_pci_stq_le_p(&regs->asq, q->sq.iova);
|
||||
host_pci_stq_le_p(&regs->acq, q->cq.iova);
|
||||
|
||||
/* After setting up all control registers we can enable device now. */
|
||||
regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
|
||||
(ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
|
||||
CC_EN_MASK);
|
||||
host_pci_stl_le_p(&regs->cc,
|
||||
(ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
|
||||
(ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
|
||||
CC_EN_MASK);
|
||||
/* Wait for CSTS.RDY = 1. */
|
||||
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
deadline = now + timeout_ms * SCALE_MS;
|
||||
while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
|
||||
while (!NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
|
||||
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
|
||||
error_setg(errp, "Timeout while waiting for device to start (%"
|
||||
PRId64 " ms)",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue