/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "../rdma_utils.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h"
#include "pvrdma_dev_ring.h"
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     struct pvrdma_ring *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    /* TODO: Decide whether the device should reset the driver's ring state
    qatomic_set(&ring->ring_state->prod_tail, 0);
    qatomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    /* Zero-allocate so slots skipped below stay NULL for the cleanup path */
    ring->pages = g_new0(void *, npages);

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            rdma_error_report("Failed to map page %d in ring %s", i, name);
            goto out_free;
        }

        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}
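
/*
 * Return a pointer to the element at the current consumer position, or
 * NULL if the ring is empty. The consumer index is not advanced; call
 * pvrdma_ring_read_inc() once the element has been consumed.
 */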
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    int e;
    unsigned int idx = 0, offset;

    e = pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx);
    if (e <= 0) {
        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
        return NULL;
    }

    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
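
/*
 * Advance the consumer index after the element returned by
 * pvrdma_ring_next_elem_read() has been processed.
 */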
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems);
}
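
/*
 * Return a pointer to the free slot at the current producer position, or
 * NULL if the ring is full or the producer index is inconsistent. The
 * caller fills the slot and then publishes it with pvrdma_ring_write_inc().
 */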
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    int idx;
    unsigned int offset, tail;

    idx = pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail);
    if (idx <= 0) {
        rdma_error_report("Ring %s is full", ring->name);
        return NULL;
    }

    idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems);
    if (idx < 0 || tail != idx) {
        rdma_error_report("Invalid idx %d in ring %s", idx, ring->name);
        return NULL;
    }

    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
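
/*
 * Advance the producer index, making the element just written visible to
 * the consumer.
 */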
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems);
}
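
/*
 * Unmap all ring pages and release the page array. Safe to call with a
 * NULL ring or with a ring whose pages were never allocated.
 */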
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring) {
        return;
    }

    if (!ring->pages) {
        return;
    }

    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}
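
/*
 * Typical flow (illustrative sketch only; the real callers live elsewhere
 * in hw/rdma/vmw, and pci_dev, ring_state, tbl, npages and max_elems here
 * stand in for values taken from the guest's command channel):
 *
 *     PvrdmaRing ring;
 *     struct pvrdma_cqe *cqe;
 *
 *     if (!pvrdma_ring_init(&ring, "dev_cq", pci_dev, ring_state, max_elems,
 *                           sizeof(struct pvrdma_cqe), tbl, npages)) {
 *         cqe = pvrdma_ring_next_elem_write(&ring);
 *         if (cqe) {
 *             ... fill in *cqe ...
 *             pvrdma_ring_write_inc(&ring);
 *         }
 *         pvrdma_ring_free(&ring);
 *     }
 */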