commit ee9a569ab88edd0755402aaf31ec0c69decf7756
Author:     Alexey Kardashevskiy <aik@ozlabs.ru>
AuthorDate: 2015-01-29 06:04:58 +0100
Commit:     Alexander Graf <agraf@suse.de>
CommitDate: 2015-03-09 14:59:52 +0100
Parent:     Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
spapr_vio/spapr_iommu: Move VIO bypass where it belongs
Instead of tweaking the TCE table device by adding a bypass flag to it,
let's add an alias to RAM alongside the IOMMU memory region and
enable/disable the two according to the selected bypass mode.
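For illustration, here is a minimal sketch of what this can look like on the VIO side (the spapr_vio.c hunks are not part of this diff, so the field and region names below are assumptions): a root container holds the IOMMU region plus a higher-priority alias of system RAM, and bypass mode simply toggles which of the two is enabled.

```c
/* Sketch only: assumed VIO-side setup, not taken from this diff.
 * dev->mrroot contains both the IOMMU region and a full-RAM alias;
 * the alias has higher priority, so enabling it bypasses the IOMMU.
 */
memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root", ram_size);
memory_region_init_alias(&dev->mrbypass, OBJECT(dev), "iommu-spapr-bypass",
                         get_system_memory(), 0, ram_size);
memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
memory_region_set_enabled(&dev->mrbypass, false);   /* bypass off by default */
memory_region_add_subregion(&dev->mrroot, 0,
                            spapr_tce_get_iommu(dev->tcet));
address_space_init(&dev->as, &dev->mrroot, "spapr-vio-dma");
```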
This way the IOMMU memory region can have the size of the actual DMA window
rather than ram_size, which is essential for the upcoming DDW (dynamic DMA
windows) support.
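As a worked example of the new sizing (the numbers are illustrative, not from this patch): with page_shift = 12 (4 KiB pages) and nb_table = 0x10000 entries, the region now spans only the window the table can actually translate.

```c
/* Illustrative values only: 0x10000 entries of 4 KiB each */
uint64_t window = (uint64_t)0x10000 << 12;  /* 0x10000000 = 256 MiB, not ram_size */
```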
This moves the bypass logic to the VIO layer and keeps the @bypass flag in
the TCE table for migration compatibility only. It replaces
spapr_tce_set_bypass() calls with explicit assignment to avoid confusion,
as the function name suggested it might do more than just sync the @bypass
flag.
This adds a pointer to the VIO device to the sPAPRTCETable struct, giving
the TCE table device a way to update the bypass mode of its VIO device.
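The struct change itself lives in a header outside this diff; judging from the post_load hunk below, which dereferences tcet->vdev, it amounts to something like:

```c
/* Sketch: the back-pointer added to sPAPRTCETable (other fields elided) */
struct sPAPRTCETable {
    /* ... liobn, table, nb_table, page_shift, bypass, ... */
    VIOsPAPRDevice *vdev;   /* owning VIO device, for bypass updates */
};
```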
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'hw/ppc/spapr_iommu.c')
 hw/ppc/spapr_iommu.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index ba003da39e..f3990fdc32 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 
 #include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
 
 #include <libfdt.h>
 
@@ -73,9 +74,7 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
         .perm = IOMMU_NONE,
     };
 
-    if (tcet->bypass) {
-        ret.perm = IOMMU_RW;
-    } else if ((addr >> tcet->page_shift) < tcet->nb_table) {
+    if ((addr >> tcet->page_shift) < tcet->nb_table) {
         /* Check if we are in bound */
         hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
 
@@ -91,10 +90,22 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
     return ret;
 }
 
+static int spapr_tce_table_post_load(void *opaque, int version_id)
+{
+    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
+
+    if (tcet->vdev) {
+        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
+    }
+
+    return 0;
+}
+
 static const VMStateDescription vmstate_spapr_tce_table = {
     .name = "spapr_iommu",
     .version_id = 2,
     .minimum_version_id = 2,
+    .post_load = spapr_tce_table_post_load,
     .fields = (VMStateField []) {
         /* Sanity check */
         VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
@@ -132,7 +143,8 @@ static int spapr_tce_table_realize(DeviceState *dev)
     trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);
 
     memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
-                             "iommu-spapr", ram_size);
+                             "iommu-spapr",
+                             (uint64_t)tcet->nb_table << tcet->page_shift);
 
     QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
 
@@ -192,17 +204,11 @@ MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
     return &tcet->iommu;
 }
 
-void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
-{
-    tcet->bypass = bypass;
-}
-
 static void spapr_tce_reset(DeviceState *dev)
 {
     sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
     size_t table_size = tcet->nb_table * sizeof(uint64_t);
 
-    tcet->bypass = false;
     memset(tcet->table, 0, table_size);
 }
 