Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
 drivers/gpu/drm/i915/gvt/cfg_space.c  | 21
 drivers/gpu/drm/i915/gvt/cmd_parser.c |  2
 drivers/gpu/drm/i915/gvt/display.c    |  3
 drivers/gpu/drm/i915/gvt/execlist.c   |  6
 drivers/gpu/drm/i915/gvt/gtt.c        |  6
 drivers/gpu/drm/i915/gvt/handlers.c   | 92
 drivers/gpu/drm/i915/gvt/mmio.h       |  2
 drivers/gpu/drm/i915/gvt/scheduler.c  | 55
 drivers/gpu/drm/i915/gvt/scheduler.h  |  3
 9 files changed, 127 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ab19545d59a1..4ce2e6bd0680 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return 0;
}
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+ u32 new = *(u32 *)(p_data);
+
+ if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+ /* We don't have a ROM, so report a size of 0. */
+ *pval = 0;
+ else
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
switch (rounddown(offset, 4)) {
+ case PCI_ROM_ADDRESS:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
pci_resource_len(gvt->dev_priv->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+ memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
/**
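The new emulate_pci_rom_bar_write() services the standard PCI BAR sizing handshake: the guest writes all 1s to the ROM BAR and reads back a size mask, and since the vGPU exposes no option ROM the handler answers with 0. Below is a minimal guest-side sketch of that probe; the probe_rom_size() helper and the save/restore sequence are illustrative only and not part of this patch, though the config accessors and PCI_ROM_ADDRESS* constants are the standard kernel ones.

	/* Guest side: size the expansion ROM BAR by writing all 1s and reading back. */
	static u32 probe_rom_size(struct pci_dev *pdev)
	{
		u32 orig, sz;

		pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &orig);
		pci_write_config_dword(pdev, PCI_ROM_ADDRESS, ~0U);
		pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &sz);
		pci_write_config_dword(pdev, PCI_ROM_ADDRESS, orig);

		/* The vGPU handler above returns 0 here, i.e. "no expansion ROM". */
		sz &= PCI_ROM_ADDRESS_MASK;
		return sz ? (~sz + 1) : 0;
	}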
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 701a3c6f1669..85d4c57870fb 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
- uint32_t bb_size;
+ int bb_size;
void *dst = NULL;
int ret = 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c318439a659..309f3fa6794a 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
/* Clear host CRT status, so guest couldn't detect this host CRT. */
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -307,6 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be18e4a9..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2801d70579d8..8e331142badb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
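A quick check of the corrected mask arithmetic, using the GTT_HAW value of 46 defined just above (numbers shown purely for illustration):

	/* 4K entries cover address bits [45:12], i.e. 46 - 12 = 34 mask bits:    */
	/*   ADDR_4K_MASK = ((1UL << 34) - 1) << 12 = 0x00003ffffffff000          */
	/* The previous "GTT_HAW - 12 + 1" form built a 35-bit field, so bit 46,  */
	/* which lies outside the 46-bit hardware address width, leaked into the  */
	/* decoded address. The same off-by-one applied to the 2M and 1G masks.   */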
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e71b92..1f840f6b81bb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
return 0;
}
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert a MMIO offset into a ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * The ring id on success, a negative error code on failure.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+ unsigned int offset)
{
enum intel_engine_id id;
struct intel_engine_cs *engine;
- reg &= ~GENMASK(11, 0);
+ offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) {
- if (engine->mmio_base == reg)
+ if (engine->mmio_base == offset)
return id;
}
- return -1;
+ return -ENODEV;
}
#define offset_to_fence_num(offset) \
@@ -1381,40 +1390,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1432,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ring_id;
+ u32 ring_base;
+
+ ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+	/*
+	 * Read the HW register in the following cases:
+	 *   a. the offset is not a ring MMIO;
+	 *   b. the ring the offset belongs to is currently owned by this vGPU;
+	 *   c. the offset is a ring timestamp MMIO.
+	 */
+ if (ring_id >= 0)
+ ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+ if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+ mmio_hw_access_pre(dev_priv);
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ mmio_hw_access_post(dev_priv);
+ }
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
@@ -1470,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1671,8 +1664,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2557,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
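Since intel_gvt_render_mmio_to_ring_id() is now shared between the ELSP/ring-mode handlers and mmio_read_from_hw(), a short worked example of the lookup may help; the offsets below are the usual gen8+ values and are given only for illustration:

	/* 0x2358 is RING_TIMESTAMP on the render engine (mmio_base 0x2000):      */
	/*   0x2358 & ~GENMASK(11, 0) == 0x2000 == engine->mmio_base  ->  RCS     */
	/* An offset outside every engine's 4KB window (e.g. 0x4ddc) now yields   */
	/* -ENODEV instead of the old bare -1; callers such as mmio_read_from_hw()*/
	/* only need to test for a negative value.                                 */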
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32cd64ddad26..dbc04ad2c7a1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+ unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f6ded475bb2c..69f8f0d155b9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
return i915_gem_context_force_single_submission(req->ctx);
}
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ i915_reg_t reg;
+
+ reg = RING_INSTDONE(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD_UDW(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -140,9 +154,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
@@ -150,7 +165,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
@@ -161,7 +176,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,13 +185,16 @@ static int shadow_context_status_change(struct notifier_block *nb,
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+ save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
+ case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+ save_ring_hw_state(workload->vgpu, ring_id);
+ break;
default:
WARN_ON(1);
return NOTIFY_OK;
@@ -253,7 +271,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -299,6 +316,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = populate_shadow_context(workload);
if (ret)
goto err_unpin;
+ workload->shadowed = true;
+ return 0;
+
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+ return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -313,14 +350,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
- workload->shadowed = true;
return 0;
err_unpin:
engine->context_unpin(engine, shadow_ctx);
-err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
return ret;
}
@@ -723,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
+ if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
+ vgpu->shadow_ctx->priority = INT_MAX;
+
vgpu->shadow_ctx->engine[RCS].initialised = true;
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
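To tie this back to the handlers.c change above, here is a condensed view of how the schedule-out snapshot pairs with the gated read path (conceptual only; both functions appear in full in the hunks above):

	/* schedule-out (scheduler.c): latch the engine state into the vregs */
	save_ring_hw_state(vgpu, ring_id);  /* RING_INSTDONE, RING_ACTHD, RING_ACTHD_UDW */

	/* later guest read (handlers.c): the vGPU no longer owns the engine, so
	 * mmio_read_from_hw() skips the live I915_READ() and the default read
	 * returns the latched vgpu_vreg(vgpu, offset) value instead. */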
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6c0907..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
#endif