author	Radim Krčmář	2017-04-11 20:54:40 +0200
committer	Radim Krčmář	2017-04-11 20:54:40 +0200
commit	f7b1a77d3bfbdc7e91a1e48e0a3dc10e7d890e04 (patch)
tree	f73da1077b7053b24a9af2d8e34b6180d5d5217e /arch/s390
parent	kvm: nVMX: Disallow userspace-injected exceptions in guest mode (diff)
parent	KVM: s390: introduce AIS capability (diff)
Merge tag 'kvm-s390-next-4.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux
From: Christian Borntraeger <borntraeger@de.ibm.com>

KVM: s390: features for 4.12

1. guarded storage support for guests
   This contains an s390 base Linux feature branch that is necessary
   to implement the KVM part
2. Provide an interface to implement adapter interruption suppression
   which is necessary for proper zPCI support
3. Use more defines instead of numbers
4. Provide logging for lazy enablement of runtime instrumentation
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/elf.h                   |   1
-rw-r--r--  arch/s390/include/asm/kvm_host.h              |  40
-rw-r--r--  arch/s390/include/asm/lowcore.h               |   9
-rw-r--r--  arch/s390/include/asm/nmi.h                   |  12
-rw-r--r--  arch/s390/include/asm/processor.h             |   5
-rw-r--r--  arch/s390/include/asm/setup.h                 |   2
-rw-r--r--  arch/s390/include/asm/switch_to.h             |   3
-rw-r--r--  arch/s390/include/asm/thread_info.h           |  12
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild             |   1
-rw-r--r--  arch/s390/include/uapi/asm/guarded_storage.h  |  77
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h              |  25
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h           |   2
-rw-r--r--  arch/s390/kernel/Makefile                     |   2
-rw-r--r--  arch/s390/kernel/asm-offsets.c                |   2
-rw-r--r--  arch/s390/kernel/compat_wrapper.c             |   1
-rw-r--r--  arch/s390/kernel/early.c                      |   2
-rw-r--r--  arch/s390/kernel/entry.S                      |  26
-rw-r--r--  arch/s390/kernel/entry.h                      |   2
-rw-r--r--  arch/s390/kernel/guarded_storage.c            | 128
-rw-r--r--  arch/s390/kernel/machine_kexec.c              |  13
-rw-r--r--  arch/s390/kernel/nmi.c                        |  19
-rw-r--r--  arch/s390/kernel/process.c                    |   7
-rw-r--r--  arch/s390/kernel/processor.c                  |   2
-rw-r--r--  arch/s390/kernel/ptrace.c                     |  86
-rw-r--r--  arch/s390/kernel/setup.c                      |  18
-rw-r--r--  arch/s390/kernel/smp.c                        |  43
-rw-r--r--  arch/s390/kernel/syscalls.S                   |   2
-rw-r--r--  arch/s390/kvm/gaccess.c                       |   6
-rw-r--r--  arch/s390/kvm/intercept.c                     |  24
-rw-r--r--  arch/s390/kvm/interrupt.c                     | 135
-rw-r--r--  arch/s390/kvm/kvm-s390.c                      | 127
-rw-r--r--  arch/s390/kvm/kvm-s390.h                      |   3
-rw-r--r--  arch/s390/kvm/priv.c                          |  31
-rw-r--r--  arch/s390/kvm/sthyi.c                         |   3
-rw-r--r--  arch/s390/kvm/trace-s390.h                    |  52
-rw-r--r--  arch/s390/kvm/vsie.c                          |  72
36 files changed, 881 insertions(+), 114 deletions(-)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1d48880b3cc1..e8f623041769 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -105,6 +105,7 @@
#define HWCAP_S390_VXRS 2048
#define HWCAP_S390_VXRS_BCD 4096
#define HWCAP_S390_VXRS_EXT 8192
+#define HWCAP_S390_GS 16384
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf34b034..552c319483c6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
+#include <asm/guarded_storage.h>
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
@@ -164,11 +165,21 @@ struct kvm_s390_sie_block {
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
+#define ECA_CEI 0x80000000
+#define ECA_IB 0x40000000
+#define ECA_SIGPI 0x10000000
+#define ECA_MVPGI 0x01000000
+#define ECA_VX 0x00020000
+#define ECA_PROTEXCI 0x00002000
+#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ 0x10
#define ICPT_EXTINT 0x14
+#define ICPT_IOREQ 0x18
+#define ICPT_WAIT 0x1c
#define ICPT_VALIDITY 0x20
#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
@@ -182,10 +193,19 @@ struct kvm_s390_sie_block {
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
__u8 reserved60; /* 0x0060 */
+#define ECB_GS 0x40
+#define ECB_TE 0x10
+#define ECB_SRSI 0x04
+#define ECB_HOSTPROTINT 0x02
__u8 ecb; /* 0x0061 */
+#define ECB2_CMMA 0x80
+#define ECB2_IEP 0x20
+#define ECB2_PFMFI 0x08
+#define ECB2_ESCA 0x04
__u8 ecb2; /* 0x0062 */
-#define ECB3_AES 0x04
#define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
@@ -219,11 +239,14 @@ struct kvm_s390_sie_block {
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
- __u8 reserved188[24]; /* 0x0188 */
+ __u8 reserved188[8]; /* 0x0188 */
+ __u64 sdnxo; /* 0x0190 */
+ __u8 reserved198[8]; /* 0x0198 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
__u8 reserved1c0[8]; /* 0x01c0 */
+#define ECD_HOSTREGMGMT 0x20000000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -498,6 +521,12 @@ struct kvm_s390_local_interrupt {
#define FIRQ_CNTR_PFAULT 3
#define FIRQ_MAX_COUNT 4
+/* mask the AIS mode for a given ISC */
+#define AIS_MODE_MASK(isc) (0x80 >> isc)
+
+#define KVM_S390_AIS_MODE_ALL 0
+#define KVM_S390_AIS_MODE_SINGLE 1
+
struct kvm_s390_float_interrupt {
unsigned long pending_irqs;
spinlock_t lock;
@@ -507,6 +536,10 @@ struct kvm_s390_float_interrupt {
struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct mutex ais_lock;
+ u8 simm;
+ u8 nimm;
+ int ais_enabled;
};
struct kvm_hw_wp_info_arch {
@@ -554,6 +587,7 @@ struct kvm_vcpu_arch {
/* if vsie is active, currently executed shadow sie control block */
struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
+ struct gs_cb *host_gscb;
struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
@@ -574,6 +608,7 @@ struct kvm_vcpu_arch {
*/
seqcount_t cputm_seqcount;
__u64 cputm_start;
+ bool gs_enabled;
};
struct kvm_vm_stat {
@@ -596,6 +631,7 @@ struct s390_io_adapter {
bool maskable;
bool masked;
bool swap;
+ bool suppressible;
struct rw_semaphore maps_lock;
struct list_head maps;
atomic_t nr_maps;
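The two new bitmaps, simm and nimm, track the adapter-interruption-suppression state per interruption subclass (ISC): a set simm bit means the ISC runs in single-interruption mode, a set nimm bit means further interrupts for that ISC are currently suppressed. A minimal stand-alone sketch of the bit encoding (the helper name is made up; only the 0x80 >> isc formula comes from this patch):

#include <stdio.h>

static unsigned char ais_mode_mask(unsigned char isc)
{
	return 0x80 >> isc;		/* same formula as AIS_MODE_MASK(isc) */
}

int main(void)
{
	unsigned char simm = 0, nimm = 0;

	simm |= ais_mode_mask(3);	/* ISC 3 -> single-interruption mode */
	nimm |= ais_mode_mask(3);	/* ...with delivery currently suppressed */
	printf("simm=%#x nimm=%#x\n", simm, nimm);	/* simm=0x10 nimm=0x10 */
	return 0;
}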
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 61261e0e95c0..8a5b082797f8 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -157,8 +157,8 @@ struct lowcore {
__u64 stfle_fac_list[32]; /* 0x0f00 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
- /* Pointer to vector register save area */
- __u64 vector_save_area_addr; /* 0x11b0 */
+ /* Pointer to the machine check extended save area */
+ __u64 mcesad; /* 0x11b0 */
/* 64 bit extparam used for pfault/diag 250: defined by architecture */
__u64 ext_params2; /* 0x11B8 */
@@ -182,10 +182,7 @@ struct lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
- __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
-
- /* Software defined save area for vector registers */
- __u8 vector_save_area[1024]; /* 0x1c00 */
+ __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed;
#define S390_lowcore (*((struct lowcore *) 0))
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index b75fd910386a..e3e8895f5d3e 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -58,7 +58,9 @@ union mci {
u64 ie : 1; /* 32 indirect storage error */
u64 ar : 1; /* 33 access register validity */
u64 da : 1; /* 34 delayed access exception */
- u64 : 7; /* 35-41 */
+ u64 : 1; /* 35 */
+ u64 gs : 1; /* 36 guarded storage registers */
+ u64 : 5; /* 37-41 */
u64 pr : 1; /* 42 tod programmable register validity */
u64 fc : 1; /* 43 fp control register validity */
u64 ap : 1; /* 44 ancillary report */
@@ -69,6 +71,14 @@ union mci {
};
};
+#define MCESA_ORIGIN_MASK (~0x3ffUL)
+#define MCESA_LC_MASK (0xfUL)
+
+struct mcesa {
+ u8 vector_save_area[1024];
+ u8 guarded_storage_save_area[32];
+};
+
struct pt_regs;
extern void s390_handle_mcck(void);
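With the guarded storage facility the machine-check extended save area grows to 2K (a 1K vector save area followed by the 32-byte guarded storage save area), and the low bits of the lowcore mcesad field carry a length code (LC) instead of being all zero. A small stand-alone sketch of the origin/LC split, using a made-up mcesad value:

#include <stdio.h>

#define MCESA_ORIGIN_MASK	(~0x3ffUL)
#define MCESA_LC_MASK		(0xfUL)

int main(void)
{
	unsigned long mcesad = 0x12345800UL | 11;	/* hypothetical value */
	unsigned long origin = mcesad & MCESA_ORIGIN_MASK;
	unsigned long lc = mcesad & MCESA_LC_MASK;

	/* LC 11 -> 2K area: vector save area plus guarded storage area */
	printf("origin=%#lx size=%lu\n", origin, lc ? 1UL << lc : 1024);
	return 0;
}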
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e4988710aa86..cc101f9371cb 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -135,6 +135,8 @@ struct thread_struct {
struct list_head list;
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
+ struct gs_cb *gs_cb; /* Current guarded storage cb */
+ struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
@@ -215,6 +217,9 @@ void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
+/* Free guarded storage control block for current */
+void exit_thread_gs(void);
+
/*
* Return saved PC of a blocked thread.
*/
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 30bdb5a027f3..383bd8358a8c 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -31,6 +31,7 @@
#define MACHINE_FLAG_VX _BITUL(13)
#define MACHINE_FLAG_CAD _BITUL(14)
#define MACHINE_FLAG_NX _BITUL(15)
+#define MACHINE_FLAG_GS _BITUL(16)
#define LPP_MAGIC _BITUL(31)
#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
@@ -70,6 +71,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
+#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
/*
* Console mode. Override with conmode=
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 12d45f0cfdd9..f6c2b5814ab0 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,6 +10,7 @@
#include <linux/thread_info.h>
#include <asm/fpu/api.h>
#include <asm/ptrace.h>
+#include <asm/guarded_storage.h>
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
@@ -33,12 +34,14 @@ static inline void restore_access_regs(unsigned int *acrs)
save_fpu_regs(); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
+ save_gs_cb(prev->thread.gs_cb); \
} \
if (next->mm) { \
update_cr_regs(next); \
set_cpu_flag(CIF_FPU); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
+ restore_gs_cb(next->thread.gs_cb); \
} \
prev = __switch_to(prev,next); \
} while (0)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a5b54a445eb8..f36e6e2b73f0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -54,11 +54,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
-#define TIF_SECCOMP 5 /* secure computing */
-#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_UPROBE 3 /* breakpointed or single-stepping */
+#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
+#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
+#define TIF_SECCOMP 10 /* secure computing */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -76,5 +77,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
+#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 6848ba5c1454..86b761e583e3 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += dasd.h
header-y += debug.h
header-y += errno.h
header-y += fcntl.h
+header-y += guarded_storage.h
header-y += hypfs.h
header-y += ioctl.h
header-y += ioctls.h
diff --git a/arch/s390/include/uapi/asm/guarded_storage.h b/arch/s390/include/uapi/asm/guarded_storage.h
new file mode 100644
index 000000000000..852850e8e17e
--- /dev/null
+++ b/arch/s390/include/uapi/asm/guarded_storage.h
@@ -0,0 +1,77 @@
+#ifndef _GUARDED_STORAGE_H
+#define _GUARDED_STORAGE_H
+
+#include <linux/types.h>
+
+struct gs_cb {
+ __u64 reserved;
+ __u64 gsd;
+ __u64 gssm;
+ __u64 gs_epl_a;
+};
+
+struct gs_epl {
+ __u8 pad1;
+ union {
+ __u8 gs_eam;
+ struct {
+ __u8 : 6;
+ __u8 e : 1;
+ __u8 b : 1;
+ };
+ };
+ union {
+ __u8 gs_eci;
+ struct {
+ __u8 tx : 1;
+ __u8 cx : 1;
+ __u8 : 5;
+ __u8 in : 1;
+ };
+ };
+ union {
+ __u8 gs_eai;
+ struct {
+ __u8 : 1;
+ __u8 t : 1;
+ __u8 as : 2;
+ __u8 ar : 4;
+ };
+ };
+ __u32 pad2;
+ __u64 gs_eha;
+ __u64 gs_eia;
+ __u64 gs_eoa;
+ __u64 gs_eir;
+ __u64 gs_era;
+};
+
+#define GS_ENABLE 0
+#define GS_DISABLE 1
+#define GS_SET_BC_CB 2
+#define GS_CLEAR_BC_CB 3
+#define GS_BROADCAST 4
+
+static inline void load_gs_cb(struct gs_cb *gs_cb)
+{
+ asm volatile(".insn rxy,0xe3000000004d,0,%0" : : "Q" (*gs_cb));
+}
+
+static inline void store_gs_cb(struct gs_cb *gs_cb)
+{
+ asm volatile(".insn rxy,0xe30000000049,0,%0" : : "Q" (*gs_cb));
+}
+
+static inline void save_gs_cb(struct gs_cb *gs_cb)
+{
+ if (gs_cb)
+ store_gs_cb(gs_cb);
+}
+
+static inline void restore_gs_cb(struct gs_cb *gs_cb)
+{
+ if (gs_cb)
+ load_gs_cb(gs_cb);
+}
+
+#endif /* _GUARDED_STORAGE_H */
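For reference, the control block above is driven from user space through the new system call (number 378, wired up in unistd.h and syscalls.S below). A hedged sketch of basic enable/disable; the command values and syscall number come from this patch, everything else is illustrative:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_s390_guarded_storage	378	/* from uapi/asm/unistd.h */
#define GS_ENABLE			0	/* from uapi/asm/guarded_storage.h */
#define GS_DISABLE			1

int main(void)
{
	/* GS_ENABLE allocates and loads a zeroed gs_cb for the calling
	 * thread; errno is EOPNOTSUPP if facility 133 is not installed. */
	if (syscall(__NR_s390_guarded_storage, GS_ENABLE, NULL) != 0) {
		perror("GS_ENABLE");
		return 1;
	}
	/* ... the application sets up its guard designation and uses
	 * guarded-storage loads here ... */
	return syscall(__NR_s390_guarded_storage, GS_DISABLE, NULL);
}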
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4139ad..2c9ad251fa33 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -26,6 +26,8 @@
#define KVM_DEV_FLIC_ADAPTER_REGISTER 6
#define KVM_DEV_FLIC_ADAPTER_MODIFY 7
#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8
+#define KVM_DEV_FLIC_AISM 9
+#define KVM_DEV_FLIC_AIRQ_INJECT 10
/*
* We can have up to 4*64k pending subchannels + 8 adapter interrupts,
* as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
__u8 isc;
__u8 maskable;
__u8 swap;
- __u8 pad;
+ __u8 flags;
+};
+
+#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
+
+struct kvm_s390_ais_req {
+ __u8 isc;
+ __u16 mode;
};
#define KVM_S390_IO_ADAPTER_MASK 1
@@ -197,6 +206,10 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
+#define KVM_SYNC_GSCB (1UL << 9)
+/* length and alignment of the sdnx as a power of two */
+#define SDNXC 8
+#define SDNXL (1UL << SDNXC)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -217,8 +230,16 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding[52]; /* riccb needs to be 64byte aligned */
+ __u8 padding1[52]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
+ __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
+ union {
+ __u8 sdnx[SDNXL]; /* state description annex */
+ struct {
+ __u64 reserved1[2];
+ __u64 gscb[4];
+ };
+ };
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
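The two new FLIC groups are exercised with KVM_SET_DEVICE_ATTR on a FLIC device fd: KVM_DEV_FLIC_AISM takes a kvm_s390_ais_req through attr.addr, KVM_DEV_FLIC_AIRQ_INJECT takes the adapter id in attr.attr. A hedged user-space sketch (the mode constants mirror the kernel-internal asm/kvm_host.h values and are redefined here because they are not part of the UAPI):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define KVM_S390_AIS_MODE_ALL		0	/* mirrors asm/kvm_host.h */
#define KVM_S390_AIS_MODE_SINGLE	1

/* Put ISC 3 into single-interruption mode, then inject on adapter 0.
 * flic_fd is assumed to come from KVM_CREATE_DEVICE(KVM_DEV_TYPE_FLIC). */
static int ais_single_and_inject(int flic_fd)
{
	struct kvm_s390_ais_req req = { .isc = 3, .mode = KVM_S390_AIS_MODE_SINGLE };
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_FLIC_AISM;
	attr.addr = (uint64_t)(uintptr_t)&req;
	if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_FLIC_AIRQ_INJECT;
	attr.attr = 0;			/* adapter id, see flic_inject_airq() */
	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}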
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 152de9b796e1..ea42290e7d51 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -313,7 +313,7 @@
#define __NR_copy_file_range 375
#define __NR_preadv2 376
#define __NR_pwritev2 377
-/* Number 378 is reserved for guarded storage */
+#define __NR_s390_guarded_storage 378
#define __NR_statx 379
#define NR_syscalls 380
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 060ce548fe8b..aa5adbdaf200 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -57,7 +57,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
+obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
obj-y += entry.o reipl.o relocate_kernel.o
extra-y += head.o head64.o vmlinux.lds
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c4b3570ded5b..6bb29633e1f1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -175,7 +175,7 @@ int main(void)
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
- OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr);
+ OFFSET(__LC_MCESAD, lowcore, mcesad);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index e89cc2e71db1..986642a3543b 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -178,4 +178,5 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4e65c79cc5f2..95298a41076f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -358,6 +358,8 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
__ctl_set_bit(0, 20);
}
+ if (test_facility(133))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
}
static inline void save_vector_registers(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6a7d737d514c..fa8b8f28e08b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -47,7 +47,7 @@ STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_UPROBE)
+ _TIF_UPROBE | _TIF_GUARDED_STORAGE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
@@ -332,6 +332,8 @@ ENTRY(system_call)
TSTMSK __TI_flags(%r12),_TIF_UPROBE
jo .Lsysc_uprobe_notify
#endif
+ TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
+ jo .Lsysc_guarded_storage
TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
@@ -409,6 +411,14 @@ ENTRY(system_call)
#endif
#
+# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
+#
+.Lsysc_guarded_storage:
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg gs_load_bc_cb
+
+#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
@@ -663,6 +673,8 @@ ENTRY(io_int_handler)
jo .Lio_sigpending
TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
+ TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
+ jo .Lio_guarded_storage
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
@@ -697,6 +709,18 @@ ENTRY(io_int_handler)
jg load_fpu_regs
#
+# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
+#
+.Lio_guarded_storage:
+ # TRACE_IRQS_ON already done at .Lio_return
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,gs_load_bc_cb
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
+ TRACE_IRQS_OFF
+ j .Lio_return
+
+#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 33f901865326..dbf5f7e18246 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -74,12 +74,14 @@ long sys_sigreturn(void);
long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
+long sys_s390_guarded_storage(int command, struct gs_cb __user *);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
void verify_facilities(void);
+void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
new file mode 100644
index 000000000000..6f064745c3b1
--- /dev/null
+++ b/arch/s390/kernel/guarded_storage.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/guarded_storage.h>
+#include "entry.h"
+
+void exit_thread_gs(void)
+{
+ kfree(current->thread.gs_cb);
+ kfree(current->thread.gs_bc_cb);
+ current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
+}
+
+static int gs_enable(void)
+{
+ struct gs_cb *gs_cb;
+
+ if (!current->thread.gs_cb) {
+ gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
+ if (!gs_cb)
+ return -ENOMEM;
+ gs_cb->gsd = 25;
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ load_gs_cb(gs_cb);
+ current->thread.gs_cb = gs_cb;
+ preempt_enable();
+ }
+ return 0;
+}
+
+static int gs_disable(void)
+{
+ if (current->thread.gs_cb) {
+ preempt_disable();
+ kfree(current->thread.gs_cb);
+ current->thread.gs_cb = NULL;
+ __ctl_clear_bit(2, 4);
+ preempt_enable();
+ }
+ return 0;
+}
+
+static int gs_set_bc_cb(struct gs_cb __user *u_gs_cb)
+{
+ struct gs_cb *gs_cb;
+
+ gs_cb = current->thread.gs_bc_cb;
+ if (!gs_cb) {
+ gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
+ if (!gs_cb)
+ return -ENOMEM;
+ current->thread.gs_bc_cb = gs_cb;
+ }
+ if (copy_from_user(gs_cb, u_gs_cb, sizeof(*gs_cb)))
+ return -EFAULT;
+ return 0;
+}
+
+static int gs_clear_bc_cb(void)
+{
+ struct gs_cb *gs_cb;
+
+ gs_cb = current->thread.gs_bc_cb;
+ current->thread.gs_bc_cb = NULL;
+ kfree(gs_cb);
+ return 0;
+}
+
+void gs_load_bc_cb(struct pt_regs *regs)
+{
+ struct gs_cb *gs_cb;
+
+ preempt_disable();
+ clear_thread_flag(TIF_GUARDED_STORAGE);
+ gs_cb = current->thread.gs_bc_cb;
+ if (gs_cb) {
+ kfree(current->thread.gs_cb);
+ current->thread.gs_bc_cb = NULL;
+ __ctl_set_bit(2, 4);
+ load_gs_cb(gs_cb);
+ current->thread.gs_cb = gs_cb;
+ }
+ preempt_enable();
+}
+
+static int gs_broadcast(void)
+{
+ struct task_struct *sibling;
+
+ read_lock(&tasklist_lock);
+ for_each_thread(current, sibling) {
+ if (!sibling->thread.gs_bc_cb)
+ continue;
+ if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE))
+ kick_process(sibling);
+ }
+ read_unlock(&tasklist_lock);
+ return 0;
+}
+
+SYSCALL_DEFINE2(s390_guarded_storage, int, command,
+ struct gs_cb __user *, gs_cb)
+{
+ if (!MACHINE_HAS_GS)
+ return -EOPNOTSUPP;
+ switch (command) {
+ case GS_ENABLE:
+ return gs_enable();
+ case GS_DISABLE:
+ return gs_disable();
+ case GS_SET_BC_CB:
+ return gs_set_bc_cb(gs_cb);
+ case GS_CLEAR_BC_CB:
+ return gs_clear_bc_cb();
+ case GS_BROADCAST:
+ return gs_broadcast();
+ default:
+ return -EINVAL;
+ }
+}
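GS_SET_BC_CB registers a per-thread broadcast control block, and GS_BROADCAST makes every sibling thread that has one registered load it on its next return to user space (via TIF_GUARDED_STORAGE and gs_load_bc_cb() above). A hedged sketch of the calling side (struct layout from the UAPI header; the field values are illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/guarded_storage.h>	/* struct gs_cb, GS_* (this patch) */

#ifndef __NR_s390_guarded_storage
#define __NR_s390_guarded_storage 378
#endif

/* Register a broadcast gs_cb and kick sibling threads that have a
 * broadcast block registered into reloading theirs. */
static int broadcast_gs_cb(unsigned long gsd, unsigned long gssm)
{
	struct gs_cb cb;

	memset(&cb, 0, sizeof(cb));
	cb.gsd = gsd;		/* guarded-storage designation (illustrative) */
	cb.gssm = gssm;		/* guarded-storage section mask (illustrative) */
	if (syscall(__NR_s390_guarded_storage, GS_SET_BC_CB, &cb))
		return -1;
	return syscall(__NR_s390_guarded_storage, GS_BROADCAST, NULL);
}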
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3074c1d83829..db5658daf994 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,6 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
+#include <asm/nmi.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
@@ -102,6 +103,8 @@ static void __do_machine_kdump(void *image)
*/
static noinline void __machine_kdump(void *image)
{
+ struct mcesa *mcesa;
+ unsigned long cr2_old, cr2_new;
int this_cpu, cpu;
lgr_info_log();
@@ -114,8 +117,16 @@ static noinline void __machine_kdump(void *image)
continue;
}
/* Store status of the boot CPU */
+ mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_VX)
- save_vx_regs((void *) &S390_lowcore.vector_save_area);
+ save_vx_regs((__vector128 *) mcesa->vector_save_area);
+ if (MACHINE_HAS_GS) {
+ __ctl_store(cr2_old, 2, 2);
+ cr2_new = cr2_old | (1UL << 4);
+ __ctl_load(cr2_new, 2, 2);
+ save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
+ __ctl_load(cr2_old, 2, 2);
+ }
/*
* To create a good backchain for this CPU in the dump store_status
* is passed the address of a function. The address is saved into
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 9bf8327154ee..985589523970 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -106,6 +106,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
int kill_task;
u64 zero;
void *fpt_save_area;
+ struct mcesa *mcesa;
kill_task = 0;
zero = 0;
@@ -165,6 +166,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
: : "Q" (S390_lowcore.fpt_creg_save_area));
}
+ mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
asm volatile(
@@ -209,8 +211,8 @@ static int notrace s390_validate_registers(union mci mci, int umode)
" la 1,%0\n"
" .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
- : : "Q" (*(struct vx_array *)
- &S390_lowcore.vector_save_area) : "1");
+ : : "Q" (*(struct vx_array *) mcesa->vector_save_area)
+ : "1");
__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Validate access registers */
@@ -224,6 +226,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
+ /* Validate guarded storage registers */
+ if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) {
+ if (!mci.gs)
+ /*
+			 * Guarded storage registers can't be restored and
+			 * the current process uses guarded storage.
+ * It has to be terminated.
+ */
+ kill_task = 1;
+ else
+ load_gs_cb((struct gs_cb *)
+ mcesa->guarded_storage_save_area);
+ }
/*
* We don't even try to validate the TOD register, since we simply
* can't write something sensible into that register.
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f29e41c5e2ec..999d7154bbdc 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -73,8 +73,10 @@ extern void kernel_thread_starter(void);
*/
void exit_thread(struct task_struct *tsk)
{
- if (tsk == current)
+ if (tsk == current) {
exit_thread_runtime_instr();
+ exit_thread_gs();
+ }
}
void flush_thread(void)
@@ -159,6 +161,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
/* Don't copy runtime instrumentation info */
p->thread.ri_cb = NULL;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
+ /* Don't copy guarded storage control block */
+ p->thread.gs_cb = NULL;
+ p->thread.gs_bc_cb = NULL;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 928b929a6261..c73709869447 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -95,7 +95,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe"
+ "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
};
static const char * const int_hwcap_str[] = {
"sie"
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index c14df0a1ec3c..c933e255b5d5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,30 +44,42 @@ void update_cr_regs(struct task_struct *task)
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
-
+ unsigned long cr0_old, cr0_new;
+ unsigned long cr2_old, cr2_new;
+ int cr0_changed, cr2_changed;
+
+ __ctl_store(cr0_old, 0, 0);
+ __ctl_store(cr2_old, 2, 2);
+ cr0_new = cr0_old;
+ cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
- unsigned long cr, cr_new;
-
- __ctl_store(cr, 0, 0);
/* Set or clear transaction execution TXC bit 8. */
- cr_new = cr | (1UL << 55);
+ cr0_new |= (1UL << 55);
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr_new &= ~(1UL << 55);
- if (cr_new != cr)
- __ctl_load(cr_new, 0, 0);
+ cr0_new &= ~(1UL << 55);
/* Set or clear transaction execution TDC bits 62 and 63. */
- __ctl_store(cr, 2, 2);
- cr_new = cr & ~3UL;
+ cr2_new &= ~3UL;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
- cr_new |= 1UL;
+ cr2_new |= 1UL;
else
- cr_new |= 2UL;
+ cr2_new |= 2UL;
}
- if (cr_new != cr)
- __ctl_load(cr_new, 2, 2);
}
+ /* Take care of enable/disable of guarded storage. */
+ if (MACHINE_HAS_GS) {
+ cr2_new &= ~(1UL << 4);
+ if (task->thread.gs_cb)
+ cr2_new |= (1UL << 4);
+ }
+ /* Load control register 0/2 iff changed */
+ cr0_changed = cr0_new != cr0_old;
+ cr2_changed = cr2_new != cr2_old;
+ if (cr0_changed)
+ __ctl_load(cr0_new, 0, 0);
+ if (cr2_changed)
+ __ctl_load(cr2_new, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -1137,6 +1149,36 @@ static int s390_system_call_set(struct task_struct *target,
data, 0, sizeof(unsigned int));
}
+static int s390_gs_cb_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct gs_cb *data = target->thread.gs_cb;
+
+ if (!MACHINE_HAS_GS)
+ return -ENODEV;
+ if (!data)
+ return -ENODATA;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ data, 0, sizeof(struct gs_cb));
+}
+
+static int s390_gs_cb_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct gs_cb *data = target->thread.gs_cb;
+
+ if (!MACHINE_HAS_GS)
+ return -ENODEV;
+ if (!data)
+ return -ENODATA;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ data, 0, sizeof(struct gs_cb));
+}
+
static const struct user_regset s390_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
@@ -1194,6 +1236,14 @@ static const struct user_regset s390_regsets[] = {
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
+ {
+ .core_note_type = NT_S390_GS_CB,
+ .n = sizeof(struct gs_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_gs_cb_get,
+ .set = s390_gs_cb_set,
+ },
};
static const struct user_regset_view user_s390_view = {
@@ -1422,6 +1472,14 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_compat_regs_high_get,
.set = s390_compat_regs_high_set,
},
+ {
+ .core_note_type = NT_S390_GS_CB,
+ .n = sizeof(struct gs_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_gs_cb_get,
+ .set = s390_gs_cb_set,
+ },
};
static const struct user_regset_view user_s390_compat_view = {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 911dc0b49be0..3ae756c0db3d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -339,9 +339,15 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
- if (MACHINE_HAS_VX)
- lc->vector_save_area_addr =
- (unsigned long) &lc->vector_save_area;
+ if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+ unsigned long bits, size;
+
+ bits = MACHINE_HAS_GS ? 11 : 10;
+ size = 1UL << bits;
+ lc->mcesad = (__u64) memblock_virt_alloc(size, size);
+ if (MACHINE_HAS_GS)
+ lc->mcesad |= bits;
+ }
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
@@ -779,6 +785,12 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_VXRS_BCD;
}
+ /*
+	 * Guarded storage support HWCAP_S390_GS is bit 14.
+ */
+ if (MACHINE_HAS_GS)
+ elf_hwcap |= HWCAP_S390_GS;
+
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..286bcee800f4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -51,6 +51,7 @@
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
+#include <asm/nmi.h>
#include "entry.h"
enum {
@@ -78,6 +79,8 @@ struct pcpu {
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];
+static struct kmem_cache *pcpu_mcesa_cache;
+
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);
@@ -188,8 +191,10 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
+ unsigned long mcesa_origin, mcesa_bits;
struct lowcore *lc;
+ mcesa_origin = mcesa_bits = 0;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -197,20 +202,27 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
panic_stack = __get_free_page(GFP_KERNEL);
if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
+ if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+ mcesa_origin = (unsigned long)
+ kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
+ if (!mcesa_origin)
+ goto out;
+ mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
+ }
} else {
async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
+ mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
+ mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+ lc->mcesad = mcesa_origin | mcesa_bits;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
- if (MACHINE_HAS_VX)
- lc->vector_save_area_addr =
- (unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
lowcore_ptr[cpu] = lc;
@@ -218,6 +230,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
return 0;
out:
if (pcpu != &pcpu_devices[0]) {
+ if (mcesa_origin)
+ kmem_cache_free(pcpu_mcesa_cache,
+ (void *) mcesa_origin);
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -229,11 +244,17 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
+ unsigned long mcesa_origin;
+
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
if (pcpu == &pcpu_devices[0])
return;
+ if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+ mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
+ kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
+ }
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -550,9 +571,11 @@ int smp_store_status(int cpu)
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
- if (!MACHINE_HAS_VX)
+ if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
- pa = __pa(pcpu->lowcore->vector_save_area_addr);
+ pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
+ if (MACHINE_HAS_GS)
+ pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -897,12 +920,22 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ unsigned long size;
+
/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
+ /* create slab cache for the machine-check-extended-save-areas */
+ if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+ size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
+ pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
+ size, size, 0, NULL);
+ if (!pcpu_mcesa_cache)
+ panic("Couldn't create nmi save area cache");
+ }
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 2659b5cfeddb..54fce7b065de 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -386,5 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
-NI_SYSCALL
+SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
SYSCALL(sys_statx,compat_sys_statx)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..709aca9ceb05 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -262,7 +262,7 @@ struct aste {
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1) {
+ if (vcpu->arch.sie_block->eca & ECA_SII) {
int rc;
read_lock(&vcpu->kvm->arch.sca_lock);
@@ -361,7 +361,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
void ipte_lock(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1)
+ if (vcpu->arch.sie_block->eca & ECA_SII)
ipte_lock_siif(vcpu);
else
ipte_lock_simple(vcpu);
@@ -369,7 +369,7 @@ void ipte_lock(struct kvm_vcpu *vcpu)
void ipte_unlock(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1)
+ if (vcpu->arch.sie_block->eca & ECA_SII)
ipte_unlock_siif(vcpu);
else
ipte_unlock_simple(vcpu);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 59920f96ebc0..f5378f336127 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -35,6 +35,7 @@ static const intercept_handler_t instruction_handlers[256] = {
[0xb6] = kvm_s390_handle_stctl,
[0xb7] = kvm_s390_handle_lctl,
[0xb9] = kvm_s390_handle_b9,
+ [0xe3] = kvm_s390_handle_e3,
[0xe5] = kvm_s390_handle_e5,
[0xeb] = kvm_s390_handle_eb,
};
@@ -368,8 +369,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
- if (vcpu->arch.sie_block->ipa == 0xb256 &&
- test_kvm_facility(vcpu->kvm, 74))
+ if (vcpu->arch.sie_block->ipa == 0xb256)
return handle_sthyi(vcpu);
if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
@@ -404,26 +404,26 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
switch (vcpu->arch.sie_block->icptcode) {
- case 0x10:
- case 0x18:
+ case ICPT_EXTREQ:
+ case ICPT_IOREQ:
return handle_noop(vcpu);
- case 0x04:
+ case ICPT_INST:
rc = handle_instruction(vcpu);
break;
- case 0x08:
+ case ICPT_PROGI:
return handle_prog(vcpu);
- case 0x14:
+ case ICPT_EXTINT:
return handle_external_interrupt(vcpu);
- case 0x1c:
+ case ICPT_WAIT:
return kvm_s390_handle_wait(vcpu);
- case 0x20:
+ case ICPT_VALIDITY:
return handle_validity(vcpu);
- case 0x28:
+ case ICPT_STOP:
return handle_stop(vcpu);
- case 0x2c:
+ case ICPT_OPEREXC:
rc = handle_operexc(vcpu);
break;
- case 0x38:
+ case ICPT_PARTEXEC:
rc = handle_partial_execution(vcpu);
break;
default:
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0f8f14199734..482673e3436d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -410,6 +410,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
struct kvm_s390_mchk_info *mchk)
{
unsigned long ext_sa_addr;
+ unsigned long lc;
freg_t fprs[NUM_FPRS];
union mci mci;
int rc;
@@ -420,10 +421,30 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
save_access_regs(vcpu->run->s.regs.acrs);
/* Extended save area */
- rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
- sizeof(unsigned long));
- /* Only bits 0-53 are used for address formation */
- ext_sa_addr &= ~0x3ffUL;
+ rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
+ sizeof(unsigned long));
+ /* Only bits 0 through 63-LC are used for address formation */
+ lc = ext_sa_addr & MCESA_LC_MASK;
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ switch (lc) {
+ case 0:
+ case 10:
+ ext_sa_addr &= ~0x3ffUL;
+ break;
+ case 11:
+ ext_sa_addr &= ~0x7ffUL;
+ break;
+ case 12:
+ ext_sa_addr &= ~0xfffUL;
+ break;
+ default:
+ ext_sa_addr = 0;
+ break;
+ }
+ } else {
+ ext_sa_addr &= ~0x3ffUL;
+ }
+
if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
512))
@@ -431,6 +452,14 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
} else {
mci.vr = 0;
}
+ if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
+ && (lc == 11 || lc == 12)) {
+ if (write_guest_abs(vcpu, ext_sa_addr + 1024,
+ &vcpu->run->s.regs.gscb, 32))
+ mci.gs = 0;
+ } else {
+ mci.gs = 0;
+ }
/* General interruption information */
rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
@@ -1968,6 +1997,8 @@ static int register_io_adapter(struct kvm_device *dev,
adapter->maskable = adapter_info.maskable;
adapter->masked = false;
adapter->swap = adapter_info.swap;
+ adapter->suppressible = (adapter_info.flags) &
+ KVM_S390_ADAPTER_SUPPRESSIBLE;
dev->kvm->arch.adapters[adapter->id] = adapter;
return 0;
@@ -2121,6 +2152,87 @@ static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
return 0;
}
+static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_ais_req req;
+ int ret = 0;
+
+ if (!fi->ais_enabled)
+ return -ENOTSUPP;
+
+ if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
+ return -EFAULT;
+
+ if (req.isc > MAX_ISC)
+ return -EINVAL;
+
+ trace_kvm_s390_modify_ais_mode(req.isc,
+ (fi->simm & AIS_MODE_MASK(req.isc)) ?
+ (fi->nimm & AIS_MODE_MASK(req.isc)) ?
+ 2 : KVM_S390_AIS_MODE_SINGLE :
+ KVM_S390_AIS_MODE_ALL, req.mode);
+
+ mutex_lock(&fi->ais_lock);
+ switch (req.mode) {
+ case KVM_S390_AIS_MODE_ALL:
+ fi->simm &= ~AIS_MODE_MASK(req.isc);
+ fi->nimm &= ~AIS_MODE_MASK(req.isc);
+ break;
+ case KVM_S390_AIS_MODE_SINGLE:
+ fi->simm |= AIS_MODE_MASK(req.isc);
+ fi->nimm &= ~AIS_MODE_MASK(req.isc);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&fi->ais_lock);
+
+ return ret;
+}
+
+static int kvm_s390_inject_airq(struct kvm *kvm,
+ struct s390_io_adapter *adapter)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_interrupt s390int = {
+ .type = KVM_S390_INT_IO(1, 0, 0, 0),
+ .parm = 0,
+ .parm64 = (adapter->isc << 27) | 0x80000000,
+ };
+ int ret = 0;
+
+ if (!fi->ais_enabled || !adapter->suppressible)
+ return kvm_s390_inject_vm(kvm, &s390int);
+
+ mutex_lock(&fi->ais_lock);
+ if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
+ trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
+ goto out;
+ }
+
+ ret = kvm_s390_inject_vm(kvm, &s390int);
+ if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
+ fi->nimm |= AIS_MODE_MASK(adapter->isc);
+ trace_kvm_s390_modify_ais_mode(adapter->isc,
+ KVM_S390_AIS_MODE_SINGLE, 2);
+ }
+out:
+ mutex_unlock(&fi->ais_lock);
+ return ret;
+}
+
+static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ unsigned int id = attr->attr;
+ struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+
+ if (!adapter)
+ return -EINVAL;
+
+ return kvm_s390_inject_airq(kvm, adapter);
+}
+
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = 0;
@@ -2157,6 +2269,12 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
r = clear_io_irq(dev->kvm, attr);
break;
+ case KVM_DEV_FLIC_AISM:
+ r = modify_ais_mode(dev->kvm, attr);
+ break;
+ case KVM_DEV_FLIC_AIRQ_INJECT:
+ r = flic_inject_airq(dev->kvm, attr);
+ break;
default:
r = -EINVAL;
}
@@ -2176,6 +2294,8 @@ static int flic_has_attr(struct kvm_device *dev,
case KVM_DEV_FLIC_ADAPTER_REGISTER:
case KVM_DEV_FLIC_ADAPTER_MODIFY:
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
+ case KVM_DEV_FLIC_AISM:
+ case KVM_DEV_FLIC_AIRQ_INJECT:
return 0;
}
return -ENXIO;
@@ -2286,12 +2406,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
ret = adapter_indicators_set(kvm, adapter, &e->adapter);
up_read(&adapter->maps_lock);
if ((ret > 0) && !adapter->masked) {
- struct kvm_s390_interrupt s390int = {
- .type = KVM_S390_INT_IO(1, 0, 0, 0),
- .parm = 0,
- .parm64 = (adapter->isc << 27) | 0x80000000,
- };
- ret = kvm_s390_inject_vm(kvm, &s390int);
+ ret = kvm_s390_inject_airq(kvm, adapter);
if (ret == 0)
ret = 1;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fd6cd05bb6a7..11b7d6638991 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -380,6 +380,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
case KVM_CAP_S390_USER_INSTR0:
+ case KVM_CAP_S390_AIS:
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -405,6 +406,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_RI:
r = test_facility(64);
break;
+ case KVM_CAP_S390_GS:
+ r = test_facility(133);
+ break;
default:
r = 0;
}
@@ -541,6 +545,34 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_AIS:
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus) {
+ r = -EBUSY;
+ } else {
+ set_kvm_facility(kvm->arch.model.fac_mask, 72);
+ set_kvm_facility(kvm->arch.model.fac_list, 72);
+ kvm->arch.float_int.ais_enabled = 1;
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: AIS %s",
+ r ? "(not available)" : "(success)");
+ break;
+ case KVM_CAP_S390_GS:
+ r = -EINVAL;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ r = -EBUSY;
+ } else if (test_facility(133)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 133);
+ set_kvm_facility(kvm->arch.model.fac_list, 133);
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
@@ -1498,6 +1530,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_crypto_init(kvm);
+ mutex_init(&kvm->arch.float_int.ais_lock);
+ kvm->arch.float_int.simm = 0;
+ kvm->arch.float_int.nimm = 0;
+ kvm->arch.float_int.ais_enabled = 0;
spin_lock_init(&kvm->arch.float_int.lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -1646,7 +1682,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
- vcpu->arch.sie_block->ecb2 |= 0x04U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
@@ -1700,7 +1736,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
vcpu->arch.sie_block->scaoh = scaoh;
vcpu->arch.sie_block->scaol = scaol;
- vcpu->arch.sie_block->ecb2 |= 0x04U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
}
kvm->arch.sca = new_sca;
kvm->arch.use_esca = 1;
@@ -1749,6 +1785,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 133))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
@@ -1939,8 +1977,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
if (!vcpu->arch.sie_block->cbrlo)
return -ENOMEM;
- vcpu->arch.sie_block->ecb2 |= 0x80;
- vcpu->arch.sie_block->ecb2 &= ~0x08;
+ vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+ vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
return 0;
}
@@ -1970,29 +2008,31 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
if (MACHINE_HAS_ESOP)
- vcpu->arch.sie_block->ecb |= 0x02;
+ vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
if (test_kvm_facility(vcpu->kvm, 9))
- vcpu->arch.sie_block->ecb |= 0x04;
+ vcpu->arch.sie_block->ecb |= ECB_SRSI;
if (test_kvm_facility(vcpu->kvm, 73))
- vcpu->arch.sie_block->ecb |= 0x10;
+ vcpu->arch.sie_block->ecb |= ECB_TE;
if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
- vcpu->arch.sie_block->ecb2 |= 0x08;
+ vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
if (test_kvm_facility(vcpu->kvm, 130))
- vcpu->arch.sie_block->ecb2 |= 0x20;
- vcpu->arch.sie_block->eca = 0x1002000U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
+ vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
if (sclp.has_cei)
- vcpu->arch.sie_block->eca |= 0x80000000U;
+ vcpu->arch.sie_block->eca |= ECA_CEI;
if (sclp.has_ib)
- vcpu->arch.sie_block->eca |= 0x40000000U;
+ vcpu->arch.sie_block->eca |= ECA_IB;
if (sclp.has_siif)
- vcpu->arch.sie_block->eca |= 1;
+ vcpu->arch.sie_block->eca |= ECA_SII;
if (sclp.has_sigpif)
- vcpu->arch.sie_block->eca |= 0x10000000U;
+ vcpu->arch.sie_block->eca |= ECA_SIGPI;
if (test_kvm_facility(vcpu->kvm, 129)) {
- vcpu->arch.sie_block->eca |= 0x00020000;
- vcpu->arch.sie_block->ecd |= 0x20000000;
+ vcpu->arch.sie_block->eca |= ECA_VX;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
}
+ vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
+ | SDNXC;
vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
@@ -2719,6 +2759,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
+ struct runtime_instr_cb *riccb;
+ struct gs_cb *gscb;
+
+ riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+ gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
@@ -2747,12 +2792,24 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
* we should enable RI here instead of doing the lazy enablement.
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
- test_kvm_facility(vcpu->kvm, 64)) {
- struct runtime_instr_cb *riccb =
- (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
-
- if (riccb->valid)
- vcpu->arch.sie_block->ecb3 |= 0x01;
+ test_kvm_facility(vcpu->kvm, 64) &&
+ riccb->valid &&
+ !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
+ vcpu->arch.sie_block->ecb3 |= ECB3_RI;
+ }
+ /*
+ * If userspace sets the gscb (e.g. after migration) to non-zero,
+ * we should enable GS here instead of doing the lazy enablement.
+ */
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
+ test_kvm_facility(vcpu->kvm, 133) &&
+ gscb->gssm &&
+ !vcpu->arch.gs_enabled) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
+ vcpu->arch.sie_block->ecb |= ECB_GS;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
+ vcpu->arch.gs_enabled = 1;
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
@@ -2768,6 +2825,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
+ if (MACHINE_HAS_GS) {
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ if (current->thread.gs_cb) {
+ vcpu->arch.host_gscb = current->thread.gs_cb;
+ save_gs_cb(vcpu->arch.host_gscb);
+ }
+ if (vcpu->arch.gs_enabled) {
+ current->thread.gs_cb = (struct gs_cb *)
+ &vcpu->run->s.regs.gscb;
+ restore_gs_cb(current->thread.gs_cb);
+ }
+ preempt_enable();
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2794,6 +2865,18 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+ if (MACHINE_HAS_GS) {
+ __ctl_set_bit(2, 4);
+ if (vcpu->arch.gs_enabled)
+ save_gs_cb(current->thread.gs_cb);
+ preempt_disable();
+ current->thread.gs_cb = vcpu->arch.host_gscb;
+ restore_gs_cb(vcpu->arch.host_gscb);
+ preempt_enable();
+ if (!vcpu->arch.host_gscb)
+ __ctl_clear_bit(2, 4);
+ vcpu->arch.host_gscb = NULL;
+ }
}
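Both capabilities are VM-wide and must be turned on before any VCPU exists, which the -EBUSY checks above enforce. A hedged sketch of the enabling sequence (KVM_CAP_S390_GS and KVM_CAP_S390_AIS are added to linux/kvm.h elsewhere in this series; vm_fd is assumed):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable guarded storage and AIS for a VM before creating VCPUs. */
static int enable_gs_and_ais(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_GS };

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
		return -1;	/* EINVAL if facility 133 is not installed */
	cap.cap = KVM_CAP_S390_AIS;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}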
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index af9fa91a0c91..455124fe0647 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -25,7 +25,7 @@
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
/* Transactional Memory Execution related macros */
-#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
+#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1 1
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
@@ -246,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 64b6a309f2c4..0ffe973535fa 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -37,7 +37,8 @@
static int handle_ri(struct kvm_vcpu *vcpu)
{
if (test_kvm_facility(vcpu->kvm, 64)) {
- vcpu->arch.sie_block->ecb3 |= 0x01;
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
+ vcpu->arch.sie_block->ecb3 |= ECB3_RI;
kvm_s390_retry_instr(vcpu);
return 0;
} else
@@ -52,6 +53,33 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_gs(struct kvm_vcpu *vcpu)
+{
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
+ restore_gs_cb(current->thread.gs_cb);
+ preempt_enable();
+ vcpu->arch.sie_block->ecb |= ECB_GS;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
+ vcpu->arch.gs_enabled = 1;
+ kvm_s390_retry_instr(vcpu);
+ return 0;
+ } else
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
+int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
+{
+ int code = vcpu->arch.sie_block->ipb & 0xff;
+
+ if (code == 0x49 || code == 0x4d)
+ return handle_gs(vcpu);
+ else
+ return -EOPNOTSUPP;
+}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -759,6 +787,7 @@ static const intercept_handler_t b2_handlers[256] = {
[0x3b] = handle_io_inst,
[0x3c] = handle_io_inst,
[0x50] = handle_ipte_interlock,
+ [0x56] = handle_sthyi,
[0x5f] = handle_io_inst,
[0x74] = handle_io_inst,
[0x76] = handle_io_inst,
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 05c98bb853cf..926b5244263e 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -404,6 +404,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
u64 code, addr, cc = 0;
struct sthyi_sctns *sctns = NULL;
+ if (!test_kvm_facility(vcpu->kvm, 74))
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
/*
* STHYI requires extensive locking in the higher hypervisors
* and is very computational/memory expensive. Therefore we
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 396485bca191..78b7e847984a 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -280,6 +280,58 @@ TRACE_EVENT(kvm_s390_enable_disable_ibs,
__entry->state ? "enabling" : "disabling", __entry->id)
);
+/*
+ * Trace point for modifying ais mode for a given isc.
+ */
+TRACE_EVENT(kvm_s390_modify_ais_mode,
+ TP_PROTO(__u8 isc, __u16 from, __u16 to),
+ TP_ARGS(isc, from, to),
+
+ TP_STRUCT__entry(
+ __field(__u8, isc)
+ __field(__u16, from)
+ __field(__u16, to)
+ ),
+
+ TP_fast_assign(
+ __entry->isc = isc;
+ __entry->from = from;
+ __entry->to = to;
+ ),
+
+ TP_printk("for isc %x, modifying interruption mode from %s to %s",
+ __entry->isc,
+ (__entry->from == KVM_S390_AIS_MODE_ALL) ?
+ "ALL-Interruptions Mode" :
+ (__entry->from == KVM_S390_AIS_MODE_SINGLE) ?
+ "Single-Interruption Mode" : "No-Interruptions Mode",
+ (__entry->to == KVM_S390_AIS_MODE_ALL) ?
+ "ALL-Interruptions Mode" :
+ (__entry->to == KVM_S390_AIS_MODE_SINGLE) ?
+ "Single-Interruption Mode" : "No-Interruptions Mode")
+ );
+
+/*
+ * Trace point for suppressed adapter I/O interrupt.
+ */
+TRACE_EVENT(kvm_s390_airq_suppressed,
+ TP_PROTO(__u32 id, __u8 isc),
+ TP_ARGS(id, isc),
+
+ TP_STRUCT__entry(
+ __field(__u32, id)
+ __field(__u8, isc)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->isc = isc;
+ ),
+
+ TP_printk("adapter I/O interrupt suppressed (id:%x isc:%x)",
+ __entry->id, __entry->isc)
+ );
+
#endif /* _TRACE_KVMS390_H */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5491be39776b..2fafc2be777f 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -249,7 +249,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
- bool had_tx = scb_s->ecb & 0x10U;
+ bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0;
int rc;
@@ -307,34 +307,39 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->ihcpu = scb_o->ihcpu;
/* MVPG and Protection Exception Interpretation are always available */
- scb_s->eca |= scb_o->eca & 0x01002000U;
+ scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
/* Host-protection-interruption introduced with ESOP */
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
- scb_s->ecb |= scb_o->ecb & 0x02U;
+ scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
/* transactional execution */
if (test_kvm_facility(vcpu->kvm, 73)) {
/* remap the prefix if tx is toggled on */
- if ((scb_o->ecb & 0x10U) && !had_tx)
+ if ((scb_o->ecb & ECB_TE) && !had_tx)
prefix_unmapped(vsie_page);
- scb_s->ecb |= scb_o->ecb & 0x10U;
+ scb_s->ecb |= scb_o->ecb & ECB_TE;
}
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
- scb_s->eca |= scb_o->eca & 0x00020000U;
- scb_s->ecd |= scb_o->ecd & 0x20000000U;
+ scb_s->eca |= scb_o->eca & ECA_VX;
+ scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
}
/* Run-time-Instrumentation */
if (test_kvm_facility(vcpu->kvm, 64))
- scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+ scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
/* Instruction Execution Prevention */
if (test_kvm_facility(vcpu->kvm, 130))
- scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
+ scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
+ /* Guarded Storage */
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ scb_s->ecb |= scb_o->ecb & ECB_GS;
+ scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
+ }
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
- scb_s->eca |= scb_o->eca & 0x00000001U;
+ scb_s->eca |= scb_o->eca & ECA_SII;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
- scb_s->eca |= scb_o->eca & 0x40000000U;
+ scb_s->eca |= scb_o->eca & ECA_IB;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
- scb_s->eca |= scb_o->eca & 0x80000000U;
+ scb_s->eca |= scb_o->eca & ECA_CEI;
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
@@ -406,7 +411,7 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
prefix += scb_s->mso;
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
- if (!rc && (scb_s->ecb & 0x10U))
+ if (!rc && (scb_s->ecb & ECB_TE))
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
prefix + PAGE_SIZE);
/*
@@ -496,6 +501,13 @@ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
unpin_guest_page(vcpu->kvm, gpa, hpa);
scb_s->riccbd = 0;
}
+
+ hpa = scb_s->sdnxo;
+ if (hpa) {
+ gpa = scb_o->sdnxo;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->sdnxo = 0;
+ }
}
/*
@@ -543,7 +555,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->itdba & ~0xffUL;
- if (gpa && (scb_s->ecb & 0x10U)) {
+ if (gpa && (scb_s->ecb & ECB_TE)) {
if (!(gpa & ~0x1fffU)) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
@@ -558,8 +570,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->gvrd & ~0x1ffUL;
- if (gpa && (scb_s->eca & 0x00020000U) &&
- !(scb_s->ecd & 0x20000000U)) {
+ if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x1310U);
goto unpin;
@@ -577,7 +588,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->riccbd & ~0x3fUL;
- if (gpa && (scb_s->ecb3 & 0x01U)) {
+ if (gpa && (scb_s->ecb3 & ECB3_RI)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0043U);
goto unpin;
@@ -591,6 +602,33 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
goto unpin;
scb_s->riccbd = hpa;
}
+ if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
+ unsigned long sdnxc;
+
+ gpa = scb_o->sdnxo & ~0xfUL;
+ sdnxc = scb_o->sdnxo & 0xfUL;
+ if (!gpa || !(gpa & ~0x1fffUL)) {
+ rc = set_validity_icpt(scb_s, 0x10b0U);
+ goto unpin;
+ }
+ if (sdnxc < 6 || sdnxc > 12) {
+ rc = set_validity_icpt(scb_s, 0x10b1U);
+ goto unpin;
+ }
+ if (gpa & ((1 << sdnxc) - 1)) {
+ rc = set_validity_icpt(scb_s, 0x10b2U);
+ goto unpin;
+ }
+ /* Due to alignment rules (checked above) this cannot
+ * cross page boundaries
+ */
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x10b0U);
+ if (rc)
+ goto unpin;
+ scb_s->sdnxo = hpa;
+ }
return 0;
unpin:
unpin_blocks(vcpu, vsie_page);
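The SDNX designation packs origin and length code into one doubleword, so the validity checks above reduce to mask operations. A tiny stand-alone sketch of the same checks (constants mirrored from pin_blocks(); the sample designations are made up):

#include <stdio.h>

/* Validate a guest SDNX designation the way pin_blocks() does:
 * low 4 bits = length code, origin must be sdnxc-aligned and above 8K. */
static int sdnxo_valid(unsigned long sdnxo)
{
	unsigned long gpa = sdnxo & ~0xfUL;
	unsigned long sdnxc = sdnxo & 0xfUL;

	if (!gpa || !(gpa & ~0x1fffUL))
		return 0;			/* validity 0x10b0 */
	if (sdnxc < 6 || sdnxc > 12)
		return 0;			/* validity 0x10b1 */
	if (gpa & ((1UL << sdnxc) - 1))
		return 0;			/* validity 0x10b2 */
	return 1;
}

int main(void)
{
	printf("%d\n", sdnxo_valid(0x20000UL | 8));	/* 1: 256-byte aligned */
	printf("%d\n", sdnxo_valid(0x20010UL | 8));	/* 0: misaligned origin */
	return 0;
}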