Diffstat (limited to 'arch/powerpc')
28 files changed, 177 insertions, 368 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573fe610..281f4f1fcd1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index c744569a20e1..a97296c64eb2 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -678,5 +678,6 @@
 		compatible = "fsl,t2080-l2-cache-controller";
 		reg = <0xc20000 0x40000>;
 		next-level-cache = <&cpc>;
+		interrupts = <16 2 1 9>;
 	};
 };
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 3ce91a3df27f..1d2d69dd6409 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -62,7 +62,6 @@ CONFIG_MPC8610_HPCD=y
 CONFIG_GEF_SBC610=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT=m
-CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
index c133246df467..3abcf98ed2e0 100644
--- a/arch/powerpc/include/asm/accounting.h
+++ b/arch/powerpc/include/asm/accounting.h
@@ -12,9 +12,17 @@
 
 /* Stuff for accurate time accounting */
 struct cpu_accounting_data {
-	unsigned long user_time;	/* accumulated usermode TB ticks */
-	unsigned long system_time;	/* accumulated system TB ticks */
-	unsigned long user_time_scaled;	/* accumulated usermode SPURR ticks */
+	/* Accumulated cputime values to flush on ticks */
+	unsigned long utime;
+	unsigned long stime;
+	unsigned long utime_scaled;
+	unsigned long stime_scaled;
+	unsigned long gtime;
+	unsigned long hardirq_time;
+	unsigned long softirq_time;
+	unsigned long steal_time;
+	unsigned long idle_time;
+	/* Internal counters */
 	unsigned long starttime;	/* TB value snapshot */
 	unsigned long starttime_user;	/* TB value on exit to usermode */
 	unsigned long startspurr;	/* SPURR value snapshot */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b152461b..6e834caa3720 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
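Note on the #ifndef __clang__ guards above (the same change appears in mmu.h below): BUILD_BUG_ON(!__builtin_constant_p(feature)) asserts at compile time that every caller passes a literal feature number, which only works once the argument has been folded into the inlined body; clang of this era evaluated __builtin_constant_p() too early and failed the build even for constant arguments. A standalone sketch of the same pattern (CHECK_CONSTANT and has_feature() are illustrative, not kernel API; needs GCC at -O1 or higher so the argument is propagated into the inline body):

#include <stdio.h>

/* Simplified stand-in for the kernel's BUILD_BUG_ON(): the array
 * gets a negative size, and the build fails, when the condition
 * is non-zero at compile time. */
#define CHECK_CONSTANT(x) \
	((void)sizeof(char[1 - 2 * !__builtin_constant_p(x)]))

static inline __attribute__((always_inline)) int has_feature(unsigned long feature)
{
	CHECK_CONSTANT(feature);	/* reject non-constant arguments */
	return (feature & 1) != 0;
}

int main(void)
{
	printf("%d\n", has_feature(3));	/* literal argument: compiles fine */
	return 0;
}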
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index aa2e6a34b872..99b541865d8d 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -16,12 +16,7 @@
 #ifndef __POWERPC_CPUTIME_H
 #define __POWERPC_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm-generic/cputime.h>
-#ifdef __KERNEL__
-static inline void setup_cputime_one_jiffy(void) { }
-#endif
-#else
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/types.h>
 #include <linux/time.h>
@@ -36,65 +31,6 @@ typedef u64 __nocast cputime64_t;
 #define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
 
 #ifdef __KERNEL__
-
-/*
- * One jiffy in timebase units computed during initialization
- */
-extern cputime_t cputime_one_jiffy;
-
-/*
- * Convert cputime <-> jiffies
- */
-extern u64 __cputime_jiffies_factor;
-
-static inline unsigned long cputime_to_jiffies(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned long jif)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = jif % HZ;
-	sec = jif / HZ;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, HZ);
-	}
-	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-static inline void setup_cputime_one_jiffy(void)
-{
-	cputime_one_jiffy = jiffies_to_cputime(1);
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
-	u64 ct;
-	u64 sec = jif;
-
-	/* have to be a little careful about overflow */
-	ct = do_div(sec, HZ);
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, HZ);
-	}
-	if (sec)
-		ct += (u64) sec * tb_ticks_per_sec;
-	return (__force cputime64_t) ct;
-}
-
-static inline u64 cputime64_to_jiffies64(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
 /*
  * Convert cputime <-> microseconds
  */
@@ -105,117 +41,6 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
 	return mulhdu((__force u64) ct, __cputime_usec_factor);
 }
 
-static inline cputime_t usecs_to_cputime(const unsigned long us)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = us % 1000000;
-	sec = us / 1000000;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, 1000000);
-	}
-	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-#define usecs_to_cputime64(us)	usecs_to_cputime(us)
-
-/*
- * Convert cputime <-> seconds
- */
-extern u64 __cputime_sec_factor;
-
-static inline unsigned long cputime_to_secs(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_sec_factor);
-}
-
-static inline cputime_t secs_to_cputime(const unsigned long sec)
-{
-	return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timespec
- */
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
-{
-	u64 x = (__force u64) ct;
-	unsigned int frac;
-
-	frac = do_div(x, tb_ticks_per_sec);
-	p->tv_sec = x;
-	x = (u64) frac * 1000000000;
-	do_div(x, tb_ticks_per_sec);
-	p->tv_nsec = x;
-}
-
-static inline cputime_t timespec_to_cputime(const struct timespec *p)
-{
-	u64 ct;
-
-	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
-	do_div(ct, 1000000000);
-	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timeval
- */
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
-{
-	u64 x = (__force u64) ct;
-	unsigned int frac;
-
-	frac = do_div(x, tb_ticks_per_sec);
-	p->tv_sec = x;
-	x = (u64) frac * 1000000;
-	do_div(x, tb_ticks_per_sec);
-	p->tv_usec = x;
-}
-
-static inline cputime_t timeval_to_cputime(const struct timeval *p)
-{
-	u64 ct;
-
-	ct = (u64) p->tv_usec * tb_ticks_per_sec;
-	do_div(ct, 1000000);
-	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
- */
-extern u64 __cputime_clockt_factor;
-
-static inline unsigned long cputime_to_clock_t(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_clockt_factor);
-}
-
-static inline cputime_t clock_t_to_cputime(const unsigned long clk)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = clk % USER_HZ;
-	sec = clk / USER_HZ;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, USER_HZ);
-	}
-	if (sec)
-		ct += (u64) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-#define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
-
 /*
  * PPC64 uses PACA which is task independent for storing accounting data while
  * PPC32 uses struct thread_info, therefore at task switch the accounting data
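The surviving cputime_to_usecs() above shows the whole trick behind these converters: __cputime_usec_factor is the fraction 1000000 / tb_ticks_per_sec stored as a 0.64 fixed-point binary fraction, so a conversion is just the high 64 bits of a 64x64-bit multiply (mulhdu on PPC64; div128_by_32() builds the factor in kernel/time.c below). A host-side sketch, assuming unsigned __int128 support (GCC/clang) and an illustrative 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

/* Illustrative value; the kernel reads the real frequency from the
 * device tree into tb_ticks_per_sec. */
static const uint64_t tb_ticks_per_sec = 512000000;

/* 0.64 fixed-point encoding of (1000000 / tb_ticks_per_sec), the
 * equivalent of div128_by_32(1000000, 0, tb_ticks_per_sec, &res). */
static uint64_t cputime_usec_factor(void)
{
	return (uint64_t)(((unsigned __int128)1000000 << 64) / tb_ticks_per_sec);
}

/* High 64 bits of a 64x64 multiply, what the mulhdu instruction does. */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t factor = cputime_usec_factor();

	/* One second worth of timebase ticks -> ~1000000 usecs
	 * (may be off by one from truncation). */
	printf("%llu\n", (unsigned long long)mulhdu(tb_ticks_per_sec, factor));
	return 0;
}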
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index a402f7f94896..47a03b9b528b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -28,13 +28,6 @@ static inline int klp_check_compiler_support(void)
 	return 0;
 }
 
-static inline int klp_write_module_reloc(struct module *mod, unsigned long
-		type, unsigned long loc, unsigned long value)
-{
-	/* This requires infrastructure changes; we need the loadinfos. */
-	return -ENOSYS;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->nip = ip;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764ca8dd..233a7e8cc8e3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61ef315..53885512b8d3 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6a6792bb39fb..708c3e592eeb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -187,7 +187,6 @@ struct paca_struct {
 
 	/* Stuff for accurate time accounting */
 	struct cpu_accounting_data accounting;
-	u64 stolen_time;		/* TB ticks taken by hypervisor */
 	u64 dtl_ridx;			/* read index in dispatch log */
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 0d4531aa2052..dff79798903d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -649,9 +649,10 @@
 #define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
-#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define   SRR1_WAKESYSERR	0x00300000 /* System error */
 #define   SRR1_WAKEEE		0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define   SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define   SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
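For orientation, the new SRR1_WAKEHVI value slots into the wake-reason field that pnv_smp_cpu_kill_self() (further down) masks out with SRR1_WAKEMASK_P8. A hedged sketch of that decode, with the constants copied from the hunk above and an invented helper name:

#include <stdint.h>
#include <stdio.h>

#define SRR1_WAKEMASK_P8	0x003c0000	/* wake-reason field, POWER8/9 */
#define SRR1_WAKEEE		0x00200000	/* external interrupt */
#define SRR1_WAKEHVI		0x00240000	/* hypervisor virt. interrupt (P9) */
#define SRR1_WAKEDEC		0x00180000	/* decrementer */

/* Illustrative helper, not a kernel function. */
static const char *wake_reason(uint64_t srr1)
{
	switch (srr1 & SRR1_WAKEMASK_P8) {
	case SRR1_WAKEEE:	return "external interrupt";
	case SRR1_WAKEHVI:	return "hypervisor virtualization interrupt";
	case SRR1_WAKEDEC:	return "decrementer";
	default:		return "other";
	}
}

int main(void)
{
	printf("%s\n", wake_reason(0x00240000));	/* -> hypervisor ... */
	return 0;
}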
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190eabec..000000000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
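The deleted header's comment describes the mechanism being reverted. Roughly, with -fstack-protector the compiler's prologue copies __stack_chk_guard into a slot between the locals and the saved return address, and the epilogue re-checks it, calling __stack_chk_fail() on mismatch. A hand-written userspace approximation (the compiler controls the real slot placement, so this only mirrors the shape of the check; all names are stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long guard = 0xdeadbeefUL;	/* stand-in for __stack_chk_guard */

static void check_failed(void)			/* stand-in for __stack_chk_fail() */
{
	fprintf(stderr, "*** stack smashing detected ***\n");
	abort();
}

static void copy_name(const char *src)
{
	unsigned long canary = guard;		/* "prologue": plant the canary */
	char buf[16];

	/* With compiler-placed canaries, an overflow of buf would clobber
	 * the canary slot before reaching the return address. */
	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	if (canary != guard)			/* "epilogue": verify before return */
		check_failed();
}

int main(void)
{
	copy_name("hello");
	puts("ok");
	return 0;
}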
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f0b238516e9b..e0b9e576905a 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082d7bfa..f4c2b52e58b3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a7297c..9e8e771f8acb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
@@ -252,9 +249,9 @@ int main(void)
 	DEFINE(ACCOUNT_STARTTIME_USER,
 	       offsetof(struct paca_struct, accounting.starttime_user));
 	DEFINE(ACCOUNT_USER_TIME,
-	       offsetof(struct paca_struct, accounting.user_time));
+	       offsetof(struct paca_struct, accounting.utime));
 	DEFINE(ACCOUNT_SYSTEM_TIME,
-	       offsetof(struct paca_struct, accounting.system_time));
+	       offsetof(struct paca_struct, accounting.stime));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 	DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
 	DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
@@ -265,9 +262,9 @@ int main(void)
 	DEFINE(ACCOUNT_STARTTIME_USER,
 	       offsetof(struct thread_info, accounting.starttime_user));
 	DEFINE(ACCOUNT_USER_TIME,
-	       offsetof(struct thread_info, accounting.user_time));
+	       offsetof(struct thread_info, accounting.utime));
 	DEFINE(ACCOUNT_SYSTEM_TIME,
-	       offsetof(struct thread_info, accounting.system_time));
+	       offsetof(struct thread_info, accounting.stime));
 #endif
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573bdd090..b94887165a10 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)
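The eeh_driver.c one-liner deserves a note: the traversal callback receives its flag through a void *, and the old code bound the pointer itself to clear_sw_state and then used it where a bool was expected, so any non-NULL pointer read as true. A minimal reproduction of the pattern (names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Mimics a callback that gets its flag through an opaque pointer. */
static void handle(void *flag)
{
	bool *buggy = flag;		/* old code: keeps the pointer */
	bool fixed = *(bool *)flag;	/* new code: reads the value */

	/* A non-NULL pointer is always "true" in a boolean context: */
	printf("buggy: %s\n", buggy ? "clear" : "keep");
	printf("fixed: %s\n", fixed ? "clear" : "keep");
}

int main(void)
{
	bool clear_sw_state = false;

	handle(&clear_sw_state);	/* buggy branch says "clear" despite false */
	return 0;
}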
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbdbee46..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb1807184bad..0b0f89685b67 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885cec24df..5dd056df0baa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a939cbdd..ac83eb04a8b8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc2e08d415fa..14e485525e31 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -57,6 +57,7 @@
 #include <linux/clk-provider.h>
 #include <linux/suspend.h>
 #include <linux/rtc.h>
+#include <linux/cputime.h>
 #include <asm/trace.h>
 #include <asm/io.h>
@@ -72,7 +73,6 @@
 #include <asm/smp.h>
 #include <asm/vdso_datapage.h>
 #include <asm/firmware.h>
-#include <asm/cputime.h>
 #include <asm/asm-prototypes.h>
 
 /* powerpc clocksource/clockevent code */
@@ -152,20 +152,11 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
- * Factors for converting from cputime_t (timebase ticks) to
- * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
- * These are all stored as 0.64 fixed-point binary fractions.
+ * Factor for converting from cputime_t (timebase ticks) to
+ * microseconds. This is stored as 0.64 fixed-point binary fraction.
  */
-u64 __cputime_jiffies_factor;
-EXPORT_SYMBOL(__cputime_jiffies_factor);
 u64 __cputime_usec_factor;
 EXPORT_SYMBOL(__cputime_usec_factor);
-u64 __cputime_sec_factor;
-EXPORT_SYMBOL(__cputime_sec_factor);
-u64 __cputime_clockt_factor;
-EXPORT_SYMBOL(__cputime_clockt_factor);
-
-cputime_t cputime_one_jiffy;
 
 #ifdef CONFIG_PPC_SPLPAR
 void (*dtl_consumer)(struct dtl_entry *, u64);
@@ -181,14 +172,8 @@ static void calc_cputime_factors(void)
 {
 	struct div_result res;
 
-	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_jiffies_factor = res.result_low;
 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
 	__cputime_usec_factor = res.result_low;
-	div128_by_32(1, 0, tb_ticks_per_sec, &res);
-	__cputime_sec_factor = res.result_low;
-	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_clockt_factor = res.result_low;
 }
 
 /*
@@ -271,25 +256,19 @@ void accumulate_stolen_time(void)
 	sst = scan_dispatch_log(acct->starttime_user);
 	ust = scan_dispatch_log(acct->starttime);
-	acct->system_time -= sst;
-	acct->user_time -= ust;
-	local_paca->stolen_time += ust + sst;
+	acct->stime -= sst;
+	acct->utime -= ust;
+	acct->steal_time += ust + sst;
 
 	local_paca->soft_enabled = save_soft_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
 {
-	u64 stolen = 0;
+	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+		return scan_dispatch_log(stop_tb);
 
-	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
-		stolen = scan_dispatch_log(stop_tb);
-		get_paca()->accounting.system_time -= stolen;
-	}
-
-	stolen += get_paca()->stolen_time;
-	get_paca()->stolen_time = 0;
-	return stolen;
+	return 0;
 }
 
 #else /* CONFIG_PPC_SPLPAR */
@@ -305,28 +284,27 @@
  * or soft irq state.
  */
 static unsigned long vtime_delta(struct task_struct *tsk,
-				 unsigned long *sys_scaled,
-				 unsigned long *stolen)
+				 unsigned long *stime_scaled,
+				 unsigned long *steal_time)
 {
 	unsigned long now, nowscaled, deltascaled;
-	unsigned long udelta, delta, user_scaled;
+	unsigned long stime;
+	unsigned long utime, utime_scaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
 
 	WARN_ON_ONCE(!irqs_disabled());
 
 	now = mftb();
 	nowscaled = read_spurr(now);
-	acct->system_time += now - acct->starttime;
+	stime = now - acct->starttime;
 	acct->starttime = now;
 	deltascaled = nowscaled - acct->startspurr;
 	acct->startspurr = nowscaled;
 
-	*stolen = calculate_stolen_time(now);
+	*steal_time = calculate_stolen_time(now);
 
-	delta = acct->system_time;
-	acct->system_time = 0;
-	udelta = acct->user_time - acct->utime_sspurr;
-	acct->utime_sspurr = acct->user_time;
+	utime = acct->utime - acct->utime_sspurr;
+	acct->utime_sspurr = acct->utime;
 
 	/*
 	 * Because we don't read the SPURR on every kernel entry/exit,
@@ -338,62 +316,105 @@ static unsigned long vtime_delta(struct task_struct *tsk,
 	 * the user ticks get saved up in paca->user_time_scaled to be
 	 * used by account_process_tick.
 	 */
-	*sys_scaled = delta;
-	user_scaled = udelta;
-	if (deltascaled != delta + udelta) {
-		if (udelta) {
-			*sys_scaled = deltascaled * delta / (delta + udelta);
-			user_scaled = deltascaled - *sys_scaled;
+	*stime_scaled = stime;
+	utime_scaled = utime;
+	if (deltascaled != stime + utime) {
+		if (utime) {
+			*stime_scaled = deltascaled * stime / (stime + utime);
+			utime_scaled = deltascaled - *stime_scaled;
 		} else {
-			*sys_scaled = deltascaled;
+			*stime_scaled = deltascaled;
 		}
 	}
-	acct->user_time_scaled += user_scaled;
+	acct->utime_scaled += utime_scaled;
 
-	return delta;
+	return stime;
 }
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
+
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
 
-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_system_time(tsk, 0, delta);
-	tsk->stimescaled += sys_scaled;
-	if (stolen)
-		account_steal_time(stolen);
+	stime -= min(stime, steal_time);
+	acct->steal_time += steal_time;
+
+	if ((tsk->flags & PF_VCPU) && !irq_count()) {
+		acct->gtime += stime;
+		acct->utime_scaled += stime_scaled;
+	} else {
+		if (hardirq_count())
+			acct->hardirq_time += stime;
+		else if (in_serving_softirq())
+			acct->softirq_time += stime;
+		else
+			acct->stime += stime;
+
+		acct->stime_scaled += stime_scaled;
+	}
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_idle_time(delta + stolen);
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+	acct->idle_time += stime + steal_time;
 }
 
 /*
- * Transfer the user time accumulated in the paca
- * by the exception entry and exit code to the generic
- * process user time records.
+ * Account the whole cputime accumulated in the paca
  * Must be called with interrupts disabled.
  * Assumes that vtime_account_system/idle() has been called
  * recently (i.e. since the last entry from usermode) so that
  * get_paca()->user_time_scaled is up to date.
  */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-	cputime_t utime, utimescaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	utime = acct->user_time;
-	utimescaled = acct->user_time_scaled;
-	acct->user_time = 0;
-	acct->user_time_scaled = 0;
+	if (acct->utime)
+		account_user_time(tsk, cputime_to_nsecs(acct->utime));
+
+	if (acct->utime_scaled)
+		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+
+	if (acct->gtime)
+		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
+
+	if (acct->steal_time)
+		account_steal_time(cputime_to_nsecs(acct->steal_time));
+
+	if (acct->idle_time)
+		account_idle_time(cputime_to_nsecs(acct->idle_time));
+
+	if (acct->stime)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
+					  CPUTIME_SYSTEM);
+	if (acct->stime_scaled)
+		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+	if (acct->hardirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
+					  CPUTIME_IRQ);
+	if (acct->softirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
+					  CPUTIME_SOFTIRQ);
+
+	acct->utime = 0;
+	acct->utime_scaled = 0;
 	acct->utime_sspurr = 0;
-	account_user_time(tsk, utime);
-	tsk->utimescaled += utimescaled;
+	acct->gtime = 0;
+	acct->steal_time = 0;
+	acct->idle_time = 0;
+	acct->stime = 0;
+	acct->stime_scaled = 0;
+	acct->hardirq_time = 0;
+	acct->softirq_time = 0;
 }
 
 #ifdef CONFIG_PPC32
@@ -407,8 +428,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
 	struct cpu_accounting_data *acct = get_accounting(current);
 
 	acct->starttime = get_accounting(prev)->starttime;
-	acct->system_time = 0;
-	acct->user_time = 0;
+	acct->startspurr = get_accounting(prev)->startspurr;
 }
 
 #endif /* CONFIG_PPC32 */
@@ -1018,7 +1038,6 @@ void __init time_init(void)
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	calc_cputime_factors();
-	setup_cputime_one_jiffy();
 
 	/*
 	 * Compute scale factor for sched_clock.
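The apportioning logic carried through vtime_delta() above is plain proportional scaling: one SPURR delta covers both user and system execution, and it is split in the same ratio as the corresponding timebase deltas. A small numeric check of that arithmetic, with invented tick values:

#include <stdio.h>

int main(void)
{
	/* Invented sample deltas, in ticks. */
	unsigned long stime = 300;		/* system time, timebase */
	unsigned long utime = 100;		/* user time, timebase */
	unsigned long deltascaled = 200;	/* SPURR delta for the same window */
	unsigned long stime_scaled = stime;
	unsigned long utime_scaled = utime;

	/* Same split as vtime_delta(): share deltascaled in the
	 * stime:utime ratio. */
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}

	/* 200 split 300:100 -> 150 system, 50 user */
	printf("stime_scaled=%lu utime_scaled=%lu\n", stime_scaled, utime_scaled);
	return 0;
}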
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6fd30ac7d14a..62a50d6d1053 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(debugger_fault_handler(regs)))
 		goto bail;
 
-	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+	/*
+	 * The kernel should never take an execute fault nor should it
+	 * take a page fault to a kernel address.
+	 */
+	if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 		rc = SIGSEGV;
 		goto bail;
 	}
@@ -391,20 +394,6 @@ good_area:
 
 	if (is_exec) {
 		/*
-		 * An execution fault + no execute ?
-		 *
-		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-		 * deliberately create NX mappings, and use the fault to do the
-		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
-		 * but we could end up here if that races with a concurrent PTE
-		 * update. In that case we need to fall through here to the VMA
-		 * check below.
-		 */
-		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-			(regs->msr & SRR1_ISI_N_OR_G))
-			goto bad_area;
-
-		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
 		 *
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 93abf8a9813d..8e1588021d1c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
 void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
-	if (disable_radix)
+	/* We don't yet have the machinery to do radix as a guest. */
+	if (disable_radix || !(mfmsr() & MSR_HV))
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
 	if (early_radix_enabled())
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 61b79119065f..952713d6cf04 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	return;
+	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
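One detail in the tlb-radix.c hunk: the ERAT invalidation now carries an unconditional isync and keeps its "memory" clobber, which is what prevents the compiler from caching memory accesses across the flush. A generic sketch of that inline-asm pattern (a nop stands in for the POWER-specific PPC_INVALIDATE_ERAT encoding):

/* Inline-asm barrier in the style of _tlbiel_pid() above. The
 * "memory" clobber tells the compiler all memory may have changed,
 * so no load or store is reordered or cached across the statement. */
static inline void invalidate_and_sync(void)
{
#if defined(__powerpc64__)
	asm volatile("nop; isync" : : : "memory");	/* nop stands in for the flush */
#else
	asm volatile("" : : : "memory");		/* plain compiler barrier */
#endif
}

int main(void)
{
	invalidate_and_sync();
	return 0;
}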
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258ae1e1..eec0e8d0454d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 		wmask = SRR1_WAKEMASK_P8;
 
 	idle_states = pnv_get_supported_cpuidle_states();
+
 	/* We don't want to take decrementer interrupts while we are offline,
-	 * so clear LPCR:PECE1. We keep PECE2 enabled.
+	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+	 * enabled as to let IPIs in.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 		 * contains 0.
 		 */
 		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
 		    (local_paca->irq_happened & PACA_IRQ_EE)) {
-			icp_native_flush_interrupt();
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				icp_opal_flush_interrupt();
+			else
+				icp_native_flush_interrupt();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
+
+	/* Re-enable decrementer interrupts */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 	DBG("CPU%d coming online...\n", cpu);
 }
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 60c57657c772..f9670eabfcfa 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
+	kvmppc_set_host_ipi(cpu, 1);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-	int hw_cpu = hard_smp_processor_id();
+	int cpu = smp_processor_id();
 
-	opal_int_set_mfrr(hw_cpu, 0xff);
+	kvmppc_set_host_ipi(cpu, 0);
+	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+	unsigned int xirr;
+	unsigned int vec;
+
+	do {
+		xirr = icp_opal_get_xirr();
+		vec = xirr & 0x00ffffff;
+		if (vec == XICS_IRQ_SPURIOUS)
+			break;
+		if (vec == XICS_IPI) {
+			/* Clear pending IPI */
+			int cpu = smp_processor_id();
+			kvmppc_set_host_ipi(cpu, 0);
+			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+		} else {
+			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+			       "disabling\n", vec);
+			xics_mask_unknown_vec(vec);
+		}
+
+		/* EOI the interrupt */
+	} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 9c0e17cf6886..3f864c36d847 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2287,14 +2287,14 @@ static void dump_one_paca(int cpu)
 	DUMP(p, subcore_sibling_mask, "x");
 #endif
 
-	DUMP(p, accounting.user_time, "llx");
-	DUMP(p, accounting.system_time, "llx");
-	DUMP(p, accounting.user_time_scaled, "llx");
+	DUMP(p, accounting.utime, "llx");
+	DUMP(p, accounting.stime, "llx");
+	DUMP(p, accounting.utime_scaled, "llx");
 	DUMP(p, accounting.starttime, "llx");
 	DUMP(p, accounting.starttime_user, "llx");
 	DUMP(p, accounting.startspurr, "llx");
 	DUMP(p, accounting.utime_sspurr, "llx");
-	DUMP(p, stolen_time, "llx");
+	DUMP(p, accounting.steal_time, "llx");
 
 #undef DUMP
 
 	catch_memory_errors = 0;