Diffstat (limited to 'arch')
1070 files changed, 41168 insertions, 56091 deletions
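A large share of the arch/ churn below consolidates DMA-mapping error handling: the per-architecture sentinels (0 on alpha, ARM_MAPPING_ERROR on ARM) and the per-ops .mapping_error callback are removed in favour of the generic DMA_MAPPING_ERROR value (~(dma_addr_t)0). A minimal sketch of the driver-side pattern these patches standardize on; the function and buffer names here are hypothetical, but dma_map_single()/dma_mapping_error() are the standard API:

    #include <linux/dma-mapping.h>

    static int example_map_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t addr;

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))  /* compares against DMA_MAPPING_ERROR */
                    return -ENOMEM;

            /* ... hand 'addr' to the device ... */

            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            return 0;
    }

One consequence visible throughout the diff: map routines must now return DMA_MAPPING_ERROR themselves on failure, since there is no longer a per-bus callback to translate private error encodings.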
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5b4f88363453..5da6ff54b3e7 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -5,6 +5,7 @@ config ALPHA select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT + select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF select HAVE_AOUT select HAVE_IDE @@ -202,7 +203,6 @@ config ALPHA_EIGER config ALPHA_JENSEN bool "Jensen" depends on BROKEN - select DMA_DIRECT_OPS help DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one of the first-generation Alpha systems. A number of these systems diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 8beeafd4f68e..0ee6a5c99b16 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_ALPHA_JENSEN - return &dma_direct_ops; + return NULL; #else return &alpha_pci_ops; #endif diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 46e08e0d9181..aa0f50d0f823 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -291,7 +291,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, use direct_map above, it now must be considered an error. */ if (! alpha_mv.mv_pci_tbi) { printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); - return 0; + return DMA_MAPPING_ERROR; } arena = hose->sg_pci; @@ -307,7 +307,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, if (dma_ofs < 0) { printk(KERN_WARNING "pci_map_single failed: " "could not allocate dma page tables\n"); - return 0; + return DMA_MAPPING_ERROR; } paddr &= PAGE_MASK; @@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, gfp &= ~GFP_DMA; try_again: - cpu_addr = (void *)__get_free_pages(gfp, order); + cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order); if (! 
cpu_addr) { printk(KERN_INFO "pci_alloc_consistent: " "get_free_pages failed from %pf\n", @@ -455,7 +455,7 @@ try_again: memset(cpu_addr, 0, size); *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); - if (*dma_addrp == 0) { + if (*dma_addrp == DMA_MAPPING_ERROR) { free_pages((unsigned long)cpu_addr, order); if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) return NULL; @@ -671,7 +671,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, sg->dma_address = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, dac_allowed); - return sg->dma_address != 0; + return sg->dma_address != DMA_MAPPING_ERROR; } start = sg; @@ -935,11 +935,6 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) return 0; } -static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == 0; -} - const struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent, @@ -947,7 +942,6 @@ const struct dma_map_ops alpha_pci_ops = { .unmap_page = alpha_pci_unmap_page, .map_sg = alpha_pci_map_sg, .unmap_sg = alpha_pci_unmap_sg, - .mapping_error = alpha_pci_mapping_error, .dma_supported = alpha_pci_supported, }; EXPORT_SYMBOL(alpha_pci_ops); diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6dd783557330..ca8e26d045a9 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,12 +13,10 @@ config ARC select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_SG_CHAIN select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT @@ -26,6 +24,7 @@ config ARC select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_PENDING_IRQ if SMP + select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index cf9619d4efb4..4135abec3fb0 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -1294,7 +1294,7 @@ void __init arc_cache_init_master(void) /* * In case of IOC (say IOC+SLC case), pointers above could still be set * but end up not being relevant as the first function in chain is not - * called at all for @dma_direct_ops + * called at all for devices using coherent DMA. 
* arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() */ } diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index db203ff69ccf..1525ac00fd02 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -33,7 +33,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, */ BUG_ON(gfp & __GFP_HIGHMEM); - page = alloc_pages(gfp, order); + page = alloc_pages(gfp | __GFP_ZERO, order); if (!page) return NULL; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 91be74d8df65..2196aac0e45c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -19,6 +19,7 @@ config ARM select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW @@ -29,7 +30,7 @@ config ARM select CLONE_BACKWARDS select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS - select DMA_DIRECT_OPS if !MMU + select DMA_REMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB select GENERIC_ALLOCATOR @@ -118,7 +119,6 @@ config ARM <http://www.arm.linux.org.uk/>. config ARM_HAS_SG_CHAIN - select ARCH_HAS_SG_CHAIN bool config ARM_DMA_USE_IOMMU @@ -1810,6 +1810,21 @@ config XEN help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. +config STACKPROTECTOR_PER_TASK + bool "Use a unique stack canary value for each task" + depends on GCC_PLUGINS && STACKPROTECTOR && SMP && !XIP_DEFLATED_DATA + select GCC_PLUGIN_ARM_SSP_PER_TASK + default y + help + Due to the fact that GCC uses an ordinary symbol reference from + which to load the value of the stack canary, this value can only + change at reboot time on SMP systems, and all tasks running in the + kernel's address space are forced to use the same canary value for + the entire duration that the system is up. + + Enable this option to switch to a different method that uses a + different canary value for each task. 
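In rough C terms, the effect of the plugin described in the help text above (a sketch of the semantics only, not the plugin's actual output) is to change where the canary is loaded from, using the TI_STACK_CANARY offset exported through asm-offsets.h and fed to the plugin by the arch/arm/Makefile hunk below:

    /* Illustrative sketch: what a protected function's prologue reads. */
    static unsigned long current_canary(void)
    {
    #ifndef CONFIG_STACKPROTECTOR_PER_TASK
            return __stack_chk_guard;  /* one global value shared by all tasks */
    #else
            /* The arm_ssp_per_task plugin rewrites canary loads into a
             * fixed-offset load from the current thread_info instead: */
            return current_thread_info()->stack_canary;
    #endif
    }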
+ endmenu menu "Boot options" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 05a91d8b89f3..0436002d5091 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -303,6 +303,18 @@ else KBUILD_IMAGE := $(boot)/zImage endif +ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) +prepare: stack_protector_prepare +stack_protector_prepare: prepare0 + $(eval KBUILD_CFLAGS += \ + -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \ + awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\ + include/generated/asm-offsets.h) \ + -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \ + awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\ + include/generated/asm-offsets.h)) +endif + all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 1f5a5ffe7fcf..01bf2585a0fa 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -101,6 +101,7 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \ $(libfdt) $(libfdt_hdrs) hyp-stub.S KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING +KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN) ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts index b828677f331d..ffafe9720b35 100644 --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts @@ -245,6 +245,8 @@ regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; regulator-name = "vddio-csi0"; + regulator-soft-start; + regulator-ramp-delay = <1600>; }; &reg_ldo4 { diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9a92de63426f..5ba4622030ca 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -257,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, if (buf == NULL) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", @@ -327,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, ret = needs_bounce(dev, dma_addr, size); if (ret < 0) - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; if (ret == 0) { arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir); @@ -336,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, if (PageHighMem(page)) { dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } return map_single(dev, page_address(page) + offset, size, dir, attrs); @@ -453,11 +453,6 @@ static int dmabounce_dma_supported(struct device *dev, u64 dma_mask) return arm_dma_ops.dma_supported(dev, dma_mask); } -static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return arm_dma_ops.mapping_error(dev, dma_addr); -} - static const struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, @@ -472,7 +467,6 @@ static const struct dma_map_ops dmabounce_ops = { .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, .dma_supported = dmabounce_dma_supported, - .mapping_error = dmabounce_mapping_error, }; static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index
07b2eadac3dd..207962a656a2 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -167,8 +167,9 @@ CONFIG_SOUND=m CONFIG_SND=m CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m -CONFIG_SND_EDMA_SOC=m -CONFIG_SND_DA850_SOC_EVM=m +CONFIG_SND_SOC_TLV320AIC3X=m +CONFIG_SND_SOC_DAVINCI_MCASP=m +CONFIG_SND_SOC_DAVINCI_EVM=m CONFIG_SND_SIMPLE_CARD=m CONFIG_HID=m CONFIG_HID_A4TECH=m diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index 72f4bc83f467..cfc00b0961ec 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -175,8 +175,6 @@ CONFIG_SND_PCM_OSS=y # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_DUMMY=y CONFIG_SND_USB_AUDIO=y -CONFIG_SND_SOC=y -CONFIG_SND_OMAP_SOC=y # CONFIG_USB_HID is not set CONFIG_USB=y CONFIG_USB_PHY=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 6491419b1dad..2274e45623f9 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -381,13 +381,13 @@ CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m -CONFIG_SND_EDMA_SOC=m -CONFIG_SND_AM33XX_SOC_EVM=m -CONFIG_SND_OMAP_SOC=m -CONFIG_SND_OMAP_SOC_HDMI_AUDIO=m -CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m -CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m -CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m +CONFIG_SND_SOC_TLV320AIC3X=m +CONFIG_SND_SOC_DAVINCI_MCASP=m +CONFIG_SND_SOC_NOKIA_RX51=m +CONFIG_SND_SOC_OMAP_HDMI=m +CONFIG_SND_SOC_OMAP_ABE_TWL6040=m +CONFIG_SND_SOC_OMAP3_PANDORA=m +CONFIG_SND_SOC_OMAP3_TWL4030=m CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index ef0c7feea6e2..a95322b59799 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -69,6 +69,15 @@ config CRYPTO_AES_ARM help Use optimized AES assembler routines for ARM platforms. + On ARM processors without the Crypto Extensions, this is the + fastest AES implementation for single blocks. For multiple + blocks, the NEON bit-sliced implementation is usually faster. + + This implementation may be vulnerable to cache timing attacks, + since it uses lookup tables. However, as countermeasures it + disables IRQs and preloads the tables; it is hoped this makes + such attacks very difficult. 
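The countermeasure described in the CRYPTO_AES_ARM help text above is implemented by the aes-cipher-core.S hunks later in this diff. An illustrative C analog of the pattern (crypto_ft_tab is the real 1 KB lookup table; do_rounds() is a hypothetical stand-in for the round assembly; a cacheline size of at least 32 bytes is assumed, matching the comment in the assembly):

    static void aes_crypt_hardened(const u32 *rk, int rounds,
                                   const u8 *in, u8 *out)
    {
            unsigned long flags;
            size_t i;

            /* IRQs stay off so nothing can evict the table mid-operation. */
            local_irq_save(flags);

            /* One load per 32-byte cacheline pulls the table into L1. */
            for (i = 0; i < 1024; i += 32)
                    (void)READ_ONCE(((const u8 *)crypto_ft_tab)[i]);

            do_rounds(rk, rounds, in, out); /* data-dependent lookups happen here */

            local_irq_restore(flags);
    }

As the assembly comments note, this only raises the bar; table-based AES is not constant-time in general.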
+ config CRYPTO_AES_ARM_BS tristate "Bit sliced AES using NEON instructions" depends on KERNEL_MODE_NEON @@ -117,9 +126,14 @@ config CRYPTO_CRC32_ARM_CE select CRYPTO_HASH config CRYPTO_CHACHA20_NEON - tristate "NEON accelerated ChaCha20 symmetric cipher" + tristate "NEON accelerated ChaCha stream cipher algorithms" depends on KERNEL_MODE_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 +config CRYPTO_NHPOLY1305_NEON + tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)" + depends on KERNEL_MODE_NEON + select CRYPTO_NHPOLY1305 + endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index bd5bceef0605..b65d6bfab8e6 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o -obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o +obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o +obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -52,7 +53,8 @@ aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o -chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o +nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index d0a9cec73707..5affb8482379 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -10,7 +10,6 @@ #include <asm/hwcap.h> #include <asm/neon.h> -#include <asm/hwcap.h> #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S index 184d6c2d15d5..f2d67c095e59 100644 --- a/arch/arm/crypto/aes-cipher-core.S +++ b/arch/arm/crypto/aes-cipher-core.S @@ -10,6 +10,7 @@ */ #include <linux/linkage.h> +#include <asm/assembler.h> #include <asm/cache.h> .text @@ -41,7 +42,7 @@ .endif .endm - .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op + .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op, oldcpsr __select \out0, \in0, 0 __select t0, \in1, 1 __load \out0, \out0, 0, \sz, \op @@ -73,6 +74,14 @@ __load t0, t0, 3, \sz, \op __load \t4, \t4, 3, \sz, \op + .ifnb \oldcpsr + /* + * This is the final round and we're done with all data-dependent table + * lookups, so we can safely re-enable interrupts. 
+ */ + restore_irqs \oldcpsr + .endif + eor \out1, \out1, t1, ror #24 eor \out0, \out0, t2, ror #16 ldm rk!, {t1, t2} @@ -83,14 +92,14 @@ eor \out1, \out1, t2 .endm - .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op - __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op + __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op, \oldcpsr .endm - .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op - __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op + __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr .endm .macro __rev, out, in @@ -118,13 +127,14 @@ .macro do_crypt, round, ttab, ltab, bsz push {r3-r11, lr} + // Load keys first, to reduce latency in case they're not cached yet. + ldm rk!, {r8-r11} + ldr r4, [in] ldr r5, [in, #4] ldr r6, [in, #8] ldr r7, [in, #12] - ldm rk!, {r8-r11} - #ifdef CONFIG_CPU_BIG_ENDIAN __rev r4, r4 __rev r5, r5 @@ -138,6 +148,25 @@ eor r7, r7, r11 __adrl ttab, \ttab + /* + * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into + * L1 cache, assuming cacheline size >= 32. This is a hardening measure + * intended to make cache-timing attacks more difficult. They may not + * be fully prevented, however; see the paper + * https://cr.yp.to/antiforgery/cachetiming-20050414.pdf + * ("Cache-timing attacks on AES") for a discussion of the many + * difficulties involved in writing truly constant-time AES software. + */ + save_and_disable_irqs t0 + .set i, 0 + .rept 1024 / 128 + ldr r8, [ttab, #i + 0] + ldr r9, [ttab, #i + 32] + ldr r10, [ttab, #i + 64] + ldr r11, [ttab, #i + 96] + .set i, i + 128 + .endr + push {t0} // oldcpsr tst rounds, #2 bne 1f @@ -151,8 +180,21 @@ \round r4, r5, r6, r7, r8, r9, r10, r11 b 0b -2: __adrl ttab, \ltab - \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b +2: .ifb \ltab + add ttab, ttab, #1 + .else + __adrl ttab, \ltab + // Prefetch inverse S-box for final round; see explanation above + .set i, 0 + .rept 256 / 64 + ldr t0, [ttab, #i + 0] + ldr t1, [ttab, #i + 32] + .set i, i + 64 + .endr + .endif + + pop {rounds} // oldcpsr + \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds #ifdef CONFIG_CPU_BIG_ENDIAN __rev r4, r4 @@ -175,7 +217,7 @@ .endm ENTRY(__aes_arm_encrypt) - do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 + do_crypt fround, crypto_ft_tab,, 2 ENDPROC(__aes_arm_encrypt) .align 5 diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha-neon-core.S index 50e7b9896818..eb22926d4912 100644 --- a/arch/arm/crypto/chacha20-neon-core.S +++ b/arch/arm/crypto/chacha-neon-core.S @@ -1,5 +1,5 @@ /* - * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions + * ChaCha/XChaCha NEON helper functions * * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> * @@ -27,9 +27,9 @@ * (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only, * needs index vector) * - * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit - * rotations, the only choices are (a) and (b). We use (a) since it takes - * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53. + * ChaCha has 16, 12, 8, and 7-bit rotations. 
For the 12 and 7-bit rotations, + * the only choices are (a) and (b). We use (a) since it takes two-thirds the + * cycles of (b) on both Cortex-A7 and Cortex-A53. * * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest * and doesn't need a temporary register. @@ -52,30 +52,20 @@ .fpu neon .align 5 -ENTRY(chacha20_block_xor_neon) - // r0: Input state matrix, s - // r1: 1 data block output, o - // r2: 1 data block input, i - - // - // This function encrypts one ChaCha20 block by loading the state matrix - // in four NEON registers. It performs matrix operation on four words in - // parallel, but requireds shuffling to rearrange the words after each - // round. - // - - // x0..3 = s0..3 - add ip, r0, #0x20 - vld1.32 {q0-q1}, [r0] - vld1.32 {q2-q3}, [ip] - - vmov q8, q0 - vmov q9, q1 - vmov q10, q2 - vmov q11, q3 +/* + * chacha_permute - permute one block + * + * Permute one 64-byte block where the state matrix is stored in the four NEON + * registers q0-q3. It performs matrix operations on four words in parallel, + * but requires shuffling to rearrange the words after each round. + * + * The round count is given in r3. + * + * Clobbers: r3, ip, q4-q5 + */ +chacha_permute: adr ip, .Lrol8_table - mov r3, #10 vld1.8 {d10}, [ip, :64] .Ldoubleround: @@ -139,9 +129,31 @@ ENTRY(chacha20_block_xor_neon) // x3 = shuffle32(x3, MASK(0, 3, 2, 1)) vext.8 q3, q3, q3, #4 - subs r3, r3, #1 + subs r3, r3, #2 bne .Ldoubleround + bx lr +ENDPROC(chacha_permute) + +ENTRY(chacha_block_xor_neon) + // r0: Input state matrix, s + // r1: 1 data block output, o + // r2: 1 data block input, i + // r3: nrounds + push {lr} + + // x0..3 = s0..3 + add ip, r0, #0x20 + vld1.32 {q0-q1}, [r0] + vld1.32 {q2-q3}, [ip] + + vmov q8, q0 + vmov q9, q1 + vmov q10, q2 + vmov q11, q3 + + bl chacha_permute + add ip, r2, #0x20 vld1.8 {q4-q5}, [r2] vld1.8 {q6-q7}, [ip] @@ -166,15 +178,33 @@ ENTRY(chacha20_block_xor_neon) vst1.8 {q0-q1}, [r1] vst1.8 {q2-q3}, [ip] - bx lr -ENDPROC(chacha20_block_xor_neon) + pop {pc} +ENDPROC(chacha_block_xor_neon) + +ENTRY(hchacha_block_neon) + // r0: Input state matrix, s + // r1: output (8 32-bit words) + // r2: nrounds + push {lr} + + vld1.32 {q0-q1}, [r0]! + vld1.32 {q2-q3}, [r0] + + mov r3, r2 + bl chacha_permute + + vst1.32 {q0}, [r1]! + vst1.32 {q3}, [r1] + + pop {pc} +ENDPROC(hchacha_block_neon) .align 4 .Lctrinc: .word 0, 1, 2, 3 .Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6 .align 5 -ENTRY(chacha20_4block_xor_neon) +ENTRY(chacha_4block_xor_neon) push {r4-r5} mov r4, sp // preserve the stack pointer sub ip, sp, #0x20 // allocate a 32 byte buffer @@ -184,9 +214,10 @@ ENTRY(chacha20_4block_xor_neon) // r0: Input state matrix, s // r1: 4 data blocks output, o // r2: 4 data blocks input, i + // r3: nrounds // - // This function encrypts four consecutive ChaCha20 blocks by loading + // This function encrypts four consecutive ChaCha blocks by loading // the state matrix in NEON registers four times. The algorithm performs // each operation on the corresponding word of each state matrix, hence // requires no word shuffling. The words are re-interleaved before the @@ -219,7 +250,6 @@ ENTRY(chacha20_4block_xor_neon) vdup.32 q0, d0[0] adr ip, .Lrol8_table - mov r3, #10 b 1f .Ldoubleround4: @@ -417,7 +447,7 @@ ENTRY(chacha20_4block_xor_neon) vsri.u32 q5, q8, #25 vsri.u32 q6, q9, #25 - subs r3, r3, #1 + subs r3, r3, #2 bne .Ldoubleround4 // x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15. 
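For reference, the rotation amounts discussed in the comment above come from the ChaCha quarter-round; a plain C version (the generic form, not the NEON code) looks like this:

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    static void chacha_quarterround(u32 *a, u32 *b, u32 *c, u32 *d)
    {
            *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
            *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }

Note how the refactored core now takes the round count in r3 and subtracts 2 per double-round iteration, so the same chacha_permute routine serves both ChaCha20 (20 rounds) and the reduced-round ChaCha12 used by Adiantum, as well as the hchacha_block_neon helper needed for XChaCha.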
@@ -527,4 +557,4 @@ ENTRY(chacha20_4block_xor_neon) pop {r4-r5} bx lr -ENDPROC(chacha20_4block_xor_neon) +ENDPROC(chacha_4block_xor_neon) diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c new file mode 100644 index 000000000000..9d6fda81986d --- /dev/null +++ b/arch/arm/crypto/chacha-neon-glue.c @@ -0,0 +1,201 @@ +/* + * ARM NEON accelerated ChaCha and XChaCha stream ciphers, + * including ChaCha20 (RFC7539) + * + * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on: + * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> + +asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, + int nrounds); +asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, + int nrounds); +asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); + +static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + u8 buf[CHACHA_BLOCK_SIZE]; + + while (bytes >= CHACHA_BLOCK_SIZE * 4) { + chacha_4block_xor_neon(state, dst, src, nrounds); + bytes -= CHACHA_BLOCK_SIZE * 4; + src += CHACHA_BLOCK_SIZE * 4; + dst += CHACHA_BLOCK_SIZE * 4; + state[12] += 4; + } + while (bytes >= CHACHA_BLOCK_SIZE) { + chacha_block_xor_neon(state, dst, src, nrounds); + bytes -= CHACHA_BLOCK_SIZE; + src += CHACHA_BLOCK_SIZE; + dst += CHACHA_BLOCK_SIZE; + state[12]++; + } + if (bytes) { + memcpy(buf, src, bytes); + chacha_block_xor_neon(state, buf, buf, nrounds); + memcpy(dst, buf, bytes); + } +} + +static int chacha_neon_stream_xor(struct skcipher_request *req, + struct chacha_ctx *ctx, u8 *iv) +{ + struct skcipher_walk walk; + u32 state[16]; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + crypto_chacha_init(state, ctx, iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + + kernel_neon_begin(); + chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes, ctx->nrounds); + kernel_neon_end(); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + return err; +} + +static int chacha_neon(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd()) + return crypto_chacha_crypt(req); + + return chacha_neon_stream_xor(req, ctx, req->iv); +} + +static int xchacha_neon(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct chacha_ctx subctx; + u32 state[16]; + u8 real_iv[16]; + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd()) + return crypto_xchacha_crypt(req); + + 
crypto_chacha_init(state, ctx, req->iv); + + kernel_neon_begin(); + hchacha_block_neon(state, subctx.key, ctx->nrounds); + kernel_neon_end(); + subctx.nrounds = ctx->nrounds; + + memcpy(&real_iv[0], req->iv + 24, 8); + memcpy(&real_iv[8], req->iv + 16, 8); + return chacha_neon_stream_xor(req, &subctx, real_iv); +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 4 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = chacha_neon, + .decrypt = chacha_neon, + }, { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 4 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = xchacha_neon, + .decrypt = xchacha_neon, + }, { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 4 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha12_setkey, + .encrypt = xchacha_neon, + .decrypt = xchacha_neon, + } +}; + +static int __init chacha_simd_mod_init(void) +{ + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + + return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); +} + +static void __exit chacha_simd_mod_fini(void) +{ + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); +} + +module_init(chacha_simd_mod_init); +module_exit(chacha_simd_mod_fini); + +MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-neon"); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-neon"); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-neon"); diff --git a/arch/arm/crypto/chacha20-neon-glue.c b/arch/arm/crypto/chacha20-neon-glue.c deleted file mode 100644 index 59a7be08e80c..000000000000 --- a/arch/arm/crypto/chacha20-neon-glue.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions - * - * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Based on: - * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code - * - * Copyright (C) 2015 Martin Willi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <crypto/algapi.h> -#include <crypto/chacha20.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> - -asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src); -asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src); - -static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes) -{ - u8 buf[CHACHA20_BLOCK_SIZE]; - - while (bytes >= CHACHA20_BLOCK_SIZE * 4) { - chacha20_4block_xor_neon(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE * 4; - src += CHACHA20_BLOCK_SIZE * 4; - dst += CHACHA20_BLOCK_SIZE * 4; - state[12] += 4; - } - while (bytes >= CHACHA20_BLOCK_SIZE) { - chacha20_block_xor_neon(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE; - src += CHACHA20_BLOCK_SIZE; - dst += CHACHA20_BLOCK_SIZE; - state[12]++; - } - if (bytes) { - memcpy(buf, src, bytes); - chacha20_block_xor_neon(state, buf, buf); - memcpy(dst, buf, bytes); - } -} - -static int chacha20_neon(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - u32 state[16]; - int err; - - if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd()) - return crypto_chacha20_crypt(req); - - err = skcipher_walk_virt(&walk, req, true); - - crypto_chacha20_init(state, ctx, walk.iv); - - kernel_neon_begin(); - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - kernel_neon_end(); - - return err; -} - -static struct skcipher_alg alg = { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha20_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA20_KEY_SIZE, - .max_keysize = CHACHA20_KEY_SIZE, - .ivsize = CHACHA20_IV_SIZE, - .chunksize = CHACHA20_BLOCK_SIZE, - .walksize = 4 * CHACHA20_BLOCK_SIZE, - .setkey = crypto_chacha20_setkey, - .encrypt = chacha20_neon, - .decrypt = chacha20_neon, -}; - -static int __init chacha20_simd_mod_init(void) -{ - if (!(elf_hwcap & HWCAP_NEON)) - return -ENODEV; - - return crypto_register_skcipher(&alg); -} - -static void __exit chacha20_simd_mod_fini(void) -{ - crypto_unregister_skcipher(&alg); -} - -module_init(chacha20_simd_mod_init); -module_exit(chacha20_simd_mod_fini); - -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); diff --git a/arch/arm/crypto/nh-neon-core.S b/arch/arm/crypto/nh-neon-core.S new file mode 100644 index 000000000000..434d80ab531c --- /dev/null +++ b/arch/arm/crypto/nh-neon-core.S @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NH - ε-almost-universal hash function, NEON accelerated version + * + * Copyright 2018 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + + .text + .fpu neon + + KEY .req r0 + MESSAGE .req r1 + MESSAGE_LEN .req r2 + HASH .req r3 + + PASS0_SUMS .req q0 + PASS0_SUM_A .req d0 + PASS0_SUM_B .req d1 + PASS1_SUMS .req q1 + PASS1_SUM_A .req d2 + PASS1_SUM_B .req d3 + PASS2_SUMS .req q2 + PASS2_SUM_A .req d4 + PASS2_SUM_B .req d5 + PASS3_SUMS .req q3 + PASS3_SUM_A .req 
d6 + PASS3_SUM_B .req d7 + K0 .req q4 + K1 .req q5 + K2 .req q6 + K3 .req q7 + T0 .req q8 + T0_L .req d16 + T0_H .req d17 + T1 .req q9 + T1_L .req d18 + T1_H .req d19 + T2 .req q10 + T2_L .req d20 + T2_H .req d21 + T3 .req q11 + T3_L .req d22 + T3_H .req d23 + +.macro _nh_stride k0, k1, k2, k3 + + // Load next message stride + vld1.8 {T3}, [MESSAGE]! + + // Load next key stride + vld1.32 {\k3}, [KEY]! + + // Add message words to key words + vadd.u32 T0, T3, \k0 + vadd.u32 T1, T3, \k1 + vadd.u32 T2, T3, \k2 + vadd.u32 T3, T3, \k3 + + // Multiply 32x32 => 64 and accumulate + vmlal.u32 PASS0_SUMS, T0_L, T0_H + vmlal.u32 PASS1_SUMS, T1_L, T1_H + vmlal.u32 PASS2_SUMS, T2_L, T2_H + vmlal.u32 PASS3_SUMS, T3_L, T3_H +.endm + +/* + * void nh_neon(const u32 *key, const u8 *message, size_t message_len, + * u8 hash[NH_HASH_BYTES]) + * + * It's guaranteed that message_len % 16 == 0. + */ +ENTRY(nh_neon) + + vld1.32 {K0,K1}, [KEY]! + vmov.u64 PASS0_SUMS, #0 + vmov.u64 PASS1_SUMS, #0 + vld1.32 {K2}, [KEY]! + vmov.u64 PASS2_SUMS, #0 + vmov.u64 PASS3_SUMS, #0 + + subs MESSAGE_LEN, MESSAGE_LEN, #64 + blt .Lloop4_done +.Lloop4: + _nh_stride K0, K1, K2, K3 + _nh_stride K1, K2, K3, K0 + _nh_stride K2, K3, K0, K1 + _nh_stride K3, K0, K1, K2 + subs MESSAGE_LEN, MESSAGE_LEN, #64 + bge .Lloop4 + +.Lloop4_done: + ands MESSAGE_LEN, MESSAGE_LEN, #63 + beq .Ldone + _nh_stride K0, K1, K2, K3 + + subs MESSAGE_LEN, MESSAGE_LEN, #16 + beq .Ldone + _nh_stride K1, K2, K3, K0 + + subs MESSAGE_LEN, MESSAGE_LEN, #16 + beq .Ldone + _nh_stride K2, K3, K0, K1 + +.Ldone: + // Sum the accumulators for each pass, then store the sums to 'hash' + vadd.u64 T0_L, PASS0_SUM_A, PASS0_SUM_B + vadd.u64 T0_H, PASS1_SUM_A, PASS1_SUM_B + vadd.u64 T1_L, PASS2_SUM_A, PASS2_SUM_B + vadd.u64 T1_H, PASS3_SUM_A, PASS3_SUM_B + vst1.8 {T0-T1}, [HASH] + bx lr +ENDPROC(nh_neon) diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c new file mode 100644 index 000000000000..49aae87cb2bc --- /dev/null +++ b/arch/arm/crypto/nhpoly1305-neon-glue.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum + * (NEON accelerated version) + * + * Copyright 2018 Google LLC + */ + +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/internal/hash.h> +#include <crypto/nhpoly1305.h> +#include <linux/module.h> + +asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len, + u8 hash[NH_HASH_BYTES]); + +/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ +static void _nh_neon(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]) +{ + nh_neon(key, message, message_len, (u8 *)hash); +} + +static int nhpoly1305_neon_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + if (srclen < 64 || !may_use_simd()) + return crypto_nhpoly1305_update(desc, src, srclen); + + do { + unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + + kernel_neon_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); + kernel_neon_end(); + src += n; + srclen -= n; + } while (srclen); + return 0; +} + +static struct shash_alg nhpoly1305_alg = { + .base.cra_name = "nhpoly1305", + .base.cra_driver_name = "nhpoly1305-neon", + .base.cra_priority = 200, + .base.cra_ctxsize = sizeof(struct nhpoly1305_key), + .base.cra_module = THIS_MODULE, + .digestsize = POLY1305_DIGEST_SIZE, + .init = crypto_nhpoly1305_init, + .update = nhpoly1305_neon_update, + .final = crypto_nhpoly1305_final, + 
.setkey = crypto_nhpoly1305_setkey, + .descsize = sizeof(struct nhpoly1305_state), +}; + +static int __init nhpoly1305_mod_init(void) +{ + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + + return crypto_register_shash(&nhpoly1305_alg); +} + +static void __exit nhpoly1305_mod_exit(void) +{ + crypto_unregister_shash(&nhpoly1305_alg); +} + +module_init(nhpoly1305_mod_init); +module_exit(nhpoly1305_mod_exit); + +MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("nhpoly1305"); +MODULE_ALIAS_CRYPTO("nhpoly1305-neon"); diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 6821f1249300..772f48ef84b7 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -9,8 +9,6 @@ #include <linux/dma-debug.h> #include <linux/kref.h> -#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) - struct dma_iommu_mapping { /* iommu specific data */ struct iommu_domain *domain; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 965b7c846ecb..31d3b96f0f4b 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops; + return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL; } #ifdef __arch_page_to_dma diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 231e87ad45d5..35491af87985 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -23,6 +23,10 @@ #define ARM_EXIT_WITH_ABORT_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT)) +#define ARM_EXCEPTION_IS_TRAP(x) \ + (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \ + ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \ + ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC) #define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)) #define ARM_EXCEPTION_RESET 0 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 5ca5d9af0c26..ca56537b61bc 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -225,7 +225,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); @@ -285,7 +285,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -static inline bool kvm_arch_check_sve_has_vhe(void) { return true; } +static inline bool kvm_arch_requires_vhe(void) { return false; } static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} @@ -296,11 +296,6 @@ static inline void kvm_arm_init_debug(void) {} static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} static inline void 
kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} -static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, - struct kvm_run *run) -{ - return false; -} int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 1098ffc3d54b..3a875fc1b63c 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -82,6 +82,67 @@ void kvm_clear_hyp_idmap(void); #define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE) #define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; }) +#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot) +#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot) +#define kvm_pfn_pud(pfn, prot) (__pud(0)) + +#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; }) + + +#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd) +/* No support for pud hugepages */ +#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; }) + +/* + * The following kvm_*pud*() functions are provided strictly to allow + * sharing code with arm64. They should never be called in practice. + */ +static inline void kvm_set_s2pud_readonly(pud_t *pud) +{ + WARN_ON(1); +} + +static inline bool kvm_s2pud_readonly(pud_t *pud) +{ + WARN_ON(1); + return false; +} + +static inline void kvm_set_pud(pud_t *pud, pud_t new_pud) +{ + WARN_ON(1); +} + +static inline pud_t kvm_s2pud_mkwrite(pud_t pud) +{ + WARN_ON(1); + return pud; +} + +static inline pud_t kvm_s2pud_mkexec(pud_t pud) +{ + WARN_ON(1); + return pud; +} + +static inline bool kvm_s2pud_exec(pud_t *pud) +{ + WARN_ON(1); + return false; +} + +static inline pud_t kvm_s2pud_mkyoung(pud_t pud) +{ + BUG(); + return pud; +} + +static inline bool kvm_s2pud_young(pud_t pud) +{ + WARN_ON(1); + return false; +} + static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { pte_val(pte) |= L_PTE_S2_RDWR; diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 9e81b7c498d8..182163b55546 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -61,4 +61,15 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val); MODULE_ARCH_VERMAGIC_ARMTHUMB \ MODULE_ARCH_VERMAGIC_P2V +#ifdef CONFIG_THUMB2_KERNEL +#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE +static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) +{ + if (ELF_ST_TYPE(sym->st_info) == STT_FUNC) + return sym->st_value & ~1; + + return sym->st_value; +} +#endif + #endif /* _ASM_ARM_MODULE_H */ diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h index ef5f7b69443e..72a20c3a0a90 100644 --- a/arch/arm/include/asm/stackprotector.h +++ b/arch/arm/include/asm/stackprotector.h @@ -6,8 +6,10 @@ * the stack frame and verifying that it hasn't been overwritten when * returning from the function. The pattern is called stack canary * and gcc expects it to be defined by a global variable called - * "__stack_chk_guard" on ARM. This unfortunately means that on SMP - * we cannot have a different canary value per task. + * "__stack_chk_guard" on ARM. This prevents SMP systems from using a + * different value for each task unless we enable a GCC plugin that + * replaces these symbol references with references to each task's own + * value. 
*/ #ifndef _ASM_STACKPROTECTOR_H @@ -16,6 +18,8 @@ #include <linux/random.h> #include <linux/version.h> +#include <asm/thread_info.h> + extern unsigned long __stack_chk_guard; /* @@ -33,7 +37,11 @@ static __always_inline void boot_init_stack_canary(void) canary ^= LINUX_VERSION_CODE; current->stack_canary = canary; +#ifndef CONFIG_STACKPROTECTOR_PER_TASK __stack_chk_guard = current->stack_canary; +#else + current_thread_info()->stack_canary = current->stack_canary; +#endif } #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index f6a7ea805232..c4b1d4fb1797 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -68,4 +68,12 @@ stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) #define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) #define stage2_pud_table_empty(kvm, pudp) false +static inline bool kvm_stage2_has_pud(struct kvm *kvm) +{ + return false; +} + +#define S2_PMD_MASK PMD_MASK +#define S2_PMD_SIZE PMD_SIZE + #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 8f55dc520a3e..286eb61c632b 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -53,6 +53,9 @@ struct thread_info { struct task_struct *task; /* main task structure */ __u32 cpu; /* cpu */ __u32 cpu_domain; /* cpu domain */ +#ifdef CONFIG_STACKPROTECTOR_PER_TASK + unsigned long stack_canary; +#endif struct cpu_context_save cpu_context; /* cpu context */ __u32 syscall; /* syscall number */ __u8 used_cp[16]; /* thread used copro */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 3968d6c22455..28b27104ac0c 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -79,6 +79,10 @@ int main(void) #ifdef CONFIG_CRUNCH DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate)); #endif +#ifdef CONFIG_STACKPROTECTOR_PER_TASK + DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary)); +#endif + DEFINE(THREAD_SZ_ORDER, THREAD_SIZE_ORDER); BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 82ab015bf42b..16601d1442d1 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -39,7 +39,7 @@ #include <asm/tls.h> #include <asm/vdso.h> -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); @@ -267,6 +267,10 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, thread_notify(THREAD_NOTIFY_COPY, thread); +#ifdef CONFIG_STACKPROTECTOR_PER_TASK + thread->stack_canary = p->stack_canary; +#endif + return 0; } diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index cb094e55dc5f..222c1635bc7a 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -602,8 +602,8 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, } } else { /* If access function fails, it should complain. 
*/ - kvm_err("Unsupported guest CP15 access at: %08lx\n", - *vcpu_pc(vcpu)); + kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_cp_instr(params); kvm_inject_undefined(vcpu); } diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 8143756ff38b..09e439d4abf5 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -794,9 +794,9 @@ static __init void dm365_evm_init(void) /* maybe setup mmc1/etc ... _after_ mmc0 */ evm_init_cpld(); -#ifdef CONFIG_SND_DM365_AIC3X_CODEC +#ifdef CONFIG_SND_SOC_DM365_AIC3X_CODEC dm365_init_asp(); -#elif defined(CONFIG_SND_DM365_VOICE_CODEC) +#elif defined(CONFIG_SND_SOC_DM365_VOICE_CODEC) dm365_init_vc(); #endif dm365_init_rtc(); diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index e8ccf51c6f29..a7e9c6d19fb5 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile @@ -8,7 +8,7 @@ obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \ serial.o devices.o dma.o fb.o obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o timer.o -ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) +ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),) obj-y += mcbsp.o endif diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 17886744dbe6..691a8da13fac 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -296,23 +296,13 @@ struct modem_private_data { static struct modem_private_data modem_priv; -static struct resource ams_delta_nand_resources[] = { - [0] = { - .start = OMAP1_MPUIO_BASE, - .end = OMAP1_MPUIO_BASE + - OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1, - .flags = IORESOURCE_MEM, - }, -}; - static struct platform_device ams_delta_nand_device = { .name = "ams-delta-nand", .id = -1, - .num_resources = ARRAY_SIZE(ams_delta_nand_resources), - .resource = ams_delta_nand_resources, }; -#define OMAP_GPIO_LABEL "gpio-0-15" +#define OMAP_GPIO_LABEL "gpio-0-15" +#define OMAP_MPUIO_LABEL "mpuio" static struct gpiod_lookup_table ams_delta_nand_gpio_table = { .table = { @@ -324,6 +314,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = { GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0), GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0), GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 1, "data", 1, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 2, "data", 2, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 3, "data", 3, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 4, "data", 4, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 5, "data", 5, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 6, "data", 6, 0), + GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 7, "data", 7, 0), { }, }, }; diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 01377c292db4..899c60fac159 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common) -ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) +ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),) obj-y += mcbsp.o endif diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 9fec5f84bf77..8a5b6ed4ec36 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c 
@@ -524,7 +524,7 @@ void omap_auxdata_legacy_init(struct device *dev) dev->platform_data = &twl_gpio_auxdata; } -#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP) +#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) static struct omap_mcbsp_platform_data mcbsp_pdata; static void __init omap3_mcbsp_init(void) { @@ -572,7 +572,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0", &am35xx_emac_pdata), /* McBSP modules with sidetone core */ -#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP) +#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata), OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49024000, "49024000.mcbsp", &mcbsp_pdata), #endif diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index 93d1124d21c2..bf2b0cfc86df 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c @@ -288,8 +288,20 @@ static struct platform_device palmld_ide_device = { .id = -1, }; +static struct gpiod_lookup_table palmld_ide_gpio_table = { + .dev_id = "pata_palmld", + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMLD_IDE_PWEN, + "power", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMLD_IDE_RESET, + "reset", GPIO_ACTIVE_LOW), + { }, + }, +}; + static void __init palmld_ide_init(void) { + gpiod_add_lookup_table(&palmld_ide_gpio_table); platform_device_register(&palmld_ide_device); } #else diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c index 5aa472892465..76c4855a03bc 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c @@ -194,8 +194,8 @@ static struct wm8994_pdata wm8994_pdata = { 0x3, /* IRQ out, active high, CMOS */ }, .ldo = { - { .enable = S3C64XX_GPN(6), .init_data = &wm8994_ldo1, }, - { .enable = S3C64XX_GPN(4), .init_data = &wm8994_ldo2, }, + { .init_data = &wm8994_ldo1, }, + { .init_data = &wm8994_ldo2, }, }, }; @@ -203,6 +203,18 @@ static const struct i2c_board_info wm1277_devs[] = { { I2C_BOARD_INFO("wm8958", 0x1a), /* WM8958 is the superset */ .platform_data = &wm8994_pdata, .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, + .dev_name = "wm8958", + }, +}; + +static struct gpiod_lookup_table wm8994_gpiod_table = { + .dev_id = "i2c-wm8958", /* I2C device name */ + .table = { + GPIO_LOOKUP("GPION", 6, + "wlf,ldo1ena", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("GPION", 4, + "wlf,ldo2ena", GPIO_ACTIVE_HIGH), + { }, }, }; @@ -381,6 +393,7 @@ static int wlf_gf_module_probe(struct i2c_client *i2c, gpiod_add_lookup_table(&wm5102_reva_gpiod_table); gpiod_add_lookup_table(&wm5102_gpiod_table); + gpiod_add_lookup_table(&wm8994_gpiod_table); if (i < ARRAY_SIZE(gf_mods)) { dev_info(&i2c->dev, "%s revision %d\n", diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 712416ecd8e6..f304b10e23a4 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -22,7 +22,7 @@ #include "dma.h" /* - * dma_direct_ops is used if + * The generic direct mapping code is used if * - MMU/MPU is off * - cpu is v7m w/o cache support * - device is coherent @@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = { }; EXPORT_SYMBOL(arm_nommu_dma_ops); -static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent) -{ - return coherent ? 
&dma_direct_ops : &arm_nommu_dma_ops; -} - void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - const struct dma_map_ops *dma_ops; - if (IS_ENABLED(CONFIG_CPU_V7M)) { /* * Cache support for v7m is optional, so can be treated as @@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true; } - dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent); - - set_dma_ops(dev, dma_ops); + if (!dev->archdata.dma_coherent) + set_dma_ops(dev, &arm_nommu_dma_ops); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 78de138aa66d..f1e2922e447c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == ARM_MAPPING_ERROR; -} - const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, @@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = { .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_dma_ops); @@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp &= ~(__GFP_COMP); args.gfp = gfp; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; allowblock = gfpflags_allow_blocking(gfp); cma = allowblock ? 
dev_get_cma_area(dev) : false; @@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (i == mapping->nr_bitmaps) { if (extend_iommu_mapping(mapping)) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } start = bitmap_find_next_zero_area(mapping->bitmaps[i], @@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (start > mapping->bits) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } bitmap_set(mapping->bitmaps[i], start, count); @@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, int i; dma_addr = __alloc_iova(mapping, size); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; iova = dma_addr; @@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, fail: iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __free_iova(mapping, dma_addr, size); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) @@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, return NULL; *handle = __iommu_create_mapping(dev, &page, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_mapping; return addr; @@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; void *addr = NULL; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; size = PAGE_ALIGN(size); if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) @@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = __iommu_create_mapping(dev, pages, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_buffer; if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) @@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, int prot; size = PAGE_ALIGN(size); - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; iova_base = iova = __alloc_iova(mapping, size); - if (iova == ARM_MAPPING_ERROR) + if (iova == DMA_MAPPING_ERROR) return -ENOMEM; for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { @@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, for (i = 1; i < nents; i++) { s = sg_next(s); - s->dma_address = ARM_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { @@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs); @@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /** @@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, size_t len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == 
DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; @@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /** @@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; @@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ea2ab0330e3a..ff9291872372 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -5,7 +5,7 @@ config ARM64 select ACPI_GTDT if ACPI select ACPI_IORT if ACPI select ACPI_REDUCED_HARDWARE_ONLY if ACPI - select ACPI_MCFG if ACPI + select ACPI_MCFG if (ACPI && PCI) select ACPI_SPCR_TABLE if ACPI select ACPI_PPTT if ACPI select ARCH_CLOCKSOURCE_DATA @@ -23,7 +23,6 @@ config ARM64 select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -81,7 +80,7 @@ config ARM64 select CPU_PM if (SUSPEND || CPU_IDLE) select CRC32 select DCACHE_WORD_ACCESS - select DMA_DIRECT_OPS + select DMA_DIRECT_REMAP select EDAC_SUPPORT select FRAME_POINTER select GENERIC_ALLOCATOR @@ -163,7 +162,7 @@ config ARM64 select OF select OF_EARLY_FLATTREE select OF_RESERVED_MEM - select PCI_ECAM if ACPI + select PCI_ECAM if (ACPI && PCI) select POWER_RESET select POWER_SUPPLY select REFCOUNT_FULL @@ -261,6 +260,9 @@ config ZONE_DMA32 config HAVE_GENERIC_GUP def_bool y +config ARCH_ENABLE_MEMORY_HOTPLUG + def_bool y + config SMP def_bool y @@ -274,7 +276,7 @@ config PGTABLE_LEVELS int default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 - default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 + default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 @@ -313,9 +315,13 @@ menu "Kernel Features" menu "ARM errata workarounds via the alternatives framework" +config ARM64_WORKAROUND_CLEAN_CACHE + def_bool n + config ARM64_ERRATUM_826319 bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted" default y + select ARM64_WORKAROUND_CLEAN_CACHE help This option adds an alternative code sequence to work around ARM erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or @@ -337,6 +343,7 @@ config ARM64_ERRATUM_826319 config ARM64_ERRATUM_827319 bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect" default y + select ARM64_WORKAROUND_CLEAN_CACHE help This option adds an alternative code sequence to work around ARM erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI @@ -358,6 +365,7 @@ config ARM64_ERRATUM_827319 config ARM64_ERRATUM_824069 bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop" default y + select ARM64_WORKAROUND_CLEAN_CACHE help This option adds an alternative code sequence to work 
around ARM erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected @@ -380,6 +388,7 @@ config ARM64_ERRATUM_824069 config ARM64_ERRATUM_819472 bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption" default y + select ARM64_WORKAROUND_CLEAN_CACHE help This option adds an alternative code sequence to work around ARM erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache @@ -497,6 +506,18 @@ config ARM64_ERRATUM_1188873 If unsure, say Y. +config ARM64_ERRATUM_1165522 + bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" + default y + help + This option adds a workaround for ARM Cortex-A76 erratum 1165522. + + Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end up with + corrupted TLBs by speculating an AT instruction during a guest + context switch. + + If unsure, say Y. + config ARM64_ERRATUM_1286807 bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation" default y @@ -700,15 +721,43 @@ config ARM64_VA_BITS_47 config ARM64_VA_BITS_48 bool "48-bit" +config ARM64_USER_VA_BITS_52 + bool "52-bit (user)" + depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN) + help + Enable 52-bit virtual addressing for userspace when explicitly + requested via a hint to mmap(). The kernel will continue to + use 48-bit virtual addresses for its own mappings. + + NOTE: Enabling 52-bit virtual addressing in conjunction with + ARMv8.3 Pointer Authentication will result in the PAC being + reduced from 7 bits to 3 bits, which may have a significant + impact on its susceptibility to brute-force attacks. + + If unsure, select 48-bit virtual addressing instead. + endchoice +config ARM64_FORCE_52BIT + bool "Force 52-bit virtual addresses for userspace" + depends on ARM64_USER_VA_BITS_52 && EXPERT + help + For systems with 52-bit userspace VAs enabled, the kernel will attempt + to maintain compatibility with older software by providing 48-bit VAs + unless a hint is supplied to mmap(). + + This configuration option disables the 48-bit compatibility logic, and + forces all userspace addresses to be 52-bit on HW that supports it. One + should only enable this configuration option for stress testing userspace + memory management code. If unsure, say N here. + config ARM64_VA_BITS int default 36 if ARM64_VA_BITS_36 default 39 if ARM64_VA_BITS_39 default 42 if ARM64_VA_BITS_42 default 47 if ARM64_VA_BITS_47 - default 48 if ARM64_VA_BITS_48 + default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52 choice prompt "Physical address space size" @@ -883,6 +932,39 @@ config KEXEC but it is independent of the system firmware. And like a reboot you can start any kernel with it, not just Linux. +config KEXEC_FILE + bool "kexec file based system call" + select KEXEC_CORE + help + This is a new version of the kexec system call. This system call is + file based and takes file descriptors as system call arguments + for the kernel and initramfs, as opposed to the list of segments + accepted by the previous system call. + +config KEXEC_VERIFY_SIG + bool "Verify kernel signature during kexec_file_load() syscall" + depends on KEXEC_FILE + help + Select this option to verify the signature of the loaded kernel + image. If configured, any attempt to load an image without + a valid signature will fail.
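(As an aside, and not part of the patch itself: the file-based interface described above is easiest to see from userspace. Below is a minimal, hypothetical sketch of a kexec_file_load(2) caller; the image paths and command line are placeholders, the caller needs CAP_SYS_BOOT, and it assumes the libc headers expose __NR_kexec_file_load.)

	/* Minimal sketch: load a new kernel via the fd-based syscall. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int kernel_fd = open("/boot/Image", O_RDONLY);      /* placeholder path */
		int initrd_fd = open("/boot/initrd.img", O_RDONLY); /* placeholder path */
		const char cmdline[] = "console=ttyAMA0 root=/dev/vda1";

		if (kernel_fd < 0 || initrd_fd < 0)
			return 1;

		/* cmdline_len must include the terminating NUL byte. */
		if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
			    sizeof(cmdline), cmdline, 0UL) != 0) {
			perror("kexec_file_load");
			return 1;
		}
		return 0;
	}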
+ + In addition to that option, you need to enable signature + verification for the corresponding kernel image type being + loaded in order for this to work. + +config KEXEC_IMAGE_VERIFY_SIG + bool "Enable Image signature verification support" + default y + depends on KEXEC_VERIFY_SIG + depends on EFI && SIGNED_PE_FILE_VERIFICATION + help + Enable Image signature verification support. + +comment "Support for PE file signature verification disabled" + depends on KEXEC_VERIFY_SIG + depends on !EFI || !SIGNED_PE_FILE_VERIFICATION + config CRASH_DUMP bool "Build kdump crash kernel" help @@ -983,6 +1065,20 @@ config ARM64_SSBD If unsure, say Y. +config RODATA_FULL_DEFAULT_ENABLED + bool "Apply r/o permissions of VM areas also to their linear aliases" + default y + help + Apply read-only attributes of VM areas to the linear alias of + the backing pages as well. This prevents code or read-only data + from being modified (inadvertently or intentionally) via another + mapping of the same memory page. This additional enhancement can + be turned off at runtime by passing rodata=[off|on] (and turned on + with rodata=full if this option is set to 'n') + + This requires the linear region to be mapped down to pages, + which may adversely affect performance in some cases. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -1188,6 +1284,29 @@ config ARM64_CNP endmenu +menu "ARMv8.3 architectural features" + +config ARM64_PTR_AUTH + bool "Enable support for pointer authentication" + default y + help + Pointer authentication (part of the ARMv8.3 Extensions) provides + instructions for signing and authenticating pointers against secret + keys, which can be used to mitigate Return Oriented Programming (ROP) + and other attacks. + + This option enables these instructions at EL0 (i.e. for userspace). + + Choosing this option will cause the kernel to initialise secret keys + for each process at exec() time, with these keys being + context-switched along with the process. + + The feature is detected at runtime. If the feature is not present in + hardware it will not be advertised to userspace nor will it be + enabled. + +endmenu + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y @@ -1272,6 +1391,13 @@ config RANDOMIZE_MODULE_REGION_FULL a limited range that contains the [_stext, _etext] interval of the core kernel, so branch relocations are always in range. +config CC_HAVE_STACKPROTECTOR_SYSREG + def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0) + +config STACKPROTECTOR_PER_TASK + def_bool y + depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG + endmenu menu "Boot options" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 6cb9fc7e9382..398bdb81a900 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour # for relative relocs, since this leads to better Image compression # with the relocation offsets always being zero. 
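(A note on the CC_HAVE_STACKPROTECTOR_SYSREG/STACKPROTECTOR_PER_TASK options added above: the Makefile hunk below extracts the canary offset from the generated asm-offsets.h and feeds it back to the compiler. For illustration only, with a hypothetical offset value, a generated line such as

	#define TSK_STACK_CANARY 888 /* offsetof(struct task_struct, stack_canary) */

makes the recipe expand to

	-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=888

so each stack-canary load becomes a load through sp_el0, which holds the current task_struct pointer at EL1; every task then gets its own canary instead of sharing the global __stack_chk_guard.)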
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ +LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \ $(call ld-option, --no-apply-dynamic-relocs) endif @@ -56,6 +56,16 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) +ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) +prepare: stack_protector_prepare +stack_protector_prepare: prepare0 + $(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg \ + -mstack-protector-guard-reg=sp_el0 \ + -mstack-protector-guard-offset=$(shell \ + awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \ + include/generated/asm-offsets.h)) +endif + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index a5606823ed4d..d9a523ecdd83 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -101,11 +101,16 @@ config CRYPTO_AES_ARM64_NEON_BLK select CRYPTO_SIMD config CRYPTO_CHACHA20_NEON - tristate "NEON accelerated ChaCha20 symmetric cipher" + tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions" depends on KERNEL_MODE_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 +config CRYPTO_NHPOLY1305_NEON + tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)" + depends on KERNEL_MODE_NEON + select CRYPTO_NHPOLY1305 + config CRYPTO_AES_ARM64_BS tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm" depends on KERNEL_MODE_NEON diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index f476fede09ba..a4ffd9fe3265 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -50,8 +50,11 @@ sha256-arm64-y := sha256-glue.o sha256-core.o obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o sha512-arm64-y := sha512-glue.o sha512-core.o -obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o -chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o +chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o + +obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o +nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o diff --git a/arch/arm64/crypto/chacha20-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index 13c85e272c2a..021bb9e9784b 100644 --- a/arch/arm64/crypto/chacha20-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -1,13 +1,13 @@ /* - * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions + * ChaCha/XChaCha NEON helper functions * - * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Based on: + * Originally based on: * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions * * Copyright (C) 2015 Martin Willi @@ -19,29 +19,27 @@ */ #include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/cache.h> .text .align 6 -ENTRY(chacha20_block_xor_neon) - // x0: Input state matrix, s - // x1: 1 data block output, o - // x2: 1 data block input, i - - // - // This function encrypts one ChaCha20 block by loading the state matrix - // in four NEON registers. 
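(For reference while reading the NEON code that follows, here is the standard ChaCha quarter-round from RFC 7539 in portable C, added as an illustration rather than taken from the patch. The assembly computes four of these at a time: rev32 and the tbl/ROT8 table implement the 16- and 8-bit rotations, shl+sri pairs implement the 12- and 7-bit ones, and subtracting 2 from w3 per double round is what lets the same permutation serve any even round count, e.g. 20 for ChaCha20 and 12 for XChaCha12.)

	#include <stdint.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	/* One quarter-round over four words of the 16-word ChaCha state. */
	static void chacha_quarterround(uint32_t x[16], int a, int b, int c, int d)
	{
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 16);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 12);
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 8);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 7);
	}

	/* One double round: four column rounds, then four diagonal rounds. */
	static void chacha_doubleround(uint32_t x[16])
	{
		chacha_quarterround(x, 0, 4,  8, 12);
		chacha_quarterround(x, 1, 5,  9, 13);
		chacha_quarterround(x, 2, 6, 10, 14);
		chacha_quarterround(x, 3, 7, 11, 15);
		chacha_quarterround(x, 0, 5, 10, 15);
		chacha_quarterround(x, 1, 6, 11, 12);
		chacha_quarterround(x, 2, 7,  8, 13);
		chacha_quarterround(x, 3, 4,  9, 14);
	}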
It performs matrix operation on four words in - // parallel, but requires shuffling to rearrange the words after each - // round. - // - - // x0..3 = s0..3 - adr x3, ROT8 - ld1 {v0.4s-v3.4s}, [x0] - ld1 {v8.4s-v11.4s}, [x0] - ld1 {v12.4s}, [x3] +/* + * chacha_permute - permute one block + * + * Permute one 64-byte block where the state matrix is stored in the four NEON + * registers v0-v3. It performs matrix operations on four words in parallel, + * but requires shuffling to rearrange the words after each round. + * + * The round count is given in w3. + * + * Clobbers: w3, x10, v4, v12 + */ +chacha_permute: - mov x3, #10 + adr_l x10, ROT8 + ld1 {v12.4s}, [x10] .Ldoubleround: // x0 += x1, x3 = rotl32(x3 ^ x0, 16) @@ -102,9 +100,27 @@ ENTRY(chacha20_block_xor_neon) // x3 = shuffle32(x3, MASK(0, 3, 2, 1)) ext v3.16b, v3.16b, v3.16b, #4 - subs x3, x3, #1 + subs w3, w3, #2 b.ne .Ldoubleround + ret +ENDPROC(chacha_permute) + +ENTRY(chacha_block_xor_neon) + // x0: Input state matrix, s + // x1: 1 data block output, o + // x2: 1 data block input, i + // w3: nrounds + + stp x29, x30, [sp, #-16]! + mov x29, sp + + // x0..3 = s0..3 + ld1 {v0.4s-v3.4s}, [x0] + ld1 {v8.4s-v11.4s}, [x0] + + bl chacha_permute + ld1 {v4.16b-v7.16b}, [x2] // o0 = i0 ^ (x0 + s0) @@ -125,71 +141,156 @@ ENTRY(chacha20_block_xor_neon) st1 {v0.16b-v3.16b}, [x1] + ldp x29, x30, [sp], #16 ret -ENDPROC(chacha20_block_xor_neon) +ENDPROC(chacha_block_xor_neon) + +ENTRY(hchacha_block_neon) + // x0: Input state matrix, s + // x1: output (8 32-bit words) + // w2: nrounds + + stp x29, x30, [sp, #-16]! + mov x29, sp + + ld1 {v0.4s-v3.4s}, [x0] + + mov w3, w2 + bl chacha_permute + + st1 {v0.16b}, [x1], #16 + st1 {v3.16b}, [x1] + + ldp x29, x30, [sp], #16 + ret +ENDPROC(hchacha_block_neon) + + a0 .req w12 + a1 .req w13 + a2 .req w14 + a3 .req w15 + a4 .req w16 + a5 .req w17 + a6 .req w19 + a7 .req w20 + a8 .req w21 + a9 .req w22 + a10 .req w23 + a11 .req w24 + a12 .req w25 + a13 .req w26 + a14 .req w27 + a15 .req w28 .align 6 -ENTRY(chacha20_4block_xor_neon) +ENTRY(chacha_4block_xor_neon) + frame_push 10 + // x0: Input state matrix, s // x1: 4 data blocks output, o // x2: 4 data blocks input, i + // w3: nrounds + // x4: byte count + + adr_l x10, .Lpermute + and x5, x4, #63 + add x10, x10, x5 + add x11, x10, #64 // - // This function encrypts four consecutive ChaCha20 blocks by loading + // This function encrypts four consecutive ChaCha blocks by loading // the state matrix in NEON registers four times. The algorithm performs // each operation on the corresponding word of each state matrix, hence // requires no word shuffling. For final XORing step we transpose the // matrix by interleaving 32- and then 64-bit words, which allows us to // do XOR in NEON registers. // - adr x3, CTRINC // ... and ROT8 - ld1 {v30.4s-v31.4s}, [x3] + // At the same time, a fifth block is encrypted in parallel using + // scalar registers + // + adr_l x9, CTRINC // ... 
and ROT8 + ld1 {v30.4s-v31.4s}, [x9] // x0..15[0-3] = s0..3[0..3] - mov x4, x0 - ld4r { v0.4s- v3.4s}, [x4], #16 - ld4r { v4.4s- v7.4s}, [x4], #16 - ld4r { v8.4s-v11.4s}, [x4], #16 - ld4r {v12.4s-v15.4s}, [x4] - - // x12 += counter values 0-3 + add x8, x0, #16 + ld4r { v0.4s- v3.4s}, [x0] + ld4r { v4.4s- v7.4s}, [x8], #16 + ld4r { v8.4s-v11.4s}, [x8], #16 + ld4r {v12.4s-v15.4s}, [x8] + + mov a0, v0.s[0] + mov a1, v1.s[0] + mov a2, v2.s[0] + mov a3, v3.s[0] + mov a4, v4.s[0] + mov a5, v5.s[0] + mov a6, v6.s[0] + mov a7, v7.s[0] + mov a8, v8.s[0] + mov a9, v9.s[0] + mov a10, v10.s[0] + mov a11, v11.s[0] + mov a12, v12.s[0] + mov a13, v13.s[0] + mov a14, v14.s[0] + mov a15, v15.s[0] + + // x12 += counter values 1-4 add v12.4s, v12.4s, v30.4s - mov x3, #10 - .Ldoubleround4: // x0 += x4, x12 = rotl32(x12 ^ x0, 16) // x1 += x5, x13 = rotl32(x13 ^ x1, 16) // x2 += x6, x14 = rotl32(x14 ^ x2, 16) // x3 += x7, x15 = rotl32(x15 ^ x3, 16) add v0.4s, v0.4s, v4.4s + add a0, a0, a4 add v1.4s, v1.4s, v5.4s + add a1, a1, a5 add v2.4s, v2.4s, v6.4s + add a2, a2, a6 add v3.4s, v3.4s, v7.4s + add a3, a3, a7 eor v12.16b, v12.16b, v0.16b + eor a12, a12, a0 eor v13.16b, v13.16b, v1.16b + eor a13, a13, a1 eor v14.16b, v14.16b, v2.16b + eor a14, a14, a2 eor v15.16b, v15.16b, v3.16b + eor a15, a15, a3 rev32 v12.8h, v12.8h + ror a12, a12, #16 rev32 v13.8h, v13.8h + ror a13, a13, #16 rev32 v14.8h, v14.8h + ror a14, a14, #16 rev32 v15.8h, v15.8h + ror a15, a15, #16 // x8 += x12, x4 = rotl32(x4 ^ x8, 12) // x9 += x13, x5 = rotl32(x5 ^ x9, 12) // x10 += x14, x6 = rotl32(x6 ^ x10, 12) // x11 += x15, x7 = rotl32(x7 ^ x11, 12) add v8.4s, v8.4s, v12.4s + add a8, a8, a12 add v9.4s, v9.4s, v13.4s + add a9, a9, a13 add v10.4s, v10.4s, v14.4s + add a10, a10, a14 add v11.4s, v11.4s, v15.4s + add a11, a11, a15 eor v16.16b, v4.16b, v8.16b + eor a4, a4, a8 eor v17.16b, v5.16b, v9.16b + eor a5, a5, a9 eor v18.16b, v6.16b, v10.16b + eor a6, a6, a10 eor v19.16b, v7.16b, v11.16b + eor a7, a7, a11 shl v4.4s, v16.4s, #12 shl v5.4s, v17.4s, #12 @@ -197,42 +298,66 @@ ENTRY(chacha20_4block_xor_neon) shl v7.4s, v19.4s, #12 sri v4.4s, v16.4s, #20 + ror a4, a4, #20 sri v5.4s, v17.4s, #20 + ror a5, a5, #20 sri v6.4s, v18.4s, #20 + ror a6, a6, #20 sri v7.4s, v19.4s, #20 + ror a7, a7, #20 // x0 += x4, x12 = rotl32(x12 ^ x0, 8) // x1 += x5, x13 = rotl32(x13 ^ x1, 8) // x2 += x6, x14 = rotl32(x14 ^ x2, 8) // x3 += x7, x15 = rotl32(x15 ^ x3, 8) add v0.4s, v0.4s, v4.4s + add a0, a0, a4 add v1.4s, v1.4s, v5.4s + add a1, a1, a5 add v2.4s, v2.4s, v6.4s + add a2, a2, a6 add v3.4s, v3.4s, v7.4s + add a3, a3, a7 eor v12.16b, v12.16b, v0.16b + eor a12, a12, a0 eor v13.16b, v13.16b, v1.16b + eor a13, a13, a1 eor v14.16b, v14.16b, v2.16b + eor a14, a14, a2 eor v15.16b, v15.16b, v3.16b + eor a15, a15, a3 tbl v12.16b, {v12.16b}, v31.16b + ror a12, a12, #24 tbl v13.16b, {v13.16b}, v31.16b + ror a13, a13, #24 tbl v14.16b, {v14.16b}, v31.16b + ror a14, a14, #24 tbl v15.16b, {v15.16b}, v31.16b + ror a15, a15, #24 // x8 += x12, x4 = rotl32(x4 ^ x8, 7) // x9 += x13, x5 = rotl32(x5 ^ x9, 7) // x10 += x14, x6 = rotl32(x6 ^ x10, 7) // x11 += x15, x7 = rotl32(x7 ^ x11, 7) add v8.4s, v8.4s, v12.4s + add a8, a8, a12 add v9.4s, v9.4s, v13.4s + add a9, a9, a13 add v10.4s, v10.4s, v14.4s + add a10, a10, a14 add v11.4s, v11.4s, v15.4s + add a11, a11, a15 eor v16.16b, v4.16b, v8.16b + eor a4, a4, a8 eor v17.16b, v5.16b, v9.16b + eor a5, a5, a9 eor v18.16b, v6.16b, v10.16b + eor a6, a6, a10 eor v19.16b, v7.16b, v11.16b + eor a7, a7, a11 shl v4.4s, v16.4s, #7 shl v5.4s, v17.4s, 
#7 @@ -240,42 +365,66 @@ ENTRY(chacha20_4block_xor_neon) shl v7.4s, v19.4s, #7 sri v4.4s, v16.4s, #25 + ror a4, a4, #25 sri v5.4s, v17.4s, #25 + ror a5, a5, #25 sri v6.4s, v18.4s, #25 + ror a6, a6, #25 sri v7.4s, v19.4s, #25 + ror a7, a7, #25 // x0 += x5, x15 = rotl32(x15 ^ x0, 16) // x1 += x6, x12 = rotl32(x12 ^ x1, 16) // x2 += x7, x13 = rotl32(x13 ^ x2, 16) // x3 += x4, x14 = rotl32(x14 ^ x3, 16) add v0.4s, v0.4s, v5.4s + add a0, a0, a5 add v1.4s, v1.4s, v6.4s + add a1, a1, a6 add v2.4s, v2.4s, v7.4s + add a2, a2, a7 add v3.4s, v3.4s, v4.4s + add a3, a3, a4 eor v15.16b, v15.16b, v0.16b + eor a15, a15, a0 eor v12.16b, v12.16b, v1.16b + eor a12, a12, a1 eor v13.16b, v13.16b, v2.16b + eor a13, a13, a2 eor v14.16b, v14.16b, v3.16b + eor a14, a14, a3 rev32 v15.8h, v15.8h + ror a15, a15, #16 rev32 v12.8h, v12.8h + ror a12, a12, #16 rev32 v13.8h, v13.8h + ror a13, a13, #16 rev32 v14.8h, v14.8h + ror a14, a14, #16 // x10 += x15, x5 = rotl32(x5 ^ x10, 12) // x11 += x12, x6 = rotl32(x6 ^ x11, 12) // x8 += x13, x7 = rotl32(x7 ^ x8, 12) // x9 += x14, x4 = rotl32(x4 ^ x9, 12) add v10.4s, v10.4s, v15.4s + add a10, a10, a15 add v11.4s, v11.4s, v12.4s + add a11, a11, a12 add v8.4s, v8.4s, v13.4s + add a8, a8, a13 add v9.4s, v9.4s, v14.4s + add a9, a9, a14 eor v16.16b, v5.16b, v10.16b + eor a5, a5, a10 eor v17.16b, v6.16b, v11.16b + eor a6, a6, a11 eor v18.16b, v7.16b, v8.16b + eor a7, a7, a8 eor v19.16b, v4.16b, v9.16b + eor a4, a4, a9 shl v5.4s, v16.4s, #12 shl v6.4s, v17.4s, #12 @@ -283,42 +432,66 @@ ENTRY(chacha20_4block_xor_neon) shl v4.4s, v19.4s, #12 sri v5.4s, v16.4s, #20 + ror a5, a5, #20 sri v6.4s, v17.4s, #20 + ror a6, a6, #20 sri v7.4s, v18.4s, #20 + ror a7, a7, #20 sri v4.4s, v19.4s, #20 + ror a4, a4, #20 // x0 += x5, x15 = rotl32(x15 ^ x0, 8) // x1 += x6, x12 = rotl32(x12 ^ x1, 8) // x2 += x7, x13 = rotl32(x13 ^ x2, 8) // x3 += x4, x14 = rotl32(x14 ^ x3, 8) add v0.4s, v0.4s, v5.4s + add a0, a0, a5 add v1.4s, v1.4s, v6.4s + add a1, a1, a6 add v2.4s, v2.4s, v7.4s + add a2, a2, a7 add v3.4s, v3.4s, v4.4s + add a3, a3, a4 eor v15.16b, v15.16b, v0.16b + eor a15, a15, a0 eor v12.16b, v12.16b, v1.16b + eor a12, a12, a1 eor v13.16b, v13.16b, v2.16b + eor a13, a13, a2 eor v14.16b, v14.16b, v3.16b + eor a14, a14, a3 tbl v15.16b, {v15.16b}, v31.16b + ror a15, a15, #24 tbl v12.16b, {v12.16b}, v31.16b + ror a12, a12, #24 tbl v13.16b, {v13.16b}, v31.16b + ror a13, a13, #24 tbl v14.16b, {v14.16b}, v31.16b + ror a14, a14, #24 // x10 += x15, x5 = rotl32(x5 ^ x10, 7) // x11 += x12, x6 = rotl32(x6 ^ x11, 7) // x8 += x13, x7 = rotl32(x7 ^ x8, 7) // x9 += x14, x4 = rotl32(x4 ^ x9, 7) add v10.4s, v10.4s, v15.4s + add a10, a10, a15 add v11.4s, v11.4s, v12.4s + add a11, a11, a12 add v8.4s, v8.4s, v13.4s + add a8, a8, a13 add v9.4s, v9.4s, v14.4s + add a9, a9, a14 eor v16.16b, v5.16b, v10.16b + eor a5, a5, a10 eor v17.16b, v6.16b, v11.16b + eor a6, a6, a11 eor v18.16b, v7.16b, v8.16b + eor a7, a7, a8 eor v19.16b, v4.16b, v9.16b + eor a4, a4, a9 shl v5.4s, v16.4s, #7 shl v6.4s, v17.4s, #7 @@ -326,11 +499,15 @@ ENTRY(chacha20_4block_xor_neon) shl v4.4s, v19.4s, #7 sri v5.4s, v16.4s, #25 + ror a5, a5, #25 sri v6.4s, v17.4s, #25 + ror a6, a6, #25 sri v7.4s, v18.4s, #25 + ror a7, a7, #25 sri v4.4s, v19.4s, #25 + ror a4, a4, #25 - subs x3, x3, #1 + subs w3, w3, #2 b.ne .Ldoubleround4 ld4r {v16.4s-v19.4s}, [x0], #16 @@ -344,9 +521,17 @@ ENTRY(chacha20_4block_xor_neon) // x2[0-3] += s0[2] // x3[0-3] += s0[3] add v0.4s, v0.4s, v16.4s + mov w6, v16.s[0] + mov w7, v17.s[0] add v1.4s, v1.4s, v17.4s + mov w8, v18.s[0] + mov 
w9, v19.s[0] add v2.4s, v2.4s, v18.4s + add a0, a0, w6 + add a1, a1, w7 add v3.4s, v3.4s, v19.4s + add a2, a2, w8 + add a3, a3, w9 ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] @@ -356,95 +541,304 @@ ENTRY(chacha20_4block_xor_neon) // x6[0-3] += s1[2] // x7[0-3] += s1[3] add v4.4s, v4.4s, v20.4s + mov w6, v20.s[0] + mov w7, v21.s[0] add v5.4s, v5.4s, v21.4s + mov w8, v22.s[0] + mov w9, v23.s[0] add v6.4s, v6.4s, v22.4s + add a4, a4, w6 + add a5, a5, w7 add v7.4s, v7.4s, v23.4s + add a6, a6, w8 + add a7, a7, w9 // x8[0-3] += s2[0] // x9[0-3] += s2[1] // x10[0-3] += s2[2] // x11[0-3] += s2[3] add v8.4s, v8.4s, v24.4s + mov w6, v24.s[0] + mov w7, v25.s[0] add v9.4s, v9.4s, v25.4s + mov w8, v26.s[0] + mov w9, v27.s[0] add v10.4s, v10.4s, v26.4s + add a8, a8, w6 + add a9, a9, w7 add v11.4s, v11.4s, v27.4s + add a10, a10, w8 + add a11, a11, w9 // x12[0-3] += s3[0] // x13[0-3] += s3[1] // x14[0-3] += s3[2] // x15[0-3] += s3[3] add v12.4s, v12.4s, v28.4s + mov w6, v28.s[0] + mov w7, v29.s[0] add v13.4s, v13.4s, v29.4s + mov w8, v30.s[0] + mov w9, v31.s[0] add v14.4s, v14.4s, v30.4s + add a12, a12, w6 + add a13, a13, w7 add v15.4s, v15.4s, v31.4s + add a14, a14, w8 + add a15, a15, w9 // interleave 32-bit words in state n, n+1 + ldp w6, w7, [x2], #64 zip1 v16.4s, v0.4s, v1.4s + ldp w8, w9, [x2, #-56] + eor a0, a0, w6 zip2 v17.4s, v0.4s, v1.4s + eor a1, a1, w7 zip1 v18.4s, v2.4s, v3.4s + eor a2, a2, w8 zip2 v19.4s, v2.4s, v3.4s + eor a3, a3, w9 + ldp w6, w7, [x2, #-48] zip1 v20.4s, v4.4s, v5.4s + ldp w8, w9, [x2, #-40] + eor a4, a4, w6 zip2 v21.4s, v4.4s, v5.4s + eor a5, a5, w7 zip1 v22.4s, v6.4s, v7.4s + eor a6, a6, w8 zip2 v23.4s, v6.4s, v7.4s + eor a7, a7, w9 + ldp w6, w7, [x2, #-32] zip1 v24.4s, v8.4s, v9.4s + ldp w8, w9, [x2, #-24] + eor a8, a8, w6 zip2 v25.4s, v8.4s, v9.4s + eor a9, a9, w7 zip1 v26.4s, v10.4s, v11.4s + eor a10, a10, w8 zip2 v27.4s, v10.4s, v11.4s + eor a11, a11, w9 + ldp w6, w7, [x2, #-16] zip1 v28.4s, v12.4s, v13.4s + ldp w8, w9, [x2, #-8] + eor a12, a12, w6 zip2 v29.4s, v12.4s, v13.4s + eor a13, a13, w7 zip1 v30.4s, v14.4s, v15.4s + eor a14, a14, w8 zip2 v31.4s, v14.4s, v15.4s + eor a15, a15, w9 + + mov x3, #64 + subs x5, x4, #128 + add x6, x5, x2 + csel x3, x3, xzr, ge + csel x2, x2, x6, ge // interleave 64-bit words in state n, n+2 zip1 v0.2d, v16.2d, v18.2d zip2 v4.2d, v16.2d, v18.2d + stp a0, a1, [x1], #64 zip1 v8.2d, v17.2d, v19.2d zip2 v12.2d, v17.2d, v19.2d - ld1 {v16.16b-v19.16b}, [x2], #64 + stp a2, a3, [x1, #-56] + ld1 {v16.16b-v19.16b}, [x2], x3 + + subs x6, x4, #192 + ccmp x3, xzr, #4, lt + add x7, x6, x2 + csel x3, x3, xzr, eq + csel x2, x2, x7, eq zip1 v1.2d, v20.2d, v22.2d zip2 v5.2d, v20.2d, v22.2d + stp a4, a5, [x1, #-48] zip1 v9.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d - ld1 {v20.16b-v23.16b}, [x2], #64 + stp a6, a7, [x1, #-40] + ld1 {v20.16b-v23.16b}, [x2], x3 + + subs x7, x4, #256 + ccmp x3, xzr, #4, lt + add x8, x7, x2 + csel x3, x3, xzr, eq + csel x2, x2, x8, eq zip1 v2.2d, v24.2d, v26.2d zip2 v6.2d, v24.2d, v26.2d + stp a8, a9, [x1, #-32] zip1 v10.2d, v25.2d, v27.2d zip2 v14.2d, v25.2d, v27.2d - ld1 {v24.16b-v27.16b}, [x2], #64 + stp a10, a11, [x1, #-24] + ld1 {v24.16b-v27.16b}, [x2], x3 + + subs x8, x4, #320 + ccmp x3, xzr, #4, lt + add x9, x8, x2 + csel x2, x2, x9, eq zip1 v3.2d, v28.2d, v30.2d zip2 v7.2d, v28.2d, v30.2d + stp a12, a13, [x1, #-16] zip1 v11.2d, v29.2d, v31.2d zip2 v15.2d, v29.2d, v31.2d + stp a14, a15, [x1, #-8] ld1 {v28.16b-v31.16b}, [x2] // xor with corresponding input, write to output + tbnz x5, #63, 0f eor 
v16.16b, v16.16b, v0.16b eor v17.16b, v17.16b, v1.16b eor v18.16b, v18.16b, v2.16b eor v19.16b, v19.16b, v3.16b + st1 {v16.16b-v19.16b}, [x1], #64 + cbz x5, .Lout + + tbnz x6, #63, 1f eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v5.16b - st1 {v16.16b-v19.16b}, [x1], #64 eor v22.16b, v22.16b, v6.16b eor v23.16b, v23.16b, v7.16b + st1 {v20.16b-v23.16b}, [x1], #64 + cbz x6, .Lout + + tbnz x7, #63, 2f eor v24.16b, v24.16b, v8.16b eor v25.16b, v25.16b, v9.16b - st1 {v20.16b-v23.16b}, [x1], #64 eor v26.16b, v26.16b, v10.16b eor v27.16b, v27.16b, v11.16b - eor v28.16b, v28.16b, v12.16b st1 {v24.16b-v27.16b}, [x1], #64 + cbz x7, .Lout + + tbnz x8, #63, 3f + eor v28.16b, v28.16b, v12.16b eor v29.16b, v29.16b, v13.16b eor v30.16b, v30.16b, v14.16b eor v31.16b, v31.16b, v15.16b st1 {v28.16b-v31.16b}, [x1] +.Lout: frame_pop ret -ENDPROC(chacha20_4block_xor_neon) -CTRINC: .word 0, 1, 2, 3 + // fewer than 128 bytes of in/output +0: ld1 {v8.16b}, [x10] + ld1 {v9.16b}, [x11] + movi v10.16b, #16 + sub x2, x1, #64 + add x1, x1, x5 + ld1 {v16.16b-v19.16b}, [x2] + tbl v4.16b, {v0.16b-v3.16b}, v8.16b + tbx v20.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v5.16b, {v0.16b-v3.16b}, v8.16b + tbx v21.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v6.16b, {v0.16b-v3.16b}, v8.16b + tbx v22.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v7.16b, {v0.16b-v3.16b}, v8.16b + tbx v23.16b, {v16.16b-v19.16b}, v9.16b + + eor v20.16b, v20.16b, v4.16b + eor v21.16b, v21.16b, v5.16b + eor v22.16b, v22.16b, v6.16b + eor v23.16b, v23.16b, v7.16b + st1 {v20.16b-v23.16b}, [x1] + b .Lout + + // fewer than 192 bytes of in/output +1: ld1 {v8.16b}, [x10] + ld1 {v9.16b}, [x11] + movi v10.16b, #16 + add x1, x1, x6 + tbl v0.16b, {v4.16b-v7.16b}, v8.16b + tbx v20.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v1.16b, {v4.16b-v7.16b}, v8.16b + tbx v21.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v2.16b, {v4.16b-v7.16b}, v8.16b + tbx v22.16b, {v16.16b-v19.16b}, v9.16b + add v8.16b, v8.16b, v10.16b + add v9.16b, v9.16b, v10.16b + tbl v3.16b, {v4.16b-v7.16b}, v8.16b + tbx v23.16b, {v16.16b-v19.16b}, v9.16b + + eor v20.16b, v20.16b, v0.16b + eor v21.16b, v21.16b, v1.16b + eor v22.16b, v22.16b, v2.16b + eor v23.16b, v23.16b, v3.16b + st1 {v20.16b-v23.16b}, [x1] + b .Lout + + // fewer than 256 bytes of in/output +2: ld1 {v4.16b}, [x10] + ld1 {v5.16b}, [x11] + movi v6.16b, #16 + add x1, x1, x7 + tbl v0.16b, {v8.16b-v11.16b}, v4.16b + tbx v24.16b, {v20.16b-v23.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, v6.16b + tbl v1.16b, {v8.16b-v11.16b}, v4.16b + tbx v25.16b, {v20.16b-v23.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, v6.16b + tbl v2.16b, {v8.16b-v11.16b}, v4.16b + tbx v26.16b, {v20.16b-v23.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, v6.16b + tbl v3.16b, {v8.16b-v11.16b}, v4.16b + tbx v27.16b, {v20.16b-v23.16b}, v5.16b + + eor v24.16b, v24.16b, v0.16b + eor v25.16b, v25.16b, v1.16b + eor v26.16b, v26.16b, v2.16b + eor v27.16b, v27.16b, v3.16b + st1 {v24.16b-v27.16b}, [x1] + b .Lout + + // fewer than 320 bytes of in/output +3: ld1 {v4.16b}, [x10] + ld1 {v5.16b}, [x11] + movi v6.16b, #16 + add x1, x1, x8 + tbl v0.16b, {v12.16b-v15.16b}, v4.16b + tbx v28.16b, {v24.16b-v27.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, 
v6.16b + tbl v1.16b, {v12.16b-v15.16b}, v4.16b + tbx v29.16b, {v24.16b-v27.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, v6.16b + tbl v2.16b, {v12.16b-v15.16b}, v4.16b + tbx v30.16b, {v24.16b-v27.16b}, v5.16b + add v4.16b, v4.16b, v6.16b + add v5.16b, v5.16b, v6.16b + tbl v3.16b, {v12.16b-v15.16b}, v4.16b + tbx v31.16b, {v24.16b-v27.16b}, v5.16b + + eor v28.16b, v28.16b, v0.16b + eor v29.16b, v29.16b, v1.16b + eor v30.16b, v30.16b, v2.16b + eor v31.16b, v31.16b, v3.16b + st1 {v28.16b-v31.16b}, [x1] + b .Lout +ENDPROC(chacha_4block_xor_neon) + + .section ".rodata", "a", %progbits + .align L1_CACHE_SHIFT +.Lpermute: + .set .Li, 0 + .rept 192 + .byte (.Li - 64) + .set .Li, .Li + 1 + .endr + +CTRINC: .word 1, 2, 3, 4 ROT8: .word 0x02010003, 0x06050407, 0x0a09080b, 0x0e0d0c0f diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c new file mode 100644 index 000000000000..bece1d85bd81 --- /dev/null +++ b/arch/arm64/crypto/chacha-neon-glue.c @@ -0,0 +1,198 @@ +/* + * ARM NEON accelerated ChaCha and XChaCha stream ciphers, + * including ChaCha20 (RFC7539) + * + * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on: + * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> + +asmlinkage void chacha_block_xor_neon(u32 *state, u8 *dst, const u8 *src, + int nrounds); +asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src, + int nrounds, int bytes); +asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); + +static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, + int bytes, int nrounds) +{ + while (bytes > 0) { + int l = min(bytes, CHACHA_BLOCK_SIZE * 5); + + if (l <= CHACHA_BLOCK_SIZE) { + u8 buf[CHACHA_BLOCK_SIZE]; + + memcpy(buf, src, l); + chacha_block_xor_neon(state, buf, buf, nrounds); + memcpy(dst, buf, l); + state[12] += 1; + break; + } + chacha_4block_xor_neon(state, dst, src, nrounds, l); + bytes -= CHACHA_BLOCK_SIZE * 5; + src += CHACHA_BLOCK_SIZE * 5; + dst += CHACHA_BLOCK_SIZE * 5; + state[12] += 5; + } +} + +static int chacha_neon_stream_xor(struct skcipher_request *req, + struct chacha_ctx *ctx, u8 *iv) +{ + struct skcipher_walk walk; + u32 state[16]; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + crypto_chacha_init(state, ctx, iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = rounddown(nbytes, walk.stride); + + kernel_neon_begin(); + chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes, ctx->nrounds); + kernel_neon_end(); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + return err; +} + +static int chacha_neon(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = 
crypto_skcipher_ctx(tfm); + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd()) + return crypto_chacha_crypt(req); + + return chacha_neon_stream_xor(req, ctx, req->iv); +} + +static int xchacha_neon(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct chacha_ctx subctx; + u32 state[16]; + u8 real_iv[16]; + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd()) + return crypto_xchacha_crypt(req); + + crypto_chacha_init(state, ctx, req->iv); + + kernel_neon_begin(); + hchacha_block_neon(state, subctx.key, ctx->nrounds); + kernel_neon_end(); + subctx.nrounds = ctx->nrounds; + + memcpy(&real_iv[0], req->iv + 24, 8); + memcpy(&real_iv[8], req->iv + 16, 8); + return chacha_neon_stream_xor(req, &subctx, real_iv); +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 5 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = chacha_neon, + .decrypt = chacha_neon, + }, { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 5 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = xchacha_neon, + .decrypt = xchacha_neon, + }, { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-neon", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .walksize = 5 * CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha12_setkey, + .encrypt = xchacha_neon, + .decrypt = xchacha_neon, + } +}; + +static int __init chacha_simd_mod_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + + return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); +} + +static void __exit chacha_simd_mod_fini(void) +{ + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); +} + +module_init(chacha_simd_mod_init); +module_exit(chacha_simd_mod_fini); + +MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-neon"); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-neon"); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-neon"); diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c deleted file mode 100644 index 727579c93ded..000000000000 --- a/arch/arm64/crypto/chacha20-neon-glue.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions - * - * Copyright (C) 2016 - 2017 Linaro, Ltd. 
<ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Based on: - * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code - * - * Copyright (C) 2015 Martin Willi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <crypto/algapi.h> -#include <crypto/chacha20.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> - -asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src); -asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src); - -static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes) -{ - u8 buf[CHACHA20_BLOCK_SIZE]; - - while (bytes >= CHACHA20_BLOCK_SIZE * 4) { - kernel_neon_begin(); - chacha20_4block_xor_neon(state, dst, src); - kernel_neon_end(); - bytes -= CHACHA20_BLOCK_SIZE * 4; - src += CHACHA20_BLOCK_SIZE * 4; - dst += CHACHA20_BLOCK_SIZE * 4; - state[12] += 4; - } - - if (!bytes) - return; - - kernel_neon_begin(); - while (bytes >= CHACHA20_BLOCK_SIZE) { - chacha20_block_xor_neon(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE; - src += CHACHA20_BLOCK_SIZE; - dst += CHACHA20_BLOCK_SIZE; - state[12]++; - } - if (bytes) { - memcpy(buf, src, bytes); - chacha20_block_xor_neon(state, buf, buf); - memcpy(dst, buf, bytes); - } - kernel_neon_end(); -} - -static int chacha20_neon(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - u32 state[16]; - int err; - - if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE) - return crypto_chacha20_crypt(req); - - err = skcipher_walk_virt(&walk, req, false); - - crypto_chacha20_init(state, ctx, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static struct skcipher_alg alg = { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha20_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA20_KEY_SIZE, - .max_keysize = CHACHA20_KEY_SIZE, - .ivsize = CHACHA20_IV_SIZE, - .chunksize = CHACHA20_BLOCK_SIZE, - .walksize = 4 * CHACHA20_BLOCK_SIZE, - .setkey = crypto_chacha20_setkey, - .encrypt = chacha20_neon, - .decrypt = chacha20_neon, -}; - -static int __init chacha20_simd_mod_init(void) -{ - if (!(elf_hwcap & HWCAP_ASIMD)) - return -ENODEV; - - return crypto_register_skcipher(&alg); -} - -static void __exit chacha20_simd_mod_fini(void) -{ - crypto_unregister_skcipher(&alg); -} - -module_init(chacha20_simd_mod_init); -module_exit(chacha20_simd_mod_fini); - -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); diff --git a/arch/arm64/crypto/nh-neon-core.S b/arch/arm64/crypto/nh-neon-core.S new 
file mode 100644 index 000000000000..e05570c38de7 --- /dev/null +++ b/arch/arm64/crypto/nh-neon-core.S @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NH - ε-almost-universal hash function, ARM64 NEON accelerated version + * + * Copyright 2018 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + + KEY .req x0 + MESSAGE .req x1 + MESSAGE_LEN .req x2 + HASH .req x3 + + PASS0_SUMS .req v0 + PASS1_SUMS .req v1 + PASS2_SUMS .req v2 + PASS3_SUMS .req v3 + K0 .req v4 + K1 .req v5 + K2 .req v6 + K3 .req v7 + T0 .req v8 + T1 .req v9 + T2 .req v10 + T3 .req v11 + T4 .req v12 + T5 .req v13 + T6 .req v14 + T7 .req v15 + +.macro _nh_stride k0, k1, k2, k3 + + // Load next message stride + ld1 {T3.16b}, [MESSAGE], #16 + + // Load next key stride + ld1 {\k3\().4s}, [KEY], #16 + + // Add message words to key words + add T0.4s, T3.4s, \k0\().4s + add T1.4s, T3.4s, \k1\().4s + add T2.4s, T3.4s, \k2\().4s + add T3.4s, T3.4s, \k3\().4s + + // Multiply 32x32 => 64 and accumulate + mov T4.d[0], T0.d[1] + mov T5.d[0], T1.d[1] + mov T6.d[0], T2.d[1] + mov T7.d[0], T3.d[1] + umlal PASS0_SUMS.2d, T0.2s, T4.2s + umlal PASS1_SUMS.2d, T1.2s, T5.2s + umlal PASS2_SUMS.2d, T2.2s, T6.2s + umlal PASS3_SUMS.2d, T3.2s, T7.2s +.endm + +/* + * void nh_neon(const u32 *key, const u8 *message, size_t message_len, + * u8 hash[NH_HASH_BYTES]) + * + * It's guaranteed that message_len % 16 == 0. + */ +ENTRY(nh_neon) + + ld1 {K0.4s,K1.4s}, [KEY], #32 + movi PASS0_SUMS.2d, #0 + movi PASS1_SUMS.2d, #0 + ld1 {K2.4s}, [KEY], #16 + movi PASS2_SUMS.2d, #0 + movi PASS3_SUMS.2d, #0 + + subs MESSAGE_LEN, MESSAGE_LEN, #64 + blt .Lloop4_done +.Lloop4: + _nh_stride K0, K1, K2, K3 + _nh_stride K1, K2, K3, K0 + _nh_stride K2, K3, K0, K1 + _nh_stride K3, K0, K1, K2 + subs MESSAGE_LEN, MESSAGE_LEN, #64 + bge .Lloop4 + +.Lloop4_done: + ands MESSAGE_LEN, MESSAGE_LEN, #63 + beq .Ldone + _nh_stride K0, K1, K2, K3 + + subs MESSAGE_LEN, MESSAGE_LEN, #16 + beq .Ldone + _nh_stride K1, K2, K3, K0 + + subs MESSAGE_LEN, MESSAGE_LEN, #16 + beq .Ldone + _nh_stride K2, K3, K0, K1 + +.Ldone: + // Sum the accumulators for each pass, then store the sums to 'hash' + addp T0.2d, PASS0_SUMS.2d, PASS1_SUMS.2d + addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d + st1 {T0.16b,T1.16b}, [HASH] + ret +ENDPROC(nh_neon) diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c new file mode 100644 index 000000000000..22cc32ac9448 --- /dev/null +++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum + * (ARM64 NEON accelerated version) + * + * Copyright 2018 Google LLC + */ + +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/internal/hash.h> +#include <crypto/nhpoly1305.h> +#include <linux/module.h> + +asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len, + u8 hash[NH_HASH_BYTES]); + +/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ +static void _nh_neon(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]) +{ + nh_neon(key, message, message_len, (u8 *)hash); +} + +static int nhpoly1305_neon_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + if (srclen < 64 || !may_use_simd()) + return crypto_nhpoly1305_update(desc, src, srclen); + + do { + unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + + kernel_neon_begin(); + 
crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); + kernel_neon_end(); + src += n; + srclen -= n; + } while (srclen); + return 0; +} + +static struct shash_alg nhpoly1305_alg = { + .base.cra_name = "nhpoly1305", + .base.cra_driver_name = "nhpoly1305-neon", + .base.cra_priority = 200, + .base.cra_ctxsize = sizeof(struct nhpoly1305_key), + .base.cra_module = THIS_MODULE, + .digestsize = POLY1305_DIGEST_SIZE, + .init = crypto_nhpoly1305_init, + .update = nhpoly1305_neon_update, + .final = crypto_nhpoly1305_final, + .setkey = crypto_nhpoly1305_setkey, + .descsize = sizeof(struct nhpoly1305_state), +}; + +static int __init nhpoly1305_mod_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + + return crypto_register_shash(&nhpoly1305_alg); +} + +static void __exit nhpoly1305_mod_exit(void) +{ + crypto_unregister_shash(&nhpoly1305_alg); +} + +module_init(nhpoly1305_mod_init); +module_exit(nhpoly1305_mod_exit); + +MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("nhpoly1305"); +MODULE_ALIAS_CRYPTO("nhpoly1305-neon"); diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 6cd5d77b6b44..1e17ea5c372b 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -14,7 +14,6 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += msi.h -generic-y += preempt.h generic-y += qrwlock.h generic-y += qspinlock.h generic-y += rwsem.h @@ -27,4 +26,3 @@ generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h -generic-y += xor.h diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 709208dfdc8b..2def77ec14be 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -22,12 +22,23 @@ #include <asm/tlbflush.h> /* Macros for consistency checks of the GICC subtable of MADT */ -#define ACPI_MADT_GICC_LENGTH \ - (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) + +/* + * MADT GICC minimum length refers to the MADT GICC structure table length as + * defined in the earliest ACPI version supported on arm64, ie ACPI 5.1. + * + * The efficiency_class member was added to the + * struct acpi_madt_generic_interrupt to represent the MADT GICC structure + * "Processor Power Efficiency Class" field, added in ACPI 6.0 whose offset + * is therefore used to delimit the MADT GICC structure minimum length + * appropriately. + */ +#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \ + struct acpi_madt_generic_interrupt, efficiency_class) #define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ - (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) + (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \ + (unsigned long)(entry) + (entry)->header.length > (end)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..2173ad32d550 --- /dev/null +++ b/arch/arm64/include/asm/asm-prototypes.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PROTOTYPES_H +#define __ASM_PROTOTYPES_H +/* + * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC + * for each symbol. Since commit: + * + * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm") + * + * ... 
kbuild will automatically pick these up from <asm/asm-prototypes.h> and + * feed this to genksyms when building assembly files. + */ +#include <linux/arm-smccc.h> + +#include <asm/ftrace.h> +#include <asm/page.h> +#include <asm/string.h> +#include <asm/uaccess.h> + +#include <asm-generic/asm-prototypes.h> + +long long __ashlti3(long long a, int b); +long long __ashrti3(long long a, int b); +long long __lshrti3(long long a, int b); + +#endif /* __ASM_PROTOTYPES_H */ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 6142402c2eb4..4feb6119c3c9 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -23,6 +23,8 @@ #ifndef __ASM_ASSEMBLER_H #define __ASM_ASSEMBLER_H +#include <asm-generic/export.h> + #include <asm/asm-offsets.h> #include <asm/cpufeature.h> #include <asm/debug-monitors.h> @@ -123,6 +125,19 @@ .endm /* + * Speculation barrier + */ + .macro sb +alternative_if_not ARM64_HAS_SB + dsb nsh + isb +alternative_else + SB_BARRIER_INSN + nop +alternative_endif + .endm + +/* * Sanitise a 64-bit bounded index wrt speculation, returning zero if out * of bounds. */ @@ -342,11 +357,10 @@ alternative_endif .endm /* - * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map + * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map */ - .macro tcr_set_idmap_t0sz, valreg, tmpreg - ldr_l \tmpreg, idmap_t0sz - bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH + .macro tcr_set_t0sz, valreg, t0sz + bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH .endm /* @@ -377,27 +391,33 @@ alternative_endif * size: size of the region * Corrupts: kaddr, size, tmp1, tmp2 */ + .macro __dcache_op_workaround_clean_cache, op, kaddr +alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE + dc \op, \kaddr +alternative_else + dc civac, \kaddr +alternative_endif + .endm + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 dcache_line_size \tmp1, \tmp2 add \size, \kaddr, \size sub \tmp2, \tmp1, #1 bic \kaddr, \kaddr, \tmp2 9998: - .if (\op == cvau || \op == cvac) -alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE - dc \op, \kaddr -alternative_else - dc civac, \kaddr -alternative_endif - .elseif (\op == cvap) -alternative_if ARM64_HAS_DCPOP - sys 3, c7, c12, 1, \kaddr // dc cvap -alternative_else - dc cvac, \kaddr -alternative_endif + .ifc \op, cvau + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvac + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvap + sys 3, c7, c12, 1, \kaddr // dc cvap .else dc \op, \kaddr .endif + .endif + .endif add \kaddr, \kaddr, \tmp1 cmp \kaddr, \size b.lo 9998b @@ -477,6 +497,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #else #define NOKPROBE(x) #endif + +#ifdef CONFIG_KASAN +#define EXPORT_SYMBOL_NOKASAN(name) +#else +#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name) +#endif + /* * Emit a 64-bit absolute little endian symbol reference in a way that * ensures that it will be resolved at build time, even when building a @@ -516,6 +543,29 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* + * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD. + * orr is used as it can cover the immediate value (and is idempotent). + * In future this may be nop'ed out when dealing with 52-bit kernel VAs. + * ttbr: Value of ttbr to set, modified. 
+ */ + .macro offset_ttbr1, ttbr +#ifdef CONFIG_ARM64_USER_VA_BITS_52 + orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET +#endif + .endm + +/* + * Perform the reverse of offset_ttbr1. + * bic is used as it can cover the immediate value and, in future, won't need + * to be nop'ed out when dealing with 52-bit kernel VAs. + */ + .macro restore_ttbr1, ttbr +#ifdef CONFIG_ARM64_USER_VA_BITS_52 + bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET +#endif + .endm + +/* * Arrange a physical address in a TTBR register, taking care of 52-bit * addresses. * @@ -672,11 +722,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .macro if_will_cond_yield_neon #ifdef CONFIG_PREEMPT get_thread_info x0 - ldr w1, [x0, #TSK_TI_PREEMPT] - ldr x0, [x0, #TSK_TI_FLAGS] - cmp w1, #PREEMPT_DISABLE_OFFSET - csel x0, x0, xzr, eq - tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling? + ldr x0, [x0, #TSK_TI_PREEMPT] + sub x0, x0, #PREEMPT_DISABLE_OFFSET + cbz x0, .Lyield_\@ /* fall through to endif_yield_neon */ .subsection 1 .Lyield_\@ : diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index f5a2d09afb38..af7b99005453 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -248,48 +248,57 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) } __LL_SC_EXPORT(atomic64_dec_if_positive); -#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \ -__LL_SC_INLINE unsigned long \ -__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \ - unsigned long old, \ - unsigned long new)) \ +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ +__LL_SC_INLINE u##sz \ +__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ + unsigned long old, \ + u##sz new)) \ { \ - unsigned long tmp, oldval; \ + unsigned long tmp; \ + u##sz oldval; \ + \ + /* \ + * Sub-word sizes require explicit casting so that the compare \ + * part of the cmpxchg doesn't end up interpreting non-zero \ + * upper bits of the register containing "old". 
\ + */ \ + if (sz < 32) \ + old = (u##sz)old; \ \ asm volatile( \ " prfm pstl1strm, %[v]\n" \ - "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \ + "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ " cbnz %" #w "[tmp], 2f\n" \ - " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ + " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " cbnz %w[tmp], 1b\n" \ " " #mb "\n" \ "2:" \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ - [v] "+Q" (*(unsigned long *)ptr) \ - : [old] "Lr" (old), [new] "r" (new) \ + [v] "+Q" (*(u##sz *)ptr) \ + : [old] "Kr" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ } \ -__LL_SC_EXPORT(__cmpxchg_case_##name); +__LL_SC_EXPORT(__cmpxchg_case_##name##sz); -__CMPXCHG_CASE(w, b, 1, , , , ) -__CMPXCHG_CASE(w, h, 2, , , , ) -__CMPXCHG_CASE(w, , 4, , , , ) -__CMPXCHG_CASE( , , 8, , , , ) -__CMPXCHG_CASE(w, b, acq_1, , a, , "memory") -__CMPXCHG_CASE(w, h, acq_2, , a, , "memory") -__CMPXCHG_CASE(w, , acq_4, , a, , "memory") -__CMPXCHG_CASE( , , acq_8, , a, , "memory") -__CMPXCHG_CASE(w, b, rel_1, , , l, "memory") -__CMPXCHG_CASE(w, h, rel_2, , , l, "memory") -__CMPXCHG_CASE(w, , rel_4, , , l, "memory") -__CMPXCHG_CASE( , , rel_8, , , l, "memory") -__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory") -__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory") +__CMPXCHG_CASE(w, b, , 8, , , , ) +__CMPXCHG_CASE(w, h, , 16, , , , ) +__CMPXCHG_CASE(w, , , 32, , , , ) +__CMPXCHG_CASE( , , , 64, , , , ) +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") #undef __CMPXCHG_CASE diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index f9b0b09153e0..a424355240c5 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -446,22 +446,22 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) #define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) -#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \ -static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ - unsigned long old, \ - unsigned long new) \ +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) 
\ +static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ + u##sz old, \ + u##sz new) \ { \ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ - register unsigned long x1 asm ("x1") = old; \ - register unsigned long x2 asm ("x2") = new; \ + register u##sz x1 asm ("x1") = old; \ + register u##sz x2 asm ("x2") = new; \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - __LL_SC_CMPXCHG(name) \ + __LL_SC_CMPXCHG(name##sz) \ __nops(2), \ /* LSE atomics */ \ " mov " #w "30, %" #w "[old]\n" \ - " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \ + " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ " mov %" #w "[ret], " #w "30") \ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ : [old] "r" (x1), [new] "r" (x2) \ @@ -470,22 +470,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ return x0; \ } -__CMPXCHG_CASE(w, b, 1, ) -__CMPXCHG_CASE(w, h, 2, ) -__CMPXCHG_CASE(w, , 4, ) -__CMPXCHG_CASE(x, , 8, ) -__CMPXCHG_CASE(w, b, acq_1, a, "memory") -__CMPXCHG_CASE(w, h, acq_2, a, "memory") -__CMPXCHG_CASE(w, , acq_4, a, "memory") -__CMPXCHG_CASE(x, , acq_8, a, "memory") -__CMPXCHG_CASE(w, b, rel_1, l, "memory") -__CMPXCHG_CASE(w, h, rel_2, l, "memory") -__CMPXCHG_CASE(w, , rel_4, l, "memory") -__CMPXCHG_CASE(x, , rel_8, l, "memory") -__CMPXCHG_CASE(w, b, mb_1, al, "memory") -__CMPXCHG_CASE(w, h, mb_2, al, "memory") -__CMPXCHG_CASE(w, , mb_4, al, "memory") -__CMPXCHG_CASE(x, , mb_8, al, "memory") +__CMPXCHG_CASE(w, b, , 8, ) +__CMPXCHG_CASE(w, h, , 16, ) +__CMPXCHG_CASE(w, , , 32, ) +__CMPXCHG_CASE(x, , , 64, ) +__CMPXCHG_CASE(w, b, acq_, 8, a, "memory") +__CMPXCHG_CASE(w, h, acq_, 16, a, "memory") +__CMPXCHG_CASE(w, , acq_, 32, a, "memory") +__CMPXCHG_CASE(x, , acq_, 64, a, "memory") +__CMPXCHG_CASE(w, b, rel_, 8, l, "memory") +__CMPXCHG_CASE(w, h, rel_, 16, l, "memory") +__CMPXCHG_CASE(w, , rel_, 32, l, "memory") +__CMPXCHG_CASE(x, , rel_, 64, l, "memory") +__CMPXCHG_CASE(w, b, mb_, 8, al, "memory") +__CMPXCHG_CASE(w, h, mb_, 16, al, "memory") +__CMPXCHG_CASE(w, , mb_, 32, al, "memory") +__CMPXCHG_CASE(x, , mb_, 64, al, "memory") #undef __LL_SC_CMPXCHG #undef __CMPXCHG_CASE diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 822a9192c551..f66bb04fdf2d 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -34,6 +34,10 @@ #define psb_csync() asm volatile("hint #17" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") +#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ + SB_BARRIER_INSN"nop\n", \ + ARM64_HAS_SB)) + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3b0938281541..3f9376f1c409 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -30,46 +30,46 @@ * barrier case is generated as release+dmb for the former and * acquire+release for the latter. 
*/ -#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \ -static inline unsigned long __xchg_case_##name(unsigned long x, \ - volatile void *ptr) \ -{ \ - unsigned long ret, tmp; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - " prfm pstl1strm, %2\n" \ - "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \ - " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \ - " cbnz %w1, 1b\n" \ - " " #mb, \ - /* LSE atomics */ \ - " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ - __nops(3) \ - " " #nop_lse) \ - : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \ - : "r" (x) \ - : cl); \ - \ - return ret; \ +#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \ +static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \ +{ \ + u##sz ret; \ + unsigned long tmp; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \ + " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb, \ + /* LSE atomics */ \ + " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \ + __nops(3) \ + " " #nop_lse) \ + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \ + : "r" (x) \ + : cl); \ + \ + return ret; \ } -__XCHG_CASE(w, b, 1, , , , , , ) -__XCHG_CASE(w, h, 2, , , , , , ) -__XCHG_CASE(w, , 4, , , , , , ) -__XCHG_CASE( , , 8, , , , , , ) -__XCHG_CASE(w, b, acq_1, , , a, a, , "memory") -__XCHG_CASE(w, h, acq_2, , , a, a, , "memory") -__XCHG_CASE(w, , acq_4, , , a, a, , "memory") -__XCHG_CASE( , , acq_8, , , a, a, , "memory") -__XCHG_CASE(w, b, rel_1, , , , , l, "memory") -__XCHG_CASE(w, h, rel_2, , , , , l, "memory") -__XCHG_CASE(w, , rel_4, , , , , l, "memory") -__XCHG_CASE( , , rel_8, , , , , l, "memory") -__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory") -__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory") -__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory") -__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") +__XCHG_CASE(w, b, , 8, , , , , , ) +__XCHG_CASE(w, h, , 16, , , , , , ) +__XCHG_CASE(w, , , 32, , , , , , ) +__XCHG_CASE( , , , 64, , , , , , ) +__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory") +__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory") +__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory") +__XCHG_CASE( , , acq_, 64, , , a, a, , "memory") +__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory") +__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory") +__XCHG_CASE(w, , rel_, 32, , , , , l, "memory") +__XCHG_CASE( , , rel_, 64, , , , , l, "memory") +__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory") +__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory") +__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory") +__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") #undef __XCHG_CASE @@ -80,13 +80,13 @@ static inline unsigned long __xchg##sfx(unsigned long x, \ { \ switch (size) { \ case 1: \ - return __xchg_case##sfx##_1(x, ptr); \ + return __xchg_case##sfx##_8(x, ptr); \ case 2: \ - return __xchg_case##sfx##_2(x, ptr); \ + return __xchg_case##sfx##_16(x, ptr); \ case 4: \ - return __xchg_case##sfx##_4(x, ptr); \ + return __xchg_case##sfx##_32(x, ptr); \ case 8: \ - return __xchg_case##sfx##_8(x, ptr); \ + return __xchg_case##sfx##_64(x, ptr); \ default: \ BUILD_BUG(); \ } \ @@ -123,13 +123,13 @@ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ { \ switch (size) { \ case 1: \ - return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \ + return __cmpxchg_case##sfx##_8(ptr, old, new); \ case 2: \ - return 
__cmpxchg_case##sfx##_2(ptr, (u16)old, new); \ + return __cmpxchg_case##sfx##_16(ptr, old, new); \ case 4: \ - return __cmpxchg_case##sfx##_4(ptr, old, new); \ + return __cmpxchg_case##sfx##_32(ptr, old, new); \ case 8: \ - return __cmpxchg_case##sfx##_8(ptr, old, new); \ + return __cmpxchg_case##sfx##_64(ptr, old, new); \ default: \ BUILD_BUG(); \ } \ @@ -197,16 +197,16 @@ __CMPXCHG_GEN(_mb) __ret; \ }) -#define __CMPWAIT_CASE(w, sz, name) \ -static inline void __cmpwait_case_##name(volatile void *ptr, \ - unsigned long val) \ +#define __CMPWAIT_CASE(w, sfx, sz) \ +static inline void __cmpwait_case_##sz(volatile void *ptr, \ + unsigned long val) \ { \ unsigned long tmp; \ \ asm volatile( \ " sevl\n" \ " wfe\n" \ - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ + " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ " cbnz %" #w "[tmp], 1f\n" \ " wfe\n" \ @@ -215,10 +215,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ : [val] "r" (val)); \ } -__CMPWAIT_CASE(w, b, 1); -__CMPWAIT_CASE(w, h, 2); -__CMPWAIT_CASE(w, , 4); -__CMPWAIT_CASE( , , 8); +__CMPWAIT_CASE(w, b, 8); +__CMPWAIT_CASE(w, h, 16); +__CMPWAIT_CASE(w, , 32); +__CMPWAIT_CASE( , , 64); #undef __CMPWAIT_CASE @@ -229,13 +229,13 @@ static inline void __cmpwait##sfx(volatile void *ptr, \ { \ switch (size) { \ case 1: \ - return __cmpwait_case##sfx##_1(ptr, (u8)val); \ + return __cmpwait_case##sfx##_8(ptr, (u8)val); \ case 2: \ - return __cmpwait_case##sfx##_2(ptr, (u16)val); \ + return __cmpwait_case##sfx##_16(ptr, (u16)val); \ case 4: \ - return __cmpwait_case##sfx##_4(ptr, val); \ + return __cmpwait_case##sfx##_32(ptr, val); \ case 8: \ - return __cmpwait_case##sfx##_8(ptr, val); \ + return __cmpwait_case##sfx##_64(ptr, val); \ default: \ BUILD_BUG(); \ } \ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 6e2d254c09eb..82e9099834ae 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -54,7 +54,13 @@ #define ARM64_HAS_CRC32 33 #define ARM64_SSBS 34 #define ARM64_WORKAROUND_1188873 35 +#define ARM64_HAS_SB 36 +#define ARM64_WORKAROUND_1165522 37 +#define ARM64_HAS_ADDRESS_AUTH_ARCH 38 +#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 +#define ARM64_HAS_GENERIC_AUTH_ARCH 40 +#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 -#define ARM64_NCAPS 36 +#define ARM64_NCAPS 42 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 7e2ec64aa414..dfcfba725d72 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -321,19 +321,20 @@ struct arm64_cpu_capabilities { bool sign; unsigned long hwcap; }; - /* - * A list of "matches/cpu_enable" pair for the same - * "capability" of the same "type" as described by the parent. - * Only matches(), cpu_enable() and fields relevant to these - * methods are significant in the list. The cpu_enable is - * invoked only if the corresponding entry "matches()". - * However, if a cpu_enable() method is associated - * with multiple matches(), care should be taken that either - * the match criteria are mutually exclusive, or that the - * method is robust against being called multiple times. - */ - const struct arm64_cpu_capabilities *match_list; }; + + /* + * An optional list of "matches/cpu_enable" pair for the same + * "capability" of the same "type" as described by the parent. + * Only matches(), cpu_enable() and fields relevant to these + * methods are significant in the list. 
The cpu_enable is + invoked only if the corresponding entry "matches()". + However, if a cpu_enable() method is associated + with multiple matches(), care should be taken that either + the match criteria are mutually exclusive, or that the + method is robust against being called multiple times. + */ + const struct arm64_cpu_capabilities *match_list; }; static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) @@ -353,10 +354,46 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); } +/* + * Generic helper for handling capabilities with multiple (match,enable) pairs + * of callbacks, sharing the same capability bit. + * Iterate over each entry to see if at least one matches. + */ +static inline bool +cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, + int scope) +{ + const struct arm64_cpu_capabilities *caps; + + for (caps = entry->match_list; caps->matches; caps++) + if (caps->matches(caps, scope)) + return true; + + return false; +} + +/* + * Take appropriate action for all matching entries in the shared capability + * entry. + */ +static inline void +cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) +{ + const struct arm64_cpu_capabilities *caps; + + for (caps = entry->match_list; caps->matches; caps++) + if (caps->matches(caps, SCOPE_LOCAL_CPU) && + caps->cpu_enable) + caps->cpu_enable(caps); +} + extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; +#define for_each_available_cap(cap) \ + for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) + bool this_cpu_has_cap(unsigned int cap); static inline bool cpu_have_feature(unsigned int num) @@ -473,7 +510,6 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) void __init setup_cpu_features(void); void check_local_cpu_capabilities(void); - u64 read_sanitised_ftr_reg(u32 id); static inline bool cpu_supports_mixed_endian_el0(void) @@ -486,11 +522,59 @@ static inline bool system_supports_32bit_el0(void) return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); } +static inline bool system_supports_4kb_granule(void) +{ + u64 mmfr0; + u32 val; + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + val = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_TGRAN4_SHIFT); + + return val == ID_AA64MMFR0_TGRAN4_SUPPORTED; +} + +static inline bool system_supports_64kb_granule(void) +{ + u64 mmfr0; + u32 val; + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + val = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_TGRAN64_SHIFT); + + return val == ID_AA64MMFR0_TGRAN64_SUPPORTED; +} + +static inline bool system_supports_16kb_granule(void) +{ + u64 mmfr0; + u32 val; + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + val = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_TGRAN16_SHIFT); + + return val == ID_AA64MMFR0_TGRAN16_SUPPORTED; +} + static inline bool system_supports_mixed_endian_el0(void) { return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1)); } +static inline bool system_supports_mixed_endian(void) +{ + u64 mmfr0; + u32 val; + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + val = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_BIGENDEL_SHIFT); + + return val == 0x1; +} + static inline bool system_supports_fpsimd(void) { return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); @@ -514,6 +598,20 @@ static
inline bool system_supports_cnp(void) cpus_have_const_cap(ARM64_HAS_CNP); } +static inline bool system_supports_address_auth(void) +{ + return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && + (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || + cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF)); +} + +static inline bool system_supports_generic_auth(void) +{ + return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && + (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || + cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); +} + #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 12f93e4d2452..951ed1a4e5c9 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -151,6 +151,8 @@ struct midr_range { .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \ } +#define MIDR_REV_RANGE(m, v, r_min, r_max) MIDR_RANGE(m, v, r_min, v, r_max) +#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index c41f3fb1446c..95dbf3ef735a 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -24,15 +24,9 @@ #include <xen/xen.h> #include <asm/xen/hypervisor.h> -extern const struct dma_map_ops dummy_dma_ops; - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - /* - * We expect no ISA devices, and all other DMA masters are expected to - * have someone call arch_setup_dma_ops at device creation time. - */ - return &dummy_dma_ops; + return NULL; } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 433b9554c6a1..6adc1a90e7e6 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -117,7 +117,11 @@ * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. 
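A quick, hedged arithmetic check of the two-thirds expression used just below (assuming DEFAULT_MAP_WINDOW_64 = 1 << 48, the non-52-bit default):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t window = 1ULL << 48;	/* assumed DEFAULT_MAP_WINDOW_64 */
	uint64_t base = 2 * window / 3;

	/* Prints 0xaaaaaaaaaaaa: two thirds of the way up the window,
	 * comfortably above the 4GB boundary mentioned above. */
	printf("ELF_ET_DYN_BASE = 0x%llx\n", (unsigned long long)base);
	return 0;
}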
*/ +#ifdef CONFIG_ARM64_FORCE_52BIT #define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +#else +#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3) +#endif /* CONFIG_ARM64_FORCE_52BIT */ #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 676de2ec1762..52233f00d53d 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -29,23 +29,24 @@ #define ESR_ELx_EC_CP14_MR (0x05) #define ESR_ELx_EC_CP14_LS (0x06) #define ESR_ELx_EC_FP_ASIMD (0x07) -#define ESR_ELx_EC_CP10_ID (0x08) -/* Unallocated EC: 0x09 - 0x0B */ +#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */ +#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */ +/* Unallocated EC: 0x0A - 0x0B */ #define ESR_ELx_EC_CP14_64 (0x0C) /* Unallocated EC: 0x0d */ #define ESR_ELx_EC_ILL (0x0E) /* Unallocated EC: 0x0F - 0x10 */ #define ESR_ELx_EC_SVC32 (0x11) -#define ESR_ELx_EC_HVC32 (0x12) -#define ESR_ELx_EC_SMC32 (0x13) +#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */ +#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */ /* Unallocated EC: 0x14 */ #define ESR_ELx_EC_SVC64 (0x15) -#define ESR_ELx_EC_HVC64 (0x16) -#define ESR_ELx_EC_SMC64 (0x17) +#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */ +#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ #define ESR_ELx_EC_SYS64 (0x18) #define ESR_ELx_EC_SVE (0x19) /* Unallocated EC: 0x1A - 0x1E */ -#define ESR_ELx_EC_IMP_DEF (0x1f) +#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) #define ESR_ELx_EC_PC_ALIGN (0x22) @@ -68,7 +69,7 @@ /* Unallocated EC: 0x36 - 0x37 */ #define ESR_ELx_EC_BKPT32 (0x38) /* Unallocated EC: 0x39 */ -#define ESR_ELx_EC_VECTOR32 (0x3A) +#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ /* Unallocated EC: 0x3B */ #define ESR_ELx_EC_BRK64 (0x3C) /* Unallocated EC: 0x3D - 0x3F */ diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index fac54fb050d0..15a6587e12f9 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -13,6 +13,7 @@ #include <asm/insn.h> +#define HAVE_FUNCTION_GRAPH_FP_TEST #define MCOUNT_ADDR ((unsigned long)_mcount) #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE diff --git a/arch/arm64/include/asm/image.h b/arch/arm64/include/asm/image.h new file mode 100644 index 000000000000..e2c27a2278e9 --- /dev/null +++ b/arch/arm64/include/asm/image.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_IMAGE_H +#define __ASM_IMAGE_H + +#define ARM64_IMAGE_MAGIC "ARM\x64" + +#define ARM64_IMAGE_FLAG_BE_SHIFT 0 +#define ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT (ARM64_IMAGE_FLAG_BE_SHIFT + 1) +#define ARM64_IMAGE_FLAG_PHYS_BASE_SHIFT \ + (ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT + 2) +#define ARM64_IMAGE_FLAG_BE_MASK 0x1 +#define ARM64_IMAGE_FLAG_PAGE_SIZE_MASK 0x3 +#define ARM64_IMAGE_FLAG_PHYS_BASE_MASK 0x1 + +#define ARM64_IMAGE_FLAG_LE 0 +#define ARM64_IMAGE_FLAG_BE 1 +#define ARM64_IMAGE_FLAG_PAGE_SIZE_4K 1 +#define ARM64_IMAGE_FLAG_PAGE_SIZE_16K 2 +#define ARM64_IMAGE_FLAG_PAGE_SIZE_64K 3 +#define ARM64_IMAGE_FLAG_PHYS_BASE 1 + +#ifndef __ASSEMBLY__ + +#define arm64_image_flag_field(flags, field) \ + (((flags) >> field##_SHIFT) & field##_MASK) + +/* + * struct arm64_image_header - arm64 kernel image header + * See Documentation/arm64/booting.txt for details + * + * @code0: Executable code, or + * @mz_header alternatively used for part of MZ header + * @code1: Executable code + * @text_offset: Image load offset + * @image_size: Effective Image size + * @flags: kernel flags + *
@res2: reserved + * @res3: reserved + * @res4: reserved + * @magic: Magic number + * @res5: reserved, or + * @pe_header: alternatively used for PE COFF offset + */ + +struct arm64_image_header { + __le32 code0; + __le32 code1; + __le64 text_offset; + __le64 image_size; + __le64 flags; + __le64 res2; + __le64 res3; + __le64 res4; + __le32 magic; + __le32 res5; +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_IMAGE_H */ diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index c6802dea6cab..9c01f04db64d 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -261,6 +261,11 @@ enum aarch64_insn_prfm_policy { AARCH64_INSN_PRFM_POLICY_STRM, }; +enum aarch64_insn_adr_type { + AARCH64_INSN_ADR_TYPE_ADRP, + AARCH64_INSN_ADR_TYPE_ADR, +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -393,6 +398,9 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, enum aarch64_insn_adsb_type type); +u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, + enum aarch64_insn_register reg, + enum aarch64_insn_adr_type type); u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, enum aarch64_insn_register src, int immr, int imms, diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 9f8b915af3a7..ee723835c1f4 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -104,7 +104,23 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) } /* IO barriers */ -#define __iormb() rmb() +#define __iormb(v) \ +({ \ + unsigned long tmp; \ + \ + rmb(); \ + \ + /* \ + * Create a dummy control dependency from the IO read to any \ + * later instructions. This ensures that a subsequent call to \ + * udelay() will be ordered due to the ISB in get_cycles(). \ + */ \ + asm volatile("eor %0, %1, %1\n" \ + "cbnz %0, ." \ + : "=r" (tmp) : "r" ((unsigned long)(v)) \ + : "memory"); \ +}) + #define __iowmb() wmb() #define mmiowb() do { } while (0) @@ -129,10 +145,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * following Normal memory access. Writes are ordered relative to any prior * Normal memory access.
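The ordering problem the new __iormb(v) above closes can be sketched with a hypothetical device (CTRL, STATUS and STATUS_READY are made-up names; this illustrates the polling pattern, it is not code from this patch):

#include <linux/delay.h>
#include <linux/io.h>

#define CTRL		0x00
#define STATUS		0x04
#define STATUS_READY	(1U << 0)

static void wait_for_device(void __iomem *regs)
{
	writel(1, regs + CTRL);			/* kick the device */
	while (!(readl(regs + STATUS) & STATUS_READY))
		udelay(10);	/* must not start counting before the
				 * readl() above has actually completed */
}

Without a dependency on the value returned by readl(), the CPU could start the udelay() countdown before the MMIO read returns; the dummy eor/cbnz makes the delay loop control-dependent on the value read.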
*/ -#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) -#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) -#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) -#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) @@ -183,9 +199,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); /* * io{read,write}{16,32,64}be() macros */ -#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) -#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; }) +#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; }) +#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; }) +#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; }) #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index e17f0529a882..67e4cb75d1fd 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -93,6 +93,25 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +#ifdef CONFIG_KEXEC_FILE +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + void *dtb; + unsigned long dtb_mem; +}; + +extern const struct kexec_file_ops kexec_image_ops; + +struct kimage; + +extern int arch_kimage_file_post_load_cleanup(struct kimage *image); +extern int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, unsigned long kernel_size, + char *initrd, unsigned long initrd_len, + char *cmdline); +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 6f602af5263c..7f9d2bfcf82e 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -24,6 +24,8 @@ /* Hyp Configuration Register (HCR) bits */ #define HCR_FWB (UL(1) << 46) +#define HCR_API (UL(1) << 41) +#define HCR_APK (UL(1) << 40) #define HCR_TEA (UL(1) << 37) #define HCR_TERR (UL(1) << 36) #define HCR_TLOR (UL(1) << 35) @@ -87,6 +89,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ @@ -104,7 +107,7 @@ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) /* VTCR_EL2 Registers bits */ -#define VTCR_EL2_RES1 (1 << 31) +#define VTCR_EL2_RES1 (1U << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT @@ -320,10 +323,6 @@ #define PAR_TO_HPFAR(par) \ (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) -#define 
kvm_arm_exception_type \ - {0, "IRQ" }, \ - {1, "TRAP" } - #define ECN(x) { ESR_ELx_EC_##x, #x } #define kvm_arm_exception_class \ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index aea01a09eb94..f5b79e995f40 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -25,6 +25,7 @@ #define ARM_EXIT_WITH_SERROR_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) +#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP) #define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)) #define ARM_EXCEPTION_IRQ 0 @@ -34,6 +35,12 @@ /* The hyp-stub will return this for any kvm_call_hyp() call */ #define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR +#define kvm_arm_exception_type \ + {ARM_EXCEPTION_IRQ, "IRQ" }, \ + {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \ + {ARM_EXCEPTION_TRAP, "TRAP" }, \ + {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" } + #ifndef __ASSEMBLY__ #include <linux/mm.h> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 21247870def7..506386a3edde 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -24,6 +24,7 @@ #include <linux/kvm_host.h> +#include <asm/debug-monitors.h> #include <asm/esr.h> #include <asm/kvm_arm.h> #include <asm/kvm_hyp.h> @@ -147,14 +148,6 @@ static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) return true; } -static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) -{ - if (vcpu_mode_is_32bit(vcpu)) - kvm_skip_instr32(vcpu, is_wide_instr); - else - *vcpu_pc(vcpu) += 4; -} - static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) { *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT; @@ -424,4 +417,30 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, return data; /* Leave LE untouched */ } +static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ + if (vcpu_mode_is_32bit(vcpu)) + kvm_skip_instr32(vcpu, is_wide_instr); + else + *vcpu_pc(vcpu) += 4; + + /* advance the singlestep state machine */ + *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; +} + +/* + * Skip an instruction which has been emulated at hyp while most guest sysregs + * are live. 
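A hedged sketch of how an exit handler is expected to use the kvm_skip_instr() defined above after this change (the handler and the emulated access are hypothetical):

/* Emulate, then skip. Because kvm_skip_instr() now also clears
 * DBG_SPSR_SS, a guest single-stepping across the emulated instruction
 * observes exactly one step, as it would on real hardware. */
static int handle_widget_trap(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* ...emulate the trapped access here... */

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;	/* resume the guest */
}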
+ */ +static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) +{ + *vcpu_pc(vcpu) = read_sysreg_el2(elr); + vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr); + + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + + write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr); + write_sysreg_el2(*vcpu_pc(vcpu), elr); +} + #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 52fbc823ff8c..7732d0ba4e60 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -319,7 +319,7 @@ struct kvm_vcpu_arch { */ #define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) -u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg); +u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg); void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); /* @@ -360,7 +360,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); @@ -422,7 +422,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, } } -static inline bool kvm_arch_check_sve_has_vhe(void) +static inline bool kvm_arch_requires_vhe(void) { /* * The Arm architecture specifies that implementation of SVE @@ -430,9 +430,13 @@ static inline bool kvm_arch_check_sve_has_vhe(void) * relies on this when SVE is present: */ if (system_supports_sve()) - return has_vhe(); - else return true; + + /* Some implementations have defects that confine them to VHE */ + if (cpus_have_cap(ARM64_WORKAROUND_1165522)) + return true; + + return false; } static inline void kvm_arch_hardware_unsetup(void) {} @@ -445,7 +449,6 @@ void kvm_arm_init_debug(void); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); -bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 23aca66767f9..a80a7ef57325 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -20,6 +20,7 @@ #include <linux/compiler.h> #include <linux/kvm_host.h> +#include <asm/alternative.h> #include <asm/sysreg.h> #define __hyp_text __section(.hyp.text) notrace @@ -163,6 +164,13 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) { write_sysreg(kvm->arch.vtcr, vtcr_el2); write_sysreg(kvm->arch.vttbr, vttbr_el2); + + /* + * ARM erratum 1165522 requires the actual execution of the above + * before we can switch to the EL1/EL0 translation regime used by + * the guest. 
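For contrast, an illustrative C-level equivalent of the boot-time-patched ALTERNATIVE() used just below would be a runtime check:

if (cpus_have_const_cap(ARM64_WORKAROUND_1165522))
	isb();	/* only affected parts pay for the extra synchronization */

Patching at boot instead leaves a single nop (or isb) in place, with no branch and no capability load on this hot path.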
+ */ + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522)); } #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 658657367f2f..8af4b1befa42 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -184,6 +184,17 @@ void kvm_clear_hyp_idmap(void); #define kvm_mk_pgd(pudp) \ __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE) +#define kvm_set_pud(pudp, pud) set_pud(pudp, pud) + +#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot) +#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot) +#define kvm_pfn_pud(pfn, prot) pfn_pud(pfn, prot) + +#define kvm_pud_pfn(pud) pud_pfn(pud) + +#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd) +#define kvm_pud_mkhuge(pud) pud_mkhuge(pud) + static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { pte_val(pte) |= PTE_S2_RDWR; @@ -196,6 +207,12 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd) return pmd; } +static inline pud_t kvm_s2pud_mkwrite(pud_t pud) +{ + pud_val(pud) |= PUD_S2_RDWR; + return pud; +} + static inline pte_t kvm_s2pte_mkexec(pte_t pte) { pte_val(pte) &= ~PTE_S2_XN; @@ -208,6 +225,12 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd) return pmd; } +static inline pud_t kvm_s2pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~PUD_S2_XN; + return pud; +} + static inline void kvm_set_s2pte_readonly(pte_t *ptep) { pteval_t old_pteval, pteval; @@ -246,6 +269,31 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp) return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN); } +static inline void kvm_set_s2pud_readonly(pud_t *pudp) +{ + kvm_set_s2pte_readonly((pte_t *)pudp); +} + +static inline bool kvm_s2pud_readonly(pud_t *pudp) +{ + return kvm_s2pte_readonly((pte_t *)pudp); +} + +static inline bool kvm_s2pud_exec(pud_t *pudp) +{ + return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); +} + +static inline pud_t kvm_s2pud_mkyoung(pud_t pud) +{ + return pud_mkyoung(pud); +} + +static inline bool kvm_s2pud_young(pud_t pud) +{ + return pud_young(pud); +} + #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) #ifdef __PAGETABLE_PMD_FOLDED diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f0a5c9531e8b..0385752bd079 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -53,8 +53,11 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) - \ (UL(1) << (VA_BITS - 1)) + 1) #define KIMAGE_VADDR (MODULES_END) +#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) +#define BPF_JIT_REGION_SIZE (SZ_128M) +#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) -#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE) +#define MODULES_VADDR (BPF_JIT_REGION_END) #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE) #define PCI_IO_END (VMEMMAP_START - SZ_2M) @@ -64,15 +67,26 @@ #define KERNEL_START _text #define KERNEL_END _end +#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#define MAX_USER_VA_BITS 52 +#else +#define MAX_USER_VA_BITS VA_BITS +#endif + /* * KASAN requires 1/8th of the kernel virtual address space for the shadow * region. KASAN can bloat the stack significantly, so double the (minimum) - * stack size when KASAN is in use. + * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is + * on. 
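How the shift feeds kernel stack sizes, as a hedged arithmetic sketch (it assumes the MIN_THREAD_SHIFT = 14 + KASAN_THREAD_SHIFT relationship used elsewhere in this header):

#include <stdio.h>

int main(void)
{
	const char *cfg[] = { "no KASAN", "KASAN", "KASAN_EXTRA" };
	int kasan_thread_shift[] = { 0, 1, 2 };

	for (int i = 0; i < 3; i++)
		printf("%-12s -> %2d KiB stacks\n", cfg[i],
		       (1 << (14 + kasan_thread_shift[i])) / 1024);
	return 0;	/* 16, 32 and 64 KiB respectively */
}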
*/ #ifdef CONFIG_KASAN #define KASAN_SHADOW_SCALE_SHIFT 3 #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) +#ifdef CONFIG_KASAN_EXTRA +#define KASAN_THREAD_SHIFT 2 +#else #define KASAN_THREAD_SHIFT 1 +#endif /* CONFIG_KASAN_EXTRA */ #else #define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 @@ -187,6 +201,9 @@ static inline unsigned long kaslr_offset(void) return kimage_vaddr - KIMAGE_VADDR; } +/* the actual size of a user virtual address */ +extern u64 vabits_user; + /* * Allow all memory at the discovery stage. We will clip it later. */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 1e58bf58c22b..2da3e478fd8f 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -35,6 +35,8 @@ #include <asm/sysreg.h> #include <asm/tlbflush.h> +extern bool rodata_full; + static inline void contextidr_thread_switch(struct task_struct *next) { if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR)) @@ -72,6 +74,9 @@ extern u64 idmap_ptrs_per_pgd; static inline bool __cpu_uses_extended_idmap(void) { + if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52)) + return false; + return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); } diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 97d0ef12e2ff..905e1bb0e7bd 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -22,7 +22,7 @@ #ifdef CONFIG_ARM64_MODULE_PLTS struct mod_plt_sec { - struct elf64_shdr *plt; + int plt_shndx; int plt_num_entries; int plt_max_entries; }; @@ -36,10 +36,12 @@ struct mod_arch_specific { }; #endif -u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, +u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, const Elf64_Rela *rela, Elf64_Sym *sym); -u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); +u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, u64 val); #ifdef CONFIG_RANDOMIZE_BASE extern u64 module_alloc_base; @@ -56,39 +58,19 @@ struct plt_entry { * is exactly what we are dealing with here, we are free to use x16 * as a scratch register in the PLT veneers. */ - __le32 mov0; /* movn x16, #0x.... */ - __le32 mov1; /* movk x16, #0x...., lsl #16 */ - __le32 mov2; /* movk x16, #0x...., lsl #32 */ + __le32 adrp; /* adrp x16, .... */ + __le32 add; /* add x16, x16, #0x.... 
*/ __le32 br; /* br x16 */ }; -static inline struct plt_entry get_plt_entry(u64 val) +static inline bool is_forbidden_offset_for_adrp(void *place) { - /* - * MOVK/MOVN/MOVZ opcode: - * +--------+------------+--------+-----------+-------------+---------+ - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | - * +--------+------------+--------+-----------+-------------+---------+ - * - * Rd := 0x10 (x16) - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) - * sf := 1 (64-bit variant) - */ - return (struct plt_entry){ - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), - cpu_to_le32(0xd61f0200) - }; + return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) && + cpus_have_const_cap(ARM64_WORKAROUND_843419) && + ((u64)place & 0xfff) >= 0xff8; } -static inline bool plt_entries_equal(const struct plt_entry *a, - const struct plt_entry *b) -{ - return a->mov0 == b->mov0 && - a->mov1 == b->mov1 && - a->mov2 == b->mov2; -} +struct plt_entry get_plt_entry(u64 dst, void *pc); +bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h new file mode 100644 index 000000000000..2ba6c6b9541f --- /dev/null +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2018 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_NEON_INTRINSICS_H +#define __ASM_NEON_INTRINSICS_H + +#include <asm-generic/int-ll64.h> + +/* + * In the kernel, u64/s64 are [un]signed long long, not [un]signed long. + * So by redefining these macros to the former, we can force gcc-stdint.h + * to define uint64_t / int64_t in a compatible manner. + */ + +#ifdef __INT64_TYPE__ +#undef __INT64_TYPE__ +#define __INT64_TYPE__ long long +#endif + +#ifdef __UINT64_TYPE__ +#undef __UINT64_TYPE__ +#define __UINT64_TYPE__ unsigned long long +#endif + +/* + * genksyms chokes on the ARM NEON intrinsics system header, but we + * don't export anything it defines anyway, so just disregard when + * genksyms executes.
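The type mismatch this header papers over can be shown with a hedged userspace sketch: under LP64, gcc's stdint machinery makes uint64_t an unsigned long, while the kernel's u64 is unsigned long long; the two have identical width but are distinct types, so pointer conversions between them warn:

#include <stdint.h>

typedef unsigned long long u64;	/* kernel convention */

void takes_kernel_u64(u64 *p);

void demo(uint64_t *q)
{
	/* Warns on LP64 targets: 'uint64_t *' is 'unsigned long *',
	 * incompatible with 'unsigned long long *' despite equal width.
	 * Redefining __UINT64_TYPE__ before including arm_neon.h, as
	 * above, avoids exactly this class of warning. */
	takes_kernel_u64(q);
}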
+ */ +#ifndef __GENKSYMS__ +#include <arm_neon.h> +#endif + +#endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 21a81b59a0cc..6b81dd8cee01 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -48,263 +48,193 @@ static inline unsigned long __my_cpu_offset(void) } #define __my_cpu_offset __my_cpu_offset() -#define PERCPU_OP(op, asm_op) \ -static inline unsigned long __percpu_##op(void *ptr, \ - unsigned long val, int size) \ +#define PERCPU_RW_OPS(sz) \ +static inline unsigned long __percpu_read_##sz(void *ptr) \ { \ - unsigned long loop, ret; \ + return READ_ONCE(*(u##sz *)ptr); \ +} \ \ - switch (size) { \ - case 1: \ - asm ("//__per_cpu_" #op "_1\n" \ - "1: ldxrb %w[ret], %[ptr]\n" \ - #asm_op " %w[ret], %w[ret], %w[val]\n" \ - " stxrb %w[loop], %w[ret], %[ptr]\n" \ - " cbnz %w[loop], 1b" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u8 *)ptr) \ - : [val] "Ir" (val)); \ - break; \ - case 2: \ - asm ("//__per_cpu_" #op "_2\n" \ - "1: ldxrh %w[ret], %[ptr]\n" \ - #asm_op " %w[ret], %w[ret], %w[val]\n" \ - " stxrh %w[loop], %w[ret], %[ptr]\n" \ - " cbnz %w[loop], 1b" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u16 *)ptr) \ - : [val] "Ir" (val)); \ - break; \ - case 4: \ - asm ("//__per_cpu_" #op "_4\n" \ - "1: ldxr %w[ret], %[ptr]\n" \ - #asm_op " %w[ret], %w[ret], %w[val]\n" \ - " stxr %w[loop], %w[ret], %[ptr]\n" \ - " cbnz %w[loop], 1b" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u32 *)ptr) \ - : [val] "Ir" (val)); \ - break; \ - case 8: \ - asm ("//__per_cpu_" #op "_8\n" \ - "1: ldxr %[ret], %[ptr]\n" \ - #asm_op " %[ret], %[ret], %[val]\n" \ - " stxr %w[loop], %[ret], %[ptr]\n" \ - " cbnz %w[loop], 1b" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u64 *)ptr) \ - : [val] "Ir" (val)); \ - break; \ - default: \ - ret = 0; \ - BUILD_BUG(); \ - } \ - \ - return ret; \ -} - -PERCPU_OP(add, add) -PERCPU_OP(and, and) -PERCPU_OP(or, orr) -#undef PERCPU_OP - -static inline unsigned long __percpu_read(void *ptr, int size) -{ - unsigned long ret; - - switch (size) { - case 1: - ret = READ_ONCE(*(u8 *)ptr); - break; - case 2: - ret = READ_ONCE(*(u16 *)ptr); - break; - case 4: - ret = READ_ONCE(*(u32 *)ptr); - break; - case 8: - ret = READ_ONCE(*(u64 *)ptr); - break; - default: - ret = 0; - BUILD_BUG(); - } - - return ret; +static inline void __percpu_write_##sz(void *ptr, unsigned long val) \ +{ \ + WRITE_ONCE(*(u##sz *)ptr, (u##sz)val); \ } -static inline void __percpu_write(void *ptr, unsigned long val, int size) -{ - switch (size) { - case 1: - WRITE_ONCE(*(u8 *)ptr, (u8)val); - break; - case 2: - WRITE_ONCE(*(u16 *)ptr, (u16)val); - break; - case 4: - WRITE_ONCE(*(u32 *)ptr, (u32)val); - break; - case 8: - WRITE_ONCE(*(u64 *)ptr, (u64)val); - break; - default: - BUILD_BUG(); - } +#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \ +static inline void \ +__percpu_##name##_case_##sz(void *ptr, unsigned long val) \ +{ \ + unsigned int loop; \ + u##sz tmp; \ + \ + asm volatile (ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + "1: ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n" \ + #op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ + " stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n" \ + " cbnz %w[loop], 1b", \ + /* LSE atomics */ \ + #op_lse "\t%" #w "[val], %[ptr]\n" \ + __nops(3)) \ + : [loop] "=&r" (loop), [tmp] "=&r" (tmp), \ + [ptr] "+Q"(*(u##sz *)ptr) \ + : [val] "r" ((u##sz)(val))); \ } -static inline unsigned long 
__percpu_xchg(void *ptr, unsigned long val, - int size) -{ - unsigned long ret, loop; - - switch (size) { - case 1: - asm ("//__percpu_xchg_1\n" - "1: ldxrb %w[ret], %[ptr]\n" - " stxrb %w[loop], %w[val], %[ptr]\n" - " cbnz %w[loop], 1b" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u8 *)ptr) - : [val] "r" (val)); - break; - case 2: - asm ("//__percpu_xchg_2\n" - "1: ldxrh %w[ret], %[ptr]\n" - " stxrh %w[loop], %w[val], %[ptr]\n" - " cbnz %w[loop], 1b" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u16 *)ptr) - : [val] "r" (val)); - break; - case 4: - asm ("//__percpu_xchg_4\n" - "1: ldxr %w[ret], %[ptr]\n" - " stxr %w[loop], %w[val], %[ptr]\n" - " cbnz %w[loop], 1b" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u32 *)ptr) - : [val] "r" (val)); - break; - case 8: - asm ("//__percpu_xchg_8\n" - "1: ldxr %[ret], %[ptr]\n" - " stxr %w[loop], %[val], %[ptr]\n" - " cbnz %w[loop], 1b" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u64 *)ptr) - : [val] "r" (val)); - break; - default: - ret = 0; - BUILD_BUG(); - } - - return ret; +#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \ +static inline u##sz \ +__percpu_##name##_return_case_##sz(void *ptr, unsigned long val) \ +{ \ + unsigned int loop; \ + u##sz ret; \ + \ + asm volatile (ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + "1: ldxr" #sfx "\t%" #w "[ret], %[ptr]\n" \ + #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \ + " stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n" \ + " cbnz %w[loop], 1b", \ + /* LSE atomics */ \ + #op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n" \ + #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \ + __nops(2)) \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u##sz *)ptr) \ + : [val] "r" ((u##sz)(val))); \ + \ + return ret; \ } -/* this_cpu_cmpxchg */ -#define _protect_cmpxchg_local(pcp, o, n) \ -({ \ - typeof(*raw_cpu_ptr(&(pcp))) __ret; \ - preempt_disable(); \ - __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ - preempt_enable(); \ - __ret; \ -}) - -#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) -#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) -#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) -#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define PERCPU_OP(name, op_llsc, op_lse) \ + __PERCPU_OP_CASE(w, b, name, 8, op_llsc, op_lse) \ + __PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ + __PERCPU_OP_CASE(w, , name, 32, op_llsc, op_lse) \ + __PERCPU_OP_CASE( , , name, 64, op_llsc, op_lse) + +#define PERCPU_RET_OP(name, op_llsc, op_lse) \ + __PERCPU_RET_OP_CASE(w, b, name, 8, op_llsc, op_lse) \ + __PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ + __PERCPU_RET_OP_CASE(w, , name, 32, op_llsc, op_lse) \ + __PERCPU_RET_OP_CASE( , , name, 64, op_llsc, op_lse) + +PERCPU_RW_OPS(8) +PERCPU_RW_OPS(16) +PERCPU_RW_OPS(32) +PERCPU_RW_OPS(64) +PERCPU_OP(add, add, stadd) +PERCPU_OP(andnot, bic, stclr) +PERCPU_OP(or, orr, stset) +PERCPU_RET_OP(add, add, ldadd) + +#undef PERCPU_RW_OPS +#undef __PERCPU_OP_CASE +#undef __PERCPU_RET_OP_CASE +#undef PERCPU_OP +#undef PERCPU_RET_OP +/* + * It would be nice to avoid the conditional call into the scheduler when + * re-enabling preemption for preemptible kernels, but doing that in a way + * which builds inside a module would mean messing directly with the preempt + * count. If you do this, peterz and tglx will hunt you down. 
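A usage-level sketch of what the reworked ops below buy (the per-CPU counter is hypothetical): with LSE atomics available, the increment should compile down to a single stadd, with preemption disabled only around that one access.

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, my_event_count);

static void count_event(void)
{
	this_cpu_add(my_event_count, 1);	/* dispatches to this_cpu_add_8() */
}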
+ */ #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ ({ \ int __ret; \ - preempt_disable(); \ + preempt_disable_notrace(); \ __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ raw_cpu_ptr(&(ptr2)), \ o1, o2, n1, n2); \ - preempt_enable(); \ + preempt_enable_notrace(); \ __ret; \ }) -#define _percpu_read(pcp) \ +#define _pcp_protect(op, pcp, ...) \ ({ \ - typeof(pcp) __retval; \ preempt_disable_notrace(); \ - __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ - sizeof(pcp)); \ + op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \ preempt_enable_notrace(); \ - __retval; \ }) -#define _percpu_write(pcp, val) \ -do { \ +#define _pcp_protect_return(op, pcp, args...) \ +({ \ + typeof(pcp) __retval; \ preempt_disable_notrace(); \ - __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ - sizeof(pcp)); \ + __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args); \ preempt_enable_notrace(); \ -} while(0) \ - -#define _pcp_protect(operation, pcp, val) \ -({ \ - typeof(pcp) __retval; \ - preempt_disable(); \ - __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ - (val), sizeof(pcp)); \ - preempt_enable(); \ - __retval; \ + __retval; \ }) -#define _percpu_add(pcp, val) \ - _pcp_protect(__percpu_add, pcp, val) - -#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) - -#define _percpu_and(pcp, val) \ - _pcp_protect(__percpu_and, pcp, val) - -#define _percpu_or(pcp, val) \ - _pcp_protect(__percpu_or, pcp, val) - -#define _percpu_xchg(pcp, val) (typeof(pcp)) \ - _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) - -#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) -#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) -#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) -#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) - -#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val) -#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val) -#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) -#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) - -#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val) -#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val) -#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) -#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) - -#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val) -#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val) -#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) -#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) - -#define this_cpu_read_1(pcp) _percpu_read(pcp) -#define this_cpu_read_2(pcp) _percpu_read(pcp) -#define this_cpu_read_4(pcp) _percpu_read(pcp) -#define this_cpu_read_8(pcp) _percpu_read(pcp) - -#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) - -#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) -#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) -#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) -#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_read_1(pcp) \ + _pcp_protect_return(__percpu_read_8, pcp) +#define this_cpu_read_2(pcp) \ + _pcp_protect_return(__percpu_read_16, pcp) +#define this_cpu_read_4(pcp) \ + _pcp_protect_return(__percpu_read_32, pcp) +#define this_cpu_read_8(pcp) \ + _pcp_protect_return(__percpu_read_64, pcp) + +#define this_cpu_write_1(pcp, val) \ + 
_pcp_protect(__percpu_write_8, pcp, (unsigned long)val) +#define this_cpu_write_2(pcp, val) \ + _pcp_protect(__percpu_write_16, pcp, (unsigned long)val) +#define this_cpu_write_4(pcp, val) \ + _pcp_protect(__percpu_write_32, pcp, (unsigned long)val) +#define this_cpu_write_8(pcp, val) \ + _pcp_protect(__percpu_write_64, pcp, (unsigned long)val) + +#define this_cpu_add_1(pcp, val) \ + _pcp_protect(__percpu_add_case_8, pcp, val) +#define this_cpu_add_2(pcp, val) \ + _pcp_protect(__percpu_add_case_16, pcp, val) +#define this_cpu_add_4(pcp, val) \ + _pcp_protect(__percpu_add_case_32, pcp, val) +#define this_cpu_add_8(pcp, val) \ + _pcp_protect(__percpu_add_case_64, pcp, val) + +#define this_cpu_add_return_1(pcp, val) \ + _pcp_protect_return(__percpu_add_return_case_8, pcp, val) +#define this_cpu_add_return_2(pcp, val) \ + _pcp_protect_return(__percpu_add_return_case_16, pcp, val) +#define this_cpu_add_return_4(pcp, val) \ + _pcp_protect_return(__percpu_add_return_case_32, pcp, val) +#define this_cpu_add_return_8(pcp, val) \ + _pcp_protect_return(__percpu_add_return_case_64, pcp, val) + +#define this_cpu_and_1(pcp, val) \ + _pcp_protect(__percpu_andnot_case_8, pcp, ~val) +#define this_cpu_and_2(pcp, val) \ + _pcp_protect(__percpu_andnot_case_16, pcp, ~val) +#define this_cpu_and_4(pcp, val) \ + _pcp_protect(__percpu_andnot_case_32, pcp, ~val) +#define this_cpu_and_8(pcp, val) \ + _pcp_protect(__percpu_andnot_case_64, pcp, ~val) + +#define this_cpu_or_1(pcp, val) \ + _pcp_protect(__percpu_or_case_8, pcp, val) +#define this_cpu_or_2(pcp, val) \ + _pcp_protect(__percpu_or_case_16, pcp, val) +#define this_cpu_or_4(pcp, val) \ + _pcp_protect(__percpu_or_case_32, pcp, val) +#define this_cpu_or_8(pcp, val) \ + _pcp_protect(__percpu_or_case_64, pcp, val) + +#define this_cpu_xchg_1(pcp, val) \ + _pcp_protect_return(xchg_relaxed, pcp, val) +#define this_cpu_xchg_2(pcp, val) \ + _pcp_protect_return(xchg_relaxed, pcp, val) +#define this_cpu_xchg_4(pcp, val) \ + _pcp_protect_return(xchg_relaxed, pcp, val) +#define this_cpu_xchg_8(pcp, val) \ + _pcp_protect_return(xchg_relaxed, pcp, val) + +#define this_cpu_cmpxchg_1(pcp, o, n) \ + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) +#define this_cpu_cmpxchg_2(pcp, o, n) \ + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) +#define this_cpu_cmpxchg_4(pcp, o, n) \ + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) +#define this_cpu_cmpxchg_8(pcp, o, n) \ + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) #include <asm-generic/percpu.h> diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index f9ccc36d3dc3..c593761ba61c 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -24,6 +24,160 @@ #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) /* + * Common architectural and microarchitectural event numbers. 
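A hedged userspace sketch of consuming one of the raw event numbers below via perf_event_open() (0x11 is ARMV8_PMUV3_PERFCTR_CPU_CYCLES; error handling is minimal):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x11;		/* ARMV8_PMUV3_PERFCTR_CPU_CYCLES */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ...workload under measurement... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	return 0;
}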
+ */ +#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05 +#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06 +#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07 +#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 +#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09 +#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A +#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B +#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C +#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D +#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E +#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 +#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 +#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 +#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18 +#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19 +#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A +#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B +#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C +#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D +#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20 +#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21 +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22 +#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23 +#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C +#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D +#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E +#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F +#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x31 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x32 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x33 +#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x34 +#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x35 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x36 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x37 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x38 + +/* Statistical profiling extension microarchitectural events */ +#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000 +#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001 +#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002 +#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003 + +/* ARMv8 recommended implementation defined event types */ +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48 + +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 
0x4C +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53 + +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58 + +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A + +#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C +#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D +#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E +#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F +#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70 +#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71 +#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72 +#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73 +#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74 +#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75 +#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76 +#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77 +#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78 +#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79 +#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A + +#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C +#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D +#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E + +#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81 +#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82 +#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83 +#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84 + +#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86 +#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87 +#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88 + +#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F +#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90 +#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8 + +/* * Per-CPU PMCR: config reg */ #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ @@ -50,21 +204,11 @@ #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ /* - * PMUv3 event types: required events - */ -#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 -#define 
ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 -#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 -#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 -#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 - -/* * Event filters for PMUv3 */ -#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) -#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) -#define ARMV8_PMU_INCLUDE_EL2 (1 << 27) +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) /* * PMUSERENR: user enable reg diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 1d7d8da2ef9b..22bb3ae514f5 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -80,7 +80,7 @@ #define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS) #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) +#define PTRS_PER_PGD (1 << (MAX_USER_VA_BITS - PGDIR_SHIFT)) /* * Section address mask and size definitions. @@ -193,6 +193,10 @@ #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ +#define PUD_S2_RDONLY (_AT(pudval_t, 1) << 6) /* HAP[2:1] */ +#define PUD_S2_RDWR (_AT(pudval_t, 3) << 6) /* HAP[2:1] */ +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ + /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ @@ -224,6 +228,8 @@ #define TCR_TxSZ_WIDTH 6 #define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET) +#define TCR_EPD0_SHIFT 7 +#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT) #define TCR_IRGN0_SHIFT 8 #define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT) #define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT) @@ -231,6 +237,8 @@ #define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT) #define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT) +#define TCR_EPD1_SHIFT 23 +#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT) #define TCR_IRGN1_SHIFT 24 #define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT) #define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT) @@ -306,4 +314,10 @@ #define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) #endif +#ifdef CONFIG_ARM64_USER_VA_BITS_52 +/* Must be at least 64-byte aligned to prevent corruption of the TTBR */ +#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \ + (UL(1) << (48 - PGDIR_SHIFT))) * 8) +#endif + #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 50b1ef8584c0..de70c1eabf33 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -22,6 +22,7 @@ #include <asm/memory.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable-prot.h> +#include <asm/tlbflush.h> /* * VMALLOC range. 
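The arithmetic behind TTBR1_BADDR_4852_OFFSET above, as a hedged sketch (assuming 64K pages and three pagetable levels, the configuration 52-bit VAs require in this series, where PGDIR_SHIFT works out to 42):

#include <stdio.h>

int main(void)
{
	int pgdir_shift = 42;				/* assumed: 64K pages */
	long slots_52 = 1L << (52 - pgdir_shift);	/* 1024 pgd entries */
	long slots_48 = 1L << (48 - pgdir_shift);	/*   64 pgd entries */

	/* Byte distance between a 52-bit-sized pgd and a 48-bit-sized one;
	 * offsetting the TTBR by this much lets 48-bit indexing land on
	 * the tail of the larger table: */
	printf("offset = %ld bytes\n", (slots_52 - slots_48) * 8);	/* 7680 */
	return 0;
}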
@@ -314,6 +315,11 @@ static inline pte_t pud_pte(pud_t pud) return __pte(pud_val(pud)); } +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} + static inline pmd_t pud_pmd(pud_t pud) { return __pmd(pud_val(pud)); @@ -381,8 +387,12 @@ static inline int pmd_protnone(pmd_t pmd) #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) +#define pud_young(pud) pte_young(pud_pte(pud)) +#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) #define pud_write(pud) pte_write(pud_pte(pud)) +#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT)) + #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) @@ -685,6 +695,27 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, return __ptep_test_and_clear_young(ptep); } +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + int young = ptep_test_and_clear_young(vma, address, ptep); + + if (young) { + /* + * We can elide the trailing DSB here since the worst that can + * happen is that a CPU continues to use the young entry in its + * TLB and we mistakenly reclaim the associated page. The + * window for such an event is bounded by the next + * context-switch, which provides a DSB to complete the TLB + * invalidation. + */ + flush_tlb_page_nosync(vma, address); + } + + return young; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h new file mode 100644 index 000000000000..15d49515efdd --- /dev/null +++ b/arch/arm64/include/asm/pointer_auth.h @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __ASM_POINTER_AUTH_H +#define __ASM_POINTER_AUTH_H + +#include <linux/bitops.h> +#include <linux/random.h> + +#include <asm/cpufeature.h> +#include <asm/memory.h> +#include <asm/sysreg.h> + +#ifdef CONFIG_ARM64_PTR_AUTH +/* + * Each key is a 128-bit quantity which is split across a pair of 64-bit + * registers (Lo and Hi). + */ +struct ptrauth_key { + unsigned long lo, hi; +}; + +/* + * We give each process its own keys, which are shared by all threads. The keys + * are inherited upon fork(), and reinitialised upon exec*(). 
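+ * + * APIA/APIB are the keys used to sign instruction pointers, APDA/APDB the + * keys used to sign data pointers, and APGA the key behind the PACGA + * instruction used for generic authentication of arbitrary data.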
+ */ +struct ptrauth_keys { + struct ptrauth_key apia; + struct ptrauth_key apib; + struct ptrauth_key apda; + struct ptrauth_key apdb; + struct ptrauth_key apga; +}; + +static inline void ptrauth_keys_init(struct ptrauth_keys *keys) +{ + if (system_supports_address_auth()) { + get_random_bytes(&keys->apia, sizeof(keys->apia)); + get_random_bytes(&keys->apib, sizeof(keys->apib)); + get_random_bytes(&keys->apda, sizeof(keys->apda)); + get_random_bytes(&keys->apdb, sizeof(keys->apdb)); + } + + if (system_supports_generic_auth()) + get_random_bytes(&keys->apga, sizeof(keys->apga)); +} + +#define __ptrauth_key_install(k, v) \ +do { \ + struct ptrauth_key __pki_v = (v); \ + write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \ + write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ +} while (0) + +static inline void ptrauth_keys_switch(struct ptrauth_keys *keys) +{ + if (system_supports_address_auth()) { + __ptrauth_key_install(APIA, keys->apia); + __ptrauth_key_install(APIB, keys->apib); + __ptrauth_key_install(APDA, keys->apda); + __ptrauth_key_install(APDB, keys->apdb); + } + + if (system_supports_generic_auth()) + __ptrauth_key_install(APGA, keys->apga); +} + +extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); + +/* + * The EL0 pointer bits used by a pointer authentication code. + * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. + */ +#define ptrauth_user_pac_mask() GENMASK(54, vabits_user) + +/* Only valid for EL0 TTBR0 instruction pointers */ +static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) +{ + return ptr & ~ptrauth_user_pac_mask(); +} + +#define ptrauth_thread_init_user(tsk) \ +do { \ + struct task_struct *__ptiu_tsk = (tsk); \ + ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \ + ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \ +} while (0) + +#define ptrauth_thread_switch(tsk) \ + ptrauth_keys_switch(&(tsk)->thread.keys_user) + +#else /* CONFIG_ARM64_PTR_AUTH */ +#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) +#define ptrauth_strip_insn_pac(lr) (lr) +#define ptrauth_thread_init_user(tsk) +#define ptrauth_thread_switch(tsk) +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h new file mode 100644 index 000000000000..d49951647014 --- /dev/null +++ b/arch/arm64/include/asm/preempt.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include <linux/thread_info.h> + +#define PREEMPT_NEED_RESCHED BIT(32) +#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) + +static inline int preempt_count(void) +{ + return READ_ONCE(current_thread_info()->preempt.count); +} + +static inline void preempt_count_set(u64 pc) +{ + /* Preserve existing value of PREEMPT_NEED_RESCHED */ + WRITE_ONCE(current_thread_info()->preempt.count, pc); +} + +#define init_task_preempt_count(p) do { \ + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \ +} while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ +} while (0) + +static inline void set_preempt_need_resched(void) +{ + current_thread_info()->preempt.need_resched = 0; +} + +static inline void clear_preempt_need_resched(void) +{ + current_thread_info()->preempt.need_resched = 1; +} + +static inline bool test_preempt_need_resched(void) +{ + return !current_thread_info()->preempt.need_resched; +} + +static inline void __preempt_count_add(int val) +{ + u32 
pc = READ_ONCE(current_thread_info()->preempt.count); + pc += val; + WRITE_ONCE(current_thread_info()->preempt.count, pc); +} + +static inline void __preempt_count_sub(int val) +{ + u32 pc = READ_ONCE(current_thread_info()->preempt.count); + pc -= val; + WRITE_ONCE(current_thread_info()->preempt.count, pc); +} + +static inline bool __preempt_count_dec_and_test(void) +{ + struct thread_info *ti = current_thread_info(); + u64 pc = READ_ONCE(ti->preempt_count); + + /* Update only the count field, leaving need_resched unchanged */ + WRITE_ONCE(ti->preempt.count, --pc); + + /* + * If we wrote back all zeroes, then we're preemptible and in + * need of a reschedule. Otherwise, we need to reload the + * preempt_count in case the need_resched flag was cleared by an + * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE + * pair. + */ + return !pc || !READ_ONCE(ti->preempt_count); +} + +static inline bool should_resched(int preempt_offset) +{ + u64 pc = READ_ONCE(current_thread_info()->preempt_count); + return pc == preempt_offset; +} + +#ifdef CONFIG_PREEMPT +void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() +void preempt_schedule_notrace(void); +#define __preempt_schedule_notrace() preempt_schedule_notrace() +#endif /* CONFIG_PREEMPT */ + +#endif /* __ASM_PREEMPT_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 6b0d4dff5012..f1a7ab18faf3 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -19,10 +19,8 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H -#define TASK_SIZE_64 (UL(1) << VA_BITS) - -#define KERNEL_DS UL(-1) -#define USER_DS (TASK_SIZE_64 - 1) +#define KERNEL_DS UL(-1) +#define USER_DS ((UL(1) << MAX_USER_VA_BITS) - 1) /* * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is @@ -46,6 +44,7 @@ #include <asm/hw_breakpoint.h> #include <asm/lse.h> #include <asm/pgtable-hwdef.h> +#include <asm/pointer_auth.h> #include <asm/ptrace.h> #include <asm/types.h> @@ -53,19 +52,31 @@ * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ + +#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS) +#define TASK_SIZE_64 (UL(1) << vabits_user) + #ifdef CONFIG_COMPAT #define TASK_SIZE_32 UL(0x100000000) #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) +#define DEFAULT_MAP_WINDOW (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64) #else #define TASK_SIZE TASK_SIZE_64 +#define DEFAULT_MAP_WINDOW DEFAULT_MAP_WINDOW_64 #endif /* CONFIG_COMPAT */ +#ifdef CONFIG_ARM64_FORCE_52BIT +#define STACK_TOP_MAX TASK_SIZE_64 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) +#else +#define STACK_TOP_MAX DEFAULT_MAP_WINDOW_64 +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4)) +#endif /* CONFIG_ARM64_FORCE_52BIT */ -#define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ @@ -74,6 +85,15 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ +#ifndef CONFIG_ARM64_FORCE_52BIT +#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\ + DEFAULT_MAP_WINDOW) + +#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? 
\ + base + TASK_SIZE - DEFAULT_MAP_WINDOW :\ + base) +#endif /* CONFIG_ARM64_FORCE_52BIT */ + extern phys_addr_t arm64_dma_phys_limit; #define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) @@ -127,6 +147,9 @@ struct thread_struct { unsigned long fault_address; /* fault info */ unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ +#ifdef CONFIG_ARM64_PTR_AUTH + struct ptrauth_keys keys_user; +#endif }; static inline void arch_thread_struct_whitelist(unsigned long *offset, @@ -270,6 +293,9 @@ extern void __init minsigstksz_setup(void); #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() +/* PR_PAC_RESET_KEYS prctl */ +#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) + /* * For CONFIG_GCC_PLUGIN_STACKLEAK * diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index f82b447bd34f..1895561839a9 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -17,15 +17,20 @@ #define __ASM_SMP_H /* Values for secondary_data.status */ +#define CPU_STUCK_REASON_SHIFT (8) +#define CPU_BOOT_STATUS_MASK ((1U << CPU_STUCK_REASON_SHIFT) - 1) -#define CPU_MMU_OFF (-1) -#define CPU_BOOT_SUCCESS (0) +#define CPU_MMU_OFF (-1) +#define CPU_BOOT_SUCCESS (0) /* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */ -#define CPU_KILL_ME (1) +#define CPU_KILL_ME (1) /* The cpu couldn't die gracefully and is looping in the kernel */ -#define CPU_STUCK_IN_KERNEL (2) +#define CPU_STUCK_IN_KERNEL (2) /* Fatal system error detected by secondary CPU, crash the system */ -#define CPU_PANIC_KERNEL (3) +#define CPU_PANIC_KERNEL (3) + +#define CPU_STUCK_REASON_52_BIT_VA (1U << CPU_STUCK_REASON_SHIFT) +#define CPU_STUCK_REASON_NO_GRAN (2U << CPU_STUCK_REASON_SHIFT) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index 58d15be11c4d..5884a2b02827 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -34,7 +34,8 @@ static __always_inline void boot_init_stack_canary(void) canary &= CANARY_MASK; current->stack_canary = canary; - __stack_chk_guard = current->stack_canary; + if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) + __stack_chk_guard = current->stack_canary; } #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index d352f6df8d2c..5412fa40825e 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -30,16 +30,14 @@ #define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls)) /* - * The hardware supports concatenation of up to 16 tables at stage2 entry level - * and we use the feature whenever possible. + * The hardware supports concatenation of up to 16 tables at stage2 entry + * level and we use the feature whenever possible, which means we resolve 4 + * additional bits of address at the entry level. * - * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3). - * On arm64, the smallest PAGE_SIZE supported is 4k, which means - * (PAGE_SHIFT - 3) > 4 holds for all page sizes. - * This implies, the total number of page table levels at stage2 expected - * by the hardware is actually the number of levels required for (IPA_SHIFT - 4) - * in normal translations(e.g, stage1), since we cannot have another level in - * the range (IPA_SHIFT, IPA_SHIFT - 4). 
+ * This implies that the total number of page table levels required at + * stage2 for a given IPA_SHIFT can be calculated using the same logic as + * for the (non-collapsible) stage1 page tables, only for (IPA_SHIFT - 4). + * For example, with 4K pages a 40-bit IPA needs ARM64_HW_PGTABLE_LEVELS(36), + * i.e. 3 levels at stage2, where a stage1 walk of a 40-bit VA would need 4. */ #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) #define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 842fb9572661..72dc4c011014 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -20,6 +20,7 @@ #ifndef __ASM_SYSREG_H #define __ASM_SYSREG_H +#include <linux/const.h> #include <linux/stringify.h> /* @@ -104,6 +105,11 @@ #define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) +#define __SYS_BARRIER_INSN(CRm, op2, Rt) \ + __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) + +#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) + #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) @@ -183,6 +189,19 @@ #define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) +#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) +#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1) +#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2) +#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3) + +#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0) +#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1) +#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2) +#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3) + +#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0) +#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1) + #define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) #define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0) @@ -431,27 +450,31 @@ #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) /* Common SCTLR_ELx flags. */ -#define SCTLR_ELx_DSSBS (1UL << 44) -#define SCTLR_ELx_EE (1 << 25) -#define SCTLR_ELx_IESB (1 << 21) -#define SCTLR_ELx_WXN (1 << 19) -#define SCTLR_ELx_I (1 << 12) -#define SCTLR_ELx_SA (1 << 3) -#define SCTLR_ELx_C (1 << 2) -#define SCTLR_ELx_A (1 << 1) -#define SCTLR_ELx_M 1 +#define SCTLR_ELx_DSSBS (_BITUL(44)) +#define SCTLR_ELx_ENIA (_BITUL(31)) +#define SCTLR_ELx_ENIB (_BITUL(30)) +#define SCTLR_ELx_ENDA (_BITUL(27)) +#define SCTLR_ELx_EE (_BITUL(25)) +#define SCTLR_ELx_IESB (_BITUL(21)) +#define SCTLR_ELx_WXN (_BITUL(19)) +#define SCTLR_ELx_ENDB (_BITUL(13)) +#define SCTLR_ELx_I (_BITUL(12)) +#define SCTLR_ELx_SA (_BITUL(3)) +#define SCTLR_ELx_C (_BITUL(2)) +#define SCTLR_ELx_A (_BITUL(1)) +#define SCTLR_ELx_M (_BITUL(0)) #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB) /* SCTLR_EL2 specific flags.
*/ -#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \ - (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \ - (1 << 29)) -#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \ - (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \ - (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \ - (1 << 27) | (1 << 30) | (1 << 31) | \ +#define SCTLR_EL2_RES1 ((_BITUL(4)) | (_BITUL(5)) | (_BITUL(11)) | (_BITUL(16)) | \ + (_BITUL(18)) | (_BITUL(22)) | (_BITUL(23)) | (_BITUL(28)) | \ + (_BITUL(29))) +#define SCTLR_EL2_RES0 ((_BITUL(6)) | (_BITUL(7)) | (_BITUL(8)) | (_BITUL(9)) | \ + (_BITUL(10)) | (_BITUL(13)) | (_BITUL(14)) | (_BITUL(15)) | \ + (_BITUL(17)) | (_BITUL(20)) | (_BITUL(24)) | (_BITUL(26)) | \ + (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \ (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN @@ -473,23 +496,23 @@ #endif /* SCTLR_EL1 specific flags. */ -#define SCTLR_EL1_UCI (1 << 26) -#define SCTLR_EL1_E0E (1 << 24) -#define SCTLR_EL1_SPAN (1 << 23) -#define SCTLR_EL1_NTWE (1 << 18) -#define SCTLR_EL1_NTWI (1 << 16) -#define SCTLR_EL1_UCT (1 << 15) -#define SCTLR_EL1_DZE (1 << 14) -#define SCTLR_EL1_UMA (1 << 9) -#define SCTLR_EL1_SED (1 << 8) -#define SCTLR_EL1_ITD (1 << 7) -#define SCTLR_EL1_CP15BEN (1 << 5) -#define SCTLR_EL1_SA0 (1 << 4) - -#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \ - (1 << 29)) -#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \ - (1 << 27) | (1 << 30) | (1 << 31) | \ +#define SCTLR_EL1_UCI (_BITUL(26)) +#define SCTLR_EL1_E0E (_BITUL(24)) +#define SCTLR_EL1_SPAN (_BITUL(23)) +#define SCTLR_EL1_NTWE (_BITUL(18)) +#define SCTLR_EL1_NTWI (_BITUL(16)) +#define SCTLR_EL1_UCT (_BITUL(15)) +#define SCTLR_EL1_DZE (_BITUL(14)) +#define SCTLR_EL1_UMA (_BITUL(9)) +#define SCTLR_EL1_SED (_BITUL(8)) +#define SCTLR_EL1_ITD (_BITUL(7)) +#define SCTLR_EL1_CP15BEN (_BITUL(5)) +#define SCTLR_EL1_SA0 (_BITUL(4)) + +#define SCTLR_EL1_RES1 ((_BITUL(11)) | (_BITUL(20)) | (_BITUL(22)) | (_BITUL(28)) | \ + (_BITUL(29))) +#define SCTLR_EL1_RES0 ((_BITUL(6)) | (_BITUL(10)) | (_BITUL(13)) | (_BITUL(17)) | \ + (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \ (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN @@ -528,11 +551,25 @@ #define ID_AA64ISAR0_AES_SHIFT 4 /* id_aa64isar1 */ +#define ID_AA64ISAR1_SB_SHIFT 36 +#define ID_AA64ISAR1_GPI_SHIFT 28 +#define ID_AA64ISAR1_GPA_SHIFT 24 #define ID_AA64ISAR1_LRCPC_SHIFT 20 #define ID_AA64ISAR1_FCMA_SHIFT 16 #define ID_AA64ISAR1_JSCVT_SHIFT 12 +#define ID_AA64ISAR1_API_SHIFT 8 +#define ID_AA64ISAR1_APA_SHIFT 4 #define ID_AA64ISAR1_DPB_SHIFT 0 +#define ID_AA64ISAR1_APA_NI 0x0 +#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_API_NI 0x0 +#define ID_AA64ISAR1_API_IMP_DEF 0x1 +#define ID_AA64ISAR1_GPA_NI 0x0 +#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_GPI_NI 0x0 +#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 @@ -676,13 +713,13 @@ #define ZCR_ELx_LEN_SIZE 9 #define ZCR_ELx_LEN_MASK 0x1ff -#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */ -#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */ +#define CPACR_EL1_ZEN_EL1EN (_BITUL(16)) /* enable EL1 access */ +#define CPACR_EL1_ZEN_EL0EN (_BITUL(17)) /* enable EL0 access, if EL1EN set */ #define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ -#define SYS_MPIDR_SAFE_VAL (1UL << 31) +#define SYS_MPIDR_SAFE_VAL 
(_BITUL(31)) #ifdef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index cb2c10a8f0a8..bbca68b54732 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -42,7 +42,18 @@ struct thread_info { #ifdef CONFIG_ARM64_SW_TTBR0_PAN u64 ttbr0; /* saved TTBR0_EL1 */ #endif - int preempt_count; /* 0 => preemptable, <0 => bug */ + union { + u64 preempt_count; /* 0 => preemptible, <0 => bug */ + struct { +#ifdef CONFIG_CPU_BIG_ENDIAN + u32 need_resched; + u32 count; +#else + u32 count; + u32 need_resched; +#endif + } preempt; + }; }; #define thread_saved_pc(tsk) \ diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 5dfd23897dea..3a1870228946 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -21,6 +21,7 @@ #ifndef __ASSEMBLY__ +#include <linux/mm_types.h> #include <linux/sched.h> #include <asm/cputype.h> #include <asm/mmu.h> @@ -164,14 +165,20 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ish); } -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long uaddr) +static inline void flush_tlb_page_nosync(struct vm_area_struct *vma, + unsigned long uaddr) { unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); dsb(ishst); __tlbi(vale1is, addr); __tlbi_user(vale1is, addr); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long uaddr) +{ + flush_tlb_page_nosync(vma, uaddr); dsb(ish); } @@ -179,7 +186,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, * This is meant to avoid soft lock-ups on large TLB flushing ranges and not * necessarily a performance improvement. */ -#define MAX_TLBI_OPS 1024UL +#define MAX_TLBI_OPS PTRS_PER_PTE static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, @@ -188,7 +195,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long asid = ASID(vma->vm_mm); unsigned long addr; - if ((end - start) > (MAX_TLBI_OPS * stride)) { + if ((end - start) >= (MAX_TLBI_OPS * stride)) { flush_tlb_mm(vma->vm_mm); return; } diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 07c34087bd5e..fad33f5fde47 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -45,8 +45,7 @@ static inline void set_fs(mm_segment_t fs) * Prevent a mispredicted conditional call to set_fs from forwarding * the wrong address limit to access_ok under speculation. */ - dsb(nsh); - isb(); + spec_bar(); /* On user-mode return, check fs is correct */ set_thread_flag(TIF_FSCHECK); diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h new file mode 100644 index 000000000000..856386ad076c --- /dev/null +++ b/arch/arm64/include/asm/xor.h @@ -0,0 +1,73 @@ +/* + * arch/arm64/include/asm/xor.h + * + * Authors: Jackie Liu <liuyun01@kylinos.cn> + * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/hardirq.h> +#include <asm-generic/xor.h> +#include <asm/hwcap.h> +#include <asm/neon.h> + +#ifdef CONFIG_KERNEL_MODE_NEON + +extern struct xor_block_template const xor_block_inner_neon; + +static void +xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_2(bytes, p1, p2); + kernel_neon_end(); +} + +static void +xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_3(bytes, p1, p2, p3); + kernel_neon_end(); +} + +static void +xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4); + kernel_neon_end(); +} + +static void +xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5); + kernel_neon_end(); +} + +static struct xor_block_template xor_block_arm64 = { + .name = "arm64_neon", + .do_2 = xor_neon_2, + .do_3 = xor_neon_3, + .do_4 = xor_neon_4, + .do_5 = xor_neon_5 +}; +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + if (cpu_has_neon()) { \ + xor_speed(&xor_block_arm64);\ + } \ + } while (0) + +#endif /* CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 2bcd6e4f3474..5f0750c2199c 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -49,5 +49,8 @@ #define HWCAP_ILRCPC (1 << 26) #define HWCAP_FLAGM (1 << 27) #define HWCAP_SSBS (1 << 28) +#define HWCAP_SB (1 << 29) +#define HWCAP_PACA (1 << 30) +#define HWCAP_PACG (1UL << 31) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index a36227fdb084..c2f249bcd829 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -229,6 +229,13 @@ struct user_sve_header { SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) +/* pointer authentication masks (NT_ARM_PAC_MASK) */ + +struct user_pac_mask { + __u64 data_mask; + __u64 insn_mask; +}; + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 4c8b13bede80..df08d735b21d 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -30,7 +30,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o -arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o +arm64-obj-$(CONFIG_MODULES) += module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o @@ -49,14 +49,16 @@ arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o -arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ +arm64-obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ cpu-reset.o +arm64-obj-$(CONFIG_KEXEC_FILE) +=
machine_kexec_file.o kexec_image.o arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o +arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c deleted file mode 100644 index 72f63a59b008..000000000000 --- a/arch/arm64/kernel/arm64ksyms.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Based on arch/arm/kernel/armksyms.c - * - * Copyright (C) 2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/export.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/cryptohash.h> -#include <linux/delay.h> -#include <linux/in6.h> -#include <linux/syscalls.h> -#include <linux/uaccess.h> -#include <linux/io.h> -#include <linux/arm-smccc.h> -#include <linux/kprobes.h> - -#include <asm/checksum.h> - -EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(clear_page); - - /* user mem (segment) */ -EXPORT_SYMBOL(__arch_copy_from_user); -EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__arch_clear_user); -EXPORT_SYMBOL(__arch_copy_in_user); - - /* physical memory */ -EXPORT_SYMBOL(memstart_addr); - - /* string / mem functions */ -#ifndef CONFIG_KASAN -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memchr); -#endif - -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(__memset); -EXPORT_SYMBOL(__memcpy); -EXPORT_SYMBOL(__memmove); - - /* atomic bitops */ -EXPORT_SYMBOL(set_bit); -EXPORT_SYMBOL(test_and_set_bit); -EXPORT_SYMBOL(clear_bit); -EXPORT_SYMBOL(test_and_clear_bit); -EXPORT_SYMBOL(change_bit); -EXPORT_SYMBOL(test_and_change_bit); - -#ifdef CONFIG_FUNCTION_TRACER -EXPORT_SYMBOL(_mcount); -NOKPROBE_SYMBOL(_mcount); -#endif - - /* arm-smccc */ -EXPORT_SYMBOL(__arm_smccc_smc); -EXPORT_SYMBOL(__arm_smccc_hvc); - - /* tishift.S */ -extern long long __ashlti3(long long a, int b); -EXPORT_SYMBOL(__ashlti3); -extern long long __ashrti3(long long a, int b); -EXPORT_SYMBOL(__ashrti3); -extern long long __lshrti3(long long a, int b); -EXPORT_SYMBOL(__lshrti3); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 323aeb5f2fe6..65b8afc84466 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -46,6 +46,9 @@ int main(void) DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_STACKPROTECTOR + DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif BLANK(); DEFINE(THREAD_CPU_CONTEXT, 
offsetof(struct task_struct, thread.cpu_context)); BLANK(); diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 8021b46c9743..a2be30275a73 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -22,11 +22,11 @@ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for * cpu_soft_restart. * - * @el2_switch: Flag to indicate a swich to EL2 is needed. + * @el2_switch: Flag to indicate a switch to EL2 is needed. * @entry: Location to jump to for soft reset. - * arg0: First argument passed to @entry. - * arg1: Second argument passed to @entry. - * arg2: Third argument passed to @entry. + * arg0: First argument passed to @entry. (relocation list) + * arg1: Second argument passed to @entry. (physical kernel entry) + * arg2: Third argument passed to @entry. (physical dtb address) * * Put the CPU into the same state as it would be if it had been reset, and * branch to what would be the reset vector. It must be executed with the diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6ad715d67df8..09ac548c9d44 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) { - static DEFINE_SPINLOCK(bp_lock); + static DEFINE_RAW_SPINLOCK(bp_lock); int cpu, slot = -1; /* @@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, return; } - spin_lock(&bp_lock); + raw_spin_lock(&bp_lock); for_each_possible_cpu(cpu) { if (per_cpu(bp_hardening_data.fn, cpu) == fn) { slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); @@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); __this_cpu_write(bp_hardening_data.fn, fn); - spin_unlock(&bp_lock); + raw_spin_unlock(&bp_lock); } #else #define __smccc_workaround_1_smc_start NULL @@ -507,38 +507,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ CAP_MIDR_RANGE_LIST(midr_list) -/* - * Generic helper for handling capabilties with multiple (match,enable) pairs - * of call backs, sharing the same capability bit. - * Iterate over each entry to see if at least one matches. - */ -static bool __maybe_unused -multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope) -{ - const struct arm64_cpu_capabilities *caps; - - for (caps = entry->match_list; caps->matches; caps++) - if (caps->matches(caps, scope)) - return true; - - return false; -} - -/* - * Take appropriate action for all matching entries in the shared capability - * entry.
- */ -static void __maybe_unused -multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) -{ - const struct arm64_cpu_capabilities *caps; - - for (caps = entry->match_list; caps->matches; caps++) - if (caps->matches(caps, SCOPE_LOCAL_CPU) && - caps->cpu_enable) - caps->cpu_enable(caps); -} - #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR /* @@ -584,24 +552,63 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = { #endif -const struct arm64_cpu_capabilities arm64_errata[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_27456 +static const struct midr_range cavium_erratum_27456_cpus[] = { + /* Cavium ThunderX, T88 pass 1.x - 2.1 */ + MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), + /* Cavium ThunderX, T81 pass 1.0 */ + MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), + {}, +}; +#endif + +#ifdef CONFIG_CAVIUM_ERRATUM_30115 +static const struct midr_range cavium_erratum_30115_cpus[] = { + /* Cavium ThunderX, T88 pass 1.x - 2.2 */ + MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2), + /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ + MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), + /* Cavium ThunderX, T83 pass 1.0 */ + MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), + {}, +}; +#endif + +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = { + { + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), + }, + { + .midr_range.model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, + {}, +}; +#endif + +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE +static const struct midr_range workaround_clean_cache[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \ defined(CONFIG_ARM64_ERRATUM_824069) - { - /* Cortex-A53 r0p[012] */ - .desc = "ARM errata 826319, 827319, 824069", - .capability = ARM64_WORKAROUND_CLEAN_CACHE, - ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), - .cpu_enable = cpu_enable_cache_maint_trap, - }, + /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */ + MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_819472 + /* Cortex-A53 r0p[01] : ARM errata 819472 */ + MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), #endif -#ifdef CONFIG_ARM64_ERRATUM_819472 + {}, +}; +#endif + +const struct arm64_cpu_capabilities arm64_errata[] = { +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { - /* Cortex-A53 r0p[01] */ - .desc = "ARM errata 819472", + .desc = "ARM errata 826319, 827319, 824069, 819472", .capability = ARM64_WORKAROUND_CLEAN_CACHE, - ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), + ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), .cpu_enable = cpu_enable_cache_maint_trap, }, #endif @@ -652,40 +659,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { #endif #ifdef CONFIG_CAVIUM_ERRATUM_27456 { - /* Cavium ThunderX, T88 pass 1.x - 2.1 */ - .desc = "Cavium erratum 27456", - .capability = ARM64_WORKAROUND_CAVIUM_27456, - ERRATA_MIDR_RANGE(MIDR_THUNDERX, - 0, 0, - 1, 1), - }, - { - /* Cavium ThunderX, T81 pass 1.0 */ .desc = "Cavium erratum 27456", .capability = ARM64_WORKAROUND_CAVIUM_27456, - ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), + ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), }, #endif #ifdef CONFIG_CAVIUM_ERRATUM_30115 { - /* Cavium ThunderX, T88 pass 1.x - 2.2 */ .desc = "Cavium erratum 30115", .capability = ARM64_WORKAROUND_CAVIUM_30115, - ERRATA_MIDR_RANGE(MIDR_THUNDERX, - 0, 0, - 1, 2), - }, - { - /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ - .desc = "Cavium erratum 30115", - .capability = ARM64_WORKAROUND_CAVIUM_30115, - ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), - }, - { - /* Cavium ThunderX, T83 pass 1.0 
*/ - .desc = "Cavium erratum 30115", - .capability = ARM64_WORKAROUND_CAVIUM_30115, - ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), + ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), }, #endif { @@ -697,16 +680,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 { - .desc = "Qualcomm Technologies Falkor erratum 1003", - .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, - ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), - }, - { - .desc = "Qualcomm Technologies Kryo erratum 1003", + .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, - .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, - .midr_range.model = MIDR_QCOM_KRYO, - .matches = is_kryo_midr, + .matches = cpucap_multi_entry_cap_matches, + .match_list = qcom_erratum_1003_list, }, #endif #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI @@ -754,6 +731,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1165522 + { + /* Cortex-A76 r0p0 to r2p0 */ + .desc = "ARM erratum 1165522", + .capability = ARM64_WORKAROUND_1165522, + ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), + }, +#endif { } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index aec5ecb85737..4f272399de89 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -52,6 +52,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); +static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; /* * Flag to indicate if we have computed the system wide @@ -141,9 +142,18 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -518,6 +528,29 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) } extern const struct arm64_cpu_capabilities arm64_errata[]; +static const struct arm64_cpu_capabilities arm64_features[]; + +static void __init +init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) +{ + for (; caps->matches; caps++) { + if (WARN(caps->capability >= ARM64_NCAPS, + "Invalid capability %d\n", caps->capability)) + continue; + if (WARN(cpu_hwcaps_ptrs[caps->capability], + "Duplicate entry for capability %d\n", + caps->capability)) + continue; + cpu_hwcaps_ptrs[caps->capability] = caps; + } +} + +static void __init init_cpu_hwcaps_indirect_list(void) +{ + 
init_cpu_hwcaps_indirect_list_from_array(arm64_features); + init_cpu_hwcaps_indirect_list_from_array(arm64_errata); +} + static void __init setup_boot_cpu_capabilities(void); void __init init_cpu_features(struct cpuinfo_arm64 *info) @@ -564,6 +597,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) } /* + * Initialize the indirect array of CPU hwcaps capabilities pointers + * before we handle the boot CPU below. + */ + init_cpu_hwcaps_indirect_list(); + + /* * Detect and enable early CPU capabilities based on the boot CPU, * after we have initialised the CPU feature infrastructure. */ @@ -915,6 +954,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static const struct midr_range kpti_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), { /* sentinel */ } }; char const *str = "command line option"; @@ -1145,6 +1190,14 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) } #endif /* CONFIG_ARM64_RAS_EXTN */ +#ifdef CONFIG_ARM64_PTR_AUTH +static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) +{ + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); +} +#endif /* CONFIG_ARM64_PTR_AUTH */ + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -1368,22 +1421,115 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_cnp, }, #endif + { + .desc = "Speculation barrier (SB)", + .capability = ARM64_HAS_SB, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .field_pos = ID_AA64ISAR1_SB_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, +#ifdef CONFIG_ARM64_PTR_AUTH + { + .desc = "Address authentication (architected algorithm)", + .capability = ARM64_HAS_ADDRESS_AUTH_ARCH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64ISAR1_APA_SHIFT, + .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_address_auth, + }, + { + .desc = "Address authentication (IMP DEF algorithm)", + .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64ISAR1_API_SHIFT, + .min_field_value = ID_AA64ISAR1_API_IMP_DEF, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_address_auth, + }, + { + .desc = "Generic authentication (architected algorithm)", + .capability = ARM64_HAS_GENERIC_AUTH_ARCH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64ISAR1_GPA_SHIFT, + .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED, + .matches = has_cpuid_feature, + }, + { + .desc = "Generic authentication (IMP DEF algorithm)", + .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64ISAR1_GPI_SHIFT, + .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF, + .matches = has_cpuid_feature, + }, +#endif /* CONFIG_ARM64_PTR_AUTH */ {}, }; -#define HWCAP_CAP(reg, field, s, min_value, 
cap_type, cap) \ - { \ - .desc = #cap, \ - .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ - .matches = has_cpuid_feature, \ - .sys_reg = reg, \ - .field_pos = field, \ - .sign = s, \ - .min_field_value = min_value, \ - .hwcap_type = cap_type, \ - .hwcap = cap, \ +#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ + .matches = has_cpuid_feature, \ + .sys_reg = reg, \ + .field_pos = field, \ + .sign = s, \ + .min_field_value = min_value, + +#define __HWCAP_CAP(name, cap_type, cap) \ + .desc = name, \ + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ + .hwcap_type = cap_type, \ + .hwcap = cap, \ + +#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + HWCAP_CPUID_MATCH(reg, field, s, min_value) \ } +#define HWCAP_MULTI_CAP(list, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = cpucap_multi_entry_cap_matches, \ + .match_list = list, \ + } + +#ifdef CONFIG_ARM64_PTR_AUTH +static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { + { + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT, + FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED) + }, + { + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT, + FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF) + }, + {}, +}; + +static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { + { + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT, + FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED) + }, + { + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT, + FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF) + }, + {}, +}; +#endif + static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES), @@ -1409,11 +1555,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), #endif HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS), +#ifdef CONFIG_ARM64_PTR_AUTH + HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA), + HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG), +#endif {}, }; @@ -1482,52 +1633,46 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) cap_set_elf_hwcap(hwcaps); } -/* - * Check if the current CPU has a given feature capability. - * Should be called from non-preemptible context. 
- */ -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, - unsigned int cap) +static void update_cpu_capabilities(u16 scope_mask) { + int i; const struct arm64_cpu_capabilities *caps; - if (WARN_ON(preemptible())) - return false; - - for (caps = cap_array; caps->matches; caps++) - if (caps->capability == cap) - return caps->matches(caps, SCOPE_LOCAL_CPU); - - return false; -} - -static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, - u16 scope_mask, const char *info) -{ scope_mask &= ARM64_CPUCAP_SCOPE_MASK; - for (; caps->matches; caps++) { - if (!(caps->type & scope_mask) || + for (i = 0; i < ARM64_NCAPS; i++) { + caps = cpu_hwcaps_ptrs[i]; + if (!caps || !(caps->type & scope_mask) || + cpus_have_cap(caps->capability) || !caps->matches(caps, cpucap_default_scope(caps))) continue; - if (!cpus_have_cap(caps->capability) && caps->desc) - pr_info("%s %s\n", info, caps->desc); + if (caps->desc) + pr_info("detected: %s\n", caps->desc); cpus_set_cap(caps->capability); } } -static void update_cpu_capabilities(u16 scope_mask) +/* + * Enable all the available capabilities on this CPU. The capabilities + * with BOOT_CPU scope are handled separately and hence skipped here. + */ +static int cpu_enable_non_boot_scope_capabilities(void *__unused) { - __update_cpu_capabilities(arm64_errata, scope_mask, - "enabling workaround for"); - __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); -} + int i; + u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU; -static int __enable_cpu_capability(void *arg) -{ - const struct arm64_cpu_capabilities *cap = arg; + for_each_available_cap(i) { + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i]; + + if (WARN_ON(!cap)) + continue; - cap->cpu_enable(cap); + if (!(cap->type & non_boot_scope)) + continue; + + if (cap->cpu_enable) + cap->cpu_enable(cap); + } return 0; } @@ -1535,21 +1680,29 @@ static int __enable_cpu_capability(void *arg) * Run through the enabled capabilities and enable() it on all active * CPUs */ -static void __init -__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, - u16 scope_mask) +static void __init enable_cpu_capabilities(u16 scope_mask) { + int i; + const struct arm64_cpu_capabilities *caps; + bool boot_scope; + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; - for (; caps->matches; caps++) { - unsigned int num = caps->capability; + boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); - if (!(caps->type & scope_mask) || !cpus_have_cap(num)) + for (i = 0; i < ARM64_NCAPS; i++) { + unsigned int num; + + caps = cpu_hwcaps_ptrs[i]; + if (!caps || !(caps->type & scope_mask)) + continue; + num = caps->capability; + if (!cpus_have_cap(num)) continue; /* Ensure cpus_have_const_cap(num) works */ static_branch_enable(&cpu_hwcap_keys[num]); - if (caps->cpu_enable) { + if (boot_scope && caps->cpu_enable) /* * Capabilities with SCOPE_BOOT_CPU scope are finalised * before any secondary CPU boots. Thus, each secondary @@ -1558,25 +1711,19 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, * the boot CPU, for which the capability must be * enabled here. This approach avoids costly * stop_machine() calls for this case. - * - * Otherwise, use stop_machine() as it schedules the - * work allowing us to modify PSTATE, instead of - * on_each_cpu() which uses an IPI, giving us a PSTATE - * that disappears when we return. 
*/ - if (scope_mask & SCOPE_BOOT_CPU) - caps->cpu_enable(caps); - else - stop_machine(__enable_cpu_capability, - (void *)caps, cpu_online_mask); - } + caps->cpu_enable(caps); } -} -static void __init enable_cpu_capabilities(u16 scope_mask) -{ - __enable_cpu_capabilities(arm64_errata, scope_mask); - __enable_cpu_capabilities(arm64_features, scope_mask); + /* + * For all non-boot scope capabilities, use stop_machine() + * as it schedules the work allowing us to modify PSTATE, + * instead of on_each_cpu() which uses an IPI, giving us a + * PSTATE that disappears when we return. + */ + if (!boot_scope) + stop_machine(cpu_enable_non_boot_scope_capabilities, + NULL, cpu_online_mask); } /* @@ -1586,16 +1733,17 @@ static void __init enable_cpu_capabilities(u16 scope_mask) * * Returns "false" on conflicts. */ -static bool -__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps, - u16 scope_mask) +static bool verify_local_cpu_caps(u16 scope_mask) { + int i; bool cpu_has_cap, system_has_cap; + const struct arm64_cpu_capabilities *caps; scope_mask &= ARM64_CPUCAP_SCOPE_MASK; - for (; caps->matches; caps++) { - if (!(caps->type & scope_mask)) + for (i = 0; i < ARM64_NCAPS; i++) { + caps = cpu_hwcaps_ptrs[i]; + if (!caps || !(caps->type & scope_mask)) continue; cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); @@ -1626,7 +1774,7 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps, } } - if (caps->matches) { + if (i < ARM64_NCAPS) { pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", smp_processor_id(), caps->capability, caps->desc, system_has_cap, cpu_has_cap); @@ -1636,12 +1784,6 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps, return true; } -static bool verify_local_cpu_caps(u16 scope_mask) -{ - return __verify_local_cpu_caps(arm64_errata, scope_mask) && - __verify_local_cpu_caps(arm64_features, scope_mask); -} - /* * Check for CPU features that are used in early boot * based on the Boot CPU value. 
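(Aside: a minimal user-space sketch of consuming the PACA/PACG hwcaps and the PAC_RESET_KEYS prctl hook wired up in this series. The fallback constants mirror the uapi values this series adds; treat them as assumptions if your headers predate it.)

#include <stdio.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

#ifndef HWCAP_PACA
#define HWCAP_PACA (1UL << 30)  /* matches uapi/asm/hwcap.h above */
#define HWCAP_PACG (1UL << 31)
#endif
#ifndef PR_PAC_RESET_KEYS
#define PR_PAC_RESET_KEYS 54    /* prctl value added by this series */
#define PR_PAC_APIAKEY (1UL << 0)
#endif

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        if (hwcap & HWCAP_PACA)
                /* discard the exec-time IA key; the kernel generates a fresh one */
                prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0);

        if (hwcap & HWCAP_PACG)
                printf("PACGA generic authentication available\n");

        return 0;
}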
@@ -1750,12 +1892,16 @@ static void __init mark_const_caps_ready(void) static_branch_enable(&arm64_const_caps_ready); } -extern const struct arm64_cpu_capabilities arm64_errata[]; - -bool this_cpu_has_cap(unsigned int cap) +bool this_cpu_has_cap(unsigned int n) { - return (__this_cpu_has_cap(arm64_features, cap) || - __this_cpu_has_cap(arm64_errata, cap)); + if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; + + if (cap) + return cap->matches(cap, SCOPE_LOCAL_CPU); + } + + return false; } static void __init setup_system_capabilities(void) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index bcc2831399cb..ca0685f33900 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -82,6 +82,9 @@ static const char *const hwcap_str[] = { "ilrcpc", "flagm", "ssbs", + "sb", + "paca", + "pacg", NULL }; diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 1175f5827ae1..81b8eb5c4633 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -79,7 +79,6 @@ .macro mcount_get_lr reg ldr \reg, [x29] ldr \reg, [\reg, #8] - mcount_adjust_addr \reg, \reg .endm .macro mcount_get_lr_addr reg @@ -121,6 +120,8 @@ skip_ftrace_call: // } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ mcount_exit ENDPROC(_mcount) +EXPORT_SYMBOL(_mcount) +NOKPROBE(_mcount) #else /* CONFIG_DYNAMIC_FTRACE */ /* @@ -132,6 +133,8 @@ ENDPROC(_mcount) ENTRY(_mcount) ret ENDPROC(_mcount) +EXPORT_SYMBOL(_mcount) +NOKPROBE(_mcount) /* * void ftrace_caller(unsigned long return_address) @@ -148,14 +151,12 @@ ENTRY(ftrace_caller) mcount_get_pc0 x0 // function's pc mcount_get_lr x1 // function's lr - .global ftrace_call -ftrace_call: // tracer(pc, lr); +GLOBAL(ftrace_call) // tracer(pc, lr); nop // This will be replaced with "bl xxx" // where xxx can be any kind of tracer. #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .global ftrace_graph_call -ftrace_graph_call: // ftrace_graph_caller(); +GLOBAL(ftrace_graph_call) // ftrace_graph_caller(); nop // If enabled, this will be replaced // "b ftrace_graph_caller" #endif @@ -169,24 +170,6 @@ ENTRY(ftrace_stub) ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* save return value regs*/ - .macro save_return_regs - sub sp, sp, #64 - stp x0, x1, [sp] - stp x2, x3, [sp, #16] - stp x4, x5, [sp, #32] - stp x6, x7, [sp, #48] - .endm - - /* restore return value regs*/ - .macro restore_return_regs - ldp x0, x1, [sp] - ldp x2, x3, [sp, #16] - ldp x4, x5, [sp, #32] - ldp x6, x7, [sp, #48] - add sp, sp, #64 - .endm - /* * void ftrace_graph_caller(void) * @@ -197,10 +180,10 @@ ENDPROC(ftrace_stub) * and run return_to_handler() later on its exit. */ ENTRY(ftrace_graph_caller) - mcount_get_lr_addr x0 // pointer to function's saved lr - mcount_get_pc x1 // function's pc + mcount_get_pc x0 // function's pc + mcount_get_lr_addr x1 // pointer to function's saved lr mcount_get_parent_fp x2 // parent's fp - bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp) + bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp) mcount_exit ENDPROC(ftrace_graph_caller) @@ -209,15 +192,27 @@ ENDPROC(ftrace_graph_caller) * void return_to_handler(void) * * Run ftrace_return_to_handler() before going back to parent. - * @fp is checked against the value passed by ftrace_graph_caller() - * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled. + * @fp is checked against the value passed by ftrace_graph_caller(). 
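+ * The value was recorded when ftrace_graph_caller() handed the frame pointer + * to prepare_ftrace_return(), so the core return-trace code can compare the + * two and catch a corrupted call stack.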
*/ ENTRY(return_to_handler) - save_return_regs + /* save return value regs */ + sub sp, sp, #64 + stp x0, x1, [sp] + stp x2, x3, [sp, #16] + stp x4, x5, [sp, #32] + stp x6, x7, [sp, #48] + mov x0, x29 // parent's fp bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp); mov x30, x0 // restore the original return address - restore_return_regs + + /* restore return value regs */ + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + add sp, sp, #64 + ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 039144ecbcb2..763f03dc4d9e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -344,10 +344,6 @@ alternative_else_nop_endif ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp - /* - * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization - * when returning from IPI handler, and when returning to user-space. - */ .if \el == 0 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 @@ -363,6 +359,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 .else eret .endif + sb .endm .macro irq_stack_entry @@ -622,10 +619,8 @@ el1_irq: irq_handler #ifdef CONFIG_PREEMPT - ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz w24, 1f // preempt count != 0 - ldr x0, [tsk, #TSK_TI_FLAGS] // get flags - tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? + ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count + cbnz x24, 1f // preempt count != 0 bl el1_preempt 1: #endif @@ -1006,6 +1001,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 mrs x30, far_el1 .endif eret + sb .endm .align 11 diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 57e962290df3..c1f30f854fb3 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -104,7 +104,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * is added in the future, but for now, the pr_err() below * deals with a theoretical issue only. */ - trampoline = get_plt_entry(addr); + trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); if (!plt_entries_equal(mod->arch.ftrace_trampoline, &trampoline)) { if (!plt_entries_equal(mod->arch.ftrace_trampoline, @@ -211,7 +211,7 @@ int __init ftrace_dyn_arch_init(void) * * Note that @frame_pointer is used only for sanity check later. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long)&return_to_handler; diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 4471f570a295..c7213674cb24 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -31,6 +31,7 @@ #include <asm/cache.h> #include <asm/cputype.h> #include <asm/elf.h> +#include <asm/image.h> #include <asm/kernel-pgtable.h> #include <asm/kvm_arm.h> #include <asm/memory.h> @@ -91,7 +92,7 @@ _head: .quad 0 // reserved .quad 0 // reserved .quad 0 // reserved - .ascii "ARM\x64" // Magic number + .ascii ARM64_IMAGE_MAGIC // Magic number #ifdef CONFIG_EFI .long pe_header - _head // Offset to the PE header.
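Aside on the el1_irq hunk above: with the thread_info union from this series, need_resched sits in the upper 32 bits of the 64-bit preempt_count with inverted polarity (0 means a reschedule is wanted), so one 64-bit load reads zero exactly when the count is zero and a reschedule is pending. The encodings being relied on:

        preempt_count (u64)     = need_resched:32 (high) | count:32 (low)
        PREEMPT_ENABLED         = BIT(32)       /* count 0, no resched wanted */
        after preempt_disable() = BIT(32) + 1   /* non-zero, cbnz x24 skips el1_preempt */
        count 0, resched wanted = 0             /* the only all-zero value */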
@@ -318,6 +319,19 @@ __create_page_tables: adrp x0, idmap_pg_dir adrp x3, __idmap_text_start // __pa(__idmap_text_start) +#ifdef CONFIG_ARM64_USER_VA_BITS_52 + mrs_s x6, SYS_ID_AA64MMFR2_EL1 + and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + mov x5, #52 + cbnz x6, 1f +#endif + mov x5, #VA_BITS +1: + adr_l x6, vabits_user + str x5, [x6] + dmb sy + dc ivac, x6 // Invalidate potentially stale cache line + /* * VA_BITS may be too small to allow for an ID mapping to be created * that covers system RAM if that is located sufficiently high in the @@ -496,10 +510,9 @@ ENTRY(el2_setup) #endif /* Hyp configuration. */ - mov x0, #HCR_RW // 64-bit EL1 + mov_q x0, HCR_HOST_NVHE_FLAGS cbz x2, set_hcr - orr x0, x0, #HCR_TGE // Enable Host Extensions - orr x0, x0, #HCR_E2H + mov_q x0, HCR_HOST_VHE_FLAGS set_hcr: msr hcr_el2, x0 isb @@ -707,6 +720,7 @@ secondary_startup: /* * Common entry point for secondary CPUs. */ + bl __cpu_secondary_check52bitva bl __cpu_setup // initialise processor adrp x1, swapper_pg_dir bl __enable_mmu @@ -769,6 +783,7 @@ ENTRY(__enable_mmu) phys_to_ttbr x1, x1 phys_to_ttbr x2, x2 msr ttbr0_el1, x2 // load TTBR0 + offset_ttbr1 x1 msr ttbr1_el1, x1 // load TTBR1 isb msr sctlr_el1, x0 @@ -784,9 +799,30 @@ ENTRY(__enable_mmu) ret ENDPROC(__enable_mmu) +ENTRY(__cpu_secondary_check52bitva) +#ifdef CONFIG_ARM64_USER_VA_BITS_52 + ldr_l x0, vabits_user + cmp x0, #52 + b.ne 2f + + mrs_s x0, SYS_ID_AA64MMFR2_EL1 + and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + cbnz x0, 2f + + update_early_cpu_boot_status \ + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1 +1: wfe + wfi + b 1b + +#endif +2: ret +ENDPROC(__cpu_secondary_check52bitva) + __no_granule_support: /* Indicate that this CPU can't boot and is stuck in the kernel */ - update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2 + update_early_cpu_boot_status \ + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2 1: wfe wfi diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index dd14ab8c9f72..fe36d85c60bd 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -40,6 +40,7 @@ tlbi vmalle1 dsb nsh phys_to_ttbr \tmp, \page_table + offset_ttbr1 \tmp msr ttbr1_el1, \tmp isb .endm diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index a820ed07fb80..33f14e484040 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -15,13 +15,15 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#ifndef __ASM_IMAGE_H -#define __ASM_IMAGE_H +#ifndef __ARM64_KERNEL_IMAGE_H +#define __ARM64_KERNEL_IMAGE_H #ifndef LINKER_SCRIPT #error This file should only be included in vmlinux.lds.S #endif +#include <asm/image.h> + /* * There aren't any ELF relocations we can use to endian-swap values known only * at link time (e.g. 
the subtraction of two symbol addresses), so we must get @@ -47,19 +49,22 @@ sym##_lo32 = DATA_LE32((data) & 0xffffffff); \ sym##_hi32 = DATA_LE32((data) >> 32) +#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \ + ARM64_IMAGE_FLAG_##field##_SHIFT) + #ifdef CONFIG_CPU_BIG_ENDIAN -#define __HEAD_FLAG_BE 1 +#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE #else -#define __HEAD_FLAG_BE 0 +#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE #endif #define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2) #define __HEAD_FLAG_PHYS_BASE 1 -#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \ - (__HEAD_FLAG_PAGE_SIZE << 1) | \ - (__HEAD_FLAG_PHYS_BASE << 3)) +#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \ + __HEAD_FLAG(PAGE_SIZE) | \ + __HEAD_FLAG(PHYS_BASE)) /* * These will output as part of the Image header, which should be little-endian @@ -76,16 +81,6 @@ __efistub_stext_offset = stext - _text; /* - * Prevent the symbol aliases below from being emitted into the kallsyms - * table, by forcing them to be absolute symbols (which are conveniently - * ignored by scripts/kallsyms) rather than section relative symbols. - * The distinction is only relevant for partial linking, and only for symbols - * that are defined within a section declaration (which is not the case for - * the definitions below) so the resulting values will be identical. - */ -#define KALLSYMS_HIDE(sym) ABSOLUTE(sym) - -/* * The EFI stub has its own symbol namespace prefixed by __efistub_, to * isolate it from the kernel proper. The following symbols are legally * accessed by the stub, so provide some aliases to make them accessible. @@ -94,29 +89,29 @@ __efistub_stext_offset = stext - _text; * linked at. The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp); -__efistub_memchr = KALLSYMS_HIDE(__pi_memchr); -__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub_memset = KALLSYMS_HIDE(__pi_memset); -__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); -__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen); -__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); -__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); -__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr); -__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strnlen = __pi_strnlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub_strrchr = __pi_strrchr; +__efistub___flush_dcache_area = __pi___flush_dcache_area; #ifdef CONFIG_KASAN -__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub___memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub___memset = KALLSYMS_HIDE(__pi_memset); +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; #endif -__efistub__text = KALLSYMS_HIDE(_text); -__efistub__end = KALLSYMS_HIDE(_end); -__efistub__edata = KALLSYMS_HIDE(_edata); -__efistub_screen_info = KALLSYMS_HIDE(screen_info); +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; #endif -#endif /* __ASM_IMAGE_H */ +#endif /* __ARM64_KERNEL_IMAGE_H */ diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 2b3413549734..7820a4a688fa 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -1239,6 +1239,35 @@ 
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); } +u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, + enum aarch64_insn_register reg, + enum aarch64_insn_adr_type type) +{ + u32 insn; + s32 offset; + + switch (type) { + case AARCH64_INSN_ADR_TYPE_ADR: + insn = aarch64_insn_get_adr_value(); + offset = addr - pc; + break; + case AARCH64_INSN_ADR_TYPE_ADRP: + insn = aarch64_insn_get_adrp_value(); + offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12; + break; + default: + pr_err("%s: unknown adr encoding %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } + + if (offset < -SZ_1M || offset >= SZ_1M) + return AARCH64_BREAK_FAULT; + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset); +} + /* * Decode the imm field of a branch, and return the byte offset as a * signed value (so it can be used when computing a new branch diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c new file mode 100644 index 000000000000..07bf740bea91 --- /dev/null +++ b/arch/arm64/kernel/kexec_image.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kexec image loader + + * Copyright (C) 2018 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + */ + +#define pr_fmt(fmt) "kexec_file(Image): " fmt + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/pe.h> +#include <linux/string.h> +#include <linux/verification.h> +#include <asm/byteorder.h> +#include <asm/cpufeature.h> +#include <asm/image.h> +#include <asm/memory.h> + +static int image_probe(const char *kernel_buf, unsigned long kernel_len) +{ + const struct arm64_image_header *h = + (const struct arm64_image_header *)(kernel_buf); + + if (!h || (kernel_len < sizeof(*h))) + return -EINVAL; + + if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic))) + return -EINVAL; + + return 0; +} + +static void *image_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct arm64_image_header *h; + u64 flags, value; + bool be_image, be_kernel; + struct kexec_buf kbuf; + unsigned long text_offset; + struct kexec_segment *kernel_segment; + int ret; + + /* We don't support crash kernels yet. */ + if (image->type == KEXEC_TYPE_CRASH) + return ERR_PTR(-EOPNOTSUPP); + + /* + * We require a kernel with an unambiguous Image header. Per + * Documentation/booting.txt, this is the case when image_size + * is non-zero (practically speaking, since v3.17). 
+ */ + h = (struct arm64_image_header *)kernel; + if (!h->image_size) + return ERR_PTR(-EINVAL); + + /* Check cpu features */ + flags = le64_to_cpu(h->flags); + be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE); + be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + if ((be_image != be_kernel) && !system_supports_mixed_endian()) + return ERR_PTR(-EINVAL); + + value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE); + if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) && + !system_supports_4kb_granule()) || + ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) && + !system_supports_64kb_granule()) || + ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) && + !system_supports_16kb_granule())) + return ERR_PTR(-EINVAL); + + /* Load the kernel */ + kbuf.image = image; + kbuf.buf_min = 0; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = false; + + kbuf.buffer = kernel; + kbuf.bufsz = kernel_len; + kbuf.mem = 0; + kbuf.memsz = le64_to_cpu(h->image_size); + text_offset = le64_to_cpu(h->text_offset); + kbuf.buf_align = MIN_KIMG_ALIGN; + + /* Adjust kernel segment with TEXT_OFFSET */ + kbuf.memsz += text_offset; + + ret = kexec_add_buffer(&kbuf); + if (ret) + return ERR_PTR(ret); + + kernel_segment = &image->segment[image->nr_segments - 1]; + kernel_segment->mem += text_offset; + kernel_segment->memsz -= text_offset; + image->start = kernel_segment->mem; + + pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kernel_segment->mem, kbuf.bufsz, + kernel_segment->memsz); + + /* Load additional data */ + ret = load_other_segments(image, + kernel_segment->mem, kernel_segment->memsz, + initrd, initrd_len, cmdline); + + return ERR_PTR(ret); +} + +#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG +static int image_verify_sig(const char *kernel, unsigned long kernel_len) +{ + return verify_pefile_signature(kernel, kernel_len, NULL, + VERIFYING_KEXEC_PE_SIGNATURE); +} +#endif + +const struct kexec_file_ops kexec_image_ops = { + .probe = image_probe, + .load = image_load, +#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG + .verify_sig = image_verify_sig, +#endif +}; diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 922add8adb74..aa9c94113700 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -212,9 +212,17 @@ void machine_kexec(struct kimage *kimage) * uses physical addressing to relocate the new image to its final * position and transfers control to the image entry point when the * relocation is complete. + * In kexec case, kimage->start points to purgatory assuming that + * kernel entry and dtb address are embedded in purgatory by + * userspace (kexec-tools). + * In kexec_file case, the kernel starts directly without purgatory. */ - - cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0); + cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, +#ifdef CONFIG_KEXEC_FILE + kimage->arch.dtb_mem); +#else + 0); +#endif BUG(); /* Should never get here. 
*/ } diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..10e33860e47a --- /dev/null +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * kexec_file for arm64 + * + * Copyright (C) 2018 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + * + * Most code is derived from arm64 port of kexec-tools + */ + +#define pr_fmt(fmt) "kexec_file: " fmt + +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/libfdt.h> +#include <linux/memblock.h> +#include <linux/of_fdt.h> +#include <linux/random.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <asm/byteorder.h> + +/* relevant device tree properties */ +#define FDT_PROP_INITRD_START "linux,initrd-start" +#define FDT_PROP_INITRD_END "linux,initrd-end" +#define FDT_PROP_BOOTARGS "bootargs" +#define FDT_PROP_KASLR_SEED "kaslr-seed" + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &kexec_image_ops, + NULL +}; + +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + vfree(image->arch.dtb); + image->arch.dtb = NULL; + + return kexec_image_post_load_cleanup_default(image); +} + +static int setup_dtb(struct kimage *image, + unsigned long initrd_load_addr, unsigned long initrd_len, + char *cmdline, void *dtb) +{ + int off, ret; + + ret = fdt_path_offset(dtb, "/chosen"); + if (ret < 0) + goto out; + + off = ret; + + /* add bootargs */ + if (cmdline) { + ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline); + if (ret) + goto out; + } else { + ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + } + + /* add initrd-* */ + if (initrd_load_addr) { + ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START, + initrd_load_addr); + if (ret) + goto out; + + ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END, + initrd_load_addr + initrd_len); + if (ret) + goto out; + } else { + ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + + ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + } + + /* add kaslr-seed */ + ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + + if (rng_is_initialized()) { + u64 seed = get_random_u64(); + ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed); + if (ret) + goto out; + } else { + pr_notice("RNG is not initialised: omitting \"%s\" property\n", + FDT_PROP_KASLR_SEED); + } + +out: + if (ret) + return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; + + return 0; +} + +/* + * More space needed so that we can add initrd, bootargs and kaslr-seed. 
+ */ +#define DTB_EXTRA_SPACE 0x1000 + +static int create_dtb(struct kimage *image, + unsigned long initrd_load_addr, unsigned long initrd_len, + char *cmdline, void **dtb) +{ + void *buf; + size_t buf_size; + int ret; + + buf_size = fdt_totalsize(initial_boot_params) + + strlen(cmdline) + DTB_EXTRA_SPACE; + + for (;;) { + buf = vmalloc(buf_size); + if (!buf) + return -ENOMEM; + + /* duplicate a device tree blob */ + ret = fdt_open_into(initial_boot_params, buf, buf_size); + if (ret) + return -EINVAL; + + ret = setup_dtb(image, initrd_load_addr, initrd_len, + cmdline, buf); + if (ret) { + vfree(buf); + if (ret == -ENOMEM) { + /* unlikely, but just in case */ + buf_size += DTB_EXTRA_SPACE; + continue; + } else { + return ret; + } + } + + /* trim it */ + fdt_pack(buf); + *dtb = buf; + + return 0; + } +} + +int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, + unsigned long kernel_size, + char *initrd, unsigned long initrd_len, + char *cmdline) +{ + struct kexec_buf kbuf; + void *dtb = NULL; + unsigned long initrd_load_addr = 0, dtb_len; + int ret = 0; + + kbuf.image = image; + /* not allocate anything below the kernel */ + kbuf.buf_min = kernel_load_addr + kernel_size; + + /* load initrd */ + if (initrd) { + kbuf.buffer = initrd; + kbuf.bufsz = initrd_len; + kbuf.mem = 0; + kbuf.memsz = initrd_len; + kbuf.buf_align = 0; + /* within 1GB-aligned window of up to 32GB in size */ + kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + + (unsigned long)SZ_1G * 32; + kbuf.top_down = false; + + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out_err; + initrd_load_addr = kbuf.mem; + + pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + initrd_load_addr, initrd_len, initrd_len); + } + + /* load dtb */ + ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb); + if (ret) { + pr_err("Preparing for new dtb failed\n"); + goto out_err; + } + + dtb_len = fdt_totalsize(dtb); + kbuf.buffer = dtb; + kbuf.bufsz = dtb_len; + kbuf.mem = 0; + kbuf.memsz = dtb_len; + /* not across 2MB boundary */ + kbuf.buf_align = SZ_2M; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = true; + + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out_err; + image->arch.dtb = dtb; + image->arch.dtb_mem = kbuf.mem; + + pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kbuf.mem, dtb_len, dtb_len); + + return 0; + +out_err: + vfree(dtb); + return ret; +} diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index f0690c2ca3e0..255941394941 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -11,31 +11,91 @@ #include <linux/module.h> #include <linux/sort.h> +static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc, + enum aarch64_insn_register reg) +{ + u32 adrp, add; + + adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP); + add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_ADSB_ADD); + + return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) }; +} + +struct plt_entry get_plt_entry(u64 dst, void *pc) +{ + struct plt_entry plt; + static u32 br; + + if (!br) + br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16, + AARCH64_INSN_BRANCH_NOLINK); + + plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16); + plt.br = cpu_to_le32(br); + + return plt; +} + +bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b) +{ + u64 p, q; + + /* + * Check whether both entries refer to the same target: + * do the cheapest checks 
first. + * If the 'add' or 'br' opcodes are different, then the target + * cannot be the same. + */ + if (a->add != b->add || a->br != b->br) + return false; + + p = ALIGN_DOWN((u64)a, SZ_4K); + q = ALIGN_DOWN((u64)b, SZ_4K); + + /* + * If the 'adrp' opcodes are the same then we just need to check + * that they refer to the same 4k region. + */ + if (a->adrp == b->adrp && p == q) + return true; + + return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) == + (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp))); +} + static bool in_init(const struct module *mod, void *loc) { return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size; } -u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, +u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, const Elf64_Rela *rela, Elf64_Sym *sym) { struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : &mod->arch.init; - struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr; + struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr; int i = pltsec->plt_num_entries; + int j = i - 1; u64 val = sym->st_value + rela->r_addend; - plt[i] = get_plt_entry(val); + if (is_forbidden_offset_for_adrp(&plt[i].adrp)) + i++; + + plt[i] = get_plt_entry(val, &plt[i]); /* * Check if the entry we just created is a duplicate. Given that the * relocations are sorted, this will be the last entry we allocated. * (if one exists). */ - if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) - return (u64)&plt[i - 1]; + if (j >= 0 && plt_entries_equal(plt + i, plt + j)) + return (u64)&plt[j]; - pltsec->plt_num_entries++; + pltsec->plt_num_entries += i - j; if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) return 0; @@ -43,41 +103,31 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, } #ifdef CONFIG_ARM64_ERRATUM_843419 -u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) +u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, u64 val) { struct mod_plt_sec *pltsec = !in_init(mod, loc) ? 
&mod->arch.core : &mod->arch.init; - struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr; + struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr; int i = pltsec->plt_num_entries++; - u32 mov0, mov1, mov2, br; + u32 br; int rd; if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) return 0; + if (is_forbidden_offset_for_adrp(&plt[i].adrp)) + i = pltsec->plt_num_entries++; + /* get the destination register of the ADRP instruction */ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, le32_to_cpup((__le32 *)loc)); - /* generate the veneer instructions */ - mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0, - AARCH64_INSN_VARIANT_64BIT, - AARCH64_INSN_MOVEWIDE_INVERSE); - mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16, - AARCH64_INSN_VARIANT_64BIT, - AARCH64_INSN_MOVEWIDE_KEEP); - mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32, - AARCH64_INSN_VARIANT_64BIT, - AARCH64_INSN_MOVEWIDE_KEEP); br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4, AARCH64_INSN_BRANCH_NOLINK); - plt[i] = (struct plt_entry){ - cpu_to_le32(mov0), - cpu_to_le32(mov1), - cpu_to_le32(mov2), - cpu_to_le32(br) - }; + plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd); + plt[i].br = cpu_to_le32(br); return (u64)&plt[i]; } @@ -193,6 +243,15 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, break; } } + + if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) && + cpus_have_const_cap(ARM64_WORKAROUND_843419)) + /* + * Add some slack so we can skip PLT slots that may trigger + * the erratum due to the placement of the ADRP instruction. + */ + ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry))); + return ret; } @@ -202,7 +261,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned long core_plts = 0; unsigned long init_plts = 0; Elf64_Sym *syms = NULL; - Elf_Shdr *tramp = NULL; + Elf_Shdr *pltsec, *tramp = NULL; int i; /* @@ -211,9 +270,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, */ for (i = 0; i < ehdr->e_shnum; i++) { if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) - mod->arch.core.plt = sechdrs + i; + mod->arch.core.plt_shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) - mod->arch.init.plt = sechdrs + i; + mod->arch.init.plt_shndx = i; else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && !strcmp(secstrings + sechdrs[i].sh_name, ".text.ftrace_trampoline")) @@ -222,7 +281,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, syms = (Elf64_Sym *)sechdrs[i].sh_addr; } - if (!mod->arch.core.plt || !mod->arch.init.plt) { + if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; } @@ -254,17 +313,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, sechdrs[i].sh_info, dstsec); } - mod->arch.core.plt->sh_type = SHT_NOBITS; - mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; - mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; - mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry); + pltsec = sechdrs + mod->arch.core.plt_shndx; + pltsec->sh_type = SHT_NOBITS; + pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + pltsec->sh_addralign = L1_CACHE_BYTES; + pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry); mod->arch.core.plt_num_entries = 0; mod->arch.core.plt_max_entries = core_plts; - mod->arch.init.plt->sh_type = SHT_NOBITS; - mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; - 
mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES; - mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry); + pltsec = sechdrs + mod->arch.init.plt_shndx; + pltsec->sh_type = SHT_NOBITS; + pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + pltsec->sh_addralign = L1_CACHE_BYTES; + pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry); mod->arch.init.plt_num_entries = 0; mod->arch.init.plt_max_entries = init_plts; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f0f27aeefb73..f713e2fc4d75 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -198,13 +198,12 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, return 0; } -static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) +static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, + __le32 *place, u64 val) { u32 insn; - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) || - !cpus_have_const_cap(ARM64_WORKAROUND_843419) || - ((u64)place & 0xfff) < 0xff8) + if (!is_forbidden_offset_for_adrp(place)) return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21, AARCH64_INSN_IMM_ADR); @@ -215,7 +214,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) insn &= ~BIT(31); } else { /* out of range for ADR -> emit a veneer */ - val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff); + val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff); if (!val) return -ENOEXEC; insn = aarch64_insn_gen_branch_imm((u64)place, val, @@ -368,7 +367,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: - ovf = reloc_insn_adrp(me, loc, val); + ovf = reloc_insn_adrp(me, sechdrs, loc, val); if (ovf && ovf != -ERANGE) return ovf; break; @@ -413,7 +412,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && ovf == -ERANGE) { - val = module_emit_plt_entry(me, loc, &rel[i], sym); + val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym); if (!val) return -ENOEXEC; ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index bcafd7dcfe8b..94754f07f67a 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -18,6 +18,7 @@ #include <linux/perf_event.h> #include <linux/uaccess.h> +#include <asm/pointer_auth.h> #include <asm/stacktrace.h> struct frame_tail { @@ -35,6 +36,7 @@ user_backtrace(struct frame_tail __user *tail, { struct frame_tail buftail; unsigned long err; + unsigned long lr; /* Also check accessibility of one struct frame_tail beyond */ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) @@ -47,7 +49,9 @@ user_backtrace(struct frame_tail __user *tail, if (err) return NULL; - perf_callchain_store(entry, buftail.lr); + lr = ptrauth_strip_insn_pac(buftail.lr); + + perf_callchain_store(entry, lr); /* * Frame pointers should strictly progress back up the stack diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index e213f8e867f6..1620a371b1f5 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1,5 +1,5 @@ /* - * PMU support + * ARMv8 PMUv3 Performance Events handling code. * * Copyright (C) 2012 ARM Limited * Author: Will Deacon <will.deacon@arm.com> @@ -30,149 +30,6 @@ #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> -/* - * ARMv8 PMUv3 Performance Events handling code. 
- * Common event types (some are defined in asm/perf_event.h). - */ - -/* At least one of the following is required. */ -#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 -#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B - -/* Common architectural events. */ -#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06 -#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07 -#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09 -#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A -#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B -#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C -#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D -#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E -#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F -#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C -#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E -#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21 - -/* Common microarchitectural events. */ -#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01 -#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02 -#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05 -#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13 -#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14 -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15 -#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16 -#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17 -#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18 -#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19 -#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A -#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F -#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20 -#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22 -#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23 -#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24 -#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25 -#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26 -#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27 -#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28 -#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29 -#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A -#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B -#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C -#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D -#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E -#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F -#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30 - -/* ARMv8 recommended implementation defined event types */ -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47 -#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48 - -#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C -#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D -#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E -#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50 -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51 -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52 -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53 - -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56 -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57 -#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58 - -#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C -#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D -#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E -#define 
ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F - -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60 -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61 -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62 -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63 -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64 -#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65 - -#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66 -#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67 -#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68 -#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69 -#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A - -#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C -#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D -#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E -#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F -#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70 -#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71 -#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72 -#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73 -#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74 -#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75 -#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76 -#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77 -#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78 -#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79 -#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A - -#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C -#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D -#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E - -#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81 -#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82 -#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83 -#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84 - -#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86 -#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87 -#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88 - -#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A -#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B -#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C -#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D -#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E -#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F -#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90 -#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91 - -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0 -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1 -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2 -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3 - -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6 -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7 -#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8 - /* ARMv8 Cortex-A53 specific event types. */ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 @@ -183,12 +40,10 @@ #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED -/* PMUv3 HW events mapping. */ - /* * ARMv8 Architectural defined events, not all of these may - * be supported on any given implementation. Undefined events will - * be disabled at run-time. + * be supported on any given implementation. Unsupported events will + * be disabled at run-time based on the PMCEID registers. 
*/ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -210,8 +65,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, - [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, - [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL, @@ -224,8 +77,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, - [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, - [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, }; static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -370,6 +221,18 @@ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL); ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL); ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB); ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB); +ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS); +ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE); +ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS); +ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK); +ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK); +ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD); +ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD); +ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD); +ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP); +ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED); +ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE); +ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION); static struct attribute *armv8_pmuv3_event_attrs[] = { &armv8_event_attr_sw_incr.attr.attr, @@ -420,6 +283,18 @@ static struct attribute *armv8_pmuv3_event_attrs[] = { &armv8_event_attr_l2i_tlb_refill.attr.attr, &armv8_event_attr_l2d_tlb.attr.attr, &armv8_event_attr_l2i_tlb.attr.attr, + &armv8_event_attr_remote_access.attr.attr, + &armv8_event_attr_ll_cache.attr.attr, + &armv8_event_attr_ll_cache_miss.attr.attr, + &armv8_event_attr_dtlb_walk.attr.attr, + &armv8_event_attr_itlb_walk.attr.attr, + &armv8_event_attr_ll_cache_rd.attr.attr, + &armv8_event_attr_ll_cache_miss_rd.attr.attr, + &armv8_event_attr_remote_access_rd.attr.attr, + &armv8_event_attr_sample_pop.attr.attr, + &armv8_event_attr_sample_feed.attr.attr, + &armv8_event_attr_sample_filtrate.attr.attr, + &armv8_event_attr_sample_collision.attr.attr, NULL, }; @@ -434,7 +309,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj, pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); - if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) + if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) + return attr->mode; + + pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; + if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap)) return attr->mode; return 0; @@ -1009,7 +890,7 @@ static int 
__armv8_pmuv3_map_event(struct perf_event *event, if (armv8pmu_event_is_64bit(event)) event->hw.flags |= ARMPMU_EVT_64BIT; - /* Onl expose micro/arch events supported by this PMU */ + /* Only expose micro/arch events supported by this PMU */ if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) && test_bit(hw_event_id, armpmu->pmceid_bitmap)) { return hw_event_id; @@ -1061,6 +942,7 @@ static void __armv8pmu_probe_pmu(void *info) struct armv8pmu_probe_info *probe = info; struct arm_pmu *cpu_pmu = probe->pmu; u64 dfr0; + u64 pmceid_raw[2]; u32 pmceid[2]; int pmuver; @@ -1079,11 +961,17 @@ static void __armv8pmu_probe_pmu(void *info) /* Add the CPU cycles counter */ cpu_pmu->num_events += 1; - pmceid[0] = read_sysreg(pmceid0_el0); - pmceid[1] = read_sysreg(pmceid1_el0); + pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0); + pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0); bitmap_from_arr32(cpu_pmu->pmceid_bitmap, pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); + + pmceid[0] = pmceid_raw[0] >> 32; + pmceid[1] = pmceid_raw[1] >> 32; + + bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, + pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); } static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) @@ -1109,16 +997,16 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu) if (ret) return ret; - cpu_pmu->handle_irq = armv8pmu_handle_irq, - cpu_pmu->enable = armv8pmu_enable_event, - cpu_pmu->disable = armv8pmu_disable_event, - cpu_pmu->read_counter = armv8pmu_read_counter, - cpu_pmu->write_counter = armv8pmu_write_counter, - cpu_pmu->get_event_idx = armv8pmu_get_event_idx, - cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx, - cpu_pmu->start = armv8pmu_start, - cpu_pmu->stop = armv8pmu_stop, - cpu_pmu->reset = armv8pmu_reset, + cpu_pmu->handle_irq = armv8pmu_handle_irq; + cpu_pmu->enable = armv8pmu_enable_event; + cpu_pmu->disable = armv8pmu_disable_event; + cpu_pmu->read_counter = armv8pmu_read_counter; + cpu_pmu->write_counter = armv8pmu_write_counter; + cpu_pmu->get_event_idx = armv8pmu_get_event_idx; + cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx; + cpu_pmu->start = armv8pmu_start; + cpu_pmu->stop = armv8pmu_stop; + cpu_pmu->reset = armv8pmu_reset; cpu_pmu->set_event_filter = armv8pmu_set_event_filter; cpu_pmu->filter_match = armv8pmu_filter_match; @@ -1274,6 +1162,7 @@ static struct platform_driver armv8_pmu_driver = { .driver = { .name = ARMV8_PMU_PDEV_NAME, .of_match_table = armv8_pmu_of_device_ids, + .suppress_bind_attrs = true, }, .probe = armv8_pmu_device_probe, }; diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c new file mode 100644 index 000000000000..c507b584259d --- /dev/null +++ b/arch/arm64/kernel/pointer_auth.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/errno.h> +#include <linux/prctl.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <asm/cpufeature.h> +#include <asm/pointer_auth.h> + +int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) +{ + struct ptrauth_keys *keys = &tsk->thread.keys_user; + unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY | + PR_PAC_APDAKEY | PR_PAC_APDBKEY; + unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY; + + if (!system_supports_address_auth() && !system_supports_generic_auth()) + return -EINVAL; + + if (!arg) { + ptrauth_keys_init(keys); + ptrauth_keys_switch(keys); + return 0; + } + + if (arg & ~key_mask) + return -EINVAL; + + if (((arg & addr_key_mask) && !system_supports_address_auth()) || + ((arg & PR_PAC_APGAKEY) && 
!system_supports_generic_auth())) + return -EINVAL; + + if (arg & PR_PAC_APIAKEY) + get_random_bytes(&keys->apia, sizeof(keys->apia)); + if (arg & PR_PAC_APIBKEY) + get_random_bytes(&keys->apib, sizeof(keys->apib)); + if (arg & PR_PAC_APDAKEY) + get_random_bytes(&keys->apda, sizeof(keys->apda)); + if (arg & PR_PAC_APDBKEY) + get_random_bytes(&keys->apdb, sizeof(keys->apdb)); + if (arg & PR_PAC_APGAKEY) + get_random_bytes(&keys->apga, sizeof(keys->apga)); + + ptrauth_keys_switch(keys); + + return 0; +} diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index d9a4c2d6dd8b..e0a443730e04 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -57,9 +57,10 @@ #include <asm/fpsimd.h> #include <asm/mmu_context.h> #include <asm/processor.h> +#include <asm/pointer_auth.h> #include <asm/stacktrace.h> -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); @@ -429,6 +430,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); uao_thread_switch(next); + ptrauth_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -496,4 +498,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) void arch_setup_new_exec(void) { current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; + + ptrauth_thread_init_user(current); } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 1710a2d01669..9dce33b0e260 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -46,6 +46,7 @@ #include <asm/debug-monitors.h> #include <asm/fpsimd.h> #include <asm/pgtable.h> +#include <asm/pointer_auth.h> #include <asm/stacktrace.h> #include <asm/syscall.h> #include <asm/traps.h> @@ -956,6 +957,30 @@ out: #endif /* CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_PTR_AUTH +static int pac_mask_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + /* + * The PAC bits can differ across data and instruction pointers + * depending on TCR_EL1.TBID*, which we may make use of in future, so + * we expose separate masks. 
+ */ + unsigned long mask = ptrauth_user_pac_mask(); + struct user_pac_mask uregs = { + .data_mask = mask, + .insn_mask = mask, + }; + + if (!system_supports_address_auth()) + return -EINVAL; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1); +} +#endif /* CONFIG_ARM64_PTR_AUTH */ + enum aarch64_regset { REGSET_GPR, REGSET_FPR, @@ -968,6 +993,9 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_SVE REGSET_SVE, #endif +#ifdef CONFIG_ARM64_PTR_AUTH + REGSET_PAC_MASK, +#endif }; static const struct user_regset aarch64_regsets[] = { @@ -1037,6 +1065,16 @@ static const struct user_regset aarch64_regsets[] = { .get_size = sve_get_size, }, #endif +#ifdef CONFIG_ARM64_PTR_AUTH + [REGSET_PAC_MASK] = { + .core_note_type = NT_ARM_PAC_MASK, + .n = sizeof(struct user_pac_mask) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .get = pac_mask_get, + /* this cannot be set dynamically */ + }, +#endif }; static const struct user_regset_view user_aarch64_view = { diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index f407e422a720..95fd94209aae 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -32,6 +32,7 @@ ENTRY(arm64_relocate_new_kernel) /* Setup the list loop variables. */ + mov x18, x2 /* x18 = dtb address */ mov x17, x1 /* x17 = kimage_start */ mov x16, x0 /* x16 = kimage_head */ raw_dcache_line_size x15, x0 /* x15 = dcache line size */ @@ -107,7 +108,7 @@ ENTRY(arm64_relocate_new_kernel) isb /* Start new image. */ - mov x0, xzr + mov x0, x18 mov x1, xzr mov x2, xzr mov x3, xzr diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f4fc1e0544b7..4b0e1231625c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -388,6 +388,7 @@ static int dump_kernel_offset(struct notifier_block *self, unsigned long v, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) { pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n", offset, KIMAGE_VADDR); + pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET); } else { pr_emerg("Kernel Offset: disabled\n"); } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index 62522342e1e4..184332286a81 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -13,7 +13,9 @@ */ #include <linux/linkage.h> #include <linux/arm-smccc.h> + #include <asm/asm-offsets.h> +#include <asm/assembler.h> .macro SMCCC instr .cfi_startproc @@ -40,6 +42,7 @@ ENTRY(__arm_smccc_smc) SMCCC smc ENDPROC(__arm_smccc_smc) +EXPORT_SYMBOL(__arm_smccc_smc) /* * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, @@ -50,3 +53,4 @@ ENDPROC(__arm_smccc_smc) ENTRY(__arm_smccc_hvc) SMCCC hvc ENDPROC(__arm_smccc_hvc) +EXPORT_SYMBOL(__arm_smccc_hvc) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 96b8f2f51ab2..1598d6f7200a 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -141,6 +141,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) } } else { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); + return ret; } secondary_data.task = NULL; @@ -151,7 +152,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) if (status == CPU_MMU_OFF) status = READ_ONCE(__early_cpu_boot_status); - switch (status) { + switch (status & CPU_BOOT_STATUS_MASK) { default: pr_err("CPU%u: failed in unknown state : 0x%lx\n", cpu, status); @@ -165,6 +166,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_crit("CPU%u: may not have shut down cleanly\n", cpu); case 
CPU_STUCK_IN_KERNEL: pr_crit("CPU%u: is stuck in kernel\n", cpu); + if (status & CPU_STUCK_REASON_52_BIT_VA) + pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); + if (status & CPU_STUCK_REASON_NO_GRAN) + pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K); cpus_stuck_in_kernel++; break; case CPU_PANIC_KERNEL: diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 03b00007553d..7fa008374907 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -99,7 +99,8 @@ SECTIONS *(.discard) *(.discard.*) *(.interp .dynamic) - *(.dynsym .dynstr .hash) + *(.dynsym .dynstr .hash .gnu.hash) + *(.eh_frame) } . = KIMAGE_VADDR + TEXT_OFFSET; @@ -192,12 +193,12 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) - .rela : ALIGN(8) { + .rela.dyn : ALIGN(8) { *(.rela .rela*) } - __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR); - __rela_size = SIZEOF(.rela); + __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); + __rela_size = SIZEOF(.rela.dyn); . = ALIGN(SEGMENT_ALIGN); __initdata_end = .; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 00d422336a45..f39801e4136c 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -236,24 +236,3 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) } } } - - -/* - * After successfully emulating an instruction, we might want to - * return to user space with a KVM_EXIT_DEBUG. We can only do this - * once the emulation is complete, though, so for userspace emulations - * we have to wait until we have re-entered KVM before calling this - * helper. - * - * Return true (and set exit_reason) to return to userspace or false - * if no further action is required. - */ -bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT; - return true; - } - return false; -} diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 35a81bebd02b..0b7983442071 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -173,6 +173,23 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +/* + * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into + * a NOP). + */ +static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + /* + * We don't currently support ptrauth in a guest, and we mask the ID + * registers to prevent well-behaved guests from trying to make use of + * it. + * + * Inject an UNDEF, as if the feature really isn't present. + */ + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, @@ -195,6 +212,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug, [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug, [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd, + [ESR_ELx_EC_PAC] = kvm_handle_ptrauth, }; static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) @@ -229,13 +247,6 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run) handled = exit_handler(vcpu, run); } - /* - * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run - * structure if we need to return to userspace. 
- */ - if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run)) - handled = 0; - return handled; } @@ -269,12 +280,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, case ARM_EXCEPTION_IRQ: return 1; case ARM_EXCEPTION_EL1_SERROR: - /* We may still need to return for single-step */ - if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS) - && kvm_arm_handle_step_debug(vcpu, run)) - return 0; - else - return 1; + return 1; case ARM_EXCEPTION_TRAP: return handle_trap_exceptions(vcpu, run); case ARM_EXCEPTION_HYP_GONE: diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index fad1e164fe48..675fdc186e3b 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -83,6 +83,7 @@ ENTRY(__guest_enter) // Do not touch any register after this! eret + sb ENDPROC(__guest_enter) ENTRY(__guest_exit) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index b1f14f736962..73c1b483ec39 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -96,6 +96,7 @@ el1_sync: // Guest trapped into EL2 do_el2_call eret + sb el1_hvc_guest: /* @@ -146,6 +147,7 @@ wa_epilogue: mov x0, xzr add sp, sp, #16 eret + sb el1_trap: get_vcpu_ptr x1, x0 @@ -199,6 +201,7 @@ el2_error: b.ne __hyp_panic mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret + sb ENTRY(__hyp_do_panic) mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ @@ -207,6 +210,7 @@ ENTRY(__hyp_do_panic) ldr lr, =panic msr elr_el2, lr eret + sb ENDPROC(__hyp_do_panic) ENTRY(__hyp_panic) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 7cc175c88a37..b0b1478094b4 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -143,6 +143,14 @@ static void deactivate_traps_vhe(void) { extern char vectors[]; /* kernel exception vectors */ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + + /* + * ARM erratum 1165522 requires the actual execution of the above + * before we can switch to the EL2/EL0 translation regime used by + * the host. + */ + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522)); + write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); } @@ -157,7 +165,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; write_sysreg(mdcr_el2, mdcr_el2); - write_sysreg(HCR_RW, hcr_el2); + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } @@ -305,33 +313,6 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) return true; } -/* Skip an instruction which has been emulated. Returns true if - * execution can continue or false if we need to exit hyp mode because - * single-step was in effect. 
- */ -static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu) -{ - *vcpu_pc(vcpu) = read_sysreg_el2(elr); - - if (vcpu_mode_is_32bit(vcpu)) { - vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr); - kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr); - } else { - *vcpu_pc(vcpu) += 4; - } - - write_sysreg_el2(*vcpu_pc(vcpu), elr); - - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - vcpu->arch.fault.esr_el2 = - (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22; - return false; - } else { - return true; - } -} - static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) { struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state; @@ -420,20 +401,12 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); - if (ret == 1 && __skip_instr(vcpu)) + if (ret == 1) return true; - if (ret == -1) { - /* Promote an illegal access to an - * SError. If we would be returning - * due to single-step clear the SS - * bit so handle_exit knows what to - * do after dealing with the error. - */ - if (!__skip_instr(vcpu)) - *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; + /* Promote an illegal access to an SError.*/ + if (ret == -1) *exit_code = ARM_EXCEPTION_EL1_SERROR; - } goto exit; } @@ -444,7 +417,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) { int ret = __vgic_v3_perform_cpuif_access(vcpu); - if (ret == 1 && __skip_instr(vcpu)) + if (ret == 1) return true; } @@ -499,8 +472,19 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_save_host_state_vhe(host_ctxt); - __activate_traps(vcpu); + /* + * ARM erratum 1165522 requires us to configure both stage 1 and + * stage 2 translation for the guest context before we clear + * HCR_EL2.TGE. + * + * We have already configured the guest's stage 1 translation in + * kvm_vcpu_load_sysregs above. We must now call __activate_vm + * before __activate_traps, because __activate_vm configures + * stage 2 translation, and __activate_traps clear HCR_EL2.TGE + * (among other things). + */ __activate_vm(vcpu->kvm); + __activate_traps(vcpu); sysreg_restore_guest_state_vhe(guest_ctxt); __debug_switch_to_guest(vcpu); @@ -545,8 +529,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(host_ctxt); - __activate_traps(vcpu); __activate_vm(kern_hyp_va(vcpu->kvm)); + __activate_traps(vcpu); __hyp_vgic_restore_state(vcpu); __timer_enable_traps(vcpu); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 4dbd9c69a96d..76c30866069e 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -15,20 +15,54 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/irqflags.h> + #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> #include <asm/tlbflush.h> -static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) +struct tlb_inv_context { + unsigned long flags; + u64 tcr; + u64 sctlr; +}; + +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, + struct tlb_inv_context *cxt) { u64 val; + local_irq_save(cxt->flags); + + if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) { + /* + * For CPUs that are affected by ARM erratum 1165522, we + * cannot trust stage-1 to be in a correct state at that + * point. 
Since we do not want to force a full load of the + * vcpu state, we prevent the EL1 page-table walker to + * allocate new TLBs. This is done by setting the EPD bits + * in the TCR_EL1 register. We also need to prevent it to + * allocate IPA->PA walks, so we enable the S1 MMU... + */ + val = cxt->tcr = read_sysreg_el1(tcr); + val |= TCR_EPD1_MASK | TCR_EPD0_MASK; + write_sysreg_el1(val, tcr); + val = cxt->sctlr = read_sysreg_el1(sctlr); + val |= SCTLR_ELx_M; + write_sysreg_el1(val, sctlr); + } + /* * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and * most TLB operations target EL2/EL0. In order to affect the * guest TLBs (EL1/EL0), we need to change one of these two * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so * let's flip TGE before executing the TLB operation. + * + * ARM erratum 1165522 requires some special handling (again), + * as we need to make sure both stages of translation are in + * place before clearing TGE. __load_guest_stage2() already + * has an ISB in order to deal with this. */ __load_guest_stage2(kvm); val = read_sysreg(hcr_el2); @@ -37,7 +71,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) isb(); } -static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, + struct tlb_inv_context *cxt) { __load_guest_stage2(kvm); isb(); @@ -48,7 +83,8 @@ static hyp_alternate_select(__tlb_switch_to_guest, __tlb_switch_to_guest_vhe, ARM64_HAS_VIRT_HOST_EXTN); -static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, + struct tlb_inv_context *cxt) { /* * We're done with the TLB operation, let's restore the host's @@ -56,9 +92,19 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) */ write_sysreg(0, vttbr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + isb(); + + if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) { + /* Restore the registers to what they were */ + write_sysreg_el1(cxt->tcr, tcr); + write_sysreg_el1(cxt->sctlr, sctlr); + } + + local_irq_restore(cxt->flags); } -static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, + struct tlb_inv_context *cxt) { write_sysreg(0, vttbr_el2); } @@ -70,11 +116,13 @@ static hyp_alternate_select(__tlb_switch_to_host, void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { + struct tlb_inv_context cxt; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &cxt); /* * We could do so much better if we had the VA as well. 
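The tlb_inv_context changes above are a plain save/modify/restore guard: stash TCR_EL1 and SCTLR_EL1, set the EPD bits so the stale stage-1 walker cannot allocate new TLB entries, run the invalidation, then put everything back. A self-contained model of the pattern, with global variables standing in for the system registers; the bit positions (EPD0 = bit 7, EPD1 = bit 23, SCTLR M = bit 0) match the architectural layout, everything else is illustrative:

#include <assert.h>
#include <stdint.h>

#define TCR_EPD0  (1ULL << 7)   /* disable TTBR0 walks */
#define TCR_EPD1  (1ULL << 23)  /* disable TTBR1 walks */
#define SCTLR_M   (1ULL << 0)   /* stage-1 MMU enable */

/* Stand-ins for the EL1 system registers (values are arbitrary). */
static uint64_t tcr_el1 = 0x80803520;
static uint64_t sctlr_el1 = 0x30d0198c;

struct tlb_inv_context {
    uint64_t tcr;
    uint64_t sctlr;
};

static void switch_to_guest(struct tlb_inv_context *cxt)
{
    /* Save, then block stage-1 TLB allocation while stage-1 is stale.
     * (The real code also disables interrupts around this window.) */
    cxt->tcr = tcr_el1;
    tcr_el1 |= TCR_EPD0 | TCR_EPD1;
    cxt->sctlr = sctlr_el1;
    sctlr_el1 |= SCTLR_M;
}

static void switch_to_host(struct tlb_inv_context *cxt)
{
    /* Restore the registers to what they were. */
    tcr_el1 = cxt->tcr;
    sctlr_el1 = cxt->sctlr;
}

int main(void)
{
    uint64_t tcr0 = tcr_el1, sctlr0 = sctlr_el1;
    struct tlb_inv_context cxt;

    switch_to_guest(&cxt);
    /* ... the TLB invalidation would run here ... */
    switch_to_host(&cxt);

    assert(tcr_el1 == tcr0 && sctlr_el1 == sctlr0);
    return 0;
}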
@@ -117,36 +165,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) if (!has_vhe() && icache_is_vpipt()) __flush_icache_all(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, &cxt); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) { + struct tlb_inv_context cxt; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &cxt); __tlbi(vmalls12e1is); dsb(ish); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, &cxt); } void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); + struct tlb_inv_context cxt; /* Switch to requested VMID */ - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &cxt); __tlbi(vmalle1); dsb(nsh); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, &cxt); } void __hyp_text __kvm_flush_vm_context(void) diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 215c7c0eb3b0..9cbdd034a563 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -41,7 +41,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) * Returns: * 1: GICV access successfully performed * 0: Not a GICV access - * -1: Illegal GICV access + * -1: Illegal GICV access successfully performed */ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) { @@ -61,12 +61,16 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) return 0; /* Reject anything but a 32bit access */ - if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) + if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) { + __kvm_skip_instr(vcpu); return -1; + } /* Not aligned? Don't bother */ - if (fault_ipa & 3) + if (fault_ipa & 3) { + __kvm_skip_instr(vcpu); return -1; + } rd = kvm_vcpu_dabt_get_rd(vcpu); addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va; @@ -88,5 +92,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) vcpu_set_reg(vcpu, rd, data); } + __kvm_skip_instr(vcpu); + return 1; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 22fbbdbece3c..e3e37228ae4e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -76,7 +76,7 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu, return false; } -u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg) +u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg) { if (!vcpu->arch.sysregs_loaded_on_cpu) goto immediate_read; @@ -1040,6 +1040,14 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) kvm_debug("SVE unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + } else if (id == SYS_ID_AA64ISAR1_EL1) { + const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) | + (0xfUL << ID_AA64ISAR1_API_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPI_SHIFT); + if (val & ptrauth_mask) + kvm_debug("ptrauth unsupported for guests, suppressing\n"); + val &= ~ptrauth_mask; } else if (id == SYS_ID_AA64MMFR1_EL1) { if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) kvm_debug("LORegions unsupported for guests, suppressing\n"); @@ -1850,6 +1858,8 @@ static void perform_access(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *r) { + trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); + /* * Not having an accessor means that we have configured a trap * that we don't know how to handle. 
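The vgic-v2 proxy hunks above change the contract: __vgic_v2_perform_cpuif_access() now advances the guest PC itself on every access it claims, legal or illegal, so the caller only acts on the return value and promotes -1 to an SError. A compilable model of that three-way contract; all names here are invented for illustration:

#include <assert.h>

/* Return contract modelled on __vgic_v2_perform_cpuif_access(). */
#define ACCESS_HANDLED   1   /* emulated, PC already advanced */
#define ACCESS_NOT_OURS  0   /* not a GICV access, leave PC alone */
#define ACCESS_ILLEGAL  -1   /* bad access, PC advanced, raise SError */

struct vcpu { unsigned long pc; int pending_serror; };

static int perform_cpuif_access(struct vcpu *v, int is_gicv, int is_32bit)
{
    if (!is_gicv)
        return ACCESS_NOT_OURS;
    v->pc += 4;                 /* skip the trapped instruction */
    return is_32bit ? ACCESS_HANDLED : ACCESS_ILLEGAL;
}

static void fixup_exit(struct vcpu *v, int is_gicv, int is_32bit)
{
    int ret = perform_cpuif_access(v, is_gicv, is_32bit);

    if (ret == ACCESS_ILLEGAL)
        v->pending_serror = 1;  /* promote to an SError */
}

int main(void)
{
    struct vcpu v = { .pc = 0x1000 };

    fixup_exit(&v, 1, 1);       /* legal 32-bit GICV access */
    assert(v.pc == 0x1004 && !v.pending_serror);
    fixup_exit(&v, 1, 0);       /* illegal width: skipped, SError raised */
    assert(v.pc == 0x1008 && v.pending_serror);
    return 0;
}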
This certainly qualifies @@ -1912,8 +1922,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, WARN_ON(1); } - kvm_err("Unsupported guest CP%d access at: %08lx\n", - cp, *vcpu_pc(vcpu)); + kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n", + cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } @@ -2063,8 +2073,8 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, if (likely(r)) { perform_access(vcpu, params, r); } else { - kvm_err("Unsupported guest sys_reg access at: %lx\n", - *vcpu_pc(vcpu)); + kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index cd710f8b63e0..3b1bc7f01d0b 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -35,6 +35,9 @@ struct sys_reg_params { }; struct sys_reg_desc { + /* Sysreg string for debug */ + const char *name; + /* MRS/MSR instruction which accesses it. */ u8 Op0; u8 Op1; @@ -130,6 +133,7 @@ const struct sys_reg_desc *find_reg_by_id(u64 id, #define Op2(_x) .Op2 = _x #define SYS_DESC(reg) \ + .name = #reg, \ Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)), \ CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \ Op2(sys_reg_Op2(reg)) diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h index 3b82fb1ddd09..eab91ad0effb 100644 --- a/arch/arm64/kvm/trace.h +++ b/arch/arm64/kvm/trace.h @@ -3,6 +3,7 @@ #define _TRACE_ARM64_KVM_H #include <linux/tracepoint.h> +#include "sys_regs.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm @@ -152,6 +153,39 @@ TRACE_EVENT(kvm_handle_sys_reg, TP_printk("HSR 0x%08lx", __entry->hsr) ); +TRACE_EVENT(kvm_sys_access, + TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg), + TP_ARGS(vcpu_pc, params, reg), + + TP_STRUCT__entry( + __field(unsigned long, vcpu_pc) + __field(bool, is_write) + __field(const char *, name) + __field(u8, Op0) + __field(u8, Op1) + __field(u8, CRn) + __field(u8, CRm) + __field(u8, Op2) + ), + + TP_fast_assign( + __entry->vcpu_pc = vcpu_pc; + __entry->is_write = params->is_write; + __entry->name = reg->name; + __entry->Op0 = reg->Op0; + __entry->Op1 = reg->Op1; + __entry->CRn = reg->CRn; + __entry->CRm = reg->CRm; + __entry->Op2 = reg->Op2; + ), + + TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s", + __entry->vcpu_pc, __entry->name ?: "UNKN", + __entry->Op0, __entry->Op1, __entry->CRn, + __entry->CRm, __entry->Op2, + __entry->is_write ?
"write" : "read") +); + TRACE_EVENT(kvm_set_guest_debug, TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug), TP_ARGS(vcpu, guest_debug), diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 69ff9887f724..5540a1638baf 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -5,6 +5,12 @@ lib-y := clear_user.o delay.o copy_from_user.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ strchr.o strrchr.o tishift.o +ifeq ($(CONFIG_KERNEL_MODE_NEON), y) +obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o +CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only +CFLAGS_xor-neon.o += -ffreestanding +endif + # Tell the compiler to treat all general purpose registers (with the # exception of the IP registers, which are already handled by the caller # in case of a PLT) as callee-saved, which allows for efficient runtime diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index ef08e905e35b..6d13b0d64ad5 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -37,3 +37,4 @@ ENTRY(clear_page) b.ne 1b ret ENDPROC(clear_page) +EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 21ba0b29621b..feb225bd4b80 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -18,6 +18,7 @@ #include <linux/linkage.h> #include <asm/asm-uaccess.h> +#include <asm/assembler.h> .text @@ -53,6 +54,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 uaccess_disable_not_uao x2, x3 ret ENDPROC(__arch_clear_user) +EXPORT_SYMBOL(__arch_clear_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 20305d485046..dea6c762d52f 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -16,8 +16,9 @@ #include <linux/linkage.h> -#include <asm/cache.h> #include <asm/asm-uaccess.h> +#include <asm/assembler.h> +#include <asm/cache.h> /* * Copy from user space to a kernel buffer (alignment handled by the hardware) @@ -71,6 +72,7 @@ ENTRY(__arch_copy_from_user) mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) +EXPORT_SYMBOL(__arch_copy_from_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 54b75deb1d16..a84227fbf716 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -18,8 +18,9 @@ #include <linux/linkage.h> -#include <asm/cache.h> #include <asm/asm-uaccess.h> +#include <asm/assembler.h> +#include <asm/cache.h> /* * Copy from user space to user space (alignment handled by the hardware) @@ -73,6 +74,7 @@ ENTRY(__arch_copy_in_user) mov x0, #0 ret ENDPROC(__arch_copy_in_user) +EXPORT_SYMBOL(__arch_copy_in_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index 076c43715e64..98313e24a987 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -87,3 +87,4 @@ alternative_else_nop_endif ret ENDPROC(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index fda6172d6b88..ef44c7ca3ffb 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -16,8 +16,9 @@ #include <linux/linkage.h> -#include <asm/cache.h> #include <asm/asm-uaccess.h> +#include <asm/assembler.h> +#include <asm/cache.h> /* * Copy to user space from a kernel buffer (alignment handled by the hardware) @@ -70,6 +71,7 @@ ENTRY(__arch_copy_to_user) mov x0, #0 ret ENDPROC(__arch_copy_to_user) 
+EXPORT_SYMBOL(__arch_copy_to_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S index 5bc1e85b4e1c..f132f2a7522e 100644 --- a/arch/arm64/lib/crc32.S +++ b/arch/arm64/lib/crc32.S @@ -15,15 +15,59 @@ .cpu generic+crc .macro __crc32, c -0: subs x2, x2, #16 - b.mi 8f - ldp x3, x4, [x1], #16 + cmp x2, #16 + b.lt 8f // less than 16 bytes + + and x7, x2, #0x1f + and x2, x2, #~0x1f + cbz x7, 32f // multiple of 32 bytes + + and x8, x7, #0xf + ldp x3, x4, [x1] + add x8, x8, x1 + add x1, x1, x7 + ldp x5, x6, [x8] CPU_BE( rev x3, x3 ) CPU_BE( rev x4, x4 ) +CPU_BE( rev x5, x5 ) +CPU_BE( rev x6, x6 ) + + tst x7, #8 + crc32\c\()x w8, w0, x3 + csel x3, x3, x4, eq + csel w0, w0, w8, eq + tst x7, #4 + lsr x4, x3, #32 + crc32\c\()w w8, w0, w3 + csel x3, x3, x4, eq + csel w0, w0, w8, eq + tst x7, #2 + lsr w4, w3, #16 + crc32\c\()h w8, w0, w3 + csel w3, w3, w4, eq + csel w0, w0, w8, eq + tst x7, #1 + crc32\c\()b w8, w0, w3 + csel w0, w0, w8, eq + tst x7, #16 + crc32\c\()x w8, w0, x5 + crc32\c\()x w8, w8, x6 + csel w0, w0, w8, eq + cbz x2, 0f + +32: ldp x3, x4, [x1], #32 + sub x2, x2, #32 + ldp x5, x6, [x1, #-16] +CPU_BE( rev x3, x3 ) +CPU_BE( rev x4, x4 ) +CPU_BE( rev x5, x5 ) +CPU_BE( rev x6, x6 ) crc32\c\()x w0, w0, x3 crc32\c\()x w0, w0, x4 - b.ne 0b - ret + crc32\c\()x w0, w0, x5 + crc32\c\()x w0, w0, x6 + cbnz x2, 32b +0: ret 8: tbz x2, #3, 4f ldr x3, [x1], #8 diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 0f164a4baf52..f146b7ecd28f 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -42,3 +42,4 @@ WEAK(memchr) 2: mov x0, #0 ret ENDPIPROC(memchr) +EXPORT_SYMBOL_NOKASAN(memchr) diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index fb295f52e9f8..e2e629b09049 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -256,3 +256,4 @@ CPU_LE( rev data2, data2 ) mov result, #0 ret ENDPIPROC(memcmp) +EXPORT_SYMBOL_NOKASAN(memcmp) diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 67613937711f..b4f82888ed60 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -74,4 +74,6 @@ ENTRY(memcpy) #include "copy_template.S" ret ENDPIPROC(memcpy) +EXPORT_SYMBOL(memcpy) ENDPROC(__memcpy) +EXPORT_SYMBOL(__memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index a5a4459013b1..ef12f719d99d 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -197,4 +197,6 @@ ENTRY(memmove) b.ne .Ltail63 ret ENDPIPROC(memmove) +EXPORT_SYMBOL(memmove) ENDPROC(__memmove) +EXPORT_SYMBOL(__memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index f2670a9f218c..a79cf118d6d0 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -216,4 +216,6 @@ ENTRY(memset) b.ne .Ltail_maybe_long ret ENDPIPROC(memset) +EXPORT_SYMBOL(memset) ENDPROC(__memset) +EXPORT_SYMBOL(__memset) diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S index 7c83091d1bcd..b179421f46c7 100644 --- a/arch/arm64/lib/strchr.S +++ b/arch/arm64/lib/strchr.S @@ -40,3 +40,4 @@ WEAK(strchr) csel x0, x0, xzr, eq ret ENDPROC(strchr) +EXPORT_SYMBOL_NOKASAN(strchr) diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 7d5d15398bfb..c306c7b88574 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -232,3 +232,4 @@ CPU_BE( orr syndrome, diff, has_nul ) sub result, data1, data2, lsr #56 ret ENDPIPROC(strcmp) +EXPORT_SYMBOL_NOKASAN(strcmp) diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 8e0b14205dcb..2a0240937416 
100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -124,3 +124,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ csel data2, data2, data2a, le b .Lrealigned ENDPIPROC(strlen) +EXPORT_SYMBOL_NOKASAN(strlen) diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index 66bd145935d9..c5d567afb039 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -308,3 +308,4 @@ CPU_BE( orr syndrome, diff, has_nul ) mov result, #0 ret ENDPIPROC(strncmp) +EXPORT_SYMBOL_NOKASAN(strncmp) diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index 355be04441fe..e21e536d420e 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -169,3 +169,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ mov len, limit ret ENDPIPROC(strnlen) +EXPORT_SYMBOL_NOKASAN(strnlen) diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S index ea84924d5990..47e1593016dc 100644 --- a/arch/arm64/lib/strrchr.S +++ b/arch/arm64/lib/strrchr.S @@ -41,3 +41,4 @@ WEAK(strrchr) 2: mov x0, x3 ret ENDPIPROC(strrchr) +EXPORT_SYMBOL_NOKASAN(strrchr) diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S index 0fdff97794de..047622536535 100644 --- a/arch/arm64/lib/tishift.S +++ b/arch/arm64/lib/tishift.S @@ -5,6 +5,8 @@ #include <linux/linkage.h> +#include <asm/assembler.h> + ENTRY(__ashlti3) cbz x2, 1f mov x3, #64 @@ -25,6 +27,7 @@ ENTRY(__ashlti3) mov x0, x2 ret ENDPROC(__ashlti3) +EXPORT_SYMBOL(__ashlti3) ENTRY(__ashrti3) cbz x2, 1f @@ -46,6 +49,7 @@ ENTRY(__ashrti3) mov x1, x2 ret ENDPROC(__ashrti3) +EXPORT_SYMBOL(__ashrti3) ENTRY(__lshrti3) cbz x2, 1f @@ -67,3 +71,4 @@ ENTRY(__lshrti3) mov x1, x2 ret ENDPROC(__lshrti3) +EXPORT_SYMBOL(__lshrti3) diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c new file mode 100644 index 000000000000..131c60c27dff --- /dev/null +++ b/arch/arm64/lib/xor-neon.c @@ -0,0 +1,184 @@ +/* + * arch/arm64/lib/xor-neon.c + * + * Authors: Jackie Liu <liuyun01@kylinos.cn> + * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
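The xor-neon.c file introduced in the hunk that follows unrolls each pass over 64-byte lines (four 128-bit vectors per iteration). For reference, here is a portable scalar rendition of the two-source case with the same blocking, which the NEON intrinsics merely vectorize; this sketch is illustrative and not part of the patch:

#include <stddef.h>
#include <stdint.h>

/* Scalar equivalent of xor_arm64_neon_2(): 64 bytes per iteration. */
void xor_scalar_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
    uint64_t *dp1 = (uint64_t *)p1;
    uint64_t *dp2 = (uint64_t *)p2;
    long lines = bytes / (8 * sizeof(uint64_t)); /* 64-byte lines */

    do {
        for (size_t i = 0; i < 8; i++)  /* p1 ^= p2 */
            dp1[i] ^= dp2[i];
        dp1 += 8;
        dp2 += 8;
    } while (--lines > 0);
}

int main(void)
{
    unsigned long a[16] = { 1, 2, 3 }, b[16] = { 1, 2, 3 };

    xor_scalar_2(sizeof(a), a, b);      /* a ^= b: identical inputs zero out */
    return (int)(a[0] | a[1] | a[2]);   /* 0 */
}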
+ */ + +#include <linux/raid/xor.h> +#include <linux/module.h> +#include <asm/neon-intrinsics.h> + +void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1, + unsigned long *p2) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, unsigned long *p4) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 */ + v0 = veorq_u64(v0, vld1q_u64(dp4 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp4 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp4 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp4 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + dp4 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, + unsigned long *p4, unsigned long *p5) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + uint64_t *dp5 = (uint64_t *)p5; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + 
+ /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 */ + v0 = veorq_u64(v0, vld1q_u64(dp4 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp4 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp4 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp4 + 6)); + + /* p1 ^= p5 */ + v0 = veorq_u64(v0, vld1q_u64(dp5 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp5 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp5 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp5 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + dp4 += 8; + dp5 += 8; + } while (--lines > 0); +} + +struct xor_block_template const xor_block_inner_neon = { + .name = "__inner_neon__", + .do_2 = xor_arm64_neon_2, + .do_3 = xor_arm64_neon_3, + .do_4 = xor_arm64_neon_4, + .do_5 = xor_arm64_neon_5, +}; +EXPORT_SYMBOL(xor_block_inner_neon); + +MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>"); +MODULE_DESCRIPTION("ARMv8 XOR Extensions"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 0c22ede52f90..a194fd0e837f 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area) * - size - size in question */ ENTRY(__clean_dcache_area_pop) + alternative_if_not ARM64_HAS_DCPOP + b __clean_dcache_area_poc + alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_pop) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index a53704406099..fb0908456a1f 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -33,113 +33,6 @@ #include <asm/cacheflush.h> -static struct gen_pool *atomic_pool __ro_after_init; - -#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K -static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; - -static int __init early_coherent_pool(char *p) -{ - atomic_pool_size = memparse(p, &p); - return 0; -} -early_param("coherent_pool", early_coherent_pool); - -static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) -{ - unsigned long val; - void *ptr = NULL; - - if (!atomic_pool) { - WARN(1, "coherent pool not initialised!\n"); - return NULL; - } - - val = gen_pool_alloc(atomic_pool, size); - if (val) { - phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); - - *ret_page = phys_to_page(phys); - ptr = (void *)val; - memset(ptr, 0, size); - } - - return ptr; -} - -static bool __in_atomic_pool(void *start, size_t size) -{ - return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); -} - -static int __free_from_pool(void *start, size_t size) -{ - if (!__in_atomic_pool(start, size)) - return 0; - - gen_pool_free(atomic_pool, (unsigned long)start, size); - - return 1; -} - -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flags, unsigned long attrs) -{ - struct page *page; - void *ptr, *coherent_ptr; - pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); - - size = PAGE_ALIGN(size); - - if (!gfpflags_allow_blocking(flags)) { - struct page *page = NULL; - void *addr = __alloc_from_pool(size, &page, flags); - - if (addr) - *dma_handle = phys_to_dma(dev, page_to_phys(page)); - - return addr; - } - - ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs); - if (!ptr) - goto no_mem; - - /* remove any dirty cache lines on the kernel alias */ - __dma_flush_area(ptr, size); - 
- /* create a coherent mapping */ - page = virt_to_page(ptr); - coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, __builtin_return_address(0)); - if (!coherent_ptr) - goto no_map; - - return coherent_ptr; - -no_map: - dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs); -no_mem: - return NULL; -} - -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) { - void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle)); - - vunmap(vaddr); - dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs); - } -} - -long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, - dma_addr_t dma_addr) -{ - return __phys_to_pfn(dma_to_phys(dev, dma_addr)); -} - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) { @@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, __dma_unmap_area(phys_to_virt(paddr), size, dir); } +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + __dma_flush_area(page_address(page), size); +} + #ifdef CONFIG_IOMMU_DMA static int __swiotlb_get_sgtable_page(struct sg_table *sgt, struct page *page, size_t size) @@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma, } #endif /* CONFIG_IOMMU_DMA */ -static int __init atomic_pool_init(void) -{ - pgprot_t prot = __pgprot(PROT_NORMAL_NC); - unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; - struct page *page; - void *addr; - unsigned int pool_size_order = get_order(atomic_pool_size); - - if (dev_get_cma_area(NULL)) - page = dma_alloc_from_contiguous(NULL, nr_pages, - pool_size_order, false); - else - page = alloc_pages(GFP_DMA32, pool_size_order); - - if (page) { - int ret; - void *page_addr = page_address(page); - - memset(page_addr, 0, atomic_pool_size); - __dma_flush_area(page_addr, atomic_pool_size); - - atomic_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!atomic_pool) - goto free_page; - - addr = dma_common_contiguous_remap(page, atomic_pool_size, - VM_USERMAP, prot, atomic_pool_init); - - if (!addr) - goto destroy_genpool; - - ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, - page_to_phys(page), - atomic_pool_size, -1); - if (ret) - goto remove_mapping; - - gen_pool_set_algo(atomic_pool, - gen_pool_first_fit_order_align, - NULL); - - pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", - atomic_pool_size / 1024); - return 0; - } - goto out; - -remove_mapping: - dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); -destroy_genpool: - gen_pool_destroy(atomic_pool); - atomic_pool = NULL; -free_page: - if (!dma_release_from_contiguous(NULL, page, nr_pages)) - __free_pages(page, pool_size_order); -out: - pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", - atomic_pool_size / 1024); - return -ENOMEM; -} - -/******************************************** - * The following APIs are for dummy DMA ops * - ********************************************/ - -static void *__dummy_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) -{ - return NULL; -} - -static void __dummy_free(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ -} - -static int __dummy_mmap(struct device *dev, - struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return -ENXIO; -} - -static dma_addr_t __dummy_map_page(struct 
device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} - -static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ -} - -static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} - -static void __dummy_unmap_sg(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, - unsigned long attrs) -{ -} - -static void __dummy_sync_single(struct device *dev, - dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir) -{ -} - -static void __dummy_sync_sg(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) -{ -} - -static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr) -{ - return 1; -} - -static int __dummy_dma_supported(struct device *hwdev, u64 mask) -{ - return 0; -} - -const struct dma_map_ops dummy_dma_ops = { - .alloc = __dummy_alloc, - .free = __dummy_free, - .mmap = __dummy_mmap, - .map_page = __dummy_map_page, - .unmap_page = __dummy_unmap_page, - .map_sg = __dummy_map_sg, - .unmap_sg = __dummy_unmap_sg, - .sync_single_for_cpu = __dummy_sync_single, - .sync_single_for_device = __dummy_sync_single, - .sync_sg_for_cpu = __dummy_sync_sg, - .sync_sg_for_device = __dummy_sync_sg, - .mapping_error = __dummy_mapping_error, - .dma_supported = __dummy_dma_supported, -}; -EXPORT_SYMBOL(dummy_dma_ops); - static int __init arm64_dma_init(void) { WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(), TAINT_CPU_OUT_OF_SPEC, "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)", ARCH_DMA_MINALIGN, cache_line_size()); - - return atomic_pool_init(); + return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC)); } arch_initcall(arm64_dma_init); @@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, page = alloc_pages(gfp, get_order(size)); addr = page ? page_address(page) : NULL; } else { - addr = __alloc_from_pool(size, &page, gfp); + addr = dma_alloc_from_pool(size, &page, gfp); } if (!addr) return NULL; *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); - if (iommu_dma_mapping_error(dev, *handle)) { + if (*handle == DMA_MAPPING_ERROR) { if (coherent) __free_pages(page, get_order(size)); else - __free_from_pool(addr, size); + dma_free_from_pool(addr, size); addr = NULL; } } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { @@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); - if (iommu_dma_mapping_error(dev, *handle)) { + if (*handle == DMA_MAPPING_ERROR) { dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); return NULL; @@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, * coherent devices. * Hence how dodgy the below logic looks... 
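The *handle == DMA_MAPPING_ERROR tests in __iommu_alloc_attrs above reflect the series-wide move from per-ops .mapping_error callbacks to a single sentinel value. In the kernel the sentinel is all-ones (~(dma_addr_t)0), which, unlike 0, can never be the bus address of a mapping of nonzero size. A small sketch of the consumer side, with a fake mapping function standing in for the real API:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

/* Fake mapper: fails for NULL buffers; illustration only. */
static dma_addr_t map_page(const void *buf)
{
    if (!buf)
        return DMA_MAPPING_ERROR;
    return (dma_addr_t)(uintptr_t)buf;  /* pretend 1:1 mapping */
}

int main(void)
{
    char buf[64];
    dma_addr_t handle = map_page(buf);

    if (handle == DMA_MAPPING_ERROR) {
        fprintf(stderr, "mapping failed\n");
        return 1;
    }
    printf("mapped at %#llx\n", (unsigned long long)handle);
    return 0;
}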
*/ - if (__in_atomic_pool(cpu_addr, size)) { + if (dma_in_atomic_pool(cpu_addr, size)) { iommu_dma_unmap_page(dev, handle, iosize, 0, 0); - __free_from_pool(cpu_addr, size); + dma_free_from_pool(cpu_addr, size); } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { struct page *page = vmalloc_to_page(cpu_addr); @@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - !iommu_dma_mapping_error(dev, dev_addr)) + dev_addr != DMA_MAPPING_ERROR) __dma_map_area(page_address(page) + offset, size, dir); return dev_addr; @@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = { .sync_sg_for_device = __iommu_sync_sg_for_device, .map_resource = iommu_dma_map_resource, .unmap_resource = iommu_dma_unmap_resource, - .mapping_error = iommu_dma_mapping_error, }; static int __init __iommu_dma_init(void) @@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - if (!dev->dma_ops) - dev->dma_ops = &swiotlb_dma_ops; - dev->dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 7d9571f4ae3d..5fe6d2e40e9b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -160,7 +160,7 @@ void show_pte(unsigned long addr) pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n", mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, - VA_BITS, mm->pgd); + mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd); pgdp = pgd_offset(mm, addr); pgd = READ_ONCE(*pgdp); pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index f58ea503ad01..28cbc22d7e30 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -429,6 +429,27 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); } +static void __init add_huge_page_size(unsigned long size) +{ + if (size_to_hstate(size)) + return; + + hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); +} + +static int __init hugetlbpage_init(void) +{ +#ifdef CONFIG_ARM64_4K_PAGES + add_huge_page_size(PUD_SIZE); +#endif + add_huge_page_size(PMD_SIZE * CONT_PMDS); + add_huge_page_size(PMD_SIZE); + add_huge_page_size(PAGE_SIZE * CONT_PTES); + + return 0; +} +arch_initcall(hugetlbpage_init); + static __init int setup_hugepagesz(char *opt) { unsigned long ps = memparse(opt, &opt); @@ -440,7 +461,7 @@ static __init int setup_hugepagesz(char *opt) case PMD_SIZE * CONT_PMDS: case PMD_SIZE: case PAGE_SIZE * CONT_PTES: - hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT); + add_huge_page_size(ps); return 1; } @@ -449,13 +470,3 @@ static __init int setup_hugepagesz(char *opt) return 0; } __setup("hugepagesz=", setup_hugepagesz); - -#ifdef CONFIG_ARM64_64K_PAGES -static __init int add_default_hugepagesz(void) -{ - if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) - hugetlb_add_hstate(CONT_PTE_SHIFT); - return 0; -} -arch_initcall(add_default_hugepagesz); -#endif diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0340e45655c6..cbba537ba3d2 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -59,6 +59,8 @@ * that cannot be mistaken for a real physical address. 
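add_huge_page_size() in the hugetlbpage.c hunk above makes registration idempotent (sizes that already have an hstate are skipped), so the boot-time defaults added by hugetlbpage_init() and a hugepagesz= command line can overlap safely. The same check-before-add pattern in miniature, with a toy registry in place of the hstate array:

#include <stdio.h>

#define MAX_SIZES 8
static unsigned long registered[MAX_SIZES];
static int nr_registered;

static int size_registered(unsigned long size)
{
    for (int i = 0; i < nr_registered; i++)
        if (registered[i] == size)
            return 1;
    return 0;
}

static void add_huge_page_size(unsigned long size)
{
    if (size_registered(size))  /* already there: nothing to do */
        return;
    if (nr_registered < MAX_SIZES)
        registered[nr_registered++] = size;
}

int main(void)
{
    add_huge_page_size(2UL << 20);  /* boot-time default, 2M */
    add_huge_page_size(2UL << 20);  /* hugepagesz=2M on cmdline: no dup */
    add_huge_page_size(1UL << 30);  /* 1G */
    printf("%d sizes registered\n", nr_registered); /* prints 2 */
    return 0;
}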
*/ s64 memstart_addr __ro_after_init = -1; +EXPORT_SYMBOL(memstart_addr); + phys_addr_t arm64_dma_phys_limit __ro_after_init; #ifdef CONFIG_BLK_DEV_INITRD @@ -289,6 +291,14 @@ int pfn_valid(unsigned long pfn) if ((addr >> PAGE_SHIFT) != pfn) return 0; + +#ifdef CONFIG_SPARSEMEM + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + + if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn)))) + return 0; +#endif return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); @@ -607,7 +617,7 @@ void __init mem_init(void) * detected at build time already. */ #ifdef CONFIG_COMPAT - BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); + BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); #endif if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d1d6601b385d..da513a1facf4 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -52,6 +52,8 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS); u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; +u64 vabits_user __ro_after_init; +EXPORT_SYMBOL(vabits_user); u64 kimage_voffset __ro_after_init; EXPORT_SYMBOL(kimage_voffset); @@ -451,7 +453,7 @@ static void __init map_mem(pgd_t *pgdp) struct memblock_region *reg; int flags = 0; - if (debug_pagealloc_enabled()) + if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* @@ -552,7 +554,19 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, static int __init parse_rodata(char *arg) { - return strtobool(arg, &rodata_enabled); + int ret = strtobool(arg, &rodata_enabled); + if (!ret) { + rodata_full = false; + return 0; + } + + /* permit 'full' in addition to boolean options */ + if (strcmp(arg, "full")) + return -EINVAL; + + rodata_enabled = true; + rodata_full = true; + return 0; } early_param("rodata", parse_rodata); @@ -1032,3 +1046,20 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) pmd_free(NULL, table); return 1; } + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, + bool want_memblock) +{ + int flags = 0; + + if (rodata_full || debug_pagealloc_enabled()) + flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), + size, PAGE_KERNEL, pgd_pgtable_alloc, flags); + + return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, + altmap, want_memblock); +} +#endif diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 27a31efd9e8e..ae34e3a1cef1 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -466,3 +466,13 @@ void __init arm64_numa_init(void) numa_init(dummy_numa_init); } + +/* + * We hope that we will be hotplugging memory on nodes we already know about, + * such that acpi_get_node() succeeds and we never fall back to this... 
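parse_rodata() in the mmu.c hunk above accepts the usual boolean spellings plus the new keyword "full", and the control flow matters: when strtobool() succeeds, rodata_full must be cleared, and only the exact string "full" is accepted beyond that. A standalone rendition, with a simplified strcmp-based stand-in for the kernel's strtobool() (which returns 0 on success, negative otherwise):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool rodata_enabled, rodata_full = true;

/* Stand-in for the kernel's strtobool(). */
static int strtobool(const char *s, bool *res)
{
    if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on")) {
        *res = true;
        return 0;
    }
    if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off")) {
        *res = false;
        return 0;
    }
    return -1;
}

static int parse_rodata(const char *arg)
{
    if (!strtobool(arg, &rodata_enabled)) {
        rodata_full = false;    /* plain on/off: no linear-alias handling */
        return 0;
    }
    if (strcmp(arg, "full"))    /* only "full" is allowed beyond booleans */
        return -1;
    rodata_enabled = rodata_full = true;
    return 0;
}

int main(void)
{
    printf("on:   %d\n", parse_rodata("on"));    /* 0, rodata_full cleared */
    printf("full: %d\n", parse_rodata("full"));  /* 0, rodata_full set */
    printf("bad:  %d\n", parse_rodata("bogus")); /* -1 */
    return 0;
}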
+ */ +int memory_add_physaddr_to_nid(u64 addr) +{ + pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr); + return 0; +} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index a56359373d8b..6cd645edcf35 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -25,6 +25,8 @@ struct page_change_data { pgprot_t clear_mask; }; +bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); + static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { @@ -64,6 +66,7 @@ static int change_memory_common(unsigned long addr, int numpages, unsigned long size = PAGE_SIZE*numpages; unsigned long end = start + size; struct vm_struct *area; + int i; if (!PAGE_ALIGNED(addr)) { start &= PAGE_MASK; @@ -93,6 +96,24 @@ static int change_memory_common(unsigned long addr, int numpages, if (!numpages) return 0; + /* + * If we are manipulating read-only permissions, apply the same + * change to the linear mapping of the pages that back this VM area. + */ + if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || + pgprot_val(clear_mask) == PTE_RDONLY)) { + for (i = 0; i < area->nr_pages; i++) { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + } + } + + /* + * Get rid of potentially aliasing lazily unmapped vm areas that may + * have permissions set that deviate from the ones we are setting here. + */ + vm_unmap_aliases(); + return __change_memory_common(start, size, set_mask, clear_mask); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 2c75b0b903ae..e05b3ce1db6b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -182,6 +182,7 @@ ENDPROC(cpu_do_switch_mm) .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page phys_to_ttbr \tmp2, \tmp1 + offset_ttbr1 \tmp2 msr ttbr1_el1, \tmp2 isb tlbi vmalle1 @@ -200,6 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1) __idmap_cpu_set_reserved_ttbr1 x1, x3 + offset_ttbr1 x0 msr ttbr1_el1, x0 isb @@ -254,6 +256,7 @@ ENTRY(idmap_kpti_install_ng_mappings) pte .req x16 mrs swapper_ttb, ttbr1_el1 + restore_ttbr1 swapper_ttb adr flag_ptr, __idmap_kpti_flag cbnz cpu, __idmap_kpti_secondary @@ -373,6 +376,7 @@ __idmap_kpti_secondary: cbnz w18, 1b /* All done, act like nothing happened */ + offset_ttbr1 swapper_ttb msr ttbr1_el1, swapper_ttb isb ret @@ -446,7 +450,15 @@ ENTRY(__cpu_setup) ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 - tcr_set_idmap_t0sz x10, x9 + +#ifdef CONFIG_ARM64_USER_VA_BITS_52 + ldr_l x9, vabits_user + sub x9, xzr, x9 + add x9, x9, #64 +#else + ldr_l x9, idmap_t0sz +#endif + tcr_set_t0sz x10, x9 /* * Set the IPS bits in TCR_EL1. diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 89198017e8e6..1542df00b23c 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -134,10 +134,9 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val, } /* - * This is an unoptimized 64 immediate emission used for BPF to BPF call - * addresses. It will always do a full 64 bit decomposition as otherwise - * more complexity in the last extra pass is required since we previously - * reserved 4 instructions for the address. + * Kernel addresses in the vmalloc space use at most 48 bits, and the + * remaining bits are guaranteed to be 0x1. So we can compose the address + * with a fixed length movn/movk/movk sequence. 
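The comment above is the whole trick behind the new emit_addr_mov_i64(): for a vmalloc-range address the top 16 bits are all ones, so a single MOVN (move wide, inverted) materializes bits [15:0] together with the all-ones top half, and two MOVKs patch in bits [31:16] and [47:32]. The arithmetic, checked in plain C (illustrative only; no JIT involved, and the example address is made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0xffff000012345678ULL;  /* top 16 bits all ones */

    /* MOVN reg, #imm: reg = ~imm (no shift). */
    uint64_t reg = ~((uint64_t)(~addr & 0xffff));

    /* Two MOVKs patch bits [31:16] and [47:32]; bits [63:48] stay 1. */
    for (int shift = 16; shift <= 32; shift += 16) {
        uint64_t imm = (addr >> shift) & 0xffff;
        reg = (reg & ~(0xffffULL << shift)) | (imm << shift);
    }

    assert(reg == addr);
    printf("composed %#llx\n", (unsigned long long)reg);
    return 0;
}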
*/ static inline void emit_addr_mov_i64(const int reg, const u64 val, struct jit_ctx *ctx) @@ -145,8 +144,8 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, u64 tmp = val; int shift = 0; - emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); - for (;shift < 48;) { + emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx); + while (shift < 32) { tmp >>= 16; shift += 16; emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); @@ -634,11 +633,7 @@ emit_cond_jmp: &func_addr, &func_addr_fixed); if (ret < 0) return ret; - if (func_addr_fixed) - /* We can use optimized emission here. */ - emit_a64_mov_i64(tmp, func_addr, ctx); - else - emit_addr_mov_i64(tmp, func_addr, ctx); + emit_addr_mov_i64(tmp, func_addr, ctx); emit(A64_BLR(tmp), ctx); emit(A64_MOV(1, r0, A64_R(0)), ctx); break; @@ -937,6 +932,7 @@ skip_init_ctx: prog->jited_len = image_size; if (!prog->is_func || extra_pass) { + bpf_prog_fill_jited_linfo(prog, ctx.offset); out_off: kfree(ctx.offset); kfree(jit_data); @@ -948,3 +944,16 @@ out: tmp : orig_prog); return prog; } + +void *bpf_jit_alloc_exec(unsigned long size) +{ + return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, + BPF_JIT_REGION_END, GFP_KERNEL, + PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} + +void bpf_jit_free_exec(void *addr) +{ + return vfree(addr); +} diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 84420109113d..456e154674d1 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -9,7 +9,6 @@ config C6X select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index 01305c787201..75b79571732c 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c @@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order) void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { + void *ret; u32 paddr; int order; @@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (!paddr) return NULL; - return phys_to_virt(paddr); + ret = phys_to_virt(paddr); + memset(ret, 0, 1 << order); + return ret; } /* diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index cb64f8dacd08..37bed8aadf95 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -7,8 +7,7 @@ config CSKY select COMMON_CLK select CLKSRC_MMIO select CLKSRC_OF - select DMA_DIRECT_OPS - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_REMAP select IRQ_DOMAIN select HANDLE_DOMAIN_IRQ select DW_APB_TIMER_OF diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c index 85437b21e045..80783bb71c5c 100644 --- a/arch/csky/mm/dma-mapping.c +++ b/arch/csky/mm/dma-mapping.c @@ -14,73 +14,13 @@ #include <linux/version.h> #include <asm/cache.h> -static struct gen_pool *atomic_pool; -static size_t atomic_pool_size __initdata = SZ_256K; - -static int __init early_coherent_pool(char *p) -{ - atomic_pool_size = memparse(p, &p); - return 0; -} -early_param("coherent_pool", early_coherent_pool); - static int __init atomic_pool_init(void) { - struct page *page; - size_t size = atomic_pool_size; - void *ptr; - int ret; - - atomic_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!atomic_pool) - BUG(); - - page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size)); - if (!page) - BUG(); - - ptr = dma_common_contiguous_remap(page, size, VM_ALLOC, - pgprot_noncached(PAGE_KERNEL), - 
__builtin_return_address(0)); - if (!ptr) - BUG(); - - ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, - page_to_phys(page), atomic_pool_size, -1); - if (ret) - BUG(); - - gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL); - - pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n", - atomic_pool_size / 1024); - - pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr, - page_to_phys(page)); - - return 0; + return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL)); } postcore_initcall(atomic_pool_init); -static void *csky_dma_alloc_atomic(struct device *dev, size_t size, - dma_addr_t *dma_handle) -{ - unsigned long addr; - - addr = gen_pool_alloc(atomic_pool, size); - if (addr) - *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr); - - return (void *)addr; -} - -static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - gen_pool_free(atomic_pool, (unsigned long)vaddr, size); -} - -static void __dma_clear_buffer(struct page *page, size_t size) +void arch_dma_prep_coherent(struct page *page, size_t size) { if (PageHighMem(page)) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size) } } -static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - void *vaddr; - struct page *page; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - if (DMA_ATTR_NON_CONSISTENT & attrs) { - pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__); - return NULL; - } - - if (IS_ENABLED(CONFIG_DMA_CMA)) - page = dma_alloc_from_contiguous(dev, count, get_order(size), - gfp); - else - page = alloc_pages(gfp, get_order(size)); - - if (!page) { - pr_err("csky %s no more free pages.\n", __func__); - return NULL; - } - - *dma_handle = page_to_phys(page); - - __dma_clear_buffer(page, size); - - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) - return page; - - vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP, - pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0)); - if (!vaddr) - BUG(); - - return vaddr; -} - -static void csky_dma_free_nonatomic( - struct device *dev, - size_t size, - void *vaddr, - dma_addr_t dma_handle, - unsigned long attrs - ) -{ - struct page *page = phys_to_page(dma_handle); - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - if ((unsigned int)vaddr >= VMALLOC_START) - dma_common_free_remap(vaddr, size, VM_USERMAP); - - if (IS_ENABLED(CONFIG_DMA_CMA)) - dma_release_from_contiguous(dev, page, count); - else - __free_pages(page, get_order(size)); -} - -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - if (gfpflags_allow_blocking(gfp)) - return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp, - attrs); - else - return csky_dma_alloc_atomic(dev, size, dma_handle); -} - -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size)) - csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs); - else - csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs); -} - static inline void cache_op(phys_addr_t paddr, size_t size, void (*fn)(unsigned long start, unsigned long end)) { diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index d19c6b16cd5d..6472a0685470 100644 --- 
a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -22,7 +22,6 @@ config H8300 select HAVE_ARCH_KGDB select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS - select DMA_DIRECT_OPS config CPU_BIG_ENDIAN def_bool y diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 2b688af379e6..d71036c598de 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -31,7 +31,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select DMA_DIRECT_OPS ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 36773def6920..cbf6c67c7166 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -28,8 +28,8 @@ config IA64 select HAVE_ARCH_TRACEHOOK select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING - select ARCH_HAS_DMA_MARK_CLEAN - select ARCH_HAS_SG_CHAIN + select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB + select ARCH_HAS_SYNC_DMA_FOR_CPU select VIRT_TO_BUS select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 45f59808b842..320d86f192ee 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -80,6 +80,9 @@ unwcheck: vmlinux archclean: $(Q)$(MAKE) $(clean)=$(boot) +archheaders: + $(Q)$(MAKE) $(build)=arch/ia64/kernel/syscalls all + CLEAN_FILES += vmlinux.gz bootloader boot: lib/lib.a vmlinux diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 58969039bed2..8840ed97712f 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c @@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev) const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) - return &swiotlb_dma_ops; + return NULL; return &sba_dma_ops; } EXPORT_SYMBOL(hwsw_dma_get_ops); diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index e8a93b07283e..5a361e51cb1e 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) } /** - * sba_map_single_attrs - map one buffer and return IOVA for DMA + * sba_map_page - map one buffer and return IOVA for DMA * @dev: instance of PCI owned by the driver that's asking. - * @addr: driver buffer to map. - * @size: number of bytes to map in driver buffer. - * @dir: R/W or both. + * @page: page to map + * @poff: offset into page + * @size: number of bytes to map + * @dir: dma direction * @attrs: optional dma attributes * * See Documentation/DMA-API-HOWTO.txt @@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, ** Device is bit capable of DMA'ing to the buffer... 
** just return the PCI address of ptr */ - DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: " + DBG_BYPASS("sba_map_page() bypass mask/addr: " "0x%lx/0x%lx\n", to_pci_dev(dev)->dma_mask, pci_addr); return pci_addr; @@ -966,14 +967,14 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); - if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()")) + if (sba_check_pdir(ioc,"Check before sba_map_page()")) panic("Sanity check failed"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif pide = sba_alloc_range(ioc, dev, size); if (pide < 0) - return 0; + return DMA_MAPPING_ERROR; iovp = (dma_addr_t) pide << iovp_shift; @@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, /* form complete address */ #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); - sba_check_pdir(ioc,"Check after sba_map_single_attrs()"); + sba_check_pdir(ioc,"Check after sba_map_page()"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif return SBA_IOVA(ioc, iovp, offset); } -static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - return sba_map_page(dev, virt_to_page(addr), - (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); -} - #ifdef ENABLE_MARK_CLEAN static SBA_INLINE void sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) @@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) #endif /** - * sba_unmap_single_attrs - unmap one IOVA and free resources + * sba_unmap_page - unmap one IOVA and free resources * @dev: instance of PCI owned by the driver that's asking. * @iova: IOVA of driver buffer previously mapped. * @size: number of bytes mapped in driver buffer. @@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, /* ** Address does not fall w/in IOVA, must be bypassing */ - DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n", + DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n", iova); #ifdef ENABLE_MARK_CLEAN @@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, #endif /* DELAYED_RESOURCE_CNT == 0 */ } -void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - sba_unmap_page(dev, iova, size, dir, attrs); -} - /** * sba_alloc_coherent - allocate/map shared mem for DMA * @dev: instance of PCI owned by the driver that's asking. 
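The bypass path in sba_map_page() above boils down to a single predicate: if every set bit of the buffer's physical address is covered by the device's DMA mask, hand back the physical address directly and skip the IOMMU. That predicate in isolation, as a hedged sketch (the mask values are examples):

#include <assert.h>
#include <stdint.h>

/* True if the device can DMA to paddr directly, given its mask. */
static int can_bypass(uint64_t paddr, uint64_t dma_mask)
{
    return (paddr & ~dma_mask) == 0;
}

int main(void)
{
    uint64_t mask32 = 0xffffffffULL;                    /* 32-bit device */

    assert(can_bypass(0x00000000fee00000ULL, mask32));  /* below 4G: ok */
    assert(!can_bypass(0x0000000120000000ULL, mask32)); /* above 4G: map */
    assert(can_bypass(0x0000000120000000ULL, ~0ULL));   /* 64-bit: ok */
    return 0;
}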
@@ -1132,30 +1119,24 @@ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { + struct page *page; struct ioc *ioc; + int node = -1; void *addr; ioc = GET_IOC(dev); ASSERT(ioc); - #ifdef CONFIG_NUMA - { - struct page *page; - - page = alloc_pages_node(ioc->node, flags, get_order(size)); - if (unlikely(!page)) - return NULL; - - addr = page_address(page); - } -#else - addr = (void *) __get_free_pages(flags, get_order(size)); + node = ioc->node; #endif - if (unlikely(!addr)) + + page = alloc_pages_node(node, flags, get_order(size)); + if (unlikely(!page)) return NULL; + addr = page_address(page); memset(addr, 0, size); - *dma_handle = virt_to_phys(addr); + *dma_handle = page_to_phys(page); #ifdef ALLOW_IOV_BYPASS ASSERT(dev->coherent_dma_mask); @@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * If device can't bypass or bypass is disabled, pass the 32bit fake * device to map single to get an iova mapping. */ - *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, - size, 0, 0); - + *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, *dma_handle)) + return NULL; return addr; } @@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); + sba_unmap_page(dev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } @@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, /* Fast path single entry scatterlists. */ if (nents == 1) { sglist->dma_length = sglist->length; - sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs); + sglist->dma_address = sba_map_page(dev, sg_page(sglist), + sglist->offset, sglist->length, dir, attrs); + if (dma_mapping_error(dev, sglist->dma_address)) + return 0; return 1; } @@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, while (nents && sglist->dma_length) { - sba_unmap_single_attrs(dev, sglist->dma_address, - sglist->dma_length, dir, attrs); + sba_unmap_page(dev, sglist->dma_address, sglist->dma_length, + dir, attrs); sglist = sg_next(sglist); nents--; } @@ -2080,8 +2065,6 @@ static int __init acpi_sba_ioc_init_acpi(void) /* This has to run before acpi_scan_init(). */ arch_initcall(acpi_sba_ioc_init_acpi); -extern const struct dma_map_ops swiotlb_dma_ops; - static int __init sba_init(void) { @@ -2095,7 +2078,7 @@ sba_init(void) * a successful kdump kernel boot is to use the swiotlb. */ if (is_kdump_kernel()) { - dma_ops = &swiotlb_dma_ops; + dma_ops = NULL; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to initialize software I/O TLB:" " Try machvec=dig boot option"); @@ -2117,7 +2100,7 @@ sba_init(void) * If we didn't find something sba_iommu can claim, we * need to setup the swiotlb and switch to the dig machvec. 
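Setting dma_ops to NULL in sba_init() here, like hwsw_dma_get_ops() returning NULL earlier, follows the convention this series introduces throughout: a NULL dma_map_ops pointer now means "take the common direct-mapping/swiotlb path", so the indirect call is skipped entirely. Sketched with function pointers; the names are invented for illustration:

#include <stdio.h>

typedef unsigned long long dma_addr_t;

struct dma_map_ops {
    dma_addr_t (*map)(void *buf);
};

static dma_addr_t iommu_map(void *buf)
{
    (void)buf;
    return 0x100000ULL;     /* pretend IOVA handed out by an IOMMU */
}

static const struct dma_map_ops iommu_ops = { .map = iommu_map };

static dma_addr_t dma_direct_map(void *buf)
{
    return (dma_addr_t)(unsigned long)buf;  /* phys == bus, conceptually */
}

/* NULL means "no special ops": take the common direct path. */
static dma_addr_t dma_map(const struct dma_map_ops *ops, void *buf)
{
    if (ops)
        return ops->map(buf);
    return dma_direct_map(buf);
}

int main(void)
{
    char buf[16];

    printf("iommu:  %#llx\n", dma_map(&iommu_ops, buf));
    printf("direct: %#llx\n", dma_map(NULL, buf));
    return 0;
}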
*/ - dma_ops = &swiotlb_dma_ops; + dma_ops = NULL; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to find SBA IOMMU or initialize " "software I/O TLB: Try machvec=dig boot option"); @@ -2170,11 +2153,6 @@ static int sba_dma_supported (struct device *dev, u64 mask) return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); } -static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - __setup("nosbagart", nosbagart); static int __init @@ -2208,7 +2186,6 @@ const struct dma_map_ops sba_dma_ops = { .map_sg = sba_map_sg_attrs, .unmap_sg = sba_unmap_sg_attrs, .dma_supported = sba_dma_supported, - .mapping_error = sba_dma_mapping_error, }; void sba_dma_init(void) diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 7e1426e76d96..f86844fc0725 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -347,7 +347,7 @@ static struct scsi_host_template driver_template = { .sg_tablesize = SG_ALL, .max_sectors = 1024, .cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN, - .use_clustering = DISABLE_CLUSTERING, + .dma_boundary = PAGE_SIZE - 1, }; static int __init diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 557bbc8ba9f5..43e21fe3499c 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -1,3 +1,4 @@ +generated-y += syscall_table.h generic-y += compat.h generic-y += exec.h generic-y += irq_work.h diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 49e34db2529c..0b08ebd2dfde 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -10,9 +10,7 @@ #include <uapi/asm/unistd.h> - - -#define NR_syscalls 326 /* length of syscall table */ +#define NR_syscalls __NR_syscalls /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 3982e673e967..ccce0ea65e05 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -1,6 +1,7 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_64.h generic-y += bpf_perf_event.h generic-y += ipcbuf.h generic-y += kvm_para.h diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 5fe71d4a43de..b2513922dcb5 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -11,334 +11,10 @@ #include <asm/break.h> -#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL +#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL -#define __NR_ni_syscall 1024 -#define __NR_exit 1025 -#define __NR_read 1026 -#define __NR_write 1027 -#define __NR_open 1028 -#define __NR_close 1029 -#define __NR_creat 1030 -#define __NR_link 1031 -#define __NR_unlink 1032 -#define __NR_execve 1033 -#define __NR_chdir 1034 -#define __NR_fchdir 1035 -#define __NR_utimes 1036 -#define __NR_mknod 1037 -#define __NR_chmod 1038 -#define __NR_chown 1039 -#define __NR_lseek 1040 -#define __NR_getpid 1041 -#define __NR_getppid 1042 -#define __NR_mount 1043 -#define __NR_umount 1044 -#define __NR_setuid 1045 -#define __NR_getuid 1046 -#define __NR_geteuid 1047 -#define __NR_ptrace 1048 -#define __NR_access 1049 -#define __NR_sync 1050 -#define __NR_fsync 1051 -#define __NR_fdatasync 1052 -#define __NR_kill 1053 -#define __NR_rename 1054 -#define __NR_mkdir 1055 -#define __NR_rmdir 1056 -#define __NR_dup 1057 -#define __NR_pipe 1058 -#define __NR_times 1059 -#define __NR_brk 
1060 -#define __NR_setgid 1061 -#define __NR_getgid 1062 -#define __NR_getegid 1063 -#define __NR_acct 1064 -#define __NR_ioctl 1065 -#define __NR_fcntl 1066 -#define __NR_umask 1067 -#define __NR_chroot 1068 -#define __NR_ustat 1069 -#define __NR_dup2 1070 -#define __NR_setreuid 1071 -#define __NR_setregid 1072 -#define __NR_getresuid 1073 -#define __NR_setresuid 1074 -#define __NR_getresgid 1075 -#define __NR_setresgid 1076 -#define __NR_getgroups 1077 -#define __NR_setgroups 1078 -#define __NR_getpgid 1079 -#define __NR_setpgid 1080 -#define __NR_setsid 1081 -#define __NR_getsid 1082 -#define __NR_sethostname 1083 -#define __NR_setrlimit 1084 -#define __NR_getrlimit 1085 -#define __NR_getrusage 1086 -#define __NR_gettimeofday 1087 -#define __NR_settimeofday 1088 -#define __NR_select 1089 -#define __NR_poll 1090 -#define __NR_symlink 1091 -#define __NR_readlink 1092 -#define __NR_uselib 1093 -#define __NR_swapon 1094 -#define __NR_swapoff 1095 -#define __NR_reboot 1096 -#define __NR_truncate 1097 -#define __NR_ftruncate 1098 -#define __NR_fchmod 1099 -#define __NR_fchown 1100 -#define __NR_getpriority 1101 -#define __NR_setpriority 1102 -#define __NR_statfs 1103 -#define __NR_fstatfs 1104 -#define __NR_gettid 1105 -#define __NR_semget 1106 -#define __NR_semop 1107 -#define __NR_semctl 1108 -#define __NR_msgget 1109 -#define __NR_msgsnd 1110 -#define __NR_msgrcv 1111 -#define __NR_msgctl 1112 -#define __NR_shmget 1113 -#define __NR_shmat 1114 -#define __NR_shmdt 1115 -#define __NR_shmctl 1116 -/* also known as klogctl() in GNU libc: */ -#define __NR_syslog 1117 -#define __NR_setitimer 1118 -#define __NR_getitimer 1119 -/* 1120 was __NR_old_stat */ -/* 1121 was __NR_old_lstat */ -/* 1122 was __NR_old_fstat */ -#define __NR_vhangup 1123 -#define __NR_lchown 1124 -#define __NR_remap_file_pages 1125 -#define __NR_wait4 1126 -#define __NR_sysinfo 1127 -#define __NR_clone 1128 -#define __NR_setdomainname 1129 -#define __NR_uname 1130 -#define __NR_adjtimex 1131 -/* 1132 was __NR_create_module */ -#define __NR_init_module 1133 -#define __NR_delete_module 1134 -/* 1135 was __NR_get_kernel_syms */ -/* 1136 was __NR_query_module */ -#define __NR_quotactl 1137 -#define __NR_bdflush 1138 -#define __NR_sysfs 1139 -#define __NR_personality 1140 -#define __NR_afs_syscall 1141 -#define __NR_setfsuid 1142 -#define __NR_setfsgid 1143 -#define __NR_getdents 1144 -#define __NR_flock 1145 -#define __NR_readv 1146 -#define __NR_writev 1147 -#define __NR_pread64 1148 -#define __NR_pwrite64 1149 -#define __NR__sysctl 1150 -#define __NR_mmap 1151 -#define __NR_munmap 1152 -#define __NR_mlock 1153 -#define __NR_mlockall 1154 -#define __NR_mprotect 1155 -#define __NR_mremap 1156 -#define __NR_msync 1157 -#define __NR_munlock 1158 -#define __NR_munlockall 1159 -#define __NR_sched_getparam 1160 -#define __NR_sched_setparam 1161 -#define __NR_sched_getscheduler 1162 -#define __NR_sched_setscheduler 1163 -#define __NR_sched_yield 1164 -#define __NR_sched_get_priority_max 1165 -#define __NR_sched_get_priority_min 1166 -#define __NR_sched_rr_get_interval 1167 -#define __NR_nanosleep 1168 -#define __NR_nfsservctl 1169 -#define __NR_prctl 1170 -/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */ -#define __NR_mmap2 1172 -#define __NR_pciconfig_read 1173 -#define __NR_pciconfig_write 1174 -#define __NR_perfmonctl 1175 -#define __NR_sigaltstack 1176 -#define __NR_rt_sigaction 1177 -#define __NR_rt_sigpending 1178 -#define __NR_rt_sigprocmask 1179 -#define __NR_rt_sigqueueinfo 1180 -#define 
__NR_rt_sigreturn 1181 -#define __NR_rt_sigsuspend 1182 -#define __NR_rt_sigtimedwait 1183 -#define __NR_getcwd 1184 -#define __NR_capget 1185 -#define __NR_capset 1186 -#define __NR_sendfile 1187 -#define __NR_getpmsg 1188 -#define __NR_putpmsg 1189 -#define __NR_socket 1190 -#define __NR_bind 1191 -#define __NR_connect 1192 -#define __NR_listen 1193 -#define __NR_accept 1194 -#define __NR_getsockname 1195 -#define __NR_getpeername 1196 -#define __NR_socketpair 1197 -#define __NR_send 1198 -#define __NR_sendto 1199 -#define __NR_recv 1200 -#define __NR_recvfrom 1201 -#define __NR_shutdown 1202 -#define __NR_setsockopt 1203 -#define __NR_getsockopt 1204 -#define __NR_sendmsg 1205 -#define __NR_recvmsg 1206 -#define __NR_pivot_root 1207 -#define __NR_mincore 1208 -#define __NR_madvise 1209 -#define __NR_stat 1210 -#define __NR_lstat 1211 -#define __NR_fstat 1212 -#define __NR_clone2 1213 -#define __NR_getdents64 1214 -#define __NR_getunwind 1215 -#define __NR_readahead 1216 -#define __NR_setxattr 1217 -#define __NR_lsetxattr 1218 -#define __NR_fsetxattr 1219 -#define __NR_getxattr 1220 -#define __NR_lgetxattr 1221 -#define __NR_fgetxattr 1222 -#define __NR_listxattr 1223 -#define __NR_llistxattr 1224 -#define __NR_flistxattr 1225 -#define __NR_removexattr 1226 -#define __NR_lremovexattr 1227 -#define __NR_fremovexattr 1228 -#define __NR_tkill 1229 -#define __NR_futex 1230 -#define __NR_sched_setaffinity 1231 -#define __NR_sched_getaffinity 1232 -#define __NR_set_tid_address 1233 -#define __NR_fadvise64 1234 -#define __NR_tgkill 1235 -#define __NR_exit_group 1236 -#define __NR_lookup_dcookie 1237 -#define __NR_io_setup 1238 -#define __NR_io_destroy 1239 -#define __NR_io_getevents 1240 -#define __NR_io_submit 1241 -#define __NR_io_cancel 1242 -#define __NR_epoll_create 1243 -#define __NR_epoll_ctl 1244 -#define __NR_epoll_wait 1245 -#define __NR_restart_syscall 1246 -#define __NR_semtimedop 1247 -#define __NR_timer_create 1248 -#define __NR_timer_settime 1249 -#define __NR_timer_gettime 1250 -#define __NR_timer_getoverrun 1251 -#define __NR_timer_delete 1252 -#define __NR_clock_settime 1253 -#define __NR_clock_gettime 1254 -#define __NR_clock_getres 1255 -#define __NR_clock_nanosleep 1256 -#define __NR_fstatfs64 1257 -#define __NR_statfs64 1258 -#define __NR_mbind 1259 -#define __NR_get_mempolicy 1260 -#define __NR_set_mempolicy 1261 -#define __NR_mq_open 1262 -#define __NR_mq_unlink 1263 -#define __NR_mq_timedsend 1264 -#define __NR_mq_timedreceive 1265 -#define __NR_mq_notify 1266 -#define __NR_mq_getsetattr 1267 -#define __NR_kexec_load 1268 -#define __NR_vserver 1269 -#define __NR_waitid 1270 -#define __NR_add_key 1271 -#define __NR_request_key 1272 -#define __NR_keyctl 1273 -#define __NR_ioprio_set 1274 -#define __NR_ioprio_get 1275 -#define __NR_move_pages 1276 -#define __NR_inotify_init 1277 -#define __NR_inotify_add_watch 1278 -#define __NR_inotify_rm_watch 1279 -#define __NR_migrate_pages 1280 -#define __NR_openat 1281 -#define __NR_mkdirat 1282 -#define __NR_mknodat 1283 -#define __NR_fchownat 1284 -#define __NR_futimesat 1285 -#define __NR_newfstatat 1286 -#define __NR_unlinkat 1287 -#define __NR_renameat 1288 -#define __NR_linkat 1289 -#define __NR_symlinkat 1290 -#define __NR_readlinkat 1291 -#define __NR_fchmodat 1292 -#define __NR_faccessat 1293 -#define __NR_pselect6 1294 -#define __NR_ppoll 1295 -#define __NR_unshare 1296 -#define __NR_splice 1297 -#define __NR_set_robust_list 1298 -#define __NR_get_robust_list 1299 -#define __NR_sync_file_range 1300 -#define __NR_tee 1301 
-#define __NR_vmsplice 1302 -#define __NR_fallocate 1303 -#define __NR_getcpu 1304 -#define __NR_epoll_pwait 1305 -#define __NR_utimensat 1306 -#define __NR_signalfd 1307 -#define __NR_timerfd 1308 -#define __NR_eventfd 1309 -#define __NR_timerfd_create 1310 -#define __NR_timerfd_settime 1311 -#define __NR_timerfd_gettime 1312 -#define __NR_signalfd4 1313 -#define __NR_eventfd2 1314 -#define __NR_epoll_create1 1315 -#define __NR_dup3 1316 -#define __NR_pipe2 1317 -#define __NR_inotify_init1 1318 -#define __NR_preadv 1319 -#define __NR_pwritev 1320 -#define __NR_rt_tgsigqueueinfo 1321 -#define __NR_recvmmsg 1322 -#define __NR_fanotify_init 1323 -#define __NR_fanotify_mark 1324 -#define __NR_prlimit64 1325 -#define __NR_name_to_handle_at 1326 -#define __NR_open_by_handle_at 1327 -#define __NR_clock_adjtime 1328 -#define __NR_syncfs 1329 -#define __NR_setns 1330 -#define __NR_sendmmsg 1331 -#define __NR_process_vm_readv 1332 -#define __NR_process_vm_writev 1333 -#define __NR_accept4 1334 -#define __NR_finit_module 1335 -#define __NR_sched_setattr 1336 -#define __NR_sched_getattr 1337 -#define __NR_renameat2 1338 -#define __NR_getrandom 1339 -#define __NR_memfd_create 1340 -#define __NR_bpf 1341 -#define __NR_execveat 1342 -#define __NR_userfaultfd 1343 -#define __NR_membarrier 1344 -#define __NR_kcmp 1345 -#define __NR_mlock2 1346 -#define __NR_copy_file_range 1347 -#define __NR_preadv2 1348 -#define __NR_pwritev2 1349 +#define __NR_Linux 1024 + +#include <asm/unistd_64.h> #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index 7a471d8d67d4..ad7d9963de34 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/swiotlb.h> #include <linux/export.h> @@ -16,9 +16,26 @@ const struct dma_map_ops *dma_get_ops(struct device *dev) EXPORT_SYMBOL(dma_get_ops); #ifdef CONFIG_SWIOTLB +void *arch_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); +} + +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); +} + +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) +{ + return page_to_pfn(virt_to_page(cpu_addr)); +} + void __init swiotlb_dma_init(void) { - dma_ops = &swiotlb_dma_ops; swiotlb_init(1); } #endif diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 68362b30ea47..a9992be5718b 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1426,335 +1426,10 @@ END(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ +#define __SYSCALL(nr, entry, nargs) data8 entry .rodata .align 8 .globl sys_call_table sys_call_table: - data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S. 
- data8 sys_exit // 1025 - data8 sys_read - data8 sys_write - data8 sys_open - data8 sys_close - data8 sys_creat // 1030 - data8 sys_link - data8 sys_unlink - data8 ia64_execve - data8 sys_chdir - data8 sys_fchdir // 1035 - data8 sys_utimes - data8 sys_mknod - data8 sys_chmod - data8 sys_chown - data8 sys_lseek // 1040 - data8 sys_getpid - data8 sys_getppid - data8 sys_mount - data8 sys_umount - data8 sys_setuid // 1045 - data8 sys_getuid - data8 sys_geteuid - data8 sys_ptrace - data8 sys_access - data8 sys_sync // 1050 - data8 sys_fsync - data8 sys_fdatasync - data8 sys_kill - data8 sys_rename - data8 sys_mkdir // 1055 - data8 sys_rmdir - data8 sys_dup - data8 sys_ia64_pipe - data8 sys_times - data8 ia64_brk // 1060 - data8 sys_setgid - data8 sys_getgid - data8 sys_getegid - data8 sys_acct - data8 sys_ioctl // 1065 - data8 sys_fcntl - data8 sys_umask - data8 sys_chroot - data8 sys_ustat - data8 sys_dup2 // 1070 - data8 sys_setreuid - data8 sys_setregid - data8 sys_getresuid - data8 sys_setresuid - data8 sys_getresgid // 1075 - data8 sys_setresgid - data8 sys_getgroups - data8 sys_setgroups - data8 sys_getpgid - data8 sys_setpgid // 1080 - data8 sys_setsid - data8 sys_getsid - data8 sys_sethostname - data8 sys_setrlimit - data8 sys_getrlimit // 1085 - data8 sys_getrusage - data8 sys_gettimeofday - data8 sys_settimeofday - data8 sys_select - data8 sys_poll // 1090 - data8 sys_symlink - data8 sys_readlink - data8 sys_uselib - data8 sys_swapon - data8 sys_swapoff // 1095 - data8 sys_reboot - data8 sys_truncate - data8 sys_ftruncate - data8 sys_fchmod - data8 sys_fchown // 1100 - data8 ia64_getpriority - data8 sys_setpriority - data8 sys_statfs - data8 sys_fstatfs - data8 sys_gettid // 1105 - data8 sys_semget - data8 sys_semop - data8 sys_semctl - data8 sys_msgget - data8 sys_msgsnd // 1110 - data8 sys_msgrcv - data8 sys_msgctl - data8 sys_shmget - data8 sys_shmat - data8 sys_shmdt // 1115 - data8 sys_shmctl - data8 sys_syslog - data8 sys_setitimer - data8 sys_getitimer - data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */ - data8 sys_ni_syscall /* was: ia64_oldlstat */ - data8 sys_ni_syscall /* was: ia64_oldfstat */ - data8 sys_vhangup - data8 sys_lchown - data8 sys_remap_file_pages // 1125 - data8 sys_wait4 - data8 sys_sysinfo - data8 sys_clone - data8 sys_setdomainname - data8 sys_newuname // 1130 - data8 sys_adjtimex - data8 sys_ni_syscall /* was: ia64_create_module */ - data8 sys_init_module - data8 sys_delete_module - data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */ - data8 sys_ni_syscall /* was: sys_query_module */ - data8 sys_quotactl - data8 sys_bdflush - data8 sys_sysfs - data8 sys_personality // 1140 - data8 sys_ni_syscall // sys_afs_syscall - data8 sys_setfsuid - data8 sys_setfsgid - data8 sys_getdents - data8 sys_flock // 1145 - data8 sys_readv - data8 sys_writev - data8 sys_pread64 - data8 sys_pwrite64 - data8 sys_sysctl // 1150 - data8 sys_mmap - data8 sys_munmap - data8 sys_mlock - data8 sys_mlockall - data8 sys_mprotect // 1155 - data8 ia64_mremap - data8 sys_msync - data8 sys_munlock - data8 sys_munlockall - data8 sys_sched_getparam // 1160 - data8 sys_sched_setparam - data8 sys_sched_getscheduler - data8 sys_sched_setscheduler - data8 sys_sched_yield - data8 sys_sched_get_priority_max // 1165 - data8 sys_sched_get_priority_min - data8 sys_sched_rr_get_interval - data8 sys_nanosleep - data8 sys_ni_syscall // old nfsservctl - data8 sys_prctl // 1170 - data8 sys_getpagesize - data8 sys_mmap2 - data8 sys_pciconfig_read - data8 sys_pciconfig_write - data8 
sys_perfmonctl // 1175 - data8 sys_sigaltstack - data8 sys_rt_sigaction - data8 sys_rt_sigpending - data8 sys_rt_sigprocmask - data8 sys_rt_sigqueueinfo // 1180 - data8 sys_rt_sigreturn - data8 sys_rt_sigsuspend - data8 sys_rt_sigtimedwait - data8 sys_getcwd - data8 sys_capget // 1185 - data8 sys_capset - data8 sys_sendfile64 - data8 sys_ni_syscall // sys_getpmsg (STREAMS) - data8 sys_ni_syscall // sys_putpmsg (STREAMS) - data8 sys_socket // 1190 - data8 sys_bind - data8 sys_connect - data8 sys_listen - data8 sys_accept - data8 sys_getsockname // 1195 - data8 sys_getpeername - data8 sys_socketpair - data8 sys_send - data8 sys_sendto - data8 sys_recv // 1200 - data8 sys_recvfrom - data8 sys_shutdown - data8 sys_setsockopt - data8 sys_getsockopt - data8 sys_sendmsg // 1205 - data8 sys_recvmsg - data8 sys_pivot_root - data8 sys_mincore - data8 sys_madvise - data8 sys_newstat // 1210 - data8 sys_newlstat - data8 sys_newfstat - data8 sys_clone2 - data8 sys_getdents64 - data8 sys_getunwind // 1215 - data8 sys_readahead - data8 sys_setxattr - data8 sys_lsetxattr - data8 sys_fsetxattr - data8 sys_getxattr // 1220 - data8 sys_lgetxattr - data8 sys_fgetxattr - data8 sys_listxattr - data8 sys_llistxattr - data8 sys_flistxattr // 1225 - data8 sys_removexattr - data8 sys_lremovexattr - data8 sys_fremovexattr - data8 sys_tkill - data8 sys_futex // 1230 - data8 sys_sched_setaffinity - data8 sys_sched_getaffinity - data8 sys_set_tid_address - data8 sys_fadvise64_64 - data8 sys_tgkill // 1235 - data8 sys_exit_group - data8 sys_lookup_dcookie - data8 sys_io_setup - data8 sys_io_destroy - data8 sys_io_getevents // 1240 - data8 sys_io_submit - data8 sys_io_cancel - data8 sys_epoll_create - data8 sys_epoll_ctl - data8 sys_epoll_wait // 1245 - data8 sys_restart_syscall - data8 sys_semtimedop - data8 sys_timer_create - data8 sys_timer_settime - data8 sys_timer_gettime // 1250 - data8 sys_timer_getoverrun - data8 sys_timer_delete - data8 sys_clock_settime - data8 sys_clock_gettime - data8 sys_clock_getres // 1255 - data8 sys_clock_nanosleep - data8 sys_fstatfs64 - data8 sys_statfs64 - data8 sys_mbind - data8 sys_get_mempolicy // 1260 - data8 sys_set_mempolicy - data8 sys_mq_open - data8 sys_mq_unlink - data8 sys_mq_timedsend - data8 sys_mq_timedreceive // 1265 - data8 sys_mq_notify - data8 sys_mq_getsetattr - data8 sys_kexec_load - data8 sys_ni_syscall // reserved for vserver - data8 sys_waitid // 1270 - data8 sys_add_key - data8 sys_request_key - data8 sys_keyctl - data8 sys_ioprio_set - data8 sys_ioprio_get // 1275 - data8 sys_move_pages - data8 sys_inotify_init - data8 sys_inotify_add_watch - data8 sys_inotify_rm_watch - data8 sys_migrate_pages // 1280 - data8 sys_openat - data8 sys_mkdirat - data8 sys_mknodat - data8 sys_fchownat - data8 sys_futimesat // 1285 - data8 sys_newfstatat - data8 sys_unlinkat - data8 sys_renameat - data8 sys_linkat - data8 sys_symlinkat // 1290 - data8 sys_readlinkat - data8 sys_fchmodat - data8 sys_faccessat - data8 sys_pselect6 - data8 sys_ppoll // 1295 - data8 sys_unshare - data8 sys_splice - data8 sys_set_robust_list - data8 sys_get_robust_list - data8 sys_sync_file_range // 1300 - data8 sys_tee - data8 sys_vmsplice - data8 sys_fallocate - data8 sys_getcpu - data8 sys_epoll_pwait // 1305 - data8 sys_utimensat - data8 sys_signalfd - data8 sys_ni_syscall - data8 sys_eventfd - data8 sys_timerfd_create // 1310 - data8 sys_timerfd_settime - data8 sys_timerfd_gettime - data8 sys_signalfd4 - data8 sys_eventfd2 - data8 sys_epoll_create1 // 1315 - data8 sys_dup3 - data8 sys_pipe2 - data8 
sys_inotify_init1 - data8 sys_preadv - data8 sys_pwritev // 1320 - data8 sys_rt_tgsigqueueinfo - data8 sys_recvmmsg - data8 sys_fanotify_init - data8 sys_fanotify_mark - data8 sys_prlimit64 // 1325 - data8 sys_name_to_handle_at - data8 sys_open_by_handle_at - data8 sys_clock_adjtime - data8 sys_syncfs - data8 sys_setns // 1330 - data8 sys_sendmmsg - data8 sys_process_vm_readv - data8 sys_process_vm_writev - data8 sys_accept4 - data8 sys_finit_module // 1335 - data8 sys_sched_setattr - data8 sys_sched_getattr - data8 sys_renameat2 - data8 sys_getrandom - data8 sys_memfd_create // 1340 - data8 sys_bpf - data8 sys_execveat - data8 sys_userfaultfd - data8 sys_membarrier - data8 sys_kcmp // 1345 - data8 sys_mlock2 - data8 sys_copy_file_range - data8 sys_preadv2 - data8 sys_pwritev2 - - .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls +#include <asm/syscall_table.h> +#undef __SYSCALL diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile new file mode 100644 index 000000000000..813a58cba39c --- /dev/null +++ b/arch/ia64/kernel/syscalls/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +syshdr_offset_unistd_64 := __NR_Linux +$(uapi)/unistd_64.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +systbl_offset_syscall_table := 1024 +$(kapi)/syscall_table.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_64.h +kapisyshdr-y += syscall_table.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..b22203b40bfe --- /dev/null +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -0,0 +1,337 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# Linux system call numbers and entry vectors for ia64 +# +# The format is: +# <number> <abi> <name> <entry point> +# +# Adding 1024 to <number> gives the actual system call number +# +# The <abi> is always "common" for this file +# +0 common ni_syscall sys_ni_syscall +1 common exit sys_exit +2 common read sys_read +3 common write sys_write +4 common open sys_open +5 common close sys_close +6 common creat sys_creat +7 common link sys_link +8 common unlink sys_unlink +9 common execve ia64_execve +10 common chdir sys_chdir +11 common fchdir sys_fchdir +12 common utimes sys_utimes +13 common mknod sys_mknod +14 common chmod sys_chmod +15 common chown sys_chown +16 common lseek sys_lseek +17 common getpid sys_getpid +18 common getppid sys_getppid +19 common mount sys_mount +20 common umount sys_umount +21 common setuid sys_setuid +22 common getuid sys_getuid +23 common geteuid sys_geteuid +24 common ptrace
sys_ptrace +25 common access sys_access +26 common sync sys_sync +27 common fsync sys_fsync +28 common fdatasync sys_fdatasync +29 common kill sys_kill +30 common rename sys_rename +31 common mkdir sys_mkdir +32 common rmdir sys_rmdir +33 common dup sys_dup +34 common pipe sys_ia64_pipe +35 common times sys_times +36 common brk ia64_brk +37 common setgid sys_setgid +38 common getgid sys_getgid +39 common getegid sys_getegid +40 common acct sys_acct +41 common ioctl sys_ioctl +42 common fcntl sys_fcntl +43 common umask sys_umask +44 common chroot sys_chroot +45 common ustat sys_ustat +46 common dup2 sys_dup2 +47 common setreuid sys_setreuid +48 common setregid sys_setregid +49 common getresuid sys_getresuid +50 common setresuid sys_setresuid +51 common getresgid sys_getresgid +52 common setresgid sys_setresgid +53 common getgroups sys_getgroups +54 common setgroups sys_setgroups +55 common getpgid sys_getpgid +56 common setpgid sys_setpgid +57 common setsid sys_setsid +58 common getsid sys_getsid +59 common sethostname sys_sethostname +60 common setrlimit sys_setrlimit +61 common getrlimit sys_getrlimit +62 common getrusage sys_getrusage +63 common gettimeofday sys_gettimeofday +64 common settimeofday sys_settimeofday +65 common select sys_select +66 common poll sys_poll +67 common symlink sys_symlink +68 common readlink sys_readlink +69 common uselib sys_uselib +70 common swapon sys_swapon +71 common swapoff sys_swapoff +72 common reboot sys_reboot +73 common truncate sys_truncate +74 common ftruncate sys_ftruncate +75 common fchmod sys_fchmod +76 common fchown sys_fchown +77 common getpriority ia64_getpriority +78 common setpriority sys_setpriority +79 common statfs sys_statfs +80 common fstatfs sys_fstatfs +81 common gettid sys_gettid +82 common semget sys_semget +83 common semop sys_semop +84 common semctl sys_semctl +85 common msgget sys_msgget +86 common msgsnd sys_msgsnd +87 common msgrcv sys_msgrcv +88 common msgctl sys_msgctl +89 common shmget sys_shmget +90 common shmat sys_shmat +91 common shmdt sys_shmdt +92 common shmctl sys_shmctl +93 common syslog sys_syslog +94 common setitimer sys_setitimer +95 common getitimer sys_getitimer +# 1120 was old_stat +# 1121 was old_lstat +# 1122 was old_fstat +99 common vhangup sys_vhangup +100 common lchown sys_lchown +101 common remap_file_pages sys_remap_file_pages +102 common wait4 sys_wait4 +103 common sysinfo sys_sysinfo +104 common clone sys_clone +105 common setdomainname sys_setdomainname +106 common uname sys_newuname +107 common adjtimex sys_adjtimex +# 1132 was create_module +109 common init_module sys_init_module +110 common delete_module sys_delete_module +# 1135 was get_kernel_syms +# 1136 was query_module +113 common quotactl sys_quotactl +114 common bdflush sys_bdflush +115 common sysfs sys_sysfs +116 common personality sys_personality +117 common afs_syscall sys_ni_syscall +118 common setfsuid sys_setfsuid +119 common setfsgid sys_setfsgid +120 common getdents sys_getdents +121 common flock sys_flock +122 common readv sys_readv +123 common writev sys_writev +124 common pread64 sys_pread64 +125 common pwrite64 sys_pwrite64 +126 common _sysctl sys_sysctl +127 common mmap sys_mmap +128 common munmap sys_munmap +129 common mlock sys_mlock +130 common mlockall sys_mlockall +131 common mprotect sys_mprotect +132 common mremap ia64_mremap +133 common msync sys_msync +134 common munlock sys_munlock +135 common munlockall sys_munlockall +136 common sched_getparam sys_sched_getparam +137 common sched_setparam sys_sched_setparam +138 
common sched_getscheduler sys_sched_getscheduler +139 common sched_setscheduler sys_sched_setscheduler +140 common sched_yield sys_sched_yield +141 common sched_get_priority_max sys_sched_get_priority_max +142 common sched_get_priority_min sys_sched_get_priority_min +143 common sched_rr_get_interval sys_sched_rr_get_interval +144 common nanosleep sys_nanosleep +145 common nfsservctl sys_ni_syscall +146 common prctl sys_prctl +147 common old_getpagesize sys_getpagesize +148 common mmap2 sys_mmap2 +149 common pciconfig_read sys_pciconfig_read +150 common pciconfig_write sys_pciconfig_write +151 common perfmonctl sys_perfmonctl +152 common sigaltstack sys_sigaltstack +153 common rt_sigaction sys_rt_sigaction +154 common rt_sigpending sys_rt_sigpending +155 common rt_sigprocmask sys_rt_sigprocmask +156 common rt_sigqueueinfo sys_rt_sigqueueinfo +157 common rt_sigreturn sys_rt_sigreturn +158 common rt_sigsuspend sys_rt_sigsuspend +159 common rt_sigtimedwait sys_rt_sigtimedwait +160 common getcwd sys_getcwd +161 common capget sys_capget +162 common capset sys_capset +163 common sendfile sys_sendfile64 +164 common getpmsg sys_ni_syscall +165 common putpmsg sys_ni_syscall +166 common socket sys_socket +167 common bind sys_bind +168 common connect sys_connect +169 common listen sys_listen +170 common accept sys_accept +171 common getsockname sys_getsockname +172 common getpeername sys_getpeername +173 common socketpair sys_socketpair +174 common send sys_send +175 common sendto sys_sendto +176 common recv sys_recv +177 common recvfrom sys_recvfrom +178 common shutdown sys_shutdown +179 common setsockopt sys_setsockopt +180 common getsockopt sys_getsockopt +181 common sendmsg sys_sendmsg +182 common recvmsg sys_recvmsg +183 common pivot_root sys_pivot_root +184 common mincore sys_mincore +185 common madvise sys_madvise +186 common stat sys_newstat +187 common lstat sys_newlstat +188 common fstat sys_newfstat +189 common clone2 sys_clone2 +190 common getdents64 sys_getdents64 +191 common getunwind sys_getunwind +192 common readahead sys_readahead +193 common setxattr sys_setxattr +194 common lsetxattr sys_lsetxattr +195 common fsetxattr sys_fsetxattr +196 common getxattr sys_getxattr +197 common lgetxattr sys_lgetxattr +198 common fgetxattr sys_fgetxattr +199 common listxattr sys_listxattr +200 common llistxattr sys_llistxattr +201 common flistxattr sys_flistxattr +202 common removexattr sys_removexattr +203 common lremovexattr sys_lremovexattr +204 common fremovexattr sys_fremovexattr +205 common tkill sys_tkill +206 common futex sys_futex +207 common sched_setaffinity sys_sched_setaffinity +208 common sched_getaffinity sys_sched_getaffinity +209 common set_tid_address sys_set_tid_address +210 common fadvise64 sys_fadvise64_64 +211 common tgkill sys_tgkill +212 common exit_group sys_exit_group +213 common lookup_dcookie sys_lookup_dcookie +214 common io_setup sys_io_setup +215 common io_destroy sys_io_destroy +216 common io_getevents sys_io_getevents +217 common io_submit sys_io_submit +218 common io_cancel sys_io_cancel +219 common epoll_create sys_epoll_create +220 common epoll_ctl sys_epoll_ctl +221 common epoll_wait sys_epoll_wait +222 common restart_syscall sys_restart_syscall +223 common semtimedop sys_semtimedop +224 common timer_create sys_timer_create +225 common timer_settime sys_timer_settime +226 common timer_gettime sys_timer_gettime +227 common timer_getoverrun sys_timer_getoverrun +228 common timer_delete sys_timer_delete +229 common clock_settime sys_clock_settime +230 common 
clock_gettime sys_clock_gettime +231 common clock_getres sys_clock_getres +232 common clock_nanosleep sys_clock_nanosleep +233 common fstatfs64 sys_fstatfs64 +234 common statfs64 sys_statfs64 +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common mq_open sys_mq_open +239 common mq_unlink sys_mq_unlink +240 common mq_timedsend sys_mq_timedsend +241 common mq_timedreceive sys_mq_timedreceive +242 common mq_notify sys_mq_notify +243 common mq_getsetattr sys_mq_getsetattr +244 common kexec_load sys_kexec_load +245 common vserver sys_ni_syscall +246 common waitid sys_waitid +247 common add_key sys_add_key +248 common request_key sys_request_key +249 common keyctl sys_keyctl +250 common ioprio_set sys_ioprio_set +251 common ioprio_get sys_ioprio_get +252 common move_pages sys_move_pages +253 common inotify_init sys_inotify_init +254 common inotify_add_watch sys_inotify_add_watch +255 common inotify_rm_watch sys_inotify_rm_watch +256 common migrate_pages sys_migrate_pages +257 common openat sys_openat +258 common mkdirat sys_mkdirat +259 common mknodat sys_mknodat +260 common fchownat sys_fchownat +261 common futimesat sys_futimesat +262 common newfstatat sys_newfstatat +263 common unlinkat sys_unlinkat +264 common renameat sys_renameat +265 common linkat sys_linkat +266 common symlinkat sys_symlinkat +267 common readlinkat sys_readlinkat +268 common fchmodat sys_fchmodat +269 common faccessat sys_faccessat +270 common pselect6 sys_pselect6 +271 common ppoll sys_ppoll +272 common unshare sys_unshare +273 common splice sys_splice +274 common set_robust_list sys_set_robust_list +275 common get_robust_list sys_get_robust_list +276 common sync_file_range sys_sync_file_range +277 common tee sys_tee +278 common vmsplice sys_vmsplice +279 common fallocate sys_fallocate +280 common getcpu sys_getcpu +281 common epoll_pwait sys_epoll_pwait +282 common utimensat sys_utimensat +283 common signalfd sys_signalfd +284 common timerfd sys_ni_syscall +285 common eventfd sys_eventfd +286 common timerfd_create sys_timerfd_create +287 common timerfd_settime sys_timerfd_settime +288 common timerfd_gettime sys_timerfd_gettime +289 common signalfd4 sys_signalfd4 +290 common eventfd2 sys_eventfd2 +291 common epoll_create1 sys_epoll_create1 +292 common dup3 sys_dup3 +293 common pipe2 sys_pipe2 +294 common inotify_init1 sys_inotify_init1 +295 common preadv sys_preadv +296 common pwritev sys_pwritev +297 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +298 common recvmmsg sys_recvmmsg +299 common fanotify_init sys_fanotify_init +300 common fanotify_mark sys_fanotify_mark +301 common prlimit64 sys_prlimit64 +302 common name_to_handle_at sys_name_to_handle_at +303 common open_by_handle_at sys_open_by_handle_at +304 common clock_adjtime sys_clock_adjtime +305 common syncfs sys_syncfs +306 common setns sys_setns +307 common sendmmsg sys_sendmmsg +308 common process_vm_readv sys_process_vm_readv +309 common process_vm_writev sys_process_vm_writev +310 common accept4 sys_accept4 +311 common finit_module sys_finit_module +312 common sched_setattr sys_sched_setattr +313 common sched_getattr sys_sched_getattr +314 common renameat2 sys_renameat2 +315 common getrandom sys_getrandom +316 common memfd_create sys_memfd_create +317 common bpf sys_bpf +318 common execveat sys_execveat +319 common userfaultfd sys_userfaultfd +320 common membarrier sys_membarrier +321 common kcmp sys_kcmp +322 common mlock2 sys_mlock2 +323 common copy_file_range sys_copy_file_range +324 
common preadv2 sys_preadv2 +325 common pwritev2 sys_pwritev2 diff --git a/arch/ia64/kernel/syscalls/syscallhdr.sh b/arch/ia64/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..0c2d2c748565 --- /dev/null +++ b/arch/ia64/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_IA64_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/arch/ia64/kernel/syscalls/syscalltbl.sh b/arch/ia64/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..85d78d9309ad --- /dev/null +++ b/arch/ia64/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry ; do + emit $((nxt+offset)) $((nr+offset)) $entry + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index d5e12ff1d73c..0cf43bb13d6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/dma-noncoherent.h> #include <linux/efi.h> #include <linux/elf.h> #include <linux/memblock.h> @@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte) * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to * flush them when they get mapped into an executable vm-area. 
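Taking the first table line above, "0 common ni_syscall sys_ni_syscall", the two generator scripts produce output along the following lines (reconstructed here from the script logic, not copied from a build; syscallhdr.sh runs with offset __NR_Linux, syscalltbl.sh with offset 1024 per the Makefile):

/* generated uapi unistd_64.h (syscallhdr.sh): numbers keep their old
 * values because __NR_Linux is 1024; __NR_syscalls is last nr + 1 and
 * replaces the hand-maintained count in asm/unistd.h */
#define __NR_ni_syscall	(__NR_Linux + 0)
#define __NR_exit	(__NR_Linux + 1)
/* ... */
#define __NR_syscalls	326	/* emitted under #ifdef __KERNEL__ */

/* generated syscall_table.h (syscalltbl.sh): entry.S defines
 * __SYSCALL(nr, entry, nargs) as "data8 entry", so each line becomes
 * one table slot; holes such as 96-98 are padded with sys_ni_syscall */
__SYSCALL(1024, sys_ni_syscall, )
__SYSCALL(1025, sys_exit, )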
*/ -void -dma_mark_clean(void *addr, size_t size) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - unsigned long pg_addr, end; - - pg_addr = PAGE_ALIGN((unsigned long) addr); - end = (unsigned long) addr + size; - while (pg_addr + PAGE_SIZE <= end) { - struct page *page = virt_to_page(pg_addr); - set_bit(PG_arch_1, &page->flags); - pg_addr += PAGE_SIZE; - } + unsigned long pfn = PHYS_PFN(paddr); + + do { + set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); + } while (++pfn <= PHYS_PFN(paddr + size - 1)); } inline void diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 4ce4ee4ef9f2..b7d42e4edc1f 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -196,7 +196,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, if (!dma_addr) { printk(KERN_ERR "%s: out of ATEs\n", __func__); - return 0; + return DMA_MAPPING_ERROR; } return dma_addr; } @@ -314,11 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, return nhwentries; } -static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - static u64 sn_dma_get_required_mask(struct device *dev) { return DMA_BIT_MASK(64); @@ -441,7 +436,6 @@ static struct dma_map_ops sn_dma_ops = { .unmap_page = sn_dma_unmap_page, .map_sg = sn_dma_map_sg, .unmap_sg = sn_dma_unmap_sg, - .mapping_error = sn_dma_mapping_error, .dma_supported = sn_dma_supported, .get_required_mask = sn_dma_get_required_mask, }; diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 1bc9f1ba759a..8a5868e9a3a0 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -26,7 +26,6 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION - select DMA_DIRECT_OPS if HAS_DMA select ARCH_DISCARD_MEMBLOCK config CPU_BIG_ENDIAN diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 64a641467736..328ba83d735b 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -317,7 +317,6 @@ config UBOOT help If you say Y here kernel will try to collect command line parameters from the initial u-boot stack. - default n config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" @@ -395,7 +394,6 @@ comment "ROM configuration" config ROM bool "Specify ROM linker regions" - default n help Define a ROM region for the linker script. 
This creates a kernel that can be stored in flash, with possibly the text, and data diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 997c9f20ea0f..f00ca53f8c14 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -154,5 +154,8 @@ endif archclean: rm -f vmlinux.gz vmlinux.bz2 +archheaders: + $(Q)$(MAKE) $(build)=arch/m68k/kernel/syscalls all + install: sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)" diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 85904b73e261..bfd4648e76e3 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -12,6 +12,20 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68020=y +CONFIG_M68030=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_AMIGA=y +CONFIG_ZORRO=y +CONFIG_AMIGA_PCMCIA=y +CONFIG_ZORRO_NAMES=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y +CONFIG_AMIGA_BUILTIN_SERIAL=y +CONFIG_SERIAL_CONSOLE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,22 +42,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68020=y -CONFIG_M68030=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_AMIGA=y -CONFIG_ZORRO=y -CONFIG_AMIGA_PCMCIA=y -CONFIG_ZORRO_NAMES=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -112,6 +116,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -119,7 +124,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -197,7 +205,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -227,7 +234,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -297,7 +303,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -337,6 +342,7 @@ CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -400,8 +406,10 @@ CONFIG_A2065=y CONFIG_ARIADNE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -487,10 +495,6 @@ CONFIG_RTC_DRV_RP5C01=m # CONFIG_VIRTIO_MENU is not set # 
CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y -CONFIG_AMIGA_BUILTIN_SERIAL=y -CONFIG_SERIAL_CONSOLE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -588,31 +592,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -630,6 +609,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -639,7 +619,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -672,5 +651,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 9b3818bbb68b..81112af1e478 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -12,6 +12,15 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68020=y +CONFIG_M68030=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_APOLLO=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -29,19 +38,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68020=y -CONFIG_M68030=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_APOLLO=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -110,6 +112,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ 
-117,7 +120,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -195,7 +201,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -225,7 +230,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -295,7 +299,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -324,6 +327,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -378,8 +382,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -446,8 +452,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -545,31 +549,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -587,6 +566,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -596,7 +576,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -629,5 +608,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m 
+CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 769677809945..6d4b6023a2f0 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -12,6 +12,23 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68020=y +CONFIG_M68030=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_ATARI=y +CONFIG_ATARI_ROM_ISA=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y +CONFIG_NATFEAT=y +CONFIG_NFBLOCK=y +CONFIG_NFCON=y +CONFIG_NFETH=y +CONFIG_ATARI_ETHERNAT=y +CONFIG_ATARI_ETHERNEC=y +CONFIG_ATARI_DSP56K=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,20 +45,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68020=y -CONFIG_M68030=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_ATARI=y -CONFIG_ATARI_ROM_ISA=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -110,6 +119,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -117,7 +127,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -195,7 +208,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -225,7 +237,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -295,7 +306,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -332,6 +342,7 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_FALCON_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -388,8 +399,10 @@ CONFIG_VETH=m CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -460,15 +473,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y 
-CONFIG_NATFEAT=y -CONFIG_NFBLOCK=y -CONFIG_NFCON=y -CONFIG_NFETH=y -CONFIG_ATARI_ETHERNAT=y -CONFIG_ATARI_ETHERNEC=y -CONFIG_ATARI_DSP56K=m CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -566,31 +570,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -608,6 +587,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -617,7 +597,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -650,5 +629,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 7dd264ddf2ea..3306dff09d3c 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_VME=y +CONFIG_BVME6000=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,18 +35,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_VME=y -CONFIG_BVME6000=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -108,6 +109,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -115,7 +117,10 @@ CONFIG_NFT_REJECT=m 
CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -193,7 +198,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -223,7 +227,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -293,7 +296,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -322,6 +324,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -377,8 +380,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m 
+CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 515f7439c755..c15e15b68d39 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -12,6 +12,14 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68020=y +CONFIG_M68030=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_HP300=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -29,19 +37,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68020=y -CONFIG_M68030=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_HP300=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -110,6 +111,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -117,7 +119,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -195,7 +200,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -225,7 +229,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -295,7 +298,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -324,6 +326,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -379,8 +382,10 @@ CONFIG_VETH=m CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -449,7 +454,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -547,31 +551,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m 
-CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -589,6 +568,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -598,7 +578,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -631,5 +610,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 8e1038ceb407..1a0ce0d11267 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -12,6 +12,14 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68020=y +CONFIG_M68030=y +CONFIG_M68040=y +CONFIG_M68KFPU_EMU=y +CONFIG_MAC=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,19 +36,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68020=y -CONFIG_M68030=y -CONFIG_M68040=y -CONFIG_M68KFPU_EMU=y -CONFIG_MAC=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -109,6 +110,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -116,7 +118,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -194,7 +199,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m 
CONFIG_NFT_FIB_IPV4=m @@ -224,7 +228,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -297,7 +300,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -331,6 +333,7 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_MAC_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -366,8 +369,8 @@ CONFIG_TCM_PSCSI=m CONFIG_ADB=y CONFIG_ADB_MACII=y CONFIG_ADB_IOP=y -CONFIG_ADB_PMU=y CONFIG_ADB_CUDA=y +CONFIG_ADB_PMU=y CONFIG_INPUT_ADBHID=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y @@ -395,8 +398,10 @@ CONFIG_VETH=m CONFIG_MACMACE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -471,7 +476,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -569,31 +573,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -611,6 +590,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -620,7 +600,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -653,5 +632,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m 
+CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 62c8aaa15cc7..9758839b74bd 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -12,18 +12,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m -CONFIG_MQ_IOSCHED_DEADLINE=m -CONFIG_MQ_IOSCHED_KYBER=m -CONFIG_IOSCHED_BFQ=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -45,12 +33,35 @@ CONFIG_ZORRO=y CONFIG_AMIGA_PCMCIA=y CONFIG_ATARI_ROM_ISA=y CONFIG_ZORRO_NAMES=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y +CONFIG_NATFEAT=y +CONFIG_NFBLOCK=y +CONFIG_NFCON=y +CONFIG_NFETH=y +CONFIG_ATARI_ETHERNAT=y +CONFIG_ATARI_ETHERNEC=y +CONFIG_ATARI_DSP56K=m +CONFIG_AMIGA_BUILTIN_SERIAL=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_EFI_PARTITION is not set +CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -119,6 +130,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -126,7 +138,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -204,7 +219,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -234,7 +248,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -307,7 +320,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -354,6 +366,7 @@ CONFIG_BLK_DEV_MAC_IDE=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -400,8 +413,8 @@ CONFIG_TCM_PSCSI=m CONFIG_ADB=y CONFIG_ADB_MACII=y CONFIG_ADB_IOP=y -CONFIG_ADB_PMU=y CONFIG_ADB_CUDA=y +CONFIG_ADB_PMU=y CONFIG_INPUT_ADBHID=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y @@ -436,8 +449,10 @@ CONFIG_SUN3LANCE=y CONFIG_MACMACE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set 
CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -543,17 +558,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y -CONFIG_NATFEAT=y -CONFIG_NFBLOCK=y -CONFIG_NFCON=y -CONFIG_NFETH=y -CONFIG_ATARI_ETHERNAT=y -CONFIG_ATARI_ETHERNEC=y -CONFIG_ATARI_DSP56K=m -CONFIG_AMIGA_BUILTIN_SERIAL=y -CONFIG_SERIAL_CONSOLE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -651,31 +655,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -693,6 +672,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -702,7 +682,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -735,5 +714,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 733973f91297..f5526731ccdb 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -12,6 +12,12 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68030=y +CONFIG_VME=y +CONFIG_MVME147=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,17 +34,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68030=y -CONFIG_VME=y -CONFIG_MVME147=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m 
CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -107,6 +108,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -114,7 +116,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -192,7 +197,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -222,7 +226,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -292,7 +295,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -321,6 +323,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -377,8 +380,10 @@ CONFIG_VETH=m CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m 
+CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index fee30cc9ac16..5db58ff4b107 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_VME=y +CONFIG_MVME16x=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,18 +35,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_VME=y -CONFIG_MVME16x=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -108,6 +109,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -115,7 +117,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -193,7 +198,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -223,7 +227,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -293,7 +296,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -322,6 +324,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -377,8 +380,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m 
@@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index eebf9c9088e7..b645230da128 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_Q40=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -29,17 +36,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_Q40=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -108,6 +110,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -115,7 +118,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -193,7 +199,6 @@ 
CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -223,7 +228,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -293,7 +297,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -329,6 +332,7 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -385,8 +389,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -461,8 +467,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -560,31 +564,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -602,6 +581,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -611,7 +591,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -644,5 +623,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m 
+CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index dabc54318c09..4afe2100947e 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -12,6 +12,10 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_SUN3=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,15 +32,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_SUN3=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -105,6 +106,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -112,7 +114,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -190,7 +195,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -220,7 +224,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -290,7 +293,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -319,6 +321,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -375,7 +378,9 @@ CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -441,7 +446,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -539,30 +543,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m 
-CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -580,6 +560,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -589,7 +570,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -622,5 +602,34 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0d9a5c2a311a..bd22893d0dc3 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -12,6 +12,10 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_USERFAULTFD=y CONFIG_SLAB=y +CONFIG_KEXEC=y +CONFIG_BOOTINFO_PROC=y +CONFIG_SUN3X=y +CONFIG_PROC_HARDWARE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -28,15 +32,12 @@ CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m -CONFIG_KEXEC=y -CONFIG_BOOTINFO_PROC=y -CONFIG_SUN3X=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -CONFIG_ZPOOL=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +# CONFIG_COMPACTION is not set +CONFIG_CLEANCACHE=y +CONFIG_ZPOOL=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -105,6 +106,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -112,7 +114,10 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m @@ -190,7 +195,6 @@ CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m @@ -220,7 +224,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m @@ -290,7 +293,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m @@ -319,6 +321,7 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +# 
CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -375,8 +378,10 @@ CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HUAWEI is not set @@ -441,7 +446,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_DAX=m -CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -539,31 +543,6 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_WW_MUTEX_SELFTEST=m -CONFIG_TEST_LIST_SORT=m -CONFIG_TEST_SORT=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_TEST_HEXDUMP=m -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=m -CONFIG_TEST_PRINTF=m -CONFIG_TEST_BITMAP=m -CONFIG_TEST_UUID=m -CONFIG_TEST_OVERFLOW=m -CONFIG_TEST_RHASHTABLE=m -CONFIG_TEST_HASH=m -CONFIG_TEST_USER_COPY=m -CONFIG_TEST_BPF=m -CONFIG_FIND_BIT_BENCHMARK=m -CONFIG_TEST_FIRMWARE=m -CONFIG_TEST_SYSCTL=m -CONFIG_TEST_UDELAY=m -CONFIG_TEST_STATIC_KEYS=m -CONFIG_TEST_KMOD=m -CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_RSA=m @@ -581,6 +560,7 @@ CONFIG_CRYPTO_MORUS640=m CONFIG_CRYPTO_MORUS1280=m CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m @@ -590,7 +570,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m @@ -623,5 +602,35 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC32_SELFTEST=m +CONFIG_CRC64=m CONFIG_XZ_DEC_TEST=m CONFIG_STRING_SELFTEST=m +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_TEST_LIST_SORT=m +CONFIG_TEST_SORT=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +CONFIG_TEST_BITFIELD=m +CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m +CONFIG_TEST_OVERFLOW=m +CONFIG_TEST_RHASHTABLE=m +CONFIG_TEST_HASH=m +CONFIG_TEST_IDA=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_FIND_BIT_BENCHMARK=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_SYSCTL=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_TEST_KMOD=m +CONFIG_TEST_MEMCAT_P=m +CONFIG_EARLY_PRINTK=y diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index a4b8d3331a9e..9f1dd26903e3 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,3 +1,4 @@ +generated-y += syscall_table.h generic-y += barrier.h generic-y += compat.h generic-y += device.h diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index 85761255dde5..8a6dc6e5a279 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -107,12 +107,43 @@ static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len } static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf, - unsigned int len) + unsigned int nr) { - unsigned int 
i; + unsigned int tmp; - for (i = 0; i < len; i++) - out_8(port, *buf++); + if (nr & 15) { + tmp = (nr & 15) - 1; + asm volatile ( + "1: moveb %0@+,%2@; dbra %1,1b" + : "=a" (buf), "=d" (tmp) + : "a" (port), "0" (buf), + "1" (tmp)); + } + if (nr >> 4) { + tmp = (nr >> 4) - 1; + asm volatile ( + "1: " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "moveb %0@+,%2@; " + "dbra %1,1b" + : "=a" (buf), "=d" (tmp) + : "a" (port), "0" (buf), + "1" (tmp)); + } } static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr) diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index e680031bda7b..49d5de18646b 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,8 +4,7 @@ #include <uapi/asm/unistd.h> - -#define NR_syscalls 380 +#define NR_syscalls __NR_syscalls #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index c2e26a44c482..b6452910d7e1 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -1,6 +1,7 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h generic-y += auxvec.h generic-y += bitsperlong.h generic-y += bpf_perf_event.h diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index de3054f8a681..cdbd090d44a2 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -2,389 +2,6 @@ #ifndef _UAPI_ASM_M68K_UNISTD_H_ #define _UAPI_ASM_M68K_UNISTD_H_ -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_chown 16 -/*#define __NR_break 17*/ -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -/*#define __NR_stty 31*/ -/*#define __NR_gtty 32*/ -#define __NR_access 33 -#define __NR_nice 34 -/*#define __NR_ftime 35*/ -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -/*#define __NR_prof 44*/ -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -/*#define __NR_lock 53*/ -#define __NR_ioctl 54 -#define __NR_fcntl 55 -/*#define __NR_mpx 56*/ -#define __NR_setpgid 57 -/*#define __NR_ulimit 58*/ -/*#define __NR_oldolduname 59*/ -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -/*#define __NR_profil 98*/ -#define __NR_statfs 99 -#define __NR_fstatfs 100 -/*#define __NR_ioperm 101*/ -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -/*#define __NR_olduname 109*/ -/*#define __NR_iopl 110*/ /* not supported */ -#define __NR_vhangup 111 -/*#define __NR_idle 112*/ /* Obsolete */ -/*#define __NR_vm86 113*/ /* not supported */ -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_cacheflush 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -/*#define __NR_afs_syscall 
137*/ /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_getpagesize 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_lchown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_chown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_lchown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -/* 218*/ -/* 219*/ -#define __NR_getdents64 220 -#define __NR_gettid 221 -#define __NR_tkill 222 -#define __NR_setxattr 223 -#define __NR_lsetxattr 224 -#define __NR_fsetxattr 225 -#define __NR_getxattr 226 -#define __NR_lgetxattr 227 -#define __NR_fgetxattr 228 -#define __NR_listxattr 229 -#define __NR_llistxattr 230 -#define __NR_flistxattr 231 -#define __NR_removexattr 232 -#define __NR_lremovexattr 233 -#define __NR_fremovexattr 234 -#define __NR_futex 235 -#define __NR_sendfile64 236 -#define __NR_mincore 237 -#define __NR_madvise 238 -#define __NR_fcntl64 239 -#define __NR_readahead 240 -#define __NR_io_setup 241 -#define __NR_io_destroy 242 -#define __NR_io_getevents 243 -#define __NR_io_submit 244 -#define __NR_io_cancel 245 -#define __NR_fadvise64 246 -#define __NR_exit_group 247 -#define __NR_lookup_dcookie 248 -#define __NR_epoll_create 249 -#define __NR_epoll_ctl 250 -#define __NR_epoll_wait 251 -#define __NR_remap_file_pages 252 -#define __NR_set_tid_address 253 -#define __NR_timer_create 254 -#define __NR_timer_settime 255 -#define __NR_timer_gettime 256 -#define __NR_timer_getoverrun 257 -#define __NR_timer_delete 
258 -#define __NR_clock_settime 259 -#define __NR_clock_gettime 260 -#define __NR_clock_getres 261 -#define __NR_clock_nanosleep 262 -#define __NR_statfs64 263 -#define __NR_fstatfs64 264 -#define __NR_tgkill 265 -#define __NR_utimes 266 -#define __NR_fadvise64_64 267 -#define __NR_mbind 268 -#define __NR_get_mempolicy 269 -#define __NR_set_mempolicy 270 -#define __NR_mq_open 271 -#define __NR_mq_unlink 272 -#define __NR_mq_timedsend 273 -#define __NR_mq_timedreceive 274 -#define __NR_mq_notify 275 -#define __NR_mq_getsetattr 276 -#define __NR_waitid 277 -/*#define __NR_vserver 278*/ -#define __NR_add_key 279 -#define __NR_request_key 280 -#define __NR_keyctl 281 -#define __NR_ioprio_set 282 -#define __NR_ioprio_get 283 -#define __NR_inotify_init 284 -#define __NR_inotify_add_watch 285 -#define __NR_inotify_rm_watch 286 -#define __NR_migrate_pages 287 -#define __NR_openat 288 -#define __NR_mkdirat 289 -#define __NR_mknodat 290 -#define __NR_fchownat 291 -#define __NR_futimesat 292 -#define __NR_fstatat64 293 -#define __NR_unlinkat 294 -#define __NR_renameat 295 -#define __NR_linkat 296 -#define __NR_symlinkat 297 -#define __NR_readlinkat 298 -#define __NR_fchmodat 299 -#define __NR_faccessat 300 -#define __NR_pselect6 301 -#define __NR_ppoll 302 -#define __NR_unshare 303 -#define __NR_set_robust_list 304 -#define __NR_get_robust_list 305 -#define __NR_splice 306 -#define __NR_sync_file_range 307 -#define __NR_tee 308 -#define __NR_vmsplice 309 -#define __NR_move_pages 310 -#define __NR_sched_setaffinity 311 -#define __NR_sched_getaffinity 312 -#define __NR_kexec_load 313 -#define __NR_getcpu 314 -#define __NR_epoll_pwait 315 -#define __NR_utimensat 316 -#define __NR_signalfd 317 -#define __NR_timerfd_create 318 -#define __NR_eventfd 319 -#define __NR_fallocate 320 -#define __NR_timerfd_settime 321 -#define __NR_timerfd_gettime 322 -#define __NR_signalfd4 323 -#define __NR_eventfd2 324 -#define __NR_epoll_create1 325 -#define __NR_dup3 326 -#define __NR_pipe2 327 -#define __NR_inotify_init1 328 -#define __NR_preadv 329 -#define __NR_pwritev 330 -#define __NR_rt_tgsigqueueinfo 331 -#define __NR_perf_event_open 332 -#define __NR_get_thread_area 333 -#define __NR_set_thread_area 334 -#define __NR_atomic_cmpxchg_32 335 -#define __NR_atomic_barrier 336 -#define __NR_fanotify_init 337 -#define __NR_fanotify_mark 338 -#define __NR_prlimit64 339 -#define __NR_name_to_handle_at 340 -#define __NR_open_by_handle_at 341 -#define __NR_clock_adjtime 342 -#define __NR_syncfs 343 -#define __NR_setns 344 -#define __NR_process_vm_readv 345 -#define __NR_process_vm_writev 346 -#define __NR_kcmp 347 -#define __NR_finit_module 348 -#define __NR_sched_setattr 349 -#define __NR_sched_getattr 350 -#define __NR_renameat2 351 -#define __NR_getrandom 352 -#define __NR_memfd_create 353 -#define __NR_bpf 354 -#define __NR_execveat 355 -#define __NR_socket 356 -#define __NR_socketpair 357 -#define __NR_bind 358 -#define __NR_connect 359 -#define __NR_listen 360 -#define __NR_accept4 361 -#define __NR_getsockopt 362 -#define __NR_setsockopt 363 -#define __NR_getsockname 364 -#define __NR_getpeername 365 -#define __NR_sendto 366 -#define __NR_sendmsg 367 -#define __NR_recvfrom 368 -#define __NR_recvmsg 369 -#define __NR_shutdown 370 -#define __NR_recvmmsg 371 -#define __NR_sendmmsg 372 -#define __NR_userfaultfd 373 -#define __NR_membarrier 374 -#define __NR_mlock2 375 -#define __NR_copy_file_range 376 -#define __NR_preadv2 377 -#define __NR_pwritev2 378 -#define __NR_statx 379 +#include <asm/unistd_32.h> #endif /* 
_UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index e99993c57d6b..b4aa853051bd 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -32,7 +32,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, size = PAGE_ALIGN(size); order = get_order(size); - page = alloc_pages(flag, order); + page = alloc_pages(flag | __GFP_ZERO, order); if (!page) return NULL; diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index a1a3eaeaf58c..ad0195cbe042 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -164,8 +164,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) be32_to_cpu(m->addr); m68k_memory[m68k_num_memory].size = be32_to_cpu(m->size); - memblock_add(m68k_memory[m68k_num_memory].addr, - m68k_memory[m68k_num_memory].size); m68k_num_memory++; } else pr_warn("%s: too many memory chunks\n", diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile new file mode 100644 index 000000000000..659faefdcb1d --- /dev/null +++ b/arch/m68k/kernel/syscalls/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h +kapisyshdr-y += syscall_table.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..1a95c4a1bc0d --- /dev/null +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -0,0 +1,389 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for m68k +# +# The format is: +# <number> <abi> <name> <entry point> +# +# The <abi> is always "common" for this file +# +0 common restart_syscall sys_restart_syscall +1 common exit sys_exit +2 common fork __sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 common execve sys_execve +12 common chdir sys_chdir +13 common time sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common chown sys_chown16 +# 17 was break +18 common oldstat sys_stat +19 common lseek sys_lseek +20 common getpid sys_getpid +21 common mount sys_mount +22 common umount sys_oldumount +23 common setuid sys_setuid16 +24 common getuid sys_getuid16 +25 common stime sys_stime +26 common ptrace sys_ptrace +27 common alarm sys_alarm +28 common oldfstat sys_fstat +29 common pause 
sys_pause +30 common utime sys_utime +# 31 was stty +# 32 was gtty +33 common access sys_access +34 common nice sys_nice +# 35 was ftime +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_pipe +43 common times sys_times +# 44 was prof +45 common brk sys_brk +46 common setgid sys_setgid16 +47 common getgid sys_getgid16 +48 common signal sys_signal +49 common geteuid sys_geteuid16 +50 common getegid sys_getegid16 +51 common acct sys_acct +52 common umount2 sys_umount +# 53 was lock +54 common ioctl sys_ioctl +55 common fcntl sys_fcntl +# 56 was mpx +57 common setpgid sys_setpgid +# 58 was ulimit +# 59 was oldolduname +60 common umask sys_umask +61 common chroot sys_chroot +62 common ustat sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 common sigaction sys_sigaction +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid16 +71 common setregid sys_setregid16 +72 common sigsuspend sys_sigsuspend +73 common sigpending sys_sigpending +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit +76 common getrlimit sys_old_getrlimit +77 common getrusage sys_getrusage +78 common gettimeofday sys_gettimeofday +79 common settimeofday sys_settimeofday +80 common getgroups sys_getgroups16 +81 common setgroups sys_setgroups16 +82 common select sys_old_select +83 common symlink sys_symlink +84 common oldlstat sys_lstat +85 common readlink sys_readlink +86 common uselib sys_uselib +87 common swapon sys_swapon +88 common reboot sys_reboot +89 common readdir sys_old_readdir +90 common mmap sys_old_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate +93 common ftruncate sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown16 +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +# 98 was profil +99 common statfs sys_statfs +100 common fstatfs sys_fstatfs +# 101 was ioperm +102 common socketcall sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer +105 common getitimer sys_getitimer +106 common stat sys_newstat +107 common lstat sys_newlstat +108 common fstat sys_newfstat +# 109 was olduname +# 110 was iopl +111 common vhangup sys_vhangup +# 112 was idle +# 113 was vm86 +114 common wait4 sys_wait4 +115 common swapoff sys_swapoff +116 common sysinfo sys_sysinfo +117 common ipc sys_ipc +118 common fsync sys_fsync +119 common sigreturn sys_sigreturn +120 common clone __sys_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common cacheflush sys_cacheflush +124 common adjtimex sys_adjtimex +125 common mprotect sys_mprotect +126 common sigprocmask sys_sigprocmask +127 common create_module sys_ni_syscall +128 common init_module sys_init_module +129 common delete_module sys_delete_module +130 common get_kernel_syms sys_ni_syscall +131 common quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 common personality sys_personality +# 137 was afs_syscall +138 common setfsuid sys_setfsuid16 +139 common setfsgid sys_setfsgid16 +140 common _llseek sys_llseek +141 common getdents sys_getdents +142 common _newselect sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv +146 common writev sys_writev +147 common 
getsid sys_getsid +148 common fdatasync sys_fdatasync +149 common _sysctl sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid16 +165 common getresuid sys_getresuid16 +166 common getpagesize sys_getpagesize +167 common query_module sys_ni_syscall +168 common poll sys_poll +169 common nfsservctl sys_ni_syscall +170 common setresgid sys_setresgid16 +171 common getresgid sys_getresgid16 +172 common prctl sys_prctl +173 common rt_sigreturn sys_rt_sigreturn +174 common rt_sigaction sys_rt_sigaction +175 common rt_sigprocmask sys_rt_sigprocmask +176 common rt_sigpending sys_rt_sigpending +177 common rt_sigtimedwait sys_rt_sigtimedwait +178 common rt_sigqueueinfo sys_rt_sigqueueinfo +179 common rt_sigsuspend sys_rt_sigsuspend +180 common pread64 sys_pread64 +181 common pwrite64 sys_pwrite64 +182 common lchown sys_lchown16 +183 common getcwd sys_getcwd +184 common capget sys_capget +185 common capset sys_capset +186 common sigaltstack sys_sigaltstack +187 common sendfile sys_sendfile +188 common getpmsg sys_ni_syscall +189 common putpmsg sys_ni_syscall +190 common vfork __sys_vfork +191 common ugetrlimit sys_getrlimit +192 common mmap2 sys_mmap2 +193 common truncate64 sys_truncate64 +194 common ftruncate64 sys_ftruncate64 +195 common stat64 sys_stat64 +196 common lstat64 sys_lstat64 +197 common fstat64 sys_fstat64 +198 common chown32 sys_chown +199 common getuid32 sys_getuid +200 common getgid32 sys_getgid +201 common geteuid32 sys_geteuid +202 common getegid32 sys_getegid +203 common setreuid32 sys_setreuid +204 common setregid32 sys_setregid +205 common getgroups32 sys_getgroups +206 common setgroups32 sys_setgroups +207 common fchown32 sys_fchown +208 common setresuid32 sys_setresuid +209 common getresuid32 sys_getresuid +210 common setresgid32 sys_setresgid +211 common getresgid32 sys_getresgid +212 common lchown32 sys_lchown +213 common setuid32 sys_setuid +214 common setgid32 sys_setgid +215 common setfsuid32 sys_setfsuid +216 common setfsgid32 sys_setfsgid +217 common pivot_root sys_pivot_root +# 218 is reserved +# 219 is reserved +220 common getdents64 sys_getdents64 +221 common gettid sys_gettid +222 common tkill sys_tkill +223 common setxattr sys_setxattr +224 common lsetxattr sys_lsetxattr +225 common fsetxattr sys_fsetxattr +226 common getxattr sys_getxattr +227 common lgetxattr sys_lgetxattr +228 common fgetxattr sys_fgetxattr +229 common listxattr sys_listxattr +230 common llistxattr sys_llistxattr +231 common flistxattr sys_flistxattr +232 common removexattr sys_removexattr +233 common lremovexattr sys_lremovexattr +234 common fremovexattr sys_fremovexattr +235 common futex sys_futex +236 common sendfile64 sys_sendfile64 +237 common mincore sys_mincore +238 common madvise sys_madvise +239 common fcntl64 sys_fcntl64 +240 common readahead sys_readahead +241 common io_setup sys_io_setup +242 common io_destroy sys_io_destroy +243 common io_getevents sys_io_getevents +244 common 
io_submit sys_io_submit +245 common io_cancel sys_io_cancel +246 common fadvise64 sys_fadvise64 +247 common exit_group sys_exit_group +248 common lookup_dcookie sys_lookup_dcookie +249 common epoll_create sys_epoll_create +250 common epoll_ctl sys_epoll_ctl +251 common epoll_wait sys_epoll_wait +252 common remap_file_pages sys_remap_file_pages +253 common set_tid_address sys_set_tid_address +254 common timer_create sys_timer_create +255 common timer_settime sys_timer_settime +256 common timer_gettime sys_timer_gettime +257 common timer_getoverrun sys_timer_getoverrun +258 common timer_delete sys_timer_delete +259 common clock_settime sys_clock_settime +260 common clock_gettime sys_clock_gettime +261 common clock_getres sys_clock_getres +262 common clock_nanosleep sys_clock_nanosleep +263 common statfs64 sys_statfs64 +264 common fstatfs64 sys_fstatfs64 +265 common tgkill sys_tgkill +266 common utimes sys_utimes +267 common fadvise64_64 sys_fadvise64_64 +268 common mbind sys_mbind +269 common get_mempolicy sys_get_mempolicy +270 common set_mempolicy sys_set_mempolicy +271 common mq_open sys_mq_open +272 common mq_unlink sys_mq_unlink +273 common mq_timedsend sys_mq_timedsend +274 common mq_timedreceive sys_mq_timedreceive +275 common mq_notify sys_mq_notify +276 common mq_getsetattr sys_mq_getsetattr +277 common waitid sys_waitid +# 278 was vserver +279 common add_key sys_add_key +280 common request_key sys_request_key +281 common keyctl sys_keyctl +282 common ioprio_set sys_ioprio_set +283 common ioprio_get sys_ioprio_get +284 common inotify_init sys_inotify_init +285 common inotify_add_watch sys_inotify_add_watch +286 common inotify_rm_watch sys_inotify_rm_watch +287 common migrate_pages sys_migrate_pages +288 common openat sys_openat +289 common mkdirat sys_mkdirat +290 common mknodat sys_mknodat +291 common fchownat sys_fchownat +292 common futimesat sys_futimesat +293 common fstatat64 sys_fstatat64 +294 common unlinkat sys_unlinkat +295 common renameat sys_renameat +296 common linkat sys_linkat +297 common symlinkat sys_symlinkat +298 common readlinkat sys_readlinkat +299 common fchmodat sys_fchmodat +300 common faccessat sys_faccessat +301 common pselect6 sys_pselect6 +302 common ppoll sys_ppoll +303 common unshare sys_unshare +304 common set_robust_list sys_set_robust_list +305 common get_robust_list sys_get_robust_list +306 common splice sys_splice +307 common sync_file_range sys_sync_file_range +308 common tee sys_tee +309 common vmsplice sys_vmsplice +310 common move_pages sys_move_pages +311 common sched_setaffinity sys_sched_setaffinity +312 common sched_getaffinity sys_sched_getaffinity +313 common kexec_load sys_kexec_load +314 common getcpu sys_getcpu +315 common epoll_pwait sys_epoll_pwait +316 common utimensat sys_utimensat +317 common signalfd sys_signalfd +318 common timerfd_create sys_timerfd_create +319 common eventfd sys_eventfd +320 common fallocate sys_fallocate +321 common timerfd_settime sys_timerfd_settime +322 common timerfd_gettime sys_timerfd_gettime +323 common signalfd4 sys_signalfd4 +324 common eventfd2 sys_eventfd2 +325 common epoll_create1 sys_epoll_create1 +326 common dup3 sys_dup3 +327 common pipe2 sys_pipe2 +328 common inotify_init1 sys_inotify_init1 +329 common preadv sys_preadv +330 common pwritev sys_pwritev +331 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +332 common perf_event_open sys_perf_event_open +333 common get_thread_area sys_get_thread_area +334 common set_thread_area sys_set_thread_area +335 common atomic_cmpxchg_32 sys_atomic_cmpxchg_32 
+336	common	atomic_barrier			sys_atomic_barrier
+337	common	fanotify_init			sys_fanotify_init
+338	common	fanotify_mark			sys_fanotify_mark
+339	common	prlimit64			sys_prlimit64
+340	common	name_to_handle_at		sys_name_to_handle_at
+341	common	open_by_handle_at		sys_open_by_handle_at
+342	common	clock_adjtime			sys_clock_adjtime
+343	common	syncfs				sys_syncfs
+344	common	setns				sys_setns
+345	common	process_vm_readv		sys_process_vm_readv
+346	common	process_vm_writev		sys_process_vm_writev
+347	common	kcmp				sys_kcmp
+348	common	finit_module			sys_finit_module
+349	common	sched_setattr			sys_sched_setattr
+350	common	sched_getattr			sys_sched_getattr
+351	common	renameat2			sys_renameat2
+352	common	getrandom			sys_getrandom
+353	common	memfd_create			sys_memfd_create
+354	common	bpf				sys_bpf
+355	common	execveat			sys_execveat
+356	common	socket				sys_socket
+357	common	socketpair			sys_socketpair
+358	common	bind				sys_bind
+359	common	connect				sys_connect
+360	common	listen				sys_listen
+361	common	accept4				sys_accept4
+362	common	getsockopt			sys_getsockopt
+363	common	setsockopt			sys_setsockopt
+364	common	getsockname			sys_getsockname
+365	common	getpeername			sys_getpeername
+366	common	sendto				sys_sendto
+367	common	sendmsg				sys_sendmsg
+368	common	recvfrom			sys_recvfrom
+369	common	recvmsg				sys_recvmsg
+370	common	shutdown			sys_shutdown
+371	common	recvmmsg			sys_recvmmsg
+372	common	sendmmsg			sys_sendmmsg
+373	common	userfaultfd			sys_userfaultfd
+374	common	membarrier			sys_membarrier
+375	common	mlock2				sys_mlock2
+376	common	copy_file_range			sys_copy_file_range
+377	common	preadv2				sys_preadv2
+378	common	pwritev2			sys_pwritev2
+379	common	statx				sys_statx
diff --git a/arch/m68k/kernel/syscalls/syscallhdr.sh b/arch/m68k/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..6f357d68ef44
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_M68K_`basename "$out" | sed \
+	-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+	-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+	printf "#ifndef %s\n" "${fileguard}"
+	printf "#define %s\n" "${fileguard}"
+	printf "\n"
+
+	nxt=0
+	while read nr abi name entry ; do
+		if [ -z "$offset" ]; then
+			printf "#define __NR_%s%s\t%s\n" \
+				"${prefix}" "${name}" "${nr}"
+		else
+			printf "#define __NR_%s%s\t(%s + %s)\n" \
+				"${prefix}" "${name}" "${offset}" "${nr}"
+		fi
+		nxt=$((nr+1))
+	done
+
+	printf "\n"
+	printf "#ifdef __KERNEL__\n"
+	printf "#define __NR_syscalls\t%s\n" "${nxt}"
+	printf "#endif\n"
+	printf "\n"
+	printf "#endif /* %s */\n" "${fileguard}"
+) > "$out"
diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..85d78d9309ad
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+	t_nxt="$1"
+	t_nr="$2"
+	t_entry="$3"
+
+	while [ $t_nxt -lt $t_nr ]; do
+		printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+		t_nxt=$((t_nxt+1))
+	done
+	printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+	nxt=0
+	if [ -z "$offset" ]; then
+		offset=0
+	fi
+
+	while read nr abi name entry ; do
+		emit $((nxt+offset)) $((nr+offset)) $entry
+
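+		# emit() pads any gap between the previous entry and this
+		# one with sys_ni_syscall, so holes in syscall.tbl (e.g.
+		# "# 17 was break") still occupy a slot in the output.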
nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 2c8402e75f62..d329cc7b481c 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -15,389 +15,12 @@ #include <linux/linkage.h> #ifndef CONFIG_MMU -#define sys_mmap2 sys_mmap_pgoff +#define sys_mmap2 sys_mmap_pgoff #endif -.section .rodata +#define __SYSCALL(nr, entry, nargs) .long entry + .section .rodata ALIGN ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ - .long sys_exit - .long __sys_fork - .long sys_read - .long sys_write - .long sys_open /* 5 */ - .long sys_close - .long sys_waitpid - .long sys_creat - .long sys_link - .long sys_unlink /* 10 */ - .long sys_execve - .long sys_chdir - .long sys_time - .long sys_mknod - .long sys_chmod /* 15 */ - .long sys_chown16 - .long sys_ni_syscall /* old break syscall holder */ - .long sys_stat - .long sys_lseek - .long sys_getpid /* 20 */ - .long sys_mount - .long sys_oldumount - .long sys_setuid16 - .long sys_getuid16 - .long sys_stime /* 25 */ - .long sys_ptrace - .long sys_alarm - .long sys_fstat - .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ - .long sys_access - .long sys_nice - .long sys_ni_syscall /* 35 - old ftime syscall holder */ - .long sys_sync - .long sys_kill - .long sys_rename - .long sys_mkdir - .long sys_rmdir /* 40 */ - .long sys_dup - .long sys_pipe - .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ - .long sys_setgid16 - .long sys_getgid16 - .long sys_signal - .long sys_geteuid16 - .long sys_getegid16 /* 50 */ - .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ - .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ - .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ - .long sys_ni_syscall - .long sys_umask /* 60 */ - .long sys_chroot - .long sys_ustat - .long sys_dup2 - .long sys_getppid - .long sys_getpgrp /* 65 */ - .long sys_setsid - .long sys_sigaction - .long sys_sgetmask - .long sys_ssetmask - .long sys_setreuid16 /* 70 */ - .long sys_setregid16 - .long sys_sigsuspend - .long sys_sigpending - .long sys_sethostname - .long sys_setrlimit /* 75 */ - .long sys_old_getrlimit - .long sys_getrusage - .long sys_gettimeofday - .long sys_settimeofday - .long sys_getgroups16 /* 80 */ - .long sys_setgroups16 - .long sys_old_select - .long sys_symlink - .long sys_lstat - .long sys_readlink /* 85 */ - .long sys_uselib - .long sys_swapon - .long sys_reboot - .long sys_old_readdir - .long sys_old_mmap /* 90 */ - .long sys_munmap - .long sys_truncate - .long sys_ftruncate - .long sys_fchmod - .long sys_fchown16 /* 95 */ - .long sys_getpriority - .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ - .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm for i386 */ - .long sys_socketcall - .long sys_syslog - .long sys_setitimer - .long sys_getitimer /* 105 */ - .long sys_newstat - .long sys_newlstat - .long sys_newfstat - .long sys_ni_syscall - .long sys_ni_syscall /* 110 - iopl for i386 */ - .long sys_vhangup - .long sys_ni_syscall /* obsolete idle() syscall */ - .long sys_ni_syscall /* vm86old for i386 */ - .long sys_wait4 - .long sys_swapoff /* 115 */ - .long sys_sysinfo - .long sys_ipc - .long sys_fsync 
- .long sys_sigreturn - .long __sys_clone /* 120 */ - .long sys_setdomainname - .long sys_newuname - .long sys_cacheflush /* modify_ldt for i386 */ - .long sys_adjtimex - .long sys_mprotect /* 125 */ - .long sys_sigprocmask - .long sys_ni_syscall /* old "create_module" */ - .long sys_init_module - .long sys_delete_module - .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ - .long sys_quotactl - .long sys_getpgid - .long sys_fchdir - .long sys_bdflush - .long sys_sysfs /* 135 */ - .long sys_personality - .long sys_ni_syscall /* for afs_syscall */ - .long sys_setfsuid16 - .long sys_setfsgid16 - .long sys_llseek /* 140 */ - .long sys_getdents - .long sys_select - .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ - .long sys_writev - .long sys_getsid - .long sys_fdatasync - .long sys_sysctl - .long sys_mlock /* 150 */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall - .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ - .long sys_sched_setscheduler - .long sys_sched_getscheduler - .long sys_sched_yield - .long sys_sched_get_priority_max - .long sys_sched_get_priority_min /* 160 */ - .long sys_sched_rr_get_interval - .long sys_nanosleep - .long sys_mremap - .long sys_setresuid16 - .long sys_getresuid16 /* 165 */ - .long sys_getpagesize - .long sys_ni_syscall /* old "query_module" */ - .long sys_poll - .long sys_ni_syscall /* old nfsservctl */ - .long sys_setresgid16 /* 170 */ - .long sys_getresgid16 - .long sys_prctl - .long sys_rt_sigreturn - .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ - .long sys_rt_sigpending - .long sys_rt_sigtimedwait - .long sys_rt_sigqueueinfo - .long sys_rt_sigsuspend - .long sys_pread64 /* 180 */ - .long sys_pwrite64 - .long sys_lchown16 - .long sys_getcwd - .long sys_capget - .long sys_capset /* 185 */ - .long sys_sigaltstack - .long sys_sendfile - .long sys_ni_syscall /* streams1 */ - .long sys_ni_syscall /* streams2 */ - .long __sys_vfork /* 190 */ - .long sys_getrlimit - .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ - .long sys_lstat64 - .long sys_fstat64 - .long sys_chown - .long sys_getuid - .long sys_getgid /* 200 */ - .long sys_geteuid - .long sys_getegid - .long sys_setreuid - .long sys_setregid - .long sys_getgroups /* 205 */ - .long sys_setgroups - .long sys_fchown - .long sys_setresuid - .long sys_getresuid - .long sys_setresgid /* 210 */ - .long sys_getresgid - .long sys_lchown - .long sys_setuid - .long sys_setgid - .long sys_setfsuid /* 215 */ - .long sys_setfsgid - .long sys_pivot_root - .long sys_ni_syscall - .long sys_ni_syscall - .long sys_getdents64 /* 220 */ - .long sys_gettid - .long sys_tkill - .long sys_setxattr - .long sys_lsetxattr - .long sys_fsetxattr /* 225 */ - .long sys_getxattr - .long sys_lgetxattr - .long sys_fgetxattr - .long sys_listxattr - .long sys_llistxattr /* 230 */ - .long sys_flistxattr - .long sys_removexattr - .long sys_lremovexattr - .long sys_fremovexattr - .long sys_futex /* 235 */ - .long sys_sendfile64 - .long sys_mincore - .long sys_madvise - .long sys_fcntl64 - .long sys_readahead /* 240 */ - .long sys_io_setup - .long sys_io_destroy - .long sys_io_getevents - .long sys_io_submit - .long sys_io_cancel /* 245 */ - .long sys_fadvise64 - .long sys_exit_group - .long sys_lookup_dcookie - .long sys_epoll_create - .long sys_epoll_ctl /* 250 */ - .long sys_epoll_wait - .long sys_remap_file_pages - .long sys_set_tid_address - .long sys_timer_create - .long sys_timer_settime /* 255 */ - .long sys_timer_gettime - .long 
sys_timer_getoverrun - .long sys_timer_delete - .long sys_clock_settime - .long sys_clock_gettime /* 260 */ - .long sys_clock_getres - .long sys_clock_nanosleep - .long sys_statfs64 - .long sys_fstatfs64 - .long sys_tgkill /* 265 */ - .long sys_utimes - .long sys_fadvise64_64 - .long sys_mbind - .long sys_get_mempolicy - .long sys_set_mempolicy /* 270 */ - .long sys_mq_open - .long sys_mq_unlink - .long sys_mq_timedsend - .long sys_mq_timedreceive - .long sys_mq_notify /* 275 */ - .long sys_mq_getsetattr - .long sys_waitid - .long sys_ni_syscall /* for sys_vserver */ - .long sys_add_key - .long sys_request_key /* 280 */ - .long sys_keyctl - .long sys_ioprio_set - .long sys_ioprio_get - .long sys_inotify_init - .long sys_inotify_add_watch /* 285 */ - .long sys_inotify_rm_watch - .long sys_migrate_pages - .long sys_openat - .long sys_mkdirat - .long sys_mknodat /* 290 */ - .long sys_fchownat - .long sys_futimesat - .long sys_fstatat64 - .long sys_unlinkat - .long sys_renameat /* 295 */ - .long sys_linkat - .long sys_symlinkat - .long sys_readlinkat - .long sys_fchmodat - .long sys_faccessat /* 300 */ - .long sys_pselect6 - .long sys_ppoll - .long sys_unshare - .long sys_set_robust_list - .long sys_get_robust_list /* 305 */ - .long sys_splice - .long sys_sync_file_range - .long sys_tee - .long sys_vmsplice - .long sys_move_pages /* 310 */ - .long sys_sched_setaffinity - .long sys_sched_getaffinity - .long sys_kexec_load - .long sys_getcpu - .long sys_epoll_pwait /* 315 */ - .long sys_utimensat - .long sys_signalfd - .long sys_timerfd_create - .long sys_eventfd - .long sys_fallocate /* 320 */ - .long sys_timerfd_settime - .long sys_timerfd_gettime - .long sys_signalfd4 - .long sys_eventfd2 - .long sys_epoll_create1 /* 325 */ - .long sys_dup3 - .long sys_pipe2 - .long sys_inotify_init1 - .long sys_preadv - .long sys_pwritev /* 330 */ - .long sys_rt_tgsigqueueinfo - .long sys_perf_event_open - .long sys_get_thread_area - .long sys_set_thread_area - .long sys_atomic_cmpxchg_32 /* 335 */ - .long sys_atomic_barrier - .long sys_fanotify_init - .long sys_fanotify_mark - .long sys_prlimit64 - .long sys_name_to_handle_at /* 340 */ - .long sys_open_by_handle_at - .long sys_clock_adjtime - .long sys_syncfs - .long sys_setns - .long sys_process_vm_readv /* 345 */ - .long sys_process_vm_writev - .long sys_kcmp - .long sys_finit_module - .long sys_sched_setattr - .long sys_sched_getattr /* 350 */ - .long sys_renameat2 - .long sys_getrandom - .long sys_memfd_create - .long sys_bpf - .long sys_execveat /* 355 */ - .long sys_socket - .long sys_socketpair - .long sys_bind - .long sys_connect - .long sys_listen /* 360 */ - .long sys_accept4 - .long sys_getsockopt - .long sys_setsockopt - .long sys_getsockname - .long sys_getpeername /* 365 */ - .long sys_sendto - .long sys_sendmsg - .long sys_recvfrom - .long sys_recvmsg - .long sys_shutdown /* 370 */ - .long sys_recvmmsg - .long sys_sendmmsg - .long sys_userfaultfd - .long sys_membarrier - .long sys_mlock2 /* 375 */ - .long sys_copy_file_range - .long sys_preadv2 - .long sys_pwritev2 - .long sys_statx +#include <asm/syscall_table.h> +#undef __SYSCALL diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 7497cf30bf1c..3f3d0bf36091 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -228,6 +228,7 @@ void __init paging_init(void) min_addr = m68k_memory[0].addr; max_addr = min_addr + m68k_memory[0].size; + memblock_add(m68k_memory[0].addr, m68k_memory[0].size); for (i = 1; i < m68k_num_memory;) { if (m68k_memory[i].addr < min_addr) 
{ printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n", @@ -238,6 +239,7 @@ void __init paging_init(void) (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } + memblock_add(m68k_memory[i].addr, m68k_memory[i].size); addr = m68k_memory[i].addr + m68k_memory[i].size; if (addr > max_addr) max_addr = addr; diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index effed2efd306..eda9e2315ef5 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -12,7 +12,6 @@ config MICROBLAZE select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 0823d291fbeb..7b340a35b194 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -79,21 +79,30 @@ all: linux.bin archclean: $(Q)$(MAKE) $(clean)=$(boot) -linux.bin linux.bin.gz linux.bin.ub: vmlinux - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +archheaders: + $(Q)$(MAKE) $(build)=arch/microblaze/kernel/syscalls all -simpleImage.%: vmlinux +PHONY += linux.bin linux.bin.gz linux.bin.ub +linux.bin.ub linux.bin.gz: linux.bin +linux.bin: vmlinux +linux.bin linux.bin.gz linux.bin.ub: $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' + +PHONY += simpleImage.$(DTB) +simpleImage.$(DTB): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip) + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' define archhelp echo '* linux.bin - Create raw binary' echo ' linux.bin.gz - Create compressed raw binary' echo ' linux.bin.ub - Create U-Boot wrapped raw binary' - echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' - echo ' - stripped elf with fdt blob' - echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob' - echo ' *_defconfig - Select default config from arch/microblaze/configs' - echo '' + echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in' + echo ' simpleImage.<dt> : raw image' + echo ' simpleImage.<dt>.ub : raw image with U-Boot header' + echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)' + echo ' simpleImage.<dt>.strip : stripped ELF' echo ' Targets with <dt> embed a device tree blob inside the image' echo ' These targets support board with firmware that does not' echo ' support passing a device tree directly. 
Replace <dt> with the' diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 600e5a198bd2..cff570a71946 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -3,38 +3,33 @@ # arch/microblaze/boot/Makefile # -targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.% +targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.* OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary $(obj)/linux.bin: vmlinux FORCE $(call if_changed,objcopy) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' $(obj)/linux.bin.ub: $(obj)/linux.bin FORCE $(call if_changed,uimage) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE $(call if_changed,gzip) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' - -quiet_cmd_cp = CP $< $@$2 - cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) quiet_cmd_strip = STRIP $< $@$2 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ -K _fdt_start $< -o $@$2 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) -UIMAGE_IN = $@ -UIMAGE_OUT = $@.ub -$(obj)/simpleImage.%: vmlinux FORCE - $(call if_changed,cp,.unstrip) +$(obj)/simpleImage.$(DTB): vmlinux FORCE $(call if_changed,objcopy) + +$(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE $(call if_changed,uimage) - $(call if_changed,strip,.strip) - @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' -clean-files += simpleImage.*.unstrip linux.bin.ub +$(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE + $(call if_changed,shipped) + +$(obj)/simpleImage.$(DTB).strip: vmlinux FORCE + $(call if_changed,strip) diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile index c7324e74f9ef..ef00dd30d19a 100644 --- a/arch/microblaze/boot/dts/Makefile +++ b/arch/microblaze/boot/dts/Makefile @@ -12,12 +12,9 @@ $(obj)/linked_dtb.o: $(obj)/system.dtb # Generate system.dtb from $(DTB).dtb ifneq ($(DTB),system) $(obj)/system.dtb: $(obj)/$(DTB).dtb - $(call if_changed,cp) + $(call if_changed,shipped) endif endif -quiet_cmd_cp = CP $< $@$2 - cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) - # Rule to build device tree blobs DTC_FLAGS := -p 1024 diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 569ba9e670c1..9c7d1d25bf3d 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,3 +1,4 @@ +generated-y += syscall_table.h generic-y += barrier.h generic-y += bitops.h generic-y += bug.h diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index e14b6621c933..142d3f004848 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -200,7 +200,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } * is cleared in the TLB miss handler before the TLB entry is loaded. * - All other bits of the PTE are loaded into TLBLO without * * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for - * software PTE bits. We actually use use bits 21, 24, 25, and + * software PTE bits. We actually use bits 21, 24, 25, and * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and * PRESENT. 
*/ diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index f42c40f5001b..9b7c2c4eaf12 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,4 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 401 - #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 2c6a6bffea32..b6656d930a0e 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -1,6 +1,7 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h generic-y += bitsperlong.h generic-y += bpf_perf_event.h generic-y += errno.h diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index 7a9f16a76413..3f2d7cb6836c 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -11,411 +11,6 @@ #ifndef _UAPI_ASM_MICROBLAZE_UNISTD_H #define _UAPI_ASM_MICROBLAZE_UNISTD_H -#define __NR_restart_syscall 0 /* ok */ -#define __NR_exit 1 /* ok */ -#define __NR_fork 2 /* not for no MMU - weird */ -#define __NR_read 3 /* ok */ -#define __NR_write 4 /* ok */ -#define __NR_open 5 /* openat */ -#define __NR_close 6 /* ok */ -#define __NR_waitpid 7 /* waitid */ -#define __NR_creat 8 /* openat */ -#define __NR_link 9 /* linkat */ -#define __NR_unlink 10 /* unlinkat */ -#define __NR_execve 11 /* ok */ -#define __NR_chdir 12 /* ok */ -#define __NR_time 13 /* obsolete -> sys_gettimeofday */ -#define __NR_mknod 14 /* mknodat */ -#define __NR_chmod 15 /* fchmodat */ -#define __NR_lchown 16 /* ok */ -#define __NR_break 17 /* don't know */ -#define __NR_oldstat 18 /* remove */ -#define __NR_lseek 19 /* ok */ -#define __NR_getpid 20 /* ok */ -#define __NR_mount 21 /* ok */ -#define __NR_umount 22 /* ok */ /* use only umount2 */ -#define __NR_setuid 23 /* ok */ -#define __NR_getuid 24 /* ok */ -#define __NR_stime 25 /* obsolete -> sys_settimeofday */ -#define __NR_ptrace 26 /* ok */ -#define __NR_alarm 27 /* obsolete -> sys_setitimer */ -#define __NR_oldfstat 28 /* remove */ -#define __NR_pause 29 /* obsolete -> sys_rt_sigtimedwait */ -#define __NR_utime 30 /* obsolete -> sys_utimesat */ -#define __NR_stty 31 /* remove */ -#define __NR_gtty 32 /* remove */ -#define __NR_access 33 /* faccessat */ -/* can be implemented by sys_setpriority */ -#define __NR_nice 34 -#define __NR_ftime 35 /* remove */ -#define __NR_sync 36 /* ok */ -#define __NR_kill 37 /* ok */ -#define __NR_rename 38 /* renameat */ -#define __NR_mkdir 39 /* mkdirat */ -#define __NR_rmdir 40 /* unlinkat */ -#define __NR_dup 41 /* ok */ -#define __NR_pipe 42 /* ok */ -#define __NR_times 43 /* ok */ -#define __NR_prof 44 /* remove */ -#define __NR_brk 45 /* ok -mmu, nommu specific */ -#define __NR_setgid 46 /* ok */ -#define __NR_getgid 47 /* ok */ -#define __NR_signal 48 /* obsolete -> sys_rt_sigaction */ -#define __NR_geteuid 49 /* ok */ -#define __NR_getegid 50 /* ok */ -#define __NR_acct 51 /* add it and then I can disable it */ -#define __NR_umount2 52 /* remove */ -#define __NR_lock 53 /* remove */ -#define __NR_ioctl 54 /* ok */ -#define __NR_fcntl 55 /* ok -> 64bit version*/ -#define __NR_mpx 56 /* remove */ -#define __NR_setpgid 57 /* ok */ -#define __NR_ulimit 58 /* remove */ -#define __NR_oldolduname 59 /* remove */ -#define __NR_umask 60 /* ok */ -#define __NR_chroot 61 /* ok */ -#define __NR_ustat 62 /* obsolete -> statfs64 */ -#define __NR_dup2 
63 /* ok */ -#define __NR_getppid 64 /* ok */ -#define __NR_getpgrp 65 /* obsolete -> sys_getpgid */ -#define __NR_setsid 66 /* ok */ -#define __NR_sigaction 67 /* obsolete -> rt_sigaction */ -#define __NR_sgetmask 68 /* obsolete -> sys_rt_sigprocmask */ -#define __NR_ssetmask 69 /* obsolete ->sys_rt_sigprocmask */ -#define __NR_setreuid 70 /* ok */ -#define __NR_setregid 71 /* ok */ -#define __NR_sigsuspend 72 /* obsolete -> rt_sigsuspend */ -#define __NR_sigpending 73 /* obsolete -> sys_rt_sigpending */ -#define __NR_sethostname 74 /* ok */ -#define __NR_setrlimit 75 /* ok */ -#define __NR_getrlimit 76 /* ok Back compatible 2G limited rlimit */ -#define __NR_getrusage 77 /* ok */ -#define __NR_gettimeofday 78 /* ok */ -#define __NR_settimeofday 79 /* ok */ -#define __NR_getgroups 80 /* ok */ -#define __NR_setgroups 81 /* ok */ -#define __NR_select 82 /* obsolete -> sys_pselect6 */ -#define __NR_symlink 83 /* symlinkat */ -#define __NR_oldlstat 84 /* remove */ -#define __NR_readlink 85 /* obsolete -> sys_readlinkat */ -#define __NR_uselib 86 /* remove */ -#define __NR_swapon 87 /* ok */ -#define __NR_reboot 88 /* ok */ -#define __NR_readdir 89 /* remove ? */ -#define __NR_mmap 90 /* obsolete -> sys_mmap2 */ -#define __NR_munmap 91 /* ok - mmu and nommu */ -#define __NR_truncate 92 /* ok or truncate64 */ -#define __NR_ftruncate 93 /* ok or ftruncate64 */ -#define __NR_fchmod 94 /* ok */ -#define __NR_fchown 95 /* ok */ -#define __NR_getpriority 96 /* ok */ -#define __NR_setpriority 97 /* ok */ -#define __NR_profil 98 /* remove */ -#define __NR_statfs 99 /* ok or statfs64 */ -#define __NR_fstatfs 100 /* ok or fstatfs64 */ -#define __NR_ioperm 101 /* remove */ -#define __NR_socketcall 102 /* remove */ -#define __NR_syslog 103 /* ok */ -#define __NR_setitimer 104 /* ok */ -#define __NR_getitimer 105 /* ok */ -#define __NR_stat 106 /* remove */ -#define __NR_lstat 107 /* remove */ -#define __NR_fstat 108 /* remove */ -#define __NR_olduname 109 /* remove */ -#define __NR_iopl 110 /* remove */ -#define __NR_vhangup 111 /* ok */ -#define __NR_idle 112 /* remove */ -#define __NR_vm86old 113 /* remove */ -#define __NR_wait4 114 /* obsolete -> waitid */ -#define __NR_swapoff 115 /* ok */ -#define __NR_sysinfo 116 /* ok */ -#define __NR_ipc 117 /* remove - direct call */ -#define __NR_fsync 118 /* ok */ -#define __NR_sigreturn 119 /* obsolete -> sys_rt_sigreturn */ -#define __NR_clone 120 /* ok */ -#define __NR_setdomainname 121 /* ok */ -#define __NR_uname 122 /* remove */ -#define __NR_modify_ldt 123 /* remove */ -#define __NR_adjtimex 124 /* ok */ -#define __NR_mprotect 125 /* remove */ -#define __NR_sigprocmask 126 /* obsolete -> sys_rt_sigprocmask */ -#define __NR_create_module 127 /* remove */ -#define __NR_init_module 128 /* ok */ -#define __NR_delete_module 129 /* ok */ -#define __NR_get_kernel_syms 130 /* remove */ -#define __NR_quotactl 131 /* ok */ -#define __NR_getpgid 132 /* ok */ -#define __NR_fchdir 133 /* ok */ -#define __NR_bdflush 134 /* remove */ -#define __NR_sysfs 135 /* needed for busybox */ -#define __NR_personality 136 /* ok */ -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 /* ok */ -#define __NR_setfsgid 139 /* ok */ -#define __NR__llseek 140 /* remove only lseek */ -#define __NR_getdents 141 /* ok or getdents64 */ -#define __NR__newselect 142 /* remove */ -#define __NR_flock 143 /* ok */ -#define __NR_msync 144 /* remove */ -#define __NR_readv 145 /* ok */ -#define __NR_writev 146 /* ok */ -#define __NR_getsid 147 /* ok */ 
-#define __NR_fdatasync 148 /* ok */ -#define __NR__sysctl 149 /* remove */ -#define __NR_mlock 150 /* ok - nommu or mmu */ -#define __NR_munlock 151 /* ok - nommu or mmu */ -#define __NR_mlockall 152 /* ok - nommu or mmu */ -#define __NR_munlockall 153 /* ok - nommu or mmu */ -#define __NR_sched_setparam 154 /* ok */ -#define __NR_sched_getparam 155 /* ok */ -#define __NR_sched_setscheduler 156 /* ok */ -#define __NR_sched_getscheduler 157 /* ok */ -#define __NR_sched_yield 158 /* ok */ -#define __NR_sched_get_priority_max 159 /* ok */ -#define __NR_sched_get_priority_min 160 /* ok */ -#define __NR_sched_rr_get_interval 161 /* ok */ -#define __NR_nanosleep 162 /* ok */ -#define __NR_mremap 163 /* ok - nommu or mmu */ -#define __NR_setresuid 164 /* ok */ -#define __NR_getresuid 165 /* ok */ -#define __NR_vm86 166 /* remove */ -#define __NR_query_module 167 /* ok */ -#define __NR_poll 168 /* obsolete -> sys_ppoll */ -#define __NR_nfsservctl 169 /* ok */ -#define __NR_setresgid 170 /* ok */ -#define __NR_getresgid 171 /* ok */ -#define __NR_prctl 172 /* ok */ -#define __NR_rt_sigreturn 173 /* ok */ -#define __NR_rt_sigaction 174 /* ok */ -#define __NR_rt_sigprocmask 175 /* ok */ -#define __NR_rt_sigpending 176 /* ok */ -#define __NR_rt_sigtimedwait 177 /* ok */ -#define __NR_rt_sigqueueinfo 178 /* ok */ -#define __NR_rt_sigsuspend 179 /* ok */ -#define __NR_pread64 180 /* ok */ -#define __NR_pwrite64 181 /* ok */ -#define __NR_chown 182 /* obsolete -> fchownat */ -#define __NR_getcwd 183 /* ok */ -#define __NR_capget 184 /* ok */ -#define __NR_capset 185 /* ok */ -#define __NR_sigaltstack 186 /* remove */ -#define __NR_sendfile 187 /* ok -> exist 64bit version*/ -#define __NR_getpmsg 188 /* remove */ -/* remove - some people actually want streams */ -#define __NR_putpmsg 189 -/* for noMMU - group with clone -> maybe remove */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 /* remove - SuS compliant getrlimit */ -#define __NR_mmap2 192 /* ok */ -#define __NR_truncate64 193 /* ok */ -#define __NR_ftruncate64 194 /* ok */ -#define __NR_stat64 195 /* remove _ARCH_WANT_STAT64 */ -#define __NR_lstat64 196 /* remove _ARCH_WANT_STAT64 */ -#define __NR_fstat64 197 /* remove _ARCH_WANT_STAT64 */ -#define __NR_lchown32 198 /* ok - without 32 */ -#define __NR_getuid32 199 /* ok - without 32 */ -#define __NR_getgid32 200 /* ok - without 32 */ -#define __NR_geteuid32 201 /* ok - without 32 */ -#define __NR_getegid32 202 /* ok - without 32 */ -#define __NR_setreuid32 203 /* ok - without 32 */ -#define __NR_setregid32 204 /* ok - without 32 */ -#define __NR_getgroups32 205 /* ok - without 32 */ -#define __NR_setgroups32 206 /* ok - without 32 */ -#define __NR_fchown32 207 /* ok - without 32 */ -#define __NR_setresuid32 208 /* ok - without 32 */ -#define __NR_getresuid32 209 /* ok - without 32 */ -#define __NR_setresgid32 210 /* ok - without 32 */ -#define __NR_getresgid32 211 /* ok - without 32 */ -#define __NR_chown32 212 /* ok - without 32 -obsolete -> fchownat */ -#define __NR_setuid32 213 /* ok - without 32 */ -#define __NR_setgid32 214 /* ok - without 32 */ -#define __NR_setfsuid32 215 /* ok - without 32 */ -#define __NR_setfsgid32 216 /* ok - without 32 */ -#define __NR_pivot_root 217 /* ok */ -#define __NR_mincore 218 /* ok */ -#define __NR_madvise 219 /* ok */ -#define __NR_getdents64 220 /* ok */ -#define __NR_fcntl64 221 /* ok */ -/* 223 is unused */ -#define __NR_gettid 224 /* ok */ -#define __NR_readahead 225 /* ok */ -#define __NR_setxattr 226 /* ok */ -#define __NR_lsetxattr 227 /* ok */ 
-#define __NR_fsetxattr 228 /* ok */ -#define __NR_getxattr 229 /* ok */ -#define __NR_lgetxattr 230 /* ok */ -#define __NR_fgetxattr 231 /* ok */ -#define __NR_listxattr 232 /* ok */ -#define __NR_llistxattr 233 /* ok */ -#define __NR_flistxattr 234 /* ok */ -#define __NR_removexattr 235 /* ok */ -#define __NR_lremovexattr 236 /* ok */ -#define __NR_fremovexattr 237 /* ok */ -#define __NR_tkill 238 /* ok */ -#define __NR_sendfile64 239 /* ok */ -#define __NR_futex 240 /* ok */ -#define __NR_sched_setaffinity 241 /* ok */ -#define __NR_sched_getaffinity 242 /* ok */ -#define __NR_set_thread_area 243 /* remove */ -#define __NR_get_thread_area 244 /* remove */ -#define __NR_io_setup 245 /* ok */ -#define __NR_io_destroy 246 /* ok */ -#define __NR_io_getevents 247 /* ok */ -#define __NR_io_submit 248 /* ok */ -#define __NR_io_cancel 249 /* ok */ -#define __NR_fadvise64 250 /* remove -> sys_fadvise64_64 */ -/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ -#define __NR_exit_group 252 /* ok */ -#define __NR_lookup_dcookie 253 /* ok */ -#define __NR_epoll_create 254 /* ok */ -#define __NR_epoll_ctl 255 /* ok */ -#define __NR_epoll_wait 256 /* obsolete -> sys_epoll_pwait */ -#define __NR_remap_file_pages 257 /* only for mmu */ -#define __NR_set_tid_address 258 /* ok */ -#define __NR_timer_create 259 /* ok */ -#define __NR_timer_settime (__NR_timer_create+1) /* 260 */ /* ok */ -#define __NR_timer_gettime (__NR_timer_create+2) /* 261 */ /* ok */ -#define __NR_timer_getoverrun (__NR_timer_create+3) /* 262 */ /* ok */ -#define __NR_timer_delete (__NR_timer_create+4) /* 263 */ /* ok */ -#define __NR_clock_settime (__NR_timer_create+5) /* 264 */ /* ok */ -#define __NR_clock_gettime (__NR_timer_create+6) /* 265 */ /* ok */ -#define __NR_clock_getres (__NR_timer_create+7) /* 266 */ /* ok */ -#define __NR_clock_nanosleep (__NR_timer_create+8) /* 267 */ /* ok */ -#define __NR_statfs64 268 /* ok */ -#define __NR_fstatfs64 269 /* ok */ -#define __NR_tgkill 270 /* ok */ -#define __NR_utimes 271 /* obsolete -> sys_futimesat */ -#define __NR_fadvise64_64 272 /* ok */ -#define __NR_vserver 273 /* ok */ -#define __NR_mbind 274 /* only for mmu */ -#define __NR_get_mempolicy 275 /* only for mmu */ -#define __NR_set_mempolicy 276 /* only for mmu */ -#define __NR_mq_open 277 /* ok */ -#define __NR_mq_unlink (__NR_mq_open+1) /* 278 */ /* ok */ -#define __NR_mq_timedsend (__NR_mq_open+2) /* 279 */ /* ok */ -#define __NR_mq_timedreceive (__NR_mq_open+3) /* 280 */ /* ok */ -#define __NR_mq_notify (__NR_mq_open+4) /* 281 */ /* ok */ -#define __NR_mq_getsetattr (__NR_mq_open+5) /* 282 */ /* ok */ -#define __NR_kexec_load 283 /* ok */ -#define __NR_waitid 284 /* ok */ -/* #define __NR_sys_setaltroot 285 */ -#define __NR_add_key 286 /* ok */ -#define __NR_request_key 287 /* ok */ -#define __NR_keyctl 288 /* ok */ -#define __NR_ioprio_set 289 /* ok */ -#define __NR_ioprio_get 290 /* ok */ -#define __NR_inotify_init 291 /* ok */ -#define __NR_inotify_add_watch 292 /* ok */ -#define __NR_inotify_rm_watch 293 /* ok */ -#define __NR_migrate_pages 294 /* mmu */ -#define __NR_openat 295 /* ok */ -#define __NR_mkdirat 296 /* ok */ -#define __NR_mknodat 297 /* ok */ -#define __NR_fchownat 298 /* ok */ -#define __NR_futimesat 299 /* obsolete -> sys_utimesat */ -#define __NR_fstatat64 300 /* stat64 */ -#define __NR_unlinkat 301 /* ok */ -#define __NR_renameat 302 /* ok */ -#define __NR_linkat 303 /* ok */ -#define __NR_symlinkat 304 /* ok */ -#define __NR_readlinkat 305 /* ok */ -#define __NR_fchmodat 306 /* ok */ 
-#define __NR_faccessat 307 /* ok */ -#define __NR_pselect6 308 /* ok */ -#define __NR_ppoll 309 /* ok */ -#define __NR_unshare 310 /* ok */ -#define __NR_set_robust_list 311 /* ok */ -#define __NR_get_robust_list 312 /* ok */ -#define __NR_splice 313 /* ok */ -#define __NR_sync_file_range 314 /* ok */ -#define __NR_tee 315 /* ok */ -#define __NR_vmsplice 316 /* ok */ -#define __NR_move_pages 317 /* mmu */ -#define __NR_getcpu 318 /* ok */ -#define __NR_epoll_pwait 319 /* ok */ -#define __NR_utimensat 320 /* ok */ -#define __NR_signalfd 321 /* ok */ -#define __NR_timerfd_create 322 /* ok */ -#define __NR_eventfd 323 /* ok */ -#define __NR_fallocate 324 /* ok */ -#define __NR_semtimedop 325 /* ok - semaphore group */ -#define __NR_timerfd_settime 326 /* ok */ -#define __NR_timerfd_gettime 327 /* ok */ -/* sysv ipc syscalls */ -#define __NR_semctl 328 /* ok */ -#define __NR_semget 329 /* ok */ -#define __NR_semop 330 /* ok */ -#define __NR_msgctl 331 /* ok */ -#define __NR_msgget 332 /* ok */ -#define __NR_msgrcv 333 /* ok */ -#define __NR_msgsnd 334 /* ok */ -#define __NR_shmat 335 /* ok */ -#define __NR_shmctl 336 /* ok */ -#define __NR_shmdt 337 /* ok */ -#define __NR_shmget 338 /* ok */ - - -#define __NR_signalfd4 339 /* new */ -#define __NR_eventfd2 340 /* new */ -#define __NR_epoll_create1 341 /* new */ -#define __NR_dup3 342 /* new */ -#define __NR_pipe2 343 /* new */ -#define __NR_inotify_init1 344 /* new */ -#define __NR_socket 345 /* new */ -#define __NR_socketpair 346 /* new */ -#define __NR_bind 347 /* new */ -#define __NR_listen 348 /* new */ -#define __NR_accept 349 /* new */ -#define __NR_connect 350 /* new */ -#define __NR_getsockname 351 /* new */ -#define __NR_getpeername 352 /* new */ -#define __NR_sendto 353 /* new */ -#define __NR_send 354 /* new */ -#define __NR_recvfrom 355 /* new */ -#define __NR_recv 356 /* new */ -#define __NR_setsockopt 357 /* new */ -#define __NR_getsockopt 358 /* new */ -#define __NR_shutdown 359 /* new */ -#define __NR_sendmsg 360 /* new */ -#define __NR_recvmsg 361 /* new */ -#define __NR_accept4 362 /* new */ -#define __NR_preadv 363 /* new */ -#define __NR_pwritev 364 /* new */ -#define __NR_rt_tgsigqueueinfo 365 /* new */ -#define __NR_perf_event_open 366 /* new */ -#define __NR_recvmmsg 367 /* new */ -#define __NR_fanotify_init 368 -#define __NR_fanotify_mark 369 -#define __NR_prlimit64 370 -#define __NR_name_to_handle_at 371 -#define __NR_open_by_handle_at 372 -#define __NR_clock_adjtime 373 -#define __NR_syncfs 374 -#define __NR_setns 375 -#define __NR_sendmmsg 376 -#define __NR_process_vm_readv 377 -#define __NR_process_vm_writev 378 -#define __NR_kcmp 379 -#define __NR_finit_module 380 -#define __NR_sched_setattr 381 -#define __NR_sched_getattr 382 -#define __NR_renameat2 383 -#define __NR_seccomp 384 -#define __NR_getrandom 385 -#define __NR_memfd_create 386 -#define __NR_bpf 387 -#define __NR_execveat 388 -#define __NR_userfaultfd 389 -#define __NR_membarrier 390 -#define __NR_mlock2 391 -#define __NR_copy_file_range 392 -#define __NR_preadv2 393 -#define __NR_pwritev2 394 -#define __NR_pkey_mprotect 395 -#define __NR_pkey_alloc 396 -#define __NR_pkey_free 397 -#define __NR_statx 398 -#define __NR_io_pgetevents 399 -#define __NR_rseq 400 +#include <asm/unistd_32.h> #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 6ab650593792..ce006646f741 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ 
-1,404 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#define __SYSCALL(nr, entry, nargs) .long entry ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call, - * used for restarting */ - .long sys_exit - .long sys_fork - .long sys_read - .long sys_write - .long sys_open /* 5 */ - .long sys_close - .long sys_waitpid - .long sys_creat - .long sys_link - .long sys_unlink /* 10 */ - .long sys_execve - .long sys_chdir - .long sys_time - .long sys_mknod - .long sys_chmod /* 15 */ - .long sys_lchown - .long sys_ni_syscall /* old break syscall holder */ - .long sys_ni_syscall /* old stat */ - .long sys_lseek - .long sys_getpid /* 20 */ - .long sys_mount - .long sys_oldumount - .long sys_setuid - .long sys_getuid - .long sys_stime /* 25 */ - .long sys_ptrace - .long sys_alarm - .long sys_ni_syscall /* oldfstat */ - .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ - .long sys_access - .long sys_nice - .long sys_ni_syscall /* 35 - old ftime syscall holder */ - .long sys_sync - .long sys_kill - .long sys_rename - .long sys_mkdir - .long sys_rmdir /* 40 */ - .long sys_dup - .long sys_pipe - .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ - .long sys_setgid - .long sys_getgid - .long sys_signal - .long sys_geteuid - .long sys_getegid /* 50 */ - .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ - .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ - .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ - .long sys_ni_syscall /* olduname */ - .long sys_umask /* 60 */ - .long sys_chroot - .long sys_ustat - .long sys_dup2 - .long sys_getppid - .long sys_getpgrp /* 65 */ - .long sys_setsid - .long sys_ni_syscall /* sys_sigaction */ - .long sys_sgetmask - .long sys_ssetmask - .long sys_setreuid /* 70 */ - .long sys_setregid - .long sys_ni_syscall /* sys_sigsuspend_wrapper */ - .long sys_sigpending - .long sys_sethostname - .long sys_setrlimit /* 75 */ - .long sys_ni_syscall /* old_getrlimit */ - .long sys_getrusage - .long sys_gettimeofday - .long sys_settimeofday - .long sys_getgroups /* 80 */ - .long sys_setgroups - .long sys_ni_syscall /* old_select */ - .long sys_symlink - .long sys_ni_syscall /* oldlstat */ - .long sys_readlink /* 85 */ - .long sys_uselib - .long sys_swapon - .long sys_reboot - .long sys_ni_syscall /* old_readdir */ - .long sys_mmap /* 90 */ /* old_mmap */ - .long sys_munmap - .long sys_truncate - .long sys_ftruncate - .long sys_fchmod - .long sys_fchown /* 95 */ - .long sys_getpriority - .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ - .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm */ - .long sys_socketcall - .long sys_syslog /* operation with system console */ - .long sys_setitimer - .long sys_getitimer /* 105 */ - .long sys_newstat - .long sys_newlstat - .long sys_newfstat - .long sys_ni_syscall /* uname */ - .long sys_ni_syscall /* 110 */ /* iopl */ - .long sys_vhangup - .long sys_ni_syscall /* old "idle" system call */ - .long sys_ni_syscall /* old sys_vm86old */ - .long sys_wait4 - .long sys_swapoff /* 115 */ - .long sys_sysinfo - .long sys_ni_syscall /* old sys_ipc */ - .long sys_fsync - .long sys_ni_syscall /* sys_sigreturn_wrapper */ - .long sys_clone /* 120 */ - .long sys_setdomainname - .long sys_newuname - 
.long sys_ni_syscall /* modify_ldt */ - .long sys_adjtimex - .long sys_mprotect /* 125: sys_mprotect */ - .long sys_sigprocmask - .long sys_ni_syscall /* old "create_module" */ - .long sys_init_module - .long sys_delete_module - .long sys_ni_syscall /* 130: old "get_kernel_syms" */ - .long sys_quotactl - .long sys_getpgid - .long sys_fchdir - .long sys_bdflush - .long sys_sysfs /* 135 */ - .long sys_personality - .long sys_ni_syscall /* reserved for afs_syscall */ - .long sys_setfsuid - .long sys_setfsgid - .long sys_llseek /* 140 */ - .long sys_getdents - .long sys_select - .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ - .long sys_writev - .long sys_getsid - .long sys_fdatasync - .long sys_sysctl - .long sys_mlock /* 150: sys_mlock */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall - .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ - .long sys_sched_setscheduler - .long sys_sched_getscheduler - .long sys_sched_yield - .long sys_sched_get_priority_max - .long sys_sched_get_priority_min /* 160 */ - .long sys_sched_rr_get_interval - .long sys_nanosleep - .long sys_mremap - .long sys_setresuid - .long sys_getresuid /* 165 */ - .long sys_ni_syscall /* sys_vm86 */ - .long sys_ni_syscall /* Old sys_query_module */ - .long sys_poll - .long sys_ni_syscall /* old nfsservctl */ - .long sys_setresgid /* 170 */ - .long sys_getresgid - .long sys_prctl - .long sys_rt_sigreturn_wrapper - .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ - .long sys_rt_sigpending - .long sys_rt_sigtimedwait - .long sys_rt_sigqueueinfo - .long sys_rt_sigsuspend - .long sys_pread64 /* 180 */ - .long sys_pwrite64 - .long sys_chown - .long sys_getcwd - .long sys_capget - .long sys_capset /* 185 */ - .long sys_ni_syscall /* sigaltstack */ - .long sys_sendfile - .long sys_ni_syscall /* reserved for streams1 */ - .long sys_ni_syscall /* reserved for streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit - .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ - .long sys_lstat64 - .long sys_fstat64 - .long sys_lchown - .long sys_getuid - .long sys_getgid /* 200 */ - .long sys_geteuid - .long sys_getegid - .long sys_setreuid - .long sys_setregid - .long sys_getgroups /* 205 */ - .long sys_setgroups - .long sys_fchown - .long sys_setresuid - .long sys_getresuid - .long sys_setresgid /* 210 */ - .long sys_getresgid - .long sys_chown - .long sys_setuid - .long sys_setgid - .long sys_setfsuid /* 215 */ - .long sys_setfsgid - .long sys_pivot_root - .long sys_mincore - .long sys_madvise - .long sys_getdents64 /* 220 */ - .long sys_fcntl64 - .long sys_ni_syscall /* reserved for TUX */ - .long sys_ni_syscall - .long sys_gettid - .long sys_readahead /* 225 */ - .long sys_setxattr - .long sys_lsetxattr - .long sys_fsetxattr - .long sys_getxattr - .long sys_lgetxattr /* 230 */ - .long sys_fgetxattr - .long sys_listxattr - .long sys_llistxattr - .long sys_flistxattr - .long sys_removexattr /* 235 */ - .long sys_lremovexattr - .long sys_fremovexattr - .long sys_tkill - .long sys_sendfile64 - .long sys_futex /* 240 */ - .long sys_sched_setaffinity - .long sys_sched_getaffinity - .long sys_ni_syscall /* set_thread_area */ - .long sys_ni_syscall /* get_thread_area */ - .long sys_io_setup /* 245 */ - .long sys_io_destroy - .long sys_io_getevents - .long sys_io_submit - .long sys_io_cancel - .long sys_fadvise64 /* 250 */ - .long sys_ni_syscall - .long sys_exit_group - .long sys_lookup_dcookie - .long sys_epoll_create - .long sys_epoll_ctl /* 255 */ - 
.long sys_epoll_wait - .long sys_remap_file_pages - .long sys_set_tid_address - .long sys_timer_create - .long sys_timer_settime /* 260 */ - .long sys_timer_gettime - .long sys_timer_getoverrun - .long sys_timer_delete - .long sys_clock_settime - .long sys_clock_gettime /* 265 */ - .long sys_clock_getres - .long sys_clock_nanosleep - .long sys_statfs64 - .long sys_fstatfs64 - .long sys_tgkill /* 270 */ - .long sys_utimes - .long sys_fadvise64_64 - .long sys_ni_syscall /* sys_vserver */ - .long sys_mbind - .long sys_get_mempolicy - .long sys_set_mempolicy - .long sys_mq_open - .long sys_mq_unlink - .long sys_mq_timedsend - .long sys_mq_timedreceive /* 280 */ - .long sys_mq_notify - .long sys_mq_getsetattr - .long sys_kexec_load - .long sys_waitid - .long sys_ni_syscall /* 285 */ /* available */ - .long sys_add_key - .long sys_request_key - .long sys_keyctl - .long sys_ioprio_set - .long sys_ioprio_get /* 290 */ - .long sys_inotify_init - .long sys_inotify_add_watch - .long sys_inotify_rm_watch - .long sys_ni_syscall /* sys_migrate_pages */ - .long sys_openat /* 295 */ - .long sys_mkdirat - .long sys_mknodat - .long sys_fchownat - .long sys_futimesat - .long sys_fstatat64 /* 300 */ - .long sys_unlinkat - .long sys_renameat - .long sys_linkat - .long sys_symlinkat - .long sys_readlinkat /* 305 */ - .long sys_fchmodat - .long sys_faccessat - .long sys_pselect6 - .long sys_ppoll - .long sys_unshare /* 310 */ - .long sys_set_robust_list - .long sys_get_robust_list - .long sys_splice - .long sys_sync_file_range - .long sys_tee /* 315 */ - .long sys_vmsplice - .long sys_move_pages - .long sys_getcpu - .long sys_epoll_pwait - .long sys_utimensat /* 320 */ - .long sys_signalfd - .long sys_timerfd_create - .long sys_eventfd - .long sys_fallocate - .long sys_semtimedop /* 325 */ - .long sys_timerfd_settime - .long sys_timerfd_gettime - .long sys_semctl - .long sys_semget - .long sys_semop /* 330 */ - .long sys_msgctl - .long sys_msgget - .long sys_msgrcv - .long sys_msgsnd - .long sys_shmat /* 335 */ - .long sys_shmctl - .long sys_shmdt - .long sys_shmget - .long sys_signalfd4 /* new syscall */ - .long sys_eventfd2 /* 340 */ - .long sys_epoll_create1 - .long sys_dup3 - .long sys_pipe2 - .long sys_inotify_init1 - .long sys_socket /* 345 */ - .long sys_socketpair - .long sys_bind - .long sys_listen - .long sys_accept - .long sys_connect /* 350 */ - .long sys_getsockname - .long sys_getpeername - .long sys_sendto - .long sys_send - .long sys_recvfrom /* 355 */ - .long sys_recv - .long sys_setsockopt - .long sys_getsockopt - .long sys_shutdown - .long sys_sendmsg /* 360 */ - .long sys_recvmsg - .long sys_accept4 - .long sys_preadv - .long sys_pwritev - .long sys_rt_tgsigqueueinfo /* 365 */ - .long sys_perf_event_open - .long sys_recvmmsg - .long sys_fanotify_init - .long sys_fanotify_mark - .long sys_prlimit64 /* 370 */ - .long sys_name_to_handle_at - .long sys_open_by_handle_at - .long sys_clock_adjtime - .long sys_syncfs - .long sys_setns /* 375 */ - .long sys_sendmmsg - .long sys_process_vm_readv - .long sys_process_vm_writev - .long sys_kcmp - .long sys_finit_module /* 380 */ - .long sys_sched_setattr - .long sys_sched_getattr - .long sys_renameat2 - .long sys_seccomp - .long sys_getrandom /* 385 */ - .long sys_memfd_create - .long sys_bpf - .long sys_execveat - .long sys_userfaultfd - .long sys_membarrier /* 390 */ - .long sys_mlock2 - .long sys_copy_file_range - .long sys_preadv2 - .long sys_pwritev2 - .long sys_pkey_mprotect /* 395 */ - .long sys_pkey_alloc - .long sys_pkey_free - .long sys_statx - 
.long sys_io_pgetevents - .long sys_rseq +#include <asm/syscall_table.h> +#undef __SYSCALL diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile new file mode 100644 index 000000000000..659faefdcb1d --- /dev/null +++ b/arch/microblaze/kernel/syscalls/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h +kapisyshdr-y += syscall_table.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..a24d09e937dd --- /dev/null +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -0,0 +1,410 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for microblaze +# +# The format is: +# <number> <abi> <name> <entry point> +# +# The <abi> is always "common" for this file +# +0 common restart_syscall sys_restart_syscall +1 common exit sys_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 common execve sys_execve +12 common chdir sys_chdir +13 common time sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown +17 common break sys_ni_syscall +18 common oldstat sys_ni_syscall +19 common lseek sys_lseek +20 common getpid sys_getpid +21 common mount sys_mount +22 common umount sys_oldumount +23 common setuid sys_setuid +24 common getuid sys_getuid +25 common stime sys_stime +26 common ptrace sys_ptrace +27 common alarm sys_alarm +28 common oldfstat sys_ni_syscall +29 common pause sys_pause +30 common utime sys_utime +31 common stty sys_ni_syscall +32 common gtty sys_ni_syscall +33 common access sys_access +34 common nice sys_nice +35 common ftime sys_ni_syscall +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_pipe +43 common times sys_times +44 common prof sys_ni_syscall +45 common brk sys_brk +46 common setgid sys_setgid +47 common getgid sys_getgid +48 common signal sys_signal +49 common geteuid sys_geteuid +50 common getegid sys_getegid +51 common acct sys_acct +52 common umount2 sys_umount +53 common lock sys_ni_syscall +54 common ioctl sys_ioctl +55 common fcntl sys_fcntl +56 common mpx sys_ni_syscall +57 common setpgid 
sys_setpgid +58 common ulimit sys_ni_syscall +59 common oldolduname sys_ni_syscall +60 common umask sys_umask +61 common chroot sys_chroot +62 common ustat sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 common sigaction sys_ni_syscall +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid +71 common setregid sys_setregid +72 common sigsuspend sys_ni_syscall +73 common sigpending sys_sigpending +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit +76 common getrlimit sys_ni_syscall +77 common getrusage sys_getrusage +78 common gettimeofday sys_gettimeofday +79 common settimeofday sys_settimeofday +80 common getgroups sys_getgroups +81 common setgroups sys_setgroups +82 common select sys_ni_syscall +83 common symlink sys_symlink +84 common oldlstat sys_ni_syscall +85 common readlink sys_readlink +86 common uselib sys_uselib +87 common swapon sys_swapon +88 common reboot sys_reboot +89 common readdir sys_ni_syscall +90 common mmap sys_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate +93 common ftruncate sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +98 common profil sys_ni_syscall +99 common statfs sys_statfs +100 common fstatfs sys_fstatfs +101 common ioperm sys_ni_syscall +102 common socketcall sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer +105 common getitimer sys_getitimer +106 common stat sys_newstat +107 common lstat sys_newlstat +108 common fstat sys_newfstat +109 common olduname sys_ni_syscall +110 common iopl sys_ni_syscall +111 common vhangup sys_vhangup +112 common idle sys_ni_syscall +113 common vm86old sys_ni_syscall +114 common wait4 sys_wait4 +115 common swapoff sys_swapoff +116 common sysinfo sys_sysinfo +117 common ipc sys_ni_syscall +118 common fsync sys_fsync +119 common sigreturn sys_ni_syscall +120 common clone sys_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common modify_ldt sys_ni_syscall +124 common adjtimex sys_adjtimex +125 common mprotect sys_mprotect +126 common sigprocmask sys_sigprocmask +127 common create_module sys_ni_syscall +128 common init_module sys_init_module +129 common delete_module sys_delete_module +130 common get_kernel_syms sys_ni_syscall +131 common quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 common personality sys_personality +137 common afs_syscall sys_ni_syscall +138 common setfsuid sys_setfsuid +139 common setfsgid sys_setfsgid +140 common _llseek sys_llseek +141 common getdents sys_getdents +142 common _newselect sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv +146 common writev sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 common _sysctl sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min 
sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid +165 common getresuid sys_getresuid +166 common vm86 sys_ni_syscall +167 common query_module sys_ni_syscall +168 common poll sys_poll +169 common nfsservctl sys_ni_syscall +170 common setresgid sys_setresgid +171 common getresgid sys_getresgid +172 common prctl sys_prctl +173 common rt_sigreturn sys_rt_sigreturn_wrapper +174 common rt_sigaction sys_rt_sigaction +175 common rt_sigprocmask sys_rt_sigprocmask +176 common rt_sigpending sys_rt_sigpending +177 common rt_sigtimedwait sys_rt_sigtimedwait +178 common rt_sigqueueinfo sys_rt_sigqueueinfo +179 common rt_sigsuspend sys_rt_sigsuspend +180 common pread64 sys_pread64 +181 common pwrite64 sys_pwrite64 +182 common chown sys_chown +183 common getcwd sys_getcwd +184 common capget sys_capget +185 common capset sys_capset +186 common sigaltstack sys_ni_syscall +187 common sendfile sys_sendfile +188 common getpmsg sys_ni_syscall +189 common putpmsg sys_ni_syscall +190 common vfork sys_vfork +191 common ugetrlimit sys_getrlimit +192 common mmap2 sys_mmap2 +193 common truncate64 sys_truncate64 +194 common ftruncate64 sys_ftruncate64 +195 common stat64 sys_stat64 +196 common lstat64 sys_lstat64 +197 common fstat64 sys_fstat64 +198 common lchown32 sys_lchown +199 common getuid32 sys_getuid +200 common getgid32 sys_getgid +201 common geteuid32 sys_geteuid +202 common getegid32 sys_getegid +203 common setreuid32 sys_setreuid +204 common setregid32 sys_setregid +205 common getgroups32 sys_getgroups +206 common setgroups32 sys_setgroups +207 common fchown32 sys_fchown +208 common setresuid32 sys_setresuid +209 common getresuid32 sys_getresuid +210 common setresgid32 sys_setresgid +211 common getresgid32 sys_getresgid +212 common chown32 sys_chown +213 common setuid32 sys_setuid +214 common setgid32 sys_setgid +215 common setfsuid32 sys_setfsuid +216 common setfsgid32 sys_setfsgid +217 common pivot_root sys_pivot_root +218 common mincore sys_mincore +219 common madvise sys_madvise +220 common getdents64 sys_getdents64 +221 common fcntl64 sys_fcntl64 +# 222 is reserved for TUX +# 223 is unused +224 common gettid sys_gettid +225 common readahead sys_readahead +226 common setxattr sys_setxattr +227 common lsetxattr sys_lsetxattr +228 common fsetxattr sys_fsetxattr +229 common getxattr sys_getxattr +230 common lgetxattr sys_lgetxattr +231 common fgetxattr sys_fgetxattr +232 common listxattr sys_listxattr +233 common llistxattr sys_llistxattr +234 common flistxattr sys_flistxattr +235 common removexattr sys_removexattr +236 common lremovexattr sys_lremovexattr +237 common fremovexattr sys_fremovexattr +238 common tkill sys_tkill +239 common sendfile64 sys_sendfile64 +240 common futex sys_futex +241 common sched_setaffinity sys_sched_setaffinity +242 common sched_getaffinity sys_sched_getaffinity +243 common set_thread_area sys_ni_syscall +244 common get_thread_area sys_ni_syscall +245 common io_setup sys_io_setup +246 common io_destroy sys_io_destroy +247 common io_getevents sys_io_getevents +248 common io_submit sys_io_submit +249 common io_cancel sys_io_cancel +250 common fadvise64 sys_fadvise64 +# 251 is available for reuse (was briefly sys_set_zone_reclaim) +252 common exit_group sys_exit_group +253 common lookup_dcookie sys_lookup_dcookie +254 common epoll_create sys_epoll_create +255 common epoll_ctl sys_epoll_ctl +256 common epoll_wait sys_epoll_wait +257 common 
remap_file_pages sys_remap_file_pages +258 common set_tid_address sys_set_tid_address +259 common timer_create sys_timer_create +260 common timer_settime sys_timer_settime +261 common timer_gettime sys_timer_gettime +262 common timer_getoverrun sys_timer_getoverrun +263 common timer_delete sys_timer_delete +264 common clock_settime sys_clock_settime +265 common clock_gettime sys_clock_gettime +266 common clock_getres sys_clock_getres +267 common clock_nanosleep sys_clock_nanosleep +268 common statfs64 sys_statfs64 +269 common fstatfs64 sys_fstatfs64 +270 common tgkill sys_tgkill +271 common utimes sys_utimes +272 common fadvise64_64 sys_fadvise64_64 +273 common vserver sys_ni_syscall +274 common mbind sys_mbind +275 common get_mempolicy sys_get_mempolicy +276 common set_mempolicy sys_set_mempolicy +277 common mq_open sys_mq_open +278 common mq_unlink sys_mq_unlink +279 common mq_timedsend sys_mq_timedsend +280 common mq_timedreceive sys_mq_timedreceive +281 common mq_notify sys_mq_notify +282 common mq_getsetattr sys_mq_getsetattr +283 common kexec_load sys_kexec_load +284 common waitid sys_waitid +# 285 was setaltroot +286 common add_key sys_add_key +287 common request_key sys_request_key +288 common keyctl sys_keyctl +289 common ioprio_set sys_ioprio_set +290 common ioprio_get sys_ioprio_get +291 common inotify_init sys_inotify_init +292 common inotify_add_watch sys_inotify_add_watch +293 common inotify_rm_watch sys_inotify_rm_watch +294 common migrate_pages sys_ni_syscall +295 common openat sys_openat +296 common mkdirat sys_mkdirat +297 common mknodat sys_mknodat +298 common fchownat sys_fchownat +299 common futimesat sys_futimesat +300 common fstatat64 sys_fstatat64 +301 common unlinkat sys_unlinkat +302 common renameat sys_renameat +303 common linkat sys_linkat +304 common symlinkat sys_symlinkat +305 common readlinkat sys_readlinkat +306 common fchmodat sys_fchmodat +307 common faccessat sys_faccessat +308 common pselect6 sys_pselect6 +309 common ppoll sys_ppoll +310 common unshare sys_unshare +311 common set_robust_list sys_set_robust_list +312 common get_robust_list sys_get_robust_list +313 common splice sys_splice +314 common sync_file_range sys_sync_file_range +315 common tee sys_tee +316 common vmsplice sys_vmsplice +317 common move_pages sys_move_pages +318 common getcpu sys_getcpu +319 common epoll_pwait sys_epoll_pwait +320 common utimensat sys_utimensat +321 common signalfd sys_signalfd +322 common timerfd_create sys_timerfd_create +323 common eventfd sys_eventfd +324 common fallocate sys_fallocate +325 common semtimedop sys_semtimedop +326 common timerfd_settime sys_timerfd_settime +327 common timerfd_gettime sys_timerfd_gettime +328 common semctl sys_semctl +329 common semget sys_semget +330 common semop sys_semop +331 common msgctl sys_msgctl +332 common msgget sys_msgget +333 common msgrcv sys_msgrcv +334 common msgsnd sys_msgsnd +335 common shmat sys_shmat +336 common shmctl sys_shmctl +337 common shmdt sys_shmdt +338 common shmget sys_shmget +339 common signalfd4 sys_signalfd4 +340 common eventfd2 sys_eventfd2 +341 common epoll_create1 sys_epoll_create1 +342 common dup3 sys_dup3 +343 common pipe2 sys_pipe2 +344 common inotify_init1 sys_inotify_init1 +345 common socket sys_socket +346 common socketpair sys_socketpair +347 common bind sys_bind +348 common listen sys_listen +349 common accept sys_accept +350 common connect sys_connect +351 common getsockname sys_getsockname +352 common getpeername sys_getpeername +353 common sendto sys_sendto +354 common send sys_send 
+355 common recvfrom sys_recvfrom +356 common recv sys_recv +357 common setsockopt sys_setsockopt +358 common getsockopt sys_getsockopt +359 common shutdown sys_shutdown +360 common sendmsg sys_sendmsg +361 common recvmsg sys_recvmsg +362 common accept4 sys_accept4 +363 common preadv sys_preadv +364 common pwritev sys_pwritev +365 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +366 common perf_event_open sys_perf_event_open +367 common recvmmsg sys_recvmmsg +368 common fanotify_init sys_fanotify_init +369 common fanotify_mark sys_fanotify_mark +370 common prlimit64 sys_prlimit64 +371 common name_to_handle_at sys_name_to_handle_at +372 common open_by_handle_at sys_open_by_handle_at +373 common clock_adjtime sys_clock_adjtime +374 common syncfs sys_syncfs +375 common setns sys_setns +376 common sendmmsg sys_sendmmsg +377 common process_vm_readv sys_process_vm_readv +378 common process_vm_writev sys_process_vm_writev +379 common kcmp sys_kcmp +380 common finit_module sys_finit_module +381 common sched_setattr sys_sched_setattr +382 common sched_getattr sys_sched_getattr +383 common renameat2 sys_renameat2 +384 common seccomp sys_seccomp +385 common getrandom sys_getrandom +386 common memfd_create sys_memfd_create +387 common bpf sys_bpf +388 common execveat sys_execveat +389 common userfaultfd sys_userfaultfd +390 common membarrier sys_membarrier +391 common mlock2 sys_mlock2 +392 common copy_file_range sys_copy_file_range +393 common preadv2 sys_preadv2 +394 common pwritev2 sys_pwritev2 +395 common pkey_mprotect sys_pkey_mprotect +396 common pkey_alloc sys_pkey_alloc +397 common pkey_free sys_pkey_free +398 common statx sys_statx +399 common io_pgetevents sys_io_pgetevents +400 common rseq sys_rseq diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..2e9062a926a3 --- /dev/null +++ b/arch/microblaze/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_MICROBLAZE_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh b/arch/microblaze/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..85d78d9309ad --- /dev/null +++ b/arch/microblaze/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + 
nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry ; do + emit $((nxt+offset)) $((nr+offset)) $entry + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 45e0a1aa9357..3002cbca3059 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -81,7 +81,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, size = PAGE_ALIGN(size); order = get_order(size); - vaddr = __get_free_pages(gfp, order); + vaddr = __get_free_pages(gfp | __GFP_ZERO, order); if (!vaddr) return NULL; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8272ea4c7264..63183a8454d6 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2,11 +2,12 @@ config MIPS bool default y - select ARCH_BINFMT_ELF_STATE + select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_SUPPORTS_UPROBES select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if 64BIT @@ -15,8 +16,8 @@ config MIPS select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS + select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1) select CPU_PM if CPU_IDLE - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE @@ -56,10 +57,12 @@ config MIPS select HAVE_FUNCTION_TRACER select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE + select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI @@ -494,22 +497,23 @@ config MIPS_MALTA select BOOT_RAW select BUILTIN_DTB select CEVT_R4K - select CSRC_R4K select CLKSRC_MIPS_GIC select COMMON_CLK + select CSRC_R4K select DMA_MAYBE_COHERENT select GENERIC_ISA_DMA select HAVE_PCSPKR_PLATFORM - select IRQ_MIPS_CPU - select MIPS_GIC select HW_HAS_PCI select I8253 select I8259 + select IRQ_MIPS_CPU + select LIBFDT select MIPS_BONITO64 select MIPS_CPU_SCACHE + select MIPS_GIC select MIPS_L1_CACHE_SHIFT_6 - select PCI_GT64XXX_PCI0 select MIPS_MSC + select PCI_GT64XXX_PCI0 select SMP_UP if SMP select SWAP_IO_SPACE select SYS_HAS_CPU_MIPS32_R1 @@ -528,19 +532,16 @@ config MIPS_MALTA select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_MICROMIPS + select SYS_SUPPORTS_MIPS16 select SYS_SUPPORTS_MIPS_CMP select SYS_SUPPORTS_MIPS_CPS - select SYS_SUPPORTS_MIPS16 select SYS_SUPPORTS_MULTITHREADING + select SYS_SUPPORTS_RELOCATABLE select SYS_SUPPORTS_SMARTMIPS select SYS_SUPPORTS_VPE_LOADER select SYS_SUPPORTS_ZBOOT - select SYS_SUPPORTS_RELOCATABLE select USE_OF - select LIBFDT select ZONE_DMA32 if 64BIT - select BUILTIN_DTB - select LIBFDT help This enables support for the MIPS Technologies Malta evaluation board. 
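A note on the syscall-table generators introduced above: syscallhdr.sh turns each table row into a __NR_ define (wrapped in an include guard, with a final __NR_syscalls count), while syscalltbl.sh emits one __SYSCALL() invocation per number and pads any holes with sys_ni_syscall so the generated table stays dense; the including site (the rewritten syscall_table.S, via asm/syscall_table.h) is expected to define __SYSCALL to expand each row, typically to a .long of the entry point. A minimal sketch of the round trip, assuming a hypothetical three-row table with a gap at number 2 (include guard omitted):

	# hypothetical syscall.tbl excerpt
	0	common	restart_syscall		sys_restart_syscall
	1	common	exit			sys_exit
	3	common	read			sys_read

	/* generated unistd_32.h (syscallhdr.sh, empty prefix/offset) */
	#define __NR_restart_syscall	0
	#define __NR_exit	1
	#define __NR_read	3
	#ifdef __KERNEL__
	#define __NR_syscalls	4
	#endif

	/* generated syscall_table.h (syscalltbl.sh): the hole at 2 is padded */
	__SYSCALL(0, sys_restart_syscall, )
	__SYSCALL(1, sys_exit, )
	__SYSCALL(2, sys_ni_syscall, )
	__SYSCALL(3, sys_read, )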
@@ -794,6 +795,7 @@ config SIBYTE_SWARM select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_LITTLESUR bool "Sibyte BCM91250C2-LittleSur" @@ -805,6 +807,7 @@ config SIBYTE_LITTLESUR select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN + select ZONE_DMA32 if 64BIT config SIBYTE_SENTOSA bool "Sibyte BCM91250E-Sentosa" @@ -814,6 +817,7 @@ config SIBYTE_SENTOSA select SYS_HAS_CPU_SB1 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_BIGSUR bool "Sibyte BCM91480B-BigSur" @@ -826,6 +830,7 @@ config SIBYTE_BIGSUR select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SNI_RM bool "SNI RM200/300/400" @@ -2032,7 +2037,7 @@ config CPU_MIPS64 default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6 # -# These two indicate the revision of the architecture, either Release 1 or Release 2 +# These indicate the revision of the architecture # config CPU_MIPSR1 bool @@ -2053,6 +2058,16 @@ config CPU_MIPSR6 select MIPS_CRC_SUPPORT select MIPS_SPRAM +config TARGET_ISA_REV + int + default 1 if CPU_MIPSR1 + default 2 if CPU_MIPSR2 + default 6 if CPU_MIPSR6 + default 0 + help + Reflects the ISA revision being targeted by the kernel build. This + is effectively the Kconfig equivalent of MIPS_ISA_REV. + config EVA bool @@ -2254,9 +2269,30 @@ config CPU_GENERIC_DUMP_TLB bool default y if !(CPU_R3000 || CPU_R8000 || CPU_TX39XX) +config MIPS_FP_SUPPORT + bool "Floating Point support" if EXPERT + default y + help + Select y to include support for floating point in the kernel + including initialization of FPU hardware, FP context save & restore + and emulation of an FPU where necessary. Without this support any + userland program attempting to use floating point instructions will + receive a SIGILL. + + If you know that your userland will not attempt to use floating point + instructions then you can say n here to shrink the kernel a little. + + If unsure, say y. + +config CPU_R2300_FPU + bool + depends on MIPS_FP_SUPPORT + default y if CPU_R3000 || CPU_TX39XX + config CPU_R4K_FPU bool - default y if !(CPU_R3000 || CPU_TX39XX) + depends on MIPS_FP_SUPPORT + default y if !CPU_R2300_FPU config CPU_R4K_CACHE_TLB bool @@ -2308,6 +2344,7 @@ config MIPS_MT_FPAFF config MIPSR2_TO_R6_EMULATOR bool "MIPS R2-to-R6 emulator" depends on CPU_MIPSR6 + depends on MIPS_FP_SUPPORT default y help Choose this option if you want to run non-R6 MIPS userland code. @@ -2455,6 +2492,7 @@ endchoice config CPU_HAS_MSA bool "Support for the MIPS SIMD Architecture" depends on CPU_SUPPORTS_MSA + depends on MIPS_FP_SUPPORT depends on 64BIT || MIPS_O32_FP64_SUPPORT help MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers @@ -2902,7 +2940,7 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
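To make the MIPS_FP_SUPPORT help text above concrete: with CONFIG_MIPS_FP_SUPPORT=n the kernel neither initializes the FPU nor builds the math emulator (see the libs-$(CONFIG_MIPS_FP_SUPPORT) change in the Makefile hunk below), so the first FPU instruction a hard-float process executes traps and the process gets SIGILL. A hypothetical userland demo, not part of this patch:

	/* fp-demo.c - on a hard-float MIPS userland the multiply below
	 * compiles to a real FPU instruction (e.g. mul.d). Under a kernel
	 * built with MIPS_FP_SUPPORT=n that instruction traps and the
	 * process receives SIGILL instead of printing 3.750000. */
	#include <stdio.h>

	int main(void)
	{
		volatile double a = 1.5, b = 2.5;
		printf("%f\n", a * b);	/* SIGILL on an FP-less kernel */
		return 0;
	}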
config MIPS_O32_FP64_SUPPORT - bool "Support for O32 binaries using 64-bit FP" + bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6 depends on 32BIT || MIPS32_O32 help When this is enabled, the kernel will support use of 64-bit floating diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 68410490e12f..5b174c3d0de3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -319,7 +319,7 @@ OBJCOPYFLAGS += --remove-section=.reginfo head-y := arch/mips/kernel/head.o libs-y += arch/mips/lib/ -libs-y += arch/mips/math-emu/ +libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/ # See arch/mips/Kbuild for content of core part of the kernel core-y += arch/mips/ @@ -430,6 +430,9 @@ archclean: $(Q)$(MAKE) $(clean)=arch/mips/boot/tools $(Q)$(MAKE) $(clean)=arch/mips/lasat +archheaders: + $(Q)$(MAKE) $(build)=arch/mips/kernel/syscalls all + define archhelp echo ' install - install kernel into $(INSTALL_PATH)' echo ' vmlinux.ecoff - ECOFF boot image' diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 37fe58c19a90..542c3ede9722 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -13,6 +13,7 @@ #include <stdint.h> #include <stdio.h> #include <stdlib.h> +#include "../../../../include/linux/sizes.h" int main(int argc, char *argv[]) { @@ -45,11 +46,11 @@ int main(int argc, char *argv[]) vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size; /* - * Align with 16 bytes: "greater than that used for any standard data - * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition). + * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE, + * which may be as large as 64KB depending on the kernel configuration. */ - vmlinuz_load_addr += (16 - vmlinux_size % 16); + vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K); printf("0x%llx\n", vmlinuz_load_addr); diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 65af3f6ba81c..84328afa3a55 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -141,6 +141,12 @@ #size-cells = <2>; #interrupt-cells = <1>; + eg20t_phub@2,0,0 { + compatible = "pci8086,8801"; + reg = <0x00020000 0 0 0 0>; + intel,eg20t-prefetch = <0>; + }; + eg20t_mac@2,0,1 { compatible = "pci8086,8802"; reg = <0x00020100 0 0 0 0>; diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts index 9944e716eac8..f03279b1cde7 100644 --- a/arch/mips/boot/dts/mti/malta.dts +++ b/arch/mips/boot/dts/mti/malta.dts @@ -87,6 +87,11 @@ reg = <0x1f000000 0x1000>; native-endian; + lcd@410 { + compatible = "mti,malta-lcd"; + offset = <0x410>; + }; + reboot { compatible = "syscon-reboot"; regmap = <&fpga_regs>; diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c index 39f153fe0022..124817609ce0 100644 --- a/arch/mips/cavium-octeon/csrc-octeon.c +++ b/arch/mips/cavium-octeon/csrc-octeon.c @@ -7,6 +7,7 @@ * Copyright (C) 2009, 2012 Cavium, Inc. 
*/ #include <linux/clocksource.h> +#include <linux/sched/clock.h> #include <linux/export.h> #include <linux/init.h> #include <linux/smp.h> diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 94d97ebfa036..ba8f82a29a81 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c @@ -122,8 +122,21 @@ static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr) return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)); } -void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, - uint64_t min_addr, uint64_t max_addr) +/** + * Allocate a block of memory from the free list that was + * passed to the application by the bootloader within a specified + * address range. This is an allocate-only algorithm, so + * freeing memory is not possible. Allocation will fail if + * memory cannot be allocated in the requested range. + * + * @size: Size in bytes of block to allocate + * @min_addr: defines the minimum address of the range + * @max_addr: defines the maximum address of the range + * @alignment: Alignment required - must be power of 2 + * Returns pointer to block of memory, NULL on error + */ +static void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, + uint64_t min_addr, uint64_t max_addr) { int64_t address; address = @@ -142,47 +155,6 @@ void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, address + size); } -void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) -{ - return cvmx_bootmem_alloc_range(size, alignment, 0, 0); -} - -void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, - uint64_t max_addr, uint64_t align, - char *name, - void (*init) (void *)) -{ - int64_t addr; - void *ptr; - uint64_t named_block_desc_addr; - - named_block_desc_addr = (uint64_t) - cvmx_bootmem_phy_named_block_find(name, - (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); - - if (named_block_desc_addr) { - addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, - base_addr); - return cvmx_phys_to_ptr(addr); - } - - addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, - align, name, - (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); - - if (addr < 0) - return NULL; - ptr = cvmx_phys_to_ptr(addr); - - if (init) - init(ptr); - else - memset(ptr, 0, size); - - return ptr; -} -EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once); - void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name) @@ -197,30 +169,12 @@ void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, return NULL; } -void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, - char *name) -{ - return cvmx_bootmem_alloc_named_range(size, address, address + size, - 0, name); -} - void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name) { return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name); } EXPORT_SYMBOL(cvmx_bootmem_alloc_named); -int cvmx_bootmem_free_named(char *name) -{ - return cvmx_bootmem_phy_named_block_free(name, 0); -} - -struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name) -{ - return cvmx_bootmem_phy_named_block_find(name, 0); -} -EXPORT_SYMBOL(cvmx_bootmem_find_named_block); - void cvmx_bootmem_lock(void) { cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock)); @@ -603,7 +557,20 @@ bootmem_free_done: } -struct cvmx_bootmem_named_block_desc * +/** + * Finds a named memory block by name. 
+ * Also used for finding an unused entry in the named block table. + * + * @name: Name of memory block to find. If NULL pointer given, then + * finds unused descriptor, if available. + * + * @flags: Flags to control options for the allocation. + * + * Returns Pointer to memory block descriptor, NULL if not found. + * If NULL returned when name parameter is NULL, then no memory + * block descriptors are available. + */ +static struct cvmx_bootmem_named_block_desc * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags) { unsigned int i; @@ -655,7 +622,58 @@ struct cvmx_bootmem_named_block_desc * return NULL; } -int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name, + void (*init) (void *)) +{ + int64_t addr; + void *ptr; + uint64_t named_block_desc_addr; + + named_block_desc_addr = (uint64_t) + cvmx_bootmem_phy_named_block_find(name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (named_block_desc_addr) { + addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, + base_addr); + return cvmx_phys_to_ptr(addr); + } + + addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, + align, name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (addr < 0) + return NULL; + ptr = cvmx_phys_to_ptr(addr); + + if (init) + init(ptr); + else + memset(ptr, 0, size); + + return ptr; +} +EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once); + +struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name) +{ + return cvmx_bootmem_phy_named_block_find(name, 0); +} +EXPORT_SYMBOL(cvmx_bootmem_find_named_block); + +/** + * Frees a named block. + * + * @name: name of block to free + * @flags: flags for passing options + * + * Returns 0 on failure + * 1 on success + */ +static int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) { struct cvmx_bootmem_named_block_desc *named_block_ptr; @@ -699,6 +717,11 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) return named_block_ptr != NULL; /* 0 on failure, 1 on success */ } +int cvmx_bootmem_free_named(char *name) +{ + return cvmx_bootmem_phy_named_block_free(name, 0); +} + int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c index 8241fc6aa17d..3839feba68f2 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c +++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c @@ -266,7 +266,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id) } else { union cvmx_pko_mem_debug8 debug8; debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); - return debug8.cn58xx.doorbell; + return debug8.cn50xx.doorbell; } case CVMX_CMD_QUEUE_ZIP: case CVMX_CMD_QUEUE_DFA: diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c index b8898e2b8a6f..e812ed9a03bb 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c @@ -449,71 +449,3 @@ int __cvmx_helper_rgmii_link_set(int ipd_port, return result; } - -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. 
- * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. - */ -int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, - int enable_external) -{ - int interface = cvmx_helper_get_interface_num(ipd_port); - int index = cvmx_helper_get_interface_index_num(ipd_port); - int original_enable; - union cvmx_gmxx_prtx_cfg gmx_cfg; - union cvmx_asxx_prt_loop asxx_prt_loop; - - /* Read the current enable state and save it */ - gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); - original_enable = gmx_cfg.s.en; - /* Force port to be disabled */ - gmx_cfg.s.en = 0; - if (enable_internal) { - /* Force speed if we're doing internal loopback */ - gmx_cfg.s.duplex = 1; - gmx_cfg.s.slottime = 1; - gmx_cfg.s.speed = 1; - cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1); - cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200); - cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000); - } - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); - - /* Set the loopback bits */ - asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface)); - if (enable_internal) - asxx_prt_loop.s.int_loop |= 1 << index; - else - asxx_prt_loop.s.int_loop &= ~(1 << index); - if (enable_external) - asxx_prt_loop.s.ext_loop |= 1 << index; - else - asxx_prt_loop.s.ext_loop &= ~(1 << index); - cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64); - - /* Force enables in internal loopback */ - if (enable_internal) { - uint64_t tmp; - tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface)); - cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), - (1 << index) | tmp); - tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)); - cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), - (1 << index) | tmp); - original_enable = 1; - } - - /* Restore the enable state */ - gmx_cfg.s.en = original_enable; - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); - return 0; -} diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c index a176358c5a21..f6ebf63dc84c 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c @@ -513,41 +513,3 @@ int __cvmx_helper_sgmii_link_set(int ipd_port, return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index, link_info); } - -/** - * Configure a port for internal and/or external loopback. Internal - * loopback causes packets sent by the port to be received by - * Octeon. External loopback causes packets received from the wire to - * sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. 
- */ -int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, - int enable_external) -{ - int interface = cvmx_helper_get_interface_num(ipd_port); - int index = cvmx_helper_get_interface_index_num(ipd_port); - union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg; - union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg; - - pcsx_mrx_control_reg.u64 = - cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface)); - pcsx_mrx_control_reg.s.loopbck1 = enable_internal; - cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), - pcsx_mrx_control_reg.u64); - - pcsx_miscx_ctl_reg.u64 = - cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); - pcsx_miscx_ctl_reg.s.loopbck2 = enable_external; - cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), - pcsx_miscx_ctl_reg.u64); - - __cvmx_helper_sgmii_hardware_init_link(interface, index); - return 0; -} diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c index b45b2975746d..53b912745dbd 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c @@ -81,93 +81,6 @@ const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t } /** - * Debug routine to dump the packet structure to the console - * - * @work: Work queue entry containing the packet to dump - * Returns - */ -int cvmx_helper_dump_packet(cvmx_wqe_t *work) -{ - uint64_t count; - uint64_t remaining_bytes; - union cvmx_buf_ptr buffer_ptr; - uint64_t start_of_buffer; - uint8_t *data_address; - uint8_t *end_of_data; - - cvmx_dprintf("Packet Length: %u\n", work->word1.len); - cvmx_dprintf(" Input Port: %u\n", cvmx_wqe_get_port(work)); - cvmx_dprintf(" QoS: %u\n", cvmx_wqe_get_qos(work)); - cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs); - - if (work->word2.s.bufs == 0) { - union cvmx_ipd_wqe_fpa_queue wqe_pool; - wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE); - buffer_ptr.u64 = 0; - buffer_ptr.s.pool = wqe_pool.s.wqe_pool; - buffer_ptr.s.size = 128; - buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data); - if (likely(!work->word2.s.not_IP)) { - union cvmx_pip_ip_offset pip_ip_offset; - pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET); - buffer_ptr.s.addr += - (pip_ip_offset.s.offset << 3) - - work->word2.s.ip_offset; - buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2; - } else { - /* - * WARNING: This code assumes that the packet - * is not RAW. If it was, we would use - * PIP_GBL_CFG[RAW_SHF] instead of - * PIP_GBL_CFG[NIP_SHF]. 
- */ - union cvmx_pip_gbl_cfg pip_gbl_cfg; - pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG); - buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf; - } - } else - buffer_ptr = work->packet_ptr; - remaining_bytes = work->word1.len; - - while (remaining_bytes) { - start_of_buffer = - ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7; - cvmx_dprintf(" Buffer Start:%llx\n", - (unsigned long long)start_of_buffer); - cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i); - cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back); - cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool); - cvmx_dprintf(" Buffer Data: %llx\n", - (unsigned long long)buffer_ptr.s.addr); - cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size); - - cvmx_dprintf("\t\t"); - data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr); - end_of_data = data_address + buffer_ptr.s.size; - count = 0; - while (data_address < end_of_data) { - if (remaining_bytes == 0) - break; - else - remaining_bytes--; - cvmx_dprintf("%02x", (unsigned int)*data_address); - data_address++; - if (remaining_bytes && (count == 7)) { - cvmx_dprintf("\n\t\t"); - count = 0; - } else - count++; - } - cvmx_dprintf("\n"); - - if (remaining_bytes) - buffer_ptr = *(union cvmx_buf_ptr *) - cvmx_phys_to_ptr(buffer_ptr.s.addr - 8); - } - return 0; -} - -/** * Setup Random Early Drop on a specific input queue * * @queue: Input queue to setup RED on (0-7) @@ -179,7 +92,8 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work) * than this many free packet buffers in FPA 0. * Returns Zero on success. Negative on failure */ -int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) +static int cvmx_helper_setup_red_queue(int queue, int pass_thresh, + int drop_thresh) { union cvmx_ipd_qosx_red_marks red_marks; union cvmx_ipd_red_quex_param red_param; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c index 2bb6912a580d..93a498d05184 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c @@ -319,42 +319,3 @@ int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info) /* Bring the link up */ return __cvmx_helper_xaui_enable(interface); } - -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. 
- */ -extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, - int enable_internal, - int enable_external) -{ - int interface = cvmx_helper_get_interface_num(ipd_port); - union cvmx_pcsxx_control1_reg pcsxx_control1_reg; - union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback; - - /* Set the internal loop */ - pcsxx_control1_reg.u64 = - cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface)); - pcsxx_control1_reg.s.loopbck1 = enable_internal; - cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), - pcsxx_control1_reg.u64); - - /* Set the external loop */ - gmxx_xaui_ext_loopback.u64 = - cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface)); - gmxx_xaui_ext_loopback.s.en = enable_external; - cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface), - gmxx_xaui_ext_loopback.u64); - - /* Take the link through a reset */ - return __cvmx_helper_xaui_enable(interface); -} diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 6c79e8a16a26..a76bbcc30f95 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -46,26 +46,6 @@ #include <asm/octeon/cvmx-smix-defs.h> #include <asm/octeon/cvmx-asxx-defs.h> -/** - * cvmx_override_pko_queue_priority(int ipd_port, uint64_t - * priorities[16]) is a function pointer. It is meant to allow - * customization of the PKO queue priorities based on the port - * number. Users should set this pointer to a function before - * calling any cvmx-helper operations. - */ -void (*cvmx_override_pko_queue_priority) (int pko_port, - uint64_t priorities[16]); - -/** - * cvmx_override_ipd_port_setup(int ipd_port) is a function - * pointer. It is meant to allow customization of the IPD port - * setup before packet input/output comes online. It is called - * after cvmx-helper does the default IPD configuration, but - * before IPD is enabled. Users should set this pointer to a - * function before calling any cvmx-helper operations. - */ -void (*cvmx_override_ipd_port_setup) (int ipd_port); - /* Port count per interface */ static int interface_port_count[9]; @@ -238,7 +218,7 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface) mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { - switch (mode.cn63xx.mode) { + switch (mode.cn61xx.mode) { case 0: return CVMX_HELPER_INTERFACE_MODE_SGMII; case 1: @@ -362,7 +342,7 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface) mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { - switch (mode.cn56xx.mode) { + switch (mode.cn52xx.mode) { case 0: return CVMX_HELPER_INTERFACE_MODE_DISABLED; case 1: @@ -436,10 +416,6 @@ static int __cvmx_helper_port_setup_ipd(int ipd_port) cvmx_pip_config_port(ipd_port, port_config, tag_config); - /* Give the user a chance to override our setting for each port */ - if (cvmx_override_ipd_port_setup) - cvmx_override_ipd_port_setup(ipd_port); - return 0; } @@ -663,13 +639,6 @@ static int __cvmx_helper_interface_setup_pko(int interface) int ipd_port = cvmx_helper_get_ipd_port(interface, 0); int num_ports = interface_port_count[interface]; while (num_ports--) { - /* - * Give the user a chance to override the per queue - * priorities. 
- */ - if (cvmx_override_pko_queue_priority) - cvmx_override_pko_queue_priority(ipd_port, priorities); - cvmx_pko_config_port(ipd_port, cvmx_pko_get_base_queue_per_core(ipd_port, 0), @@ -818,7 +787,7 @@ static int __cvmx_helper_packet_hardware_enable(int interface) * Returns 0 on success * !0 on failure */ -int __cvmx_helper_errata_fix_ipd_ptr_alignment(void) +static int __cvmx_helper_errata_fix_ipd_ptr_alignment(void) { #define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \ (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP) @@ -1239,57 +1208,3 @@ int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info) return result; } EXPORT_SYMBOL_GPL(cvmx_helper_link_set); - -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. - */ -int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, - int enable_external) -{ - int result = -1; - int interface = cvmx_helper_get_interface_num(ipd_port); - int index = cvmx_helper_get_interface_index_num(ipd_port); - - if (index >= cvmx_helper_ports_on_interface(interface)) - return -1; - - switch (cvmx_helper_interface_get_mode(interface)) { - case CVMX_HELPER_INTERFACE_MODE_DISABLED: - case CVMX_HELPER_INTERFACE_MODE_PCIE: - case CVMX_HELPER_INTERFACE_MODE_SPI: - case CVMX_HELPER_INTERFACE_MODE_NPI: - case CVMX_HELPER_INTERFACE_MODE_LOOP: - break; - case CVMX_HELPER_INTERFACE_MODE_XAUI: - result = - __cvmx_helper_xaui_configure_loopback(ipd_port, - enable_internal, - enable_external); - break; - case CVMX_HELPER_INTERFACE_MODE_RGMII: - case CVMX_HELPER_INTERFACE_MODE_GMII: - result = - __cvmx_helper_rgmii_configure_loopback(ipd_port, - enable_internal, - enable_external); - break; - case CVMX_HELPER_INTERFACE_MODE_SGMII: - case CVMX_HELPER_INTERFACE_MODE_PICMG: - result = - __cvmx_helper_sgmii_configure_loopback(ipd_port, - enable_internal, - enable_external); - break; - } - return result; -} diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c index fa327ec891cd..d23f46736dd6 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c +++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c @@ -84,7 +84,7 @@ void __cvmx_interrupt_gmxx_enable(int interface) if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { if (mode.s.en) { - switch (mode.cn56xx.mode) { + switch (mode.cn52xx.mode) { case 1: /* XAUI */ num_ports = 1; break; diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c index f091c9b70603..83df0a963a8b 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -44,7 +44,7 @@ * if multiple applications or operating systems are running, then it * is up to the user program to coordinate between them. 
*/ -cvmx_spinlock_t cvmx_l2c_spinlock; +static cvmx_spinlock_t cvmx_l2c_spinlock; int cvmx_l2c_get_core_way_partition(uint32_t core) { diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c index 341052387b49..657dbad9644e 100644 --- a/arch/mips/cavium-octeon/executive/octeon-model.c +++ b/arch/mips/cavium-octeon/executive/octeon-model.c @@ -305,7 +305,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id, if (fus_dat3.s.nozip) suffix = "SCP"; - if (fus_dat3.cn56xx.bar2_en) + if (fus_dat3.cn38xx.bar2_en) suffix = "NSPB2"; } if (l2d_fus3) @@ -344,7 +344,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id, suffix = "CP"; else if (fus_dat2.cn63xx.dorm_crypto) suffix = "DAP"; - else if (fus_dat3.cn63xx.nozip) + else if (fus_dat3.cn61xx.nozip) suffix = "SCP"; else suffix = "AAP"; @@ -359,18 +359,18 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id, suffix = "CP"; else if (fus_dat2.cn66xx.dorm_crypto) suffix = "DAP"; - else if (fus_dat3.cn66xx.nozip) + else if (fus_dat3.cn61xx.nozip) suffix = "SCP"; else suffix = "AAP"; break; case 0x91: /* CN68XX */ family = "68"; - if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip) + if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn61xx.nozip) suffix = "CP"; else if (fus_dat2.cn68xx.dorm_crypto) suffix = "DAP"; - else if (fus_dat3.cn68xx.nozip) + else if (fus_dat3.cn61xx.nozip) suffix = "SCP"; else if (fus_dat2.cn68xx.nocrypto) suffix = "SP"; @@ -379,7 +379,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id, break; case 0x94: /* CNF71XX */ family = "F71"; - if (fus_dat3.cnf71xx.nozip) + if (fus_dat3.cn61xx.nozip) suffix = "SCP"; else suffix = "AAP"; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index cc1d8525e651..f97be32bf699 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2483,8 +2483,8 @@ void octeon_irq_ciu3_mask_ack(struct irq_data *data) } #ifdef CONFIG_SMP -int octeon_irq_ciu3_set_affinity(struct irq_data *data, - const struct cpumask *dest, bool force) +static int octeon_irq_ciu3_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) { union cvmx_ciu3_iscx_ctl isc_ctl; union cvmx_ciu3_iscx_w1c isc_w1c; diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 807cadaf554e..1f9ba60f7375 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -440,7 +440,7 @@ out: } device_initcall(octeon_rng_device_init); -const struct of_device_id octeon_ids[] __initconst = { +static const struct of_device_id octeon_ids[] __initconst = { { .compatible = "simple-bus", }, { .compatible = "cavium,octeon-6335-uctl", }, { .compatible = "cavium,octeon-5750-usbn", }, @@ -501,7 +501,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr) if (phy_addr >= 256 && alt_phy > 0) { const struct fdt_property *phy_prop; struct fdt_property *alt_prop; - u32 phy_handle_name; + fdt32_t phy_handle_name; /* Use the alt phy node instead.*/ phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL); diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index bfdfaf32d2c4..1f730ded5224 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -253,17 +253,17 @@ static int dwc3_octeon_config_power(struct device *dev, u64 base) && 
gpio <= 31) { gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio)); gpio_bit.s.tx_oe = 1; - gpio_bit.cn73xx.output_sel = (index == 0 ? 0x14 : 0x15); + gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x15); cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64); } else if (gpio <= 15) { gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio)); gpio_bit.s.tx_oe = 1; - gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19); + gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19); cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64); } else { gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio)); gpio_bit.s.tx_oe = 1; - gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19); + gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19); cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64); } diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index dfb95cffef3e..2c79ab52977a 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -36,7 +36,9 @@ #include <asm/mipsregs.h> #include <asm/bootinfo.h> #include <asm/sections.h> +#include <asm/fw/fw.h> #include <asm/setup.h> +#include <asm/prom.h> #include <asm/time.h> #include <asm/octeon/octeon.h> @@ -72,7 +74,7 @@ static unsigned long long reserve_low_mem; DEFINE_SEMAPHORE(octeon_bootbus_sem); EXPORT_SYMBOL(octeon_bootbus_sem); -struct octeon_boot_descriptor *octeon_boot_desc_ptr; +static struct octeon_boot_descriptor *octeon_boot_desc_ptr; struct cvmx_bootinfo *octeon_bootinfo; EXPORT_SYMBOL(octeon_bootinfo); @@ -351,7 +353,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate); * * @s: String to write */ -void octeon_write_lcd(const char *s) +static void octeon_write_lcd(const char *s) { if (octeon_bootinfo->led_display_base_addr) { void __iomem *lcd_address = @@ -373,7 +375,7 @@ void octeon_write_lcd(const char *s) * * Returns uart (0 or 1) */ -int octeon_get_boot_uart(void) +static int octeon_get_boot_uart(void) { return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ? 1 : 0; diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 39f2a2ec1286..076db9a06b5e 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -284,7 +284,7 @@ static void octeon_smp_finish(void) #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU. 
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index bfdfaf32d2c4..1f730ded5224 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -253,17 +253,17 @@ static int dwc3_octeon_config_power(struct device *dev, u64 base)
 		    && gpio <= 31) {
 			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
 			gpio_bit.s.tx_oe = 1;
-			gpio_bit.cn73xx.output_sel = (index == 0 ? 0x14 : 0x15);
+			gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x15);
 			cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
 		} else if (gpio <= 15) {
 			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
 			gpio_bit.s.tx_oe = 1;
-			gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+			gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
 			cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
 		} else {
 			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio));
 			gpio_bit.s.tx_oe = 1;
-			gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+			gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
 			cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64);
 		}
 
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index dfb95cffef3e..2c79ab52977a 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -36,7 +36,9 @@
 #include <asm/mipsregs.h>
 #include <asm/bootinfo.h>
 #include <asm/sections.h>
+#include <asm/fw/fw.h>
 #include <asm/setup.h>
+#include <asm/prom.h>
 #include <asm/time.h>
 
 #include <asm/octeon/octeon.h>
@@ -72,7 +74,7 @@ static unsigned long long reserve_low_mem;
 DEFINE_SEMAPHORE(octeon_bootbus_sem);
 EXPORT_SYMBOL(octeon_bootbus_sem);
 
-struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
 
 struct cvmx_bootinfo *octeon_bootinfo;
 EXPORT_SYMBOL(octeon_bootinfo);
@@ -351,7 +353,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate);
 *
 * @s: String to write
 */
-void octeon_write_lcd(const char *s)
+static void octeon_write_lcd(const char *s)
 {
 	if (octeon_bootinfo->led_display_base_addr) {
 		void __iomem *lcd_address =
@@ -373,7 +375,7 @@ void octeon_write_lcd(const char *s)
 *
 * Returns uart (0 or 1)
 */
-int octeon_get_boot_uart(void)
+static int octeon_get_boot_uart(void)
 {
 	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
 		1 : 0;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 39f2a2ec1286..076db9a06b5e 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -284,7 +284,7 @@ static void octeon_smp_finish(void)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /* State of each CPU. */
-DEFINE_PER_CPU(int, cpu_state);
+static DEFINE_PER_CPU(int, cpu_state);
 
 static int octeon_cpu_disable(void)
 {
@@ -413,7 +413,7 @@ late_initcall(register_cavium_notifier);
 
 #endif	/* CONFIG_HOTPLUG_CPU */
 
-const struct plat_smp_ops octeon_smp_ops = {
+static const struct plat_smp_ops octeon_smp_ops = {
 	.send_ipi_single	= octeon_send_ipi_single,
 	.send_ipi_mask		= octeon_send_ipi_mask,
 	.init_secondary		= octeon_init_secondary,
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 5651f4d8f45c..9fbfb6e5c7d2 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -1,29 +1,27 @@
-CONFIG_AR7=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_KERNEL_LZMA=y
 CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_LZMA=y
 CONFIG_EXPERT=y
-# CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
-# CONFIG_PCSPKR_PLATFORM is not set
+# CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
+CONFIG_AR7=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -35,7 +33,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_MROUTE=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -59,13 +56,9 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_RAW=m
 CONFIG_ATM=m
@@ -79,8 +72,6 @@ CONFIG_NET_ACT_POLICE=y
 CONFIG_HAMRADIO=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
@@ -91,25 +82,22 @@ CONFIG_MTD_CFI_STAA=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_NETDEVICES=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
 CONFIG_CPMAC=y
+CONFIG_FIXED_PHY=y
 CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
 CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPPOE=m
+CONFIG_PPP_MULTILINK=y
 CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
 CONFIG_HW_RANDOM=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
@@ -131,13 +119,9 @@ CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_SUMMARY=y
 CONFIG_JFFS2_COMPRESSION_OPTIONS=y
 CONFIG_SQUASHFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
+# CONFIG_CRYPTO_HW is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_FS=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
-CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git
a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig index b8d48038e74f..5dd6b1939e9c 100644 --- a/arch/mips/configs/ath25_defconfig +++ b/arch/mips/configs/ath25_defconfig @@ -1,11 +1,6 @@ -CONFIG_ATH25=y -# CONFIG_COMPACTION is not set -CONFIG_HZ_100=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set -# CONFIG_FHANDLE is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set @@ -14,16 +9,21 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_FHANDLE is not set # CONFIG_AIO is not set CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ATH25=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_CFQ is not set -# CONFIG_SUSPEND is not set +# CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -75,7 +75,6 @@ CONFIG_INPUT=m # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set @@ -104,15 +103,15 @@ CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y # CONFIG_SQUASHFS_ZLIB is not set CONFIG_SQUASHFS_XZ=y -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_STRIP_ASM_SYMS=y -CONFIG_DEBUG_FS=y -# CONFIG_SCHED_DEBUG is not set -# CONFIG_FTRACE is not set # CONFIG_XZ_DEC_X86 is not set # CONFIG_XZ_DEC_POWERPC is not set # CONFIG_XZ_DEC_IA64 is not set # CONFIG_XZ_DEC_ARM is not set # CONFIG_XZ_DEC_ARMTHUMB is not set # CONFIG_XZ_DEC_SPARC is not set +CONFIG_PRINTK_TIME=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig index 951c4231bdb8..4e4ec779f182 100644 --- a/arch/mips/configs/ath79_defconfig +++ b/arch/mips/configs/ath79_defconfig @@ -1,30 +1,29 @@ -CONFIG_ATH79=y -CONFIG_ATH79_MACH_AP121=y -CONFIG_ATH79_MACH_AP136=y -CONFIG_ATH79_MACH_AP81=y -CONFIG_ATH79_MACH_DB120=y -CONFIG_ATH79_MACH_PB44=y -CONFIG_ATH79_MACH_UBNT_XM=y -CONFIG_HZ_100=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set -CONFIG_RD_LZMA=y -# CONFIG_KALLSYMS is not set # CONFIG_AIO is not set +# CONFIG_KALLSYMS is not set CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ATH79=y +CONFIG_ATH79_MACH_AP121=y +CONFIG_ATH79_MACH_AP136=y +CONFIG_ATH79_MACH_AP81=y +CONFIG_ATH79_MACH_DB120=y +CONFIG_ATH79_MACH_PB44=y +CONFIG_ATH79_MACH_UBNT_XM=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_CFQ is not set -CONFIG_PCI=y -# CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -52,12 +51,9 @@ CONFIG_MTD_PHYSMAP=y CONFIG_MTD_M25P80=y CONFIG_MTD_SPI_NOR=y CONFIG_NETDEVICES=y -# CONFIG_NET_PACKET_ENGINE is not set -CONFIG_ATH_COMMON=m CONFIG_ATH9K=m CONFIG_ATH9K_AHB=y CONFIG_INPUT=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_KEYBOARD_ATKBD is not set 
CONFIG_KEYBOARD_GPIO_POLLED=m # CONFIG_INPUT_MOUSE is not set @@ -65,7 +61,6 @@ CONFIG_INPUT_MISC=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set @@ -98,11 +93,9 @@ CONFIG_LEDS_GPIO=y # CONFIG_IOMMU_SUPPORT is not set # CONFIG_DNOTIFY is not set # CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_CRC_ITU_T=m # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_FTRACE is not set -CONFIG_CRYPTO=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC_ITU_T=m diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig index ba800a892384..249f5285e343 100644 --- a/arch/mips/configs/bcm47xx_defconfig +++ b/arch/mips/configs/bcm47xx_defconfig @@ -1,16 +1,15 @@ -CONFIG_BCM47XX=y CONFIG_SYSVIPC=y CONFIG_HIGH_RES_TIMERS=y -CONFIG_UIDGID_STRICT_TYPE_CHECKS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y CONFIG_SLAB=y +CONFIG_BCM47XX=y +CONFIG_PCI=y +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y -CONFIG_PCI=y -# CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -47,8 +46,6 @@ CONFIG_MTD_NAND_BCM47XXNFLASH=y CONFIG_NETDEVICES=y CONFIG_B44=y CONFIG_TIGON3=y -CONFIG_BGMAC=y -CONFIG_ATH_CARDS=y CONFIG_ATH5K=y CONFIG_B43=y CONFIG_B43LEGACY=y @@ -73,6 +70,7 @@ CONFIG_USB_HCD_BCMA=y CONFIG_USB_HCD_SSB=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_CRC32_SARWATE=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_REDUCED=y @@ -81,4 +79,3 @@ CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,115200" -CONFIG_CRC32_SARWATE=y diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig index 131b350f014f..d22fe62adad3 100644 --- a/arch/mips/configs/bcm63xx_defconfig +++ b/arch/mips/configs/bcm63xx_defconfig @@ -1,16 +1,7 @@ -CONFIG_BCM63XX=y -CONFIG_BCM63XX_CPU_6338=y -CONFIG_BCM63XX_CPU_6345=y -CONFIG_BCM63XX_CPU_6348=y -CONFIG_BCM63XX_CPU_6358=y -CONFIG_NO_HZ=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set -CONFIG_TINY_RCU=y -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_NO_HZ=y CONFIG_EXPERT=y -# CONFIG_PCSPKR_PLATFORM is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -20,12 +11,18 @@ CONFIG_EXPERT=y # CONFIG_AIO is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set +CONFIG_BCM63XX=y +CONFIG_BCM63XX_CPU_6338=y +CONFIG_BCM63XX_CPU_6345=y +CONFIG_BCM63XX_CPU_6348=y +CONFIG_BCM63XX_CPU_6358=y +# CONFIG_SECCOMP is not set CONFIG_PCI=y CONFIG_PCCARD=y CONFIG_PCMCIA_BCM63XX=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y @@ -37,7 +34,6 @@ CONFIG_INET=y CONFIG_CFG80211=y CONFIG_NL80211_TESTMODE=y CONFIG_MAC80211=y -CONFIG_MAC80211_LEDS=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set @@ -49,18 +45,16 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y # CONFIG_BLK_DEV is not set CONFIG_NETDEVICES=y -CONFIG_BCM63XX_PHY=y -CONFIG_NET_ETHERNET=y CONFIG_BCM63XX_ENET=y +CONFIG_BCM63XX_PHY=y CONFIG_B43=y # CONFIG_B43_PHY_LP is 
not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_BCM63XX=y CONFIG_SERIAL_BCM63XX_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set # CONFIG_VGA_ARB is not set @@ -68,16 +62,11 @@ CONFIG_USB=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set CONFIG_USB_OHCI_HCD=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_GPIO=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_LEDS_TRIGGER_GPIO=y -CONFIG_LEDS_TRIGGER_DEFAULT_ON=y # CONFIG_FILE_LOCKING is not set # CONFIG_DNOTIFY is not set CONFIG_PROC_KCORE=y # CONFIG_NETWORK_FILESYSTEMS is not set +# CONFIG_CRYPTO_HW is not set CONFIG_MAGIC_SYSRQ=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,115200" -# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 5e73fe755be6..597bc0aa2653 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -1,45 +1,37 @@ -CONFIG_SIBYTE_BIGSUR=y -CONFIG_64BIT=y -CONFIG_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_1000=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_RELAY=y CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_SIBYTE_BIGSUR=y +CONFIG_64BIT=y +CONFIG_SMP=y +CONFIG_HZ_1000=y CONFIG_PCI=y CONFIG_PCI_DEBUG=y -CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_PM=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -94,7 +86,6 @@ CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_DCCP=m -CONFIG_SCTP_HMAC_SHA1=y CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y @@ -134,20 +125,18 @@ CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y CONFIG_PATA_LEGACY=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y CONFIG_SB1250_MAC=y CONFIG_CHELSIO_T3=m CONFIG_NETXEN_NIC=m CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y @@ -168,13 +157,10 @@ CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT2_FS_XIP=y CONFIG_EXT3_FS=m CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set @@ -192,10 +178,7 @@ CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_SPKM3=m 
CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m CONFIG_NLS_CODEPAGE_775=m @@ -234,13 +217,6 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEBUG_SPINLOCK_SLEEP=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_LIST=y CONFIG_KEYS=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -265,7 +241,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -283,3 +258,7 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m CONFIG_CRC_T10DIF=m CONFIG_CRC7=m +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEBUG_LIST=y diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig index a7072a14d396..8a91f0101134 100644 --- a/arch/mips/configs/bmips_be_defconfig +++ b/arch/mips/configs/bmips_be_defconfig @@ -1,17 +1,16 @@ -CONFIG_BMIPS_GENERIC=y -CONFIG_HIGHMEM=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -# CONFIG_SECCOMP is not set -CONFIG_MIPS_O32_FP64_SUPPORT=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_NO_HZ=y CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_GZIP=y CONFIG_EXPERT=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set +CONFIG_BMIPS_GENERIC=y +CONFIG_HIGHMEM=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +# CONFIG_SECCOMP is not set +CONFIG_MIPS_O32_FP64_SUPPORT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set @@ -32,8 +31,6 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_PRINTK_TIME=y -CONFIG_BRCMSTB_GISB_ARB=y CONFIG_MTD=y CONFIG_MTD_BCM63XX_PARTS=y CONFIG_MTD_CFI=y @@ -50,14 +47,12 @@ CONFIG_USB_USBNET=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_BCM63XX=y CONFIG_SERIAL_BCM63XX_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_BRCMSTB=y CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set CONFIG_USB=y CONFIG_USB_EHCI_HCD=y @@ -79,8 +74,9 @@ CONFIG_CIFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y +# CONFIG_CRYPTO_HW is not set +CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon" -# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig index 47aecb8750e6..39adcca46bb0 100644 --- a/arch/mips/configs/bmips_stb_defconfig +++ b/arch/mips/configs/bmips_stb_defconfig @@ -1,10 +1,3 @@ -CONFIG_BMIPS_GENERIC=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_HIGHMEM=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -# CONFIG_SECCOMP is not set -CONFIG_MIPS_O32_FP64_SUPPORT=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_NO_HZ=y @@ -12,9 +5,13 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set +CONFIG_BMIPS_GENERIC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_HIGHMEM=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +# CONFIG_SECCOMP is not set +CONFIG_MIPS_O32_FP64_SUPPORT=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -23,6 +20,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y 
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_BMIPS_CPUFREQ=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=y @@ -61,7 +61,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_BRCMSTB=y CONFIG_POWER_RESET_SYSCON=y CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set @@ -86,9 +85,9 @@ CONFIG_CIFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y +# CONFIG_CRYPTO_HW is not set CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon" -# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig index bd80b5c852dd..7bf8971af53b 100644 --- a/arch/mips/configs/capcella_defconfig +++ b/arch/mips/configs/capcella_defconfig @@ -1,10 +1,9 @@ -CONFIG_MACH_VR41XX=y -CONFIG_ZAO_CAPCELLA=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_ZAO_CAPCELLA=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -34,18 +33,15 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_PATA_LEGACY=y CONFIG_NETDEVICES=y +CONFIG_8139TOO=y CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m +CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y -CONFIG_8139TOO=y -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_VITESSE_PHY=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -67,9 +63,6 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="mem=32M console=ttyVR0,38400" -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m @@ -77,7 +70,6 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -95,3 +87,5 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m # CONFIG_CRYPTO_HW is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="mem=32M console=ttyVR0,38400" diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index c52d0efacd14..d7abb648b8a0 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -1,13 +1,6 @@ -CONFIG_CAVIUM_OCTEON_SOC=y -CONFIG_CAVIUM_CN63XXP1=y -CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_SMP=y -CONFIG_NR_CPUS=32 -CONFIG_HZ_100=y -CONFIG_PREEMPT=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_IKCONFIG=y @@ -17,14 +10,21 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_CAVIUM_OCTEON_SOC=y +CONFIG_CAVIUM_CN63XXP1=y +CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 +CONFIG_OCTEON_ILM=m +CONFIG_SMP=y +CONFIG_NR_CPUS=32 +CONFIG_HZ_100=y CONFIG_PCI=y CONFIG_PCI_MSI=y -CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_NET=y CONFIG_PACKET=y 
CONFIG_UNIX=y @@ -42,7 +42,6 @@ CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FW_LOADER is not set @@ -52,7 +51,6 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_SLRAM=y -CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y @@ -74,7 +72,6 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -84,10 +81,9 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -99,9 +95,9 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_TOSHIBA is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_MARVELL_PHY=y -CONFIG_BROADCOM_PHY=y CONFIG_BCM87XX_PHY=y +CONFIG_BROADCOM_PHY=y +CONFIG_MARVELL_PHY=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -111,7 +107,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 CONFIG_SERIAL_8250_DW=y -# CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_OCTEON=y CONFIG_SPI=y @@ -159,10 +154,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y -# CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_CRYPTO_CBC=y @@ -172,4 +163,7 @@ CONFIG_CRYPTO_SHA1_OCTEON=m CONFIG_CRYPTO_SHA256_OCTEON=m CONFIG_CRYPTO_SHA512_OCTEON=m CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index 030ff9c205fb..412800d5d7e0 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -1,18 +1,10 @@ -CONFIG_MACH_INGENIC=y -CONFIG_JZ4780_CI20=y -CONFIG_HIGHMEM=y -# CONFIG_COMPACTION is not set -CONFIG_CMA=y -CONFIG_HZ_100=y -CONFIG_PREEMPT=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_KERNEL_XZ=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -20,7 +12,6 @@ CONFIG_CGROUPS=y CONFIG_MEMCG=y CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_NAMESPACES=y @@ -32,8 +23,15 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_MACH_INGENIC=y +CONFIG_JZ4780_CI20=y +CONFIG_HIGHMEM=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set # CONFIG_SUSPEND is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_COMPACTION is not set +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -59,7 +57,6 @@ CONFIG_MTD_UBI=y CONFIG_MTD_UBI_FASTMAP=y 
CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_DM9000=y CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y @@ -76,13 +73,11 @@ CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_LEGACY_PTY_COUNT=2 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=5 @@ -95,7 +90,6 @@ CONFIG_I2C_JZ4780=y CONFIG_SPI=y CONFIG_SPI_GPIO=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_INGENIC=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_JZ4740_WDT=y @@ -166,9 +160,6 @@ CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y -CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig index a9066f300665..20c62841827f 100644 --- a/arch/mips/configs/cobalt_defconfig +++ b/arch/mips/configs/cobalt_defconfig @@ -1,9 +1,8 @@ -CONFIG_MIPS_COBALT=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y +CONFIG_MIPS_COBALT=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -17,7 +16,6 @@ CONFIG_INET=y # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_BLKDEVS=y CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y @@ -28,11 +26,9 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y CONFIG_NET_TULIP=y CONFIG_DE2104X=y CONFIG_TULIP=y -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -72,10 +68,8 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CONFIGFS_FS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFSD=y CONFIG_NFSD_V3=y CONFIG_NFSD_V3_ACL=y -CONFIG_CRC16=y CONFIG_LIBCRC32C=y diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig index 0108bb9f1e37..34633b7611cb 100644 --- a/arch/mips/configs/db1xxx_defconfig +++ b/arch/mips/configs/db1xxx_defconfig @@ -1,41 +1,36 @@ -CONFIG_MIPS_ALCHEMY=y -CONFIG_MIPS_DB1XXX=y -CONFIG_CMA=y -CONFIG_CMA_DEBUG=y -CONFIG_HZ_100=y CONFIG_LOCALVERSION="-db1xxx" CONFIG_KERNEL_XZ=y CONFIG_DEFAULT_HOSTNAME="db1xxx" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y CONFIG_CGROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_SLAB=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_DEFAULT_NOOP=y +CONFIG_MIPS_ALCHEMY=y +CONFIG_HZ_100=y CONFIG_PCI=y -CONFIG_PCI_REALLOC_ENABLE_AUTO=y CONFIG_PCCARD=y CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y -CONFIG_PM=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_DEFAULT_NOOP=y +CONFIG_CMA=y 
+CONFIG_CMA_DEBUG=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=y @@ -78,13 +73,6 @@ CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y CONFIG_IPV6_PIMSM_V2=y CONFIG_BRIDGE=y CONFIG_NETLINK_DIAG=y -CONFIG_IRDA=y -CONFIG_IRLAN=y -CONFIG_IRCOMM=y -CONFIG_IRDA_ULTRA=y -CONFIG_IRDA_CACHE_LAST_LSAP=y -CONFIG_IRDA_FAST_RR=y -CONFIG_AU1000_FIR=y CONFIG_BT=y CONFIG_BT_RFCOMM=y CONFIG_BT_RFCOMM_TTY=y @@ -116,7 +104,6 @@ CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_ATA=y CONFIG_PATA_HPT37X=y CONFIG_PATA_HPT3X2N=y @@ -155,9 +142,9 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y CONFIG_SND_HRTIMER=y CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_SEQUENCER=y CONFIG_SND_AC97_POWER_SAVE=y CONFIG_SND_AC97_POWER_SAVE_DEFAULT=1 CONFIG_SND_SOC=y @@ -180,7 +167,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_MMC=y -CONFIG_MMC_CLKGATE=y CONFIG_SDIO_UART=y CONFIG_MMC_AU1X=y CONFIG_NEW_LEDS=y @@ -188,12 +174,13 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AU1XXX=y -CONFIG_FIRMWARE_MEMMAP=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=y CONFIG_XFS_POSIX_ACL=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y CONFIG_FANOTIFY=y CONFIG_FUSE_FS=y CONFIG_CUSE=y @@ -211,8 +198,6 @@ CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y -CONFIG_F2FS_FS=y -CONFIG_F2FS_FS_SECURITY=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y @@ -232,7 +217,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_2=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y -CONFIG_MAGIC_SYSRQ=y CONFIG_SECURITYFS=y CONFIG_CRYPTO_USER=y CONFIG_CRYPTO_CRYPTD=y @@ -241,3 +225,4 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRC32_SLICEBY4=y CONFIG_FONTS=y CONFIG_FONT_8x8=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig new file mode 100644 index 000000000000..85f1955b4b00 --- /dev/null +++ b/arch/mips/configs/decstation_64_defconfig @@ -0,0 +1,227 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_EXPERT=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_BPF_SYSCALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MACH_DECSTATION=y +CONFIG_64BIT=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_TC=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +# CONFIG_SUSPEND is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +# CONFIG_EFI_PARTITION is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETWORK_SECMARK=y +CONFIG_IP_SCTP=m +CONFIG_VLAN_8021Q=m +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +# CONFIG_WIRELESS is not set 
+# CONFIG_UEVENT_HELPER is not set +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_MTD_MS02NV=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_RAM=m +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_DECLANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_FDDI=y +CONFIG_DEFZA=y +CONFIG_DEFXX=y +# CONFIG_WLAN is not set +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_TGA=y +CONFIG_FB_PMAG_AA=y +CONFIG_FB_PMAG_BA=y +CONFIG_FB_PMAGB_B=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=160 +CONFIG_DUMMY_CONSOLE_ROWS=64 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_INTF_DEV_UIE_EMUL=y +CONFIG_RTC_DRV_CMOS=y +# CONFIG_MIPS_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_MANDATORY_FILE_LOCKING is not set +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_UFS_FS=y +CONFIG_UFS_FS_WRITE=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_UTF8=m +CONFIG_CRYPTO_RSA=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m 
+CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +# CONFIG_CRYPTO_HW is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index e149f78901f8..0c86ed86266a 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -1,17 +1,26 @@ -CONFIG_MACH_DECSTATION=y -CONFIG_CPU_R3000=y CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_LOG_BUF_SHIFT=15 CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_HOTPLUG is not set +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_BPF_SYSCALL=y +# CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MACH_DECSTATION=y +CONFIG_CPU_R3000=y +CONFIG_TC=y +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_TC=y -CONFIG_PM=y +# CONFIG_LBDAF is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +# CONFIG_EFI_PARTITION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -39,37 +48,92 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETWORK_SECMARK=y +CONFIG_IP_SCTP=m CONFIG_VLAN_8021Q=m -CONFIG_CONNECTOR=m +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +# CONFIG_WIRELESS is not set +# CONFIG_UEVENT_HELPER is not set +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_MTD_MS02NV=m CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_RAM=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m CONFIG_ISCSI_TCP=m CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_DECLANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# 
CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set CONFIG_FDDI=y -CONFIG_DEFXX=m -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -# CONFIG_SERIAL_DZ is not set +CONFIG_DEFZA=y +CONFIG_DEFXX=y +# CONFIG_WLAN is not set +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_VSXXXAA=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y +CONFIG_FB_TGA=y +CONFIG_FB_PMAG_AA=y CONFIG_FB_PMAG_BA=y CONFIG_FB_PMAGB_B=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=160 +CONFIG_DUMMY_CONSOLE_ROWS=64 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set # CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_INTF_DEV_UIE_EMUL=y +CONFIG_RTC_DRV_CMOS=y +# CONFIG_MIPS_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -77,30 +141,60 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_FUSE_FS=m +# CONFIG_MANDATORY_FILE_LOCKING is not set +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CONFIGFS_FS=y CONFIG_UFS_FS=y CONFIG_UFS_FS_WRITE=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y +CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_OSF_PARTITION=y -CONFIG_DLM=m -CONFIG_MAGIC_SYSRQ=y -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_ECB=m +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_UTF8=m +CONFIG_CRYPTO_RSA=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -112,6 +206,19 @@ CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_LZO=m 
+CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +# CONFIG_CRYPTO_HW is not set +CONFIG_FRAME_WARN=2048 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig new file mode 100644 index 000000000000..0e54ab2680ce --- /dev/null +++ b/arch/mips/configs/decstation_r4k_defconfig @@ -0,0 +1,224 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_EXPERT=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_BPF_SYSCALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MACH_DECSTATION=y +CONFIG_TC=y +# CONFIG_SUSPEND is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_LBDAF is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +# CONFIG_EFI_PARTITION is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETWORK_SECMARK=y +CONFIG_IP_SCTP=m +CONFIG_VLAN_8021Q=m +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +# CONFIG_WIRELESS is not set +# CONFIG_UEVENT_HELPER is not set +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_MTD_MS02NV=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_RAM=m +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_DECLANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_FDDI=y +CONFIG_DEFZA=y +CONFIG_DEFXX=y +# 
CONFIG_WLAN is not set +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_SERIAL_DZ is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_TGA=y +CONFIG_FB_PMAG_AA=y +CONFIG_FB_PMAG_BA=y +CONFIG_FB_PMAGB_B=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=160 +CONFIG_DUMMY_CONSOLE_ROWS=64 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_INTF_DEV_UIE_EMUL=y +CONFIG_RTC_DRV_CMOS=y +# CONFIG_MIPS_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_MANDATORY_FILE_LOCKING is not set +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_UFS_FS=y +CONFIG_UFS_FS_WRITE=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_UTF8=m +CONFIG_CRYPTO_RSA=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +# CONFIG_CRYPTO_HW is not set +CONFIG_FRAME_WARN=2048 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig index c3ac0209457c..fd82b858a8f0 100644 --- a/arch/mips/configs/e55_defconfig +++ b/arch/mips/configs/e55_defconfig @@ -1,11 +1,9 @@ -CONFIG_MACH_VR41XX=y -CONFIG_CASIO_E55=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_HOTPLUG is not set CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_CASIO_E55=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -16,7 +14,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_PATA_LEGACY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # 
CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -38,4 +35,3 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x1f0,0x3f6,40 mem=8M" -# CONFIG_CRC32 is not set diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 499f51498ecb..8bcb61a6ec15 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -1,39 +1,33 @@ -CONFIG_MACH_LOONGSON64=y -CONFIG_64BIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y CONFIG_LOCALVERSION="-fuloong2e" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y -CONFIG_PID_NS=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_PCSPKR_PLATFORM is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MACH_LOONGSON64=y CONFIG_PCI=y -CONFIG_BINFMT_MISC=y -CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y -CONFIG_PM=y # CONFIG_SUSPEND is not set CONFIG_HIBERNATION=y CONFIG_PM_STD_PARTITION="/dev/sda3" +CONFIG_OPROFILE=m +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BINFMT_MISC=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -42,14 +36,11 @@ CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_MARK=m @@ -78,13 +69,11 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -120,32 +109,30 @@ CONFIG_PATA_VIA=y CONFIG_ATA_GENERIC=y CONFIG_PATA_LEGACY=y CONFIG_NETDEVICES=y +CONFIG_NET_FC=y CONFIG_MACVLAN=m CONFIG_VETH=m +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m +CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y -CONFIG_8139TOO=y -# CONFIG_8139TOO_PIO is not set +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -CONFIG_NET_FC=y CONFIG_INPUT_FF_MEMLESS=y CONFIG_MOUSE_SERIAL=y CONFIG_SERIAL_8250=y @@ -153,7 +140,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 CONFIG_HW_RANDOM=y -CONFIG_RTC=y CONFIG_I2C=m CONFIG_I2C_CHARDEV=m 
CONFIG_I2C_VIAPRO=m @@ -167,9 +153,6 @@ CONFIG_SOUND=y CONFIG_SND=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_VIA82XX=m CONFIG_HIDRAW=y # CONFIG_USB_HID is not set @@ -183,7 +166,6 @@ CONFIG_USB_WUSB_CBAF=m CONFIG_USB_C67X00_HCD=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_ISP1760=m CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m CONFIG_USB_R8A66597_HCD=m @@ -194,16 +176,13 @@ CONFIG_USB_TMC=m CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE_ONETOUCH=y CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_ISP1760=m CONFIG_USB_SEVSEG=m CONFIG_USB_ISIGHTFW=m CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XIP=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=m CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_REISERFS_FS=m @@ -223,33 +202,22 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_OMFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y -CONFIG_SMB_NLS_REMOTE="cp936" CONFIG_CIFS=m -CONFIG_CIFS_STATS=y CONFIG_CIFS_STATS2=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_CIFS_DEBUG2=y -CONFIG_CIFS_EXPERIMENTAL=y -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_936=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_DEBUG_FS=y -CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_PCBC=m @@ -266,3 +234,4 @@ CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC_CCITT=y CONFIG_CRC7=m +# CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig index 99ac1fa3b35f..a3e3eb3c5a8b 100644 --- a/arch/mips/configs/gcw0_defconfig +++ b/arch/mips/configs/gcw0_defconfig @@ -1,14 +1,14 @@ +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_EMBEDDED=y CONFIG_MACH_INGENIC=y CONFIG_JZ4770_GCW0=y CONFIG_HIGHMEM=y -# CONFIG_BOUNCE is not set -CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_SECCOMP is not set -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_EMBEDDED=y -# CONFIG_BLK_DEV_BSG is not set # CONFIG_SUSPEND is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BOUNCE is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 684c9dcba126..7c138dab87df 100644 --- a/arch/mips/configs/generic_defconfig +++ b/arch/mips/configs/generic_defconfig @@ -1,10 +1,3 @@ -CONFIG_MIPS_GENERIC=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_MIPS_CPS=y -CONFIG_CPU_HAS_MSA=y -CONFIG_HIGHMEM=y -CONFIG_NR_CPUS=16 -CONFIG_MIPS_O32_FP64_SUPPORT=y CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y CONFIG_IKCONFIG=y @@ -28,7 +21,11 @@ CONFIG_USERFAULTFD=y CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_MIPS_CPS=y +CONFIG_HIGHMEM=y +CONFIG_NR_CPUS=16 +CONFIG_MIPS_O32_FP64_SUPPORT=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_TRIM_UNUSED_KSYMS=y @@ -43,7 +40,6 @@ CONFIG_NETFILTER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_SCSI=y -# CONFIG_SERIO is not set CONFIG_HW_RANDOM=y # CONFIG_HWMON is not set CONFIG_MFD_SYSCON=y @@ -79,6 +75,12 @@ CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y 
CONFIG_ROOT_NFS=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_REDUCED=y @@ -87,9 +89,3 @@ CONFIG_DEBUG_FS=y # CONFIG_FTRACE is not set CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon" -# CONFIG_XZ_DEC_X86 is not set -# CONFIG_XZ_DEC_POWERPC is not set -# CONFIG_XZ_DEC_IA64 is not set -# CONFIG_XZ_DEC_ARM is not set -# CONFIG_XZ_DEC_ARMTHUMB is not set -# CONFIG_XZ_DEC_SPARC is not set diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 55438fc9991e..9d9af5f923c3 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -1,22 +1,21 @@ -CONFIG_MIPS_ALCHEMY=y -CONFIG_MIPS_GPR=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_PROFILING=y +CONFIG_MIPS_ALCHEMY=y +CONFIG_MIPS_GPR=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PCI=y +CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m CONFIG_NET=y CONFIG_PACKET=y @@ -36,7 +35,6 @@ CONFIG_SYN_COOKIES=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_DSCP=m @@ -59,13 +57,11 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -93,7 +89,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_IP_DCCP=m CONFIG_IP_SCTP=m CONFIG_TIPC=m @@ -106,14 +101,12 @@ CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_DECNET=m CONFIG_LLC2=m -CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m @@ -173,26 +166,50 @@ CONFIG_TIFM_CORE=m CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_FC_ATTRS=m CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y -CONFIG_MARVELL_PHY=m +CONFIG_NET_FC=y +CONFIG_NETCONSOLE=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +CONFIG_ATM_FIRESTREAM=m +CONFIG_ATM_ZATM=m +CONFIG_ATM_NICSTAR=m +CONFIG_ATM_IDT77252=m +CONFIG_ATM_AMBASSADOR=m +CONFIG_ATM_HORIZON=m +CONFIG_ATM_IA=m +CONFIG_ATM_FORE200E=m +CONFIG_ATM_HE=m +CONFIG_ATM_HE_USE_SUNI=y +CONFIG_MIPS_AU1X00_ENET=y +CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_MIPS_AU1X00_ENET=y -CONFIG_ATH_COMMON=y 
+CONFIG_VITESSE_PHY=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y CONFIG_ATH_DEBUG=y CONFIG_ATH5K=y CONFIG_ATH5K_DEBUG=y @@ -212,41 +229,8 @@ CONFIG_DSCC4=m CONFIG_DSCC4_PCISYNC=y CONFIG_DSCC4_PCI_RST=y CONFIG_DLCI=m -CONFIG_WAN_ROUTER_DRIVERS=m -CONFIG_CYCLADES_SYNC=m -CONFIG_CYCLOMX_X25=y CONFIG_LAPBETHER=m CONFIG_X25_ASY=m -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -CONFIG_ATM_FIRESTREAM=m -CONFIG_ATM_ZATM=m -CONFIG_ATM_NICSTAR=m -CONFIG_ATM_IDT77252=m -CONFIG_ATM_AMBASSADOR=m -CONFIG_ATM_HORIZON=m -CONFIG_ATM_IA=m -CONFIG_ATM_FORE200E=m -CONFIG_ATM_HE=m -CONFIG_ATM_HE_USE_SUNI=y -CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPPOATM=m -CONFIG_SLIP=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -CONFIG_SLIP_MODE_SLIP6=y -CONFIG_NET_FC=y -CONFIG_NETCONSOLE=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -258,7 +242,6 @@ CONFIG_HW_RANDOM=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_GPIO=y -CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_SENSORS_LM83=y CONFIG_WATCHDOG=y @@ -283,7 +266,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=m CONFIG_USB_SERIAL=y -CONFIG_USB_EZUSB=y CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_SIERRAWIRELESS=y CONFIG_LEDS_GPIO=y @@ -304,26 +286,16 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_JFFS2_RUBIN=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_850=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto" -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -336,3 +308,7 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto" diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig index 7ddfb4ef9479..ff40fbc2f439 100644 --- a/arch/mips/configs/ip22_defconfig +++ b/arch/mips/configs/ip22_defconfig @@ -1,35 +1,28 @@ -CONFIG_SGI_IP22=y -CONFIG_ARC_CONSOLE=y -CONFIG_CPU_R5000=y +CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_1000=y CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_RELAY=y CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y CONFIG_USER_NS=y -CONFIG_PID_NS=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_RELAY=y CONFIG_EXPERT=y -# CONFIG_HOTPLUG is not set -# CONFIG_PCSPKR_PLATFORM is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_SGI_IP22=y +CONFIG_ARC_CONSOLE=y +CONFIG_CPU_R5000=y +CONFIG_HZ_1000=y +# CONFIG_SUSPEND is not set +CONFIG_PM=y CONFIG_MODULES=y 
CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m -CONFIG_PM=y -# CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -62,12 +55,9 @@ CONFIG_IPV6_MROUTE=y CONFIG_IPV6_PIMSM_V2=y CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -77,7 +67,6 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m @@ -136,21 +125,12 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -159,8 +139,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -222,23 +200,22 @@ CONFIG_SCSI_SPI_ATTRS=m CONFIG_ISCSI_TCP=m CONFIG_SGIWD93_SCSI=y CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m -CONFIG_MACVLAN=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m +CONFIG_MACVLAN=m CONFIG_TUN=m CONFIG_VETH=m +CONFIG_SGISEEQ=y +CONFIG_SMC91X=m +CONFIG_MDIO_BITBANG=m CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m +CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m -CONFIG_MDIO_BITBANG=m -CONFIG_NET_ETHERNET=y -CONFIG_SMC91X=m -CONFIG_SGISEEQ=y CONFIG_HOSTAP=m CONFIG_INPUT_MOUSEDEV=m CONFIG_MOUSE_PS2=m @@ -261,7 +238,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_HIDRAW=y -CONFIG_HID_PID=y CONFIG_RTC_CLASS=y CONFIG_RTC_INTF_DEV_UIE_EMUL=y CONFIG_RTC_DRV_DS1286=y @@ -269,9 +245,6 @@ CONFIG_EXT2_FS=m CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=m -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_QUOTA=y @@ -294,18 +267,13 @@ CONFIG_MINIX_FS=m CONFIG_OMFS_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFSD=m CONFIG_NFSD_V3=y CONFIG_NFSD_V3_ACL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y CONFIG_CIFS=m CONFIG_CIFS_UPCALL=y CONFIG_CODA_FS=m -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m CONFIG_NLS_CODEPAGE_775=m @@ -344,13 +312,8 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_DEBUG_MEMORY_INIT=y CONFIG_KEYS=y -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m @@ -358,13 +321,10 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m 
CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -382,4 +342,4 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC_T10DIF=m -CONFIG_CRC32=m +CONFIG_DEBUG_MEMORY_INIT=y diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 91a9c13e2c82..81c47e18131b 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -1,32 +1,28 @@ -CONFIG_SGI_IP27=y -CONFIG_NUMA=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_1000=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_SGI_IP27=y +CONFIG_NUMA=y +CONFIG_SMP=y +CONFIG_HZ_1000=y CONFIG_PCI=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_PM=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -41,7 +37,6 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y CONFIG_IPV6_OPTIMISTIC_DAD=y @@ -95,12 +90,10 @@ CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_OSD=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_SCSI=y @@ -115,7 +108,6 @@ CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_FC_ATTRS=y -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_BE2ISCSI=m @@ -160,69 +152,56 @@ CONFIG_DM_UEVENT=y CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_VETH=m -CONFIG_PHYLIB=y -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_NATIONAL_PHY=m -CONFIG_STE10XP=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MDIO_BITBANG=m -CONFIG_NET_ETHERNET=y -CONFIG_AX88796=m -CONFIG_AX88796_93CX6=y -CONFIG_SGI_IOC3_ETH=y -CONFIG_SMC91X=m -CONFIG_ETHOC=m -CONFIG_SMSC911X=m -CONFIG_DNET=m -CONFIG_B44=m -CONFIG_KS8851_MLL=m CONFIG_ATL2=m -CONFIG_E1000E=m -CONFIG_IP1000=m -CONFIG_IGB=m -CONFIG_IGBVF=m -CONFIG_VIA_VELOCITY=m -CONFIG_QLA3XXX=m CONFIG_ATL1E=m CONFIG_ATL1C=m -CONFIG_JME=m +CONFIG_B44=m +CONFIG_BNX2X=m CONFIG_ENIC=m +CONFIG_DNET=m +CONFIG_BE2NET=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m CONFIG_IXGBE=m +CONFIG_JME=m +CONFIG_MLX4_EN=m +# CONFIG_MLX4_DEBUG is not set +CONFIG_KS8851_MLL=m CONFIG_VXGE=m +CONFIG_AX88796=m +CONFIG_AX88796_93CX6=y +CONFIG_ETHOC=m +CONFIG_QLA3XXX=m CONFIG_NETXEN_NIC=m +CONFIG_SFC=m +CONFIG_SGI_IOC3_ETH=y +CONFIG_SMC91X=m +CONFIG_SMSC911X=m CONFIG_NIU=m -CONFIG_MLX4_EN=m -# CONFIG_MLX4_DEBUG is not set 
CONFIG_TEHUTI=m -CONFIG_BNX2X=m -CONFIG_SFC=m -CONFIG_BE2NET=m -CONFIG_LIBERTAS_THINFIRM=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m -CONFIG_RTL8180=m +CONFIG_VIA_VELOCITY=m +CONFIG_PHYLIB=y +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_VITESSE_PHY=m CONFIG_ADM8211=m -CONFIG_MWL8K=m -CONFIG_ATH_COMMON=m CONFIG_ATH5K=m CONFIG_ATH9K=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m CONFIG_B43=m CONFIG_B43LEGACY=m # CONFIG_B43LEGACY_DEBUG is not set -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y CONFIG_IPW2100_DEBUG=y @@ -231,12 +210,14 @@ CONFIG_IPW2200_MONITOR=y CONFIG_IPW2200_PROMISCUOUS=y CONFIG_IPW2200_QOS=y CONFIG_IPW2200_DEBUG=y -CONFIG_IWLWIFI=m -CONFIG_IWLAGN=m -CONFIG_IWL4965=y -CONFIG_IWL5000=y +CONFIG_IWL4965=m CONFIG_IWL3945=m -CONFIG_LIBERTAS=m +CONFIG_IWLWIFI=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m CONFIG_HERMES=m # CONFIG_HERMES_CACHE_FW_ON_INIT is not set CONFIG_PLX_HERMES=m @@ -244,13 +225,18 @@ CONFIG_TMD_HERMES=m CONFIG_NORTEL_HERMES=m CONFIG_P54_COMMON=m CONFIG_P54_PCI=m +CONFIG_PRISM54=m +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_MWL8K=m CONFIG_RT2X00=m CONFIG_RT2400PCI=m CONFIG_RT2500PCI=m CONFIG_RT61PCI=m CONFIG_RT2800PCI=m -CONFIG_WL12XX=m +CONFIG_RTL8180=m CONFIG_WL1251=m +CONFIG_WL12XX=m # CONFIG_INPUT is not set CONFIG_SERIO_LIBPS2=m CONFIG_SERIO_RAW=m @@ -262,7 +248,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_I2C_CHARDEV=m CONFIG_I2C_ALI1535=m @@ -285,7 +270,6 @@ CONFIG_I2C_SIMTEC=m CONFIG_I2C_PARPORT_LIGHT=m CONFIG_I2C_TAOS_EVM=m CONFIG_I2C_STUB=m -CONFIG_PPS=m # CONFIG_HWMON is not set CONFIG_THERMAL=m CONFIG_MFD_PCF50633=m @@ -310,12 +294,8 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y @@ -334,17 +314,8 @@ CONFIG_SQUASHFS=m CONFIG_OMFS_FS=m CONFIG_EXOFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_RPCSEC_GSS_KRB5=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_DLM=m -CONFIG_KEYS=y CONFIG_SECURITYFS=y -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -357,7 +328,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -374,5 +344,4 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -CONFIG_CRYPTO_DEV_HIFN_795X=m CONFIG_CRC_T10DIF=m diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig index d0a4c2cfacf8..0921ef38e9fb 100644 --- a/arch/mips/configs/ip28_defconfig +++ b/arch/mips/configs/ip28_defconfig @@ -1,26 +1,24 @@ -CONFIG_SGI_IP28=y -CONFIG_ARC_CONSOLE=y -CONFIG_PREEMPT_VOLUNTARY=y CONFIG_SYSVIPC=y +CONFIG_PREEMPT_VOLUNTARY=y 
CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_HOTPLUG is not set CONFIG_SLAB=y +CONFIG_SGI_IP28=y +CONFIG_ARC_CONSOLE=y +CONFIG_EISA=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +# CONFIG_SUSPEND is not set +CONFIG_PM=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_EISA=y -CONFIG_MIPS32_COMPAT=y -CONFIG_MIPS32_O32=y -CONFIG_MIPS32_N32=y -CONFIG_PM=y -# CONFIG_SUSPEND is not set +CONFIG_PARTITION_ADVANCED=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -43,7 +41,6 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SGIWD93_SCSI=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y CONFIG_SGISEEQ=y # CONFIG_MOUSE_PS2_ALPS is not set # CONFIG_MOUSE_PS2_SYNAPTICS is not set @@ -65,11 +62,8 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAGIC_SYSRQ=y CONFIG_CRYPTO_MANAGER=y # CONFIG_CRYPTO_HW is not set -# CONFIG_CRC32 is not set +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index ebff297328ae..8f6d8af2e3c0 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -1,26 +1,25 @@ -CONFIG_SGI_IP32=y -# CONFIG_SECCOMP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y +CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_PROFILING=y +CONFIG_SGI_IP32=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PCI=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_SGI_PARTITION=y CONFIG_BINFMT_MISC=y -CONFIG_MIPS32_COMPAT=y -CONFIG_MIPS32_O32=y -CONFIG_MIPS32_N32=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -33,7 +32,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -56,24 +54,20 @@ CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SAS_LIBSAS=y -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set CONFIG_SCSI_AIC7XXX=y CONFIG_AIC7XXX_RESET_DELAY_MS=15000 CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_SGI_O2MACE_ETH=y +CONFIG_DUMMY=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=m CONFIG_TULIP_MMIO=y +CONFIG_SGI_O2MACE_ETH=y CONFIG_INPUT_EVDEV=m CONFIG_SERIO_MACEPS2=y CONFIG_SERIO_RAW=y @@ -87,9 +81,6 @@ CONFIG_FIRMWARE_EDID=y CONFIG_FB_GBE=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -100,7 +91,6 @@ CONFIG_RTC_CLASS=y # CONFIG_RTC_INTF_SYSFS is not set # CONFIG_RTC_INTF_PROC is not set CONFIG_RTC_DRV_DS1685_FAMILY=y -CONFIG_RTC_DRV_DS1685=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -124,13 +114,10 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CONFIGFS_FS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y CONFIG_CIFS=m 
-CONFIG_PARTITION_ADVANCED=y -CONFIG_SGI_PARTITION=y CONFIG_NLS=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m @@ -170,7 +157,6 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y CONFIG_KEYS=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_CBC=y @@ -186,7 +172,6 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_TGR192=y CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_ANUBIS=y CONFIG_CRYPTO_ARC4=y CONFIG_CRYPTO_BLOWFISH=y @@ -200,7 +185,9 @@ CONFIG_CRYPTO_SERPENT=y CONFIG_CRYPTO_TEA=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_DEFLATE=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=y CONFIG_CRC_T10DIF=y CONFIG_LIBCRC32C=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 9ad1c94376c8..328d4dfeb4cb 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -1,22 +1,20 @@ -CONFIG_MACH_JAZZ=y -CONFIG_OLIVETTI_M700=y -CONFIG_PREEMPT_VOLUNTARY=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y +CONFIG_MACH_JAZZ=y +CONFIG_OLIVETTI_M700=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m -CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y @@ -25,8 +23,6 @@ CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -41,7 +37,6 @@ CONFIG_INET6_IPCOMP=m CONFIG_IPV6_TUNNEL=m CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -83,20 +78,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -105,7 +92,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -140,7 +126,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE=m CONFIG_DECNET=m CONFIG_NET_SCHED=y @@ -230,24 +215,20 @@ CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m CONFIG_TUN=m +CONFIG_MIPS_JAZZ_SONIC=y +CONFIG_NE2000=m CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m +CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_MIPS_JAZZ_SONIC=y -CONFIG_NET_ISA=y 
-CONFIG_NE2000=m -CONFIG_NET_PCI=y +CONFIG_VITESSE_PHY=m CONFIG_PLIP=m CONFIG_INPUT_FF_MEMLESS=m CONFIG_SERIO_PARKBD=m @@ -297,25 +278,11 @@ CONFIG_ROMFS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_SPKM3=m -CONFIG_SMB_FS=m CONFIG_CIFS=m -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_SMALLDOS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m CONFIG_AFS_FS=m -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m CONFIG_NLS_CODEPAGE_775=m @@ -354,21 +321,14 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST6=m diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig index af12281a5c33..24b96faf9b4e 100644 --- a/arch/mips/configs/jmr3927_defconfig +++ b/arch/mips/configs/jmr3927_defconfig @@ -1,13 +1,10 @@ -CONFIG_MACH_TX39XX=y -CONFIG_TOSHIBA_JMR3927=y -# CONFIG_SECCOMP is not set CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_EXPERT=y -# CONFIG_HOTPLUG is not set -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y +CONFIG_MACH_TX39XX=y +CONFIG_TOSHIBA_JMR3927=y +# CONFIG_SECCOMP is not set CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -27,16 +24,14 @@ CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_TC35815=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y CONFIG_SERIAL_TXX9_CONSOLE=y CONFIG_SERIAL_TXX9_STDSERIAL=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_WATCHDOG=y diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig index 947a35c7c46c..c66ca3785655 100644 --- a/arch/mips/configs/lasat_defconfig +++ b/arch/mips/configs/lasat_defconfig @@ -1,25 +1,23 @@ -CONFIG_LASAT=y -CONFIG_PICVUE=y -CONFIG_PICVUE_PROC=y -CONFIG_DS1603=y -CONFIG_LASAT_SYSCTL=y -CONFIG_HZ_1000=y -# CONFIG_SECCOMP is not set CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set +# CONFIG_KALLSYMS is not set CONFIG_SLAB=y +CONFIG_LASAT=y +CONFIG_PICVUE=y +CONFIG_PICVUE_PROC=y +CONFIG_DS1603=y +CONFIG_LASAT_SYSCTL=y +CONFIG_HZ_1000=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -39,10 +37,7 @@ CONFIG_PATA_CMD64X=y CONFIG_ATA_GENERIC=y CONFIG_PATA_LEGACY=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_PCNET32=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_SERIO_RAW=y @@ -55,7 +50,6 @@ 
CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 02be95c1b712..300127b0f5b7 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -1,48 +1,33 @@ -CONFIG_MACH_LOONGSON64=y -CONFIG_LEMOTE_MACH2F=y -CONFIG_CS5536_MFGPT=y -CONFIG_64BIT=y +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y -CONFIG_KEXEC=y -# CONFIG_SECCOMP is not set -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_AUDIT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_PROFILING=y +CONFIG_MACH_LOONGSON64=y +CONFIG_LEMOTE_MACH2F=y +CONFIG_KEXEC=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="/dev/hda3" CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_IOSCHED_DEADLINE=m -CONFIG_PCI=y CONFIG_BINFMT_MISC=m -CONFIG_MIPS32_COMPAT=y -CONFIG_MIPS32_O32=y -CONFIG_MIPS32_N32=y -CONFIG_PM=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="/dev/hda3" -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_USERSPACE=m -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m -CONFIG_LOONGSON2_CPUFREQ=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -55,11 +40,9 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -CONFIG_ARPD=y CONFIG_SYN_COOKIES=y CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -76,7 +59,6 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m -CONFIG_IPX=m CONFIG_NET_SCHED=y CONFIG_NET_EMATCH=y CONFIG_NET_CLS_ACT=y @@ -91,8 +73,6 @@ CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIVHCI=m CONFIG_CFG80211=m -CONFIG_LIB80211=m -CONFIG_LIB80211_DEBUG=y CONFIG_MAC80211=m CONFIG_MAC80211_LEDS=y CONFIG_RFKILL=m @@ -130,18 +110,14 @@ CONFIG_DM_DELAY=m CONFIG_DM_UEVENT=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_NETCONSOLE=m CONFIG_TUN=m CONFIG_VETH=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_8139TOO=y # CONFIG_8139TOO_PIO is not set CONFIG_R8169=y -CONFIG_R8169_VLAN=y CONFIG_USB_USBNET=m CONFIG_USB_NET_CDC_EEM=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_INPUT_POLLDEV=m CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2_ALPS is not set @@ -149,6 +125,7 @@ CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2_TRACKPOINT is not set CONFIG_MOUSE_APPLETOUCH=m # CONFIG_SERIO_SERPORT is not set +CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_NONSTANDARD=y CONFIG_SERIAL_8250=m # CONFIG_SERIAL_8250_PCI is not set @@ -156,50 +133,10 @@ CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_FOURPORT=y -CONFIG_LEGACY_PTY_COUNT=16 CONFIG_HW_RANDOM=y -CONFIG_RTC=y CONFIG_GPIO_LOONGSON=y CONFIG_THERMAL=y CONFIG_MEDIA_SUPPORT=m -CONFIG_VIDEO_DEV=m -CONFIG_VIDEO_HELPER_CHIPS_AUTO=y -CONFIG_VIDEO_VIVI=m 
-CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_M5602=m -CONFIG_USB_STV06XX=m -CONFIG_USB_GSPCA_CONEX=m -CONFIG_USB_GSPCA_ETOMS=m -CONFIG_USB_GSPCA_FINEPIX=m -CONFIG_USB_GSPCA_MARS=m -CONFIG_USB_GSPCA_MR97310A=m -CONFIG_USB_GSPCA_OV519=m -CONFIG_USB_GSPCA_OV534=m -CONFIG_USB_GSPCA_PAC207=m -CONFIG_USB_GSPCA_PAC7311=m -CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SONIXB=m -CONFIG_USB_GSPCA_SONIXJ=m -CONFIG_USB_GSPCA_SPCA500=m -CONFIG_USB_GSPCA_SPCA501=m -CONFIG_USB_GSPCA_SPCA505=m -CONFIG_USB_GSPCA_SPCA506=m -CONFIG_USB_GSPCA_SPCA508=m -CONFIG_USB_GSPCA_SPCA561=m -CONFIG_USB_GSPCA_SQ905=m -CONFIG_USB_GSPCA_SQ905C=m -CONFIG_USB_GSPCA_STK014=m -CONFIG_USB_GSPCA_SUNPLUS=m -CONFIG_USB_GSPCA_T613=m -CONFIG_USB_GSPCA_TV8532=m -CONFIG_USB_GSPCA_VC032X=m -CONFIG_USB_GSPCA_ZC3XX=m -CONFIG_USB_ET61X251=m -CONFIG_USB_SN9C102=m -CONFIG_USB_ZR364XX=m -CONFIG_USB_STKWEBCAM=m -CONFIG_USB_S2255=m -# CONFIG_RADIO_ADAPTERS is not set CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y @@ -214,27 +151,14 @@ CONFIG_BACKLIGHT_GENERIC=m # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_6x11=y -CONFIG_FONT_7x14=y -CONFIG_FONT_PEARL_8x8=y -CONFIG_FONT_ACORN_8x8=y -CONFIG_FONT_MINI_4x6=y -CONFIG_FONT_SUN8x16=y -CONFIG_FONT_SUN12x22=y -CONFIG_FONT_10x18=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=m CONFIG_SND=m +CONFIG_SND_HRTIMER=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_HRTIMER=m CONFIG_SND_DUMMY=m CONFIG_SND_VIRMIDI=m CONFIG_SND_SERIAL_U16550=m @@ -247,7 +171,6 @@ CONFIG_SND_USB_AUDIO=m CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_HIDRAW=y -CONFIG_USB_HIDDEV=y CONFIG_HID_A4TECH=m CONFIG_HID_APPLE=m CONFIG_HID_BELKIN=m @@ -283,6 +206,7 @@ CONFIG_THRUSTMASTER_FF=y CONFIG_HID_WACOM=m CONFIG_HID_ZEROPLUS=m CONFIG_ZEROPLUS_FF=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DYNAMIC_MINORS=y CONFIG_USB_OTG_WHITELIST=y @@ -292,8 +216,6 @@ CONFIG_USB_EHCI_ROOT_HUB_TT=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m -CONFIG_USB_WHCI_HCD=m -CONFIG_USB_HWA_HCD=m CONFIG_USB_ACM=m CONFIG_USB_PRINTER=m CONFIG_USB_WDM=m @@ -309,18 +231,13 @@ CONFIG_USB_STORAGE_ALAUDA=m CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_GADGET=m -CONFIG_USB_GADGET_M66592=y CONFIG_MMC=m CONFIG_LEDS_CLASS=y CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set -CONFIG_FB_SM7XX=y CONFIG_EXT2_FS=m CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_REISERFS_FS_XATTR=y @@ -349,7 +266,6 @@ CONFIG_SQUASHFS=m CONFIG_SQUASHFS_EMBEDDED=y CONFIG_ROMFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFSD=m CONFIG_NFSD_V4=y @@ -393,32 +309,19 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y -CONFIG_PRINTK_TIME=y -CONFIG_FRAME_WARN=1024 -CONFIG_STRIP_ASM_SYMS=y -CONFIG_DEBUG_FS=y -CONFIG_KEYS=y -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m -CONFIG_CRYPTO_HMAC=m CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m 
CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -435,4 +338,16 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=m -CONFIG_CRC_T10DIF=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_6x11=y +CONFIG_FONT_7x14=y +CONFIG_FONT_PEARL_8x8=y +CONFIG_FONT_ACORN_8x8=y +CONFIG_FONT_MINI_4x6=y +CONFIG_FONT_10x18=y +CONFIG_FONT_SUN8x16=y +CONFIG_FONT_SUN12x22=y +CONFIG_PRINTK_TIME=y +CONFIG_FRAME_WARN=1024 +CONFIG_STRIP_ASM_SYMS=y diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig index 914c867887bd..b064d68a5424 100644 --- a/arch/mips/configs/loongson1b_defconfig +++ b/arch/mips/configs/loongson1b_defconfig @@ -1,10 +1,8 @@ -CONFIG_MACH_LOONGSON32=y -CONFIG_PREEMPT=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_KERNEL_XZ=y CONFIG_SYSVIPC=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_IKCONFIG=y @@ -15,13 +13,15 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set +CONFIG_MACH_LOONGSON32=y +# CONFIG_SECCOMP is not set +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -# CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -43,7 +43,6 @@ CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_LOONGSON1=y CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=y CONFIG_SCSI=m @@ -67,7 +66,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_LEGACY_PTY_COUNT=8 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -116,8 +114,9 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m +# CONFIG_CRYPTO_ECHAINIV is not set +# CONFIG_CRYPTO_HW is not set CONFIG_DYNAMIC_DEBUG=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y @@ -125,5 +124,3 @@ CONFIG_MAGIC_SYSRQ=y # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set # CONFIG_EARLY_PRINTK is not set -# CONFIG_CRYPTO_ECHAINIV is not set -# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig index 68e42eff908e..5d76559b56cd 100644 --- a/arch/mips/configs/loongson1c_defconfig +++ b/arch/mips/configs/loongson1c_defconfig @@ -1,11 +1,8 @@ -CONFIG_MACH_LOONGSON32=y -CONFIG_LOONGSON1_LS1C=y -CONFIG_PREEMPT=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_KERNEL_XZ=y CONFIG_SYSVIPC=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_IKCONFIG=y @@ -16,13 +13,16 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set +CONFIG_MACH_LOONGSON32=y +CONFIG_LOONGSON1_LS1C=y +# CONFIG_SECCOMP is not set +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -# CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -44,7 +44,6 @@ CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_NAND=y 
-CONFIG_MTD_NAND_LOONGSON1=y CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=y CONFIG_SCSI=m @@ -68,7 +67,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_LEGACY_PTY_COUNT=8 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -117,8 +115,9 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m +# CONFIG_CRYPTO_ECHAINIV is not set +# CONFIG_CRYPTO_HW is not set CONFIG_DYNAMIC_DEBUG=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y @@ -126,5 +125,3 @@ CONFIG_MAGIC_SYSRQ=y # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set # CONFIG_EARLY_PRINTK is not set -# CONFIG_CRYPTO_ECHAINIV is not set -# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 324dfee23dfb..1322adb705c8 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -1,15 +1,3 @@ -CONFIG_MACH_LOONGSON64=y -CONFIG_SWIOTLB=y -CONFIG_LOONGSON_MACH3X=y -CONFIG_CPU_LOONGSON3=y -CONFIG_64BIT=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_KSM=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -CONFIG_HZ_256=y -CONFIG_PREEMPT=y -CONFIG_KEXEC=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_KERNEL_LZMA=y CONFIG_SYSVIPC=y @@ -17,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -24,40 +13,38 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_CPUSETS=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y +CONFIG_CPUSETS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y +CONFIG_MACH_LOONGSON64=y +CONFIG_LOONGSON_MACH3X=y +CONFIG_SMP=y +CONFIG_HZ_256=y +CONFIG_KEXEC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_BLK_DEV_INTEGRITY=y CONFIG_PARTITION_ADVANCED=y CONFIG_IOSCHED_DEADLINE=m CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_PCI=y -CONFIG_HT_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -# CONFIG_PCIEAER is not set -CONFIG_PCIEASPM_PERFORMANCE=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_SHPC=m CONFIG_BINFMT_MISC=m -CONFIG_MIPS32_COMPAT=y -CONFIG_MIPS32_O32=y -CONFIG_MIPS32_N32=y -CONFIG_PM=y +CONFIG_KSM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -123,7 +110,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m @@ -164,7 +150,6 @@ CONFIG_TUN=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_CHELSIO is not set @@ -173,14 +158,13 @@ CONFIG_TUN=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set # CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_I825XX is not set CONFIG_E1000=y 
CONFIG_E1000E=y CONFIG_IGB=y CONFIG_IXGB=y CONFIG_IXGBE=y -# CONFIG_NET_VENDOR_I825XX is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -188,12 +172,11 @@ CONFIG_IXGBE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_RDC is not set CONFIG_8139CP=m CONFIG_8139TOO=m CONFIG_R8169=y -# CONFIG_NET_VENDOR_RDC is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -215,7 +198,6 @@ CONFIG_PPPOE=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -CONFIG_ATH_CARDS=m CONFIG_ATH9K=m CONFIG_HOSTAP=m CONFIG_INPUT_POLLDEV=m @@ -296,9 +278,6 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y # CONFIG_PRINT_QUOTA_WARNING is not set CONFIG_AUTOFS4_FS=y @@ -327,13 +306,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_936=y CONFIG_NLS_ASCII=y CONFIG_NLS_UTF8=y -CONFIG_PRINTK_TIME=y -CONFIG_FRAME_WARN=1024 -CONFIG_STRIP_ASM_SYMS=y -CONFIG_MAGIC_SYSRQ=y -# CONFIG_SCHED_DEBUG is not set -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y @@ -345,7 +317,6 @@ CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -357,3 +328,10 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m +CONFIG_PRINTK_TIME=y +CONFIG_FRAME_WARN=1024 +CONFIG_STRIP_ASM_SYMS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 81058295d35f..0ee5e677662e 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -1,9 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_NR_CPUS=8 -CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -13,11 +7,17 @@ CONFIG_RELAY=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -58,8 +58,6 @@ CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -124,7 +122,6 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -139,7 +136,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -291,26 +287,26 @@ CONFIG_CHELSIO_T3=m CONFIG_AX88796=m CONFIG_NETXEN_NIC=m CONFIG_TC35815=m 
-CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y +CONFIG_PRISM54=m CONFIG_LIBERTAS=m CONFIG_INPUT_MOUSEDEV=y CONFIG_MOUSE_PS2_ELANTECH=y @@ -331,7 +327,6 @@ CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_REISERFS_FS_XATTR=y @@ -411,14 +406,12 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -432,4 +425,3 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 5c10cddc39d3..041bffac043b 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -1,9 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_NR_CPUS=8 -CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -14,11 +8,21 @@ CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_PCI=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y +CONFIG_VHOST_NET=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -59,8 +63,6 @@ CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -125,7 +127,6 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -140,7 +141,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -174,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_SCTP=m CONFIG_BRIDGE=m @@ -219,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -254,7 +251,6 @@ CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y 
CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -297,32 +293,31 @@ CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_TUN=m CONFIG_VETH=m -CONFIG_VHOST_NET=m CONFIG_PCNET32=y CONFIG_CHELSIO_T3=m CONFIG_AX88796=m CONFIG_NETXEN_NIC=m CONFIG_TC35815=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y +CONFIG_PRISM54=m CONFIG_LIBERTAS=m CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y @@ -422,16 +417,12 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_ENABLE_DEFAULT_TRACERS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -445,9 +436,5 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_MIPS_DYN_TRANS=y -CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index bb694f5065f1..511065e62182 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -1,10 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_KVM_GUEST=y -CONFIG_PAGE_SIZE_16KB=y -# CONFIG_MIPS_MT_SMP is not set -CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -15,11 +8,18 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_KVM_GUEST=y +CONFIG_PAGE_SIZE_16KB=y +# CONFIG_MIPS_MT_SMP is not set +CONFIG_HZ_100=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -60,8 +60,6 @@ CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -126,7 +124,6 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -141,7 +138,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -175,7 +171,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_SCTP=m CONFIG_BRIDGE=m @@ -220,8 +215,6 @@ CONFIG_NET_ACT_SKBEDIT=m 
CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -256,7 +249,6 @@ CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -305,26 +297,26 @@ CONFIG_CHELSIO_T3=m CONFIG_AX88796=m CONFIG_NETXEN_NIC=m CONFIG_TC35815=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y +CONFIG_PRISM54=m CONFIG_LIBERTAS=m CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y @@ -426,14 +418,12 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -447,5 +437,3 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig index 5b5306b80576..299088043164 100644 --- a/arch/mips/configs/malta_qemu_32r6_defconfig +++ b/arch/mips/configs/malta_qemu_32r6_defconfig @@ -1,8 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R6=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -13,12 +8,17 @@ CONFIG_LOG_BUF_SHIFT=15 CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R6=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_100=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_PCI=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y CONFIG_PACKET=y @@ -100,7 +100,6 @@ CONFIG_PCNET32=y # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -110,10 +109,9 @@ CONFIG_PCNET32=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -157,7 +155,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y @@ -175,12 +172,9 @@ 
CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m -# CONFIG_FTRACE is not set -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -191,5 +185,5 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig index 85543599448f..2b4b3a24f637 100644 --- a/arch/mips/configs/maltaaprp_defconfig +++ b/arch/mips/configs/maltaaprp_defconfig @@ -1,9 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_MIPS_VPE_LOADER=y -CONFIG_MIPS_VPE_APSP_API=y -CONFIG_HZ_100=y CONFIG_LOCALVERSION="aprp" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -14,12 +8,19 @@ CONFIG_LOG_BUF_SHIFT=15 CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_VPE_LOADER=y +CONFIG_MIPS_VPE_APSP_API=y +CONFIG_NR_CPUS=2 +CONFIG_HZ_100=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_PCI=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y CONFIG_PACKET=y @@ -101,7 +102,6 @@ CONFIG_PCNET32=y # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -111,10 +111,9 @@ CONFIG_PCNET32=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -157,7 +156,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y @@ -175,12 +173,9 @@ CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m -# CONFIG_FTRACE is not set -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -191,5 +186,5 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig index 067bb84ac916..425ddfd7cd78 100644 --- a/arch/mips/configs/maltasmvp_defconfig +++ b/arch/mips/configs/maltasmvp_defconfig @@ -1,11 +1,3 @@ -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_SCHED_SMT=y -CONFIG_MIPS_CPS=y -CONFIG_NR_CPUS=8 -CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -16,12 +8,20 @@ CONFIG_LOG_BUF_SHIFT=15 CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y 
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -101,7 +101,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -111,10 +110,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -159,9 +157,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -179,12 +174,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -195,5 +187,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index dfc78c3172a3..8beaa7ba1e52 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -1,12 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MIPS32_3_5_FEATURES=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CPS=y
-CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@@ -17,12 +8,21 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -104,7 +104,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -114,10 +113,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -161,7 +159,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -179,12 +176,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -195,5 +189,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 50a2288c69f8..6e8b95ceb54a 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -1,7 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_HZ_100=y
CONFIG_LOCALVERSION="up"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -13,12 +9,17 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -100,7 +101,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -110,10 +110,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -156,7 +155,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -174,12 +172,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -190,5 +185,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 99a19cf5f9ba..6c026db96ff9 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -1,10 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MIPS32_R5_FEATURES=y
-CONFIG_CPU_MIPS32_R5_XPA=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y @@ -16,11 +9,19 @@ CONFIG_RELAY=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPS32_R5_FEATURES=y +CONFIG_CPU_MIPS32_R5_XPA=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_NR_CPUS=2 +CONFIG_HZ_100=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -61,8 +62,6 @@ CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -125,7 +124,6 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -140,7 +138,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -300,26 +297,26 @@ CONFIG_CHELSIO_T3=m CONFIG_AX88796=m CONFIG_NETXEN_NIC=m CONFIG_TC35815=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y +CONFIG_PRISM54=m CONFIG_LIBERTAS=m CONFIG_INPUT_MOUSEDEV=y CONFIG_MOUSE_PS2_ELANTECH=y @@ -425,7 +422,6 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -439,5 +435,3 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig index 43ce6576ab1c..ae93a94f8c71 100644 --- a/arch/mips/configs/markeins_defconfig +++ b/arch/mips/configs/markeins_defconfig @@ -1,21 +1,19 @@ -CONFIG_NEC_MARKEINS=y -CONFIG_HZ_1000=y -CONFIG_PREEMPT=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y +CONFIG_NEC_MARKEINS=y +CONFIG_HZ_1000=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_PCI=y -CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -82,20 +80,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m 
-CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -104,7 +94,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -134,23 +123,18 @@ CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_SG=m CONFIG_SCSI_SCAN_ASYNC=y -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set CONFIG_SCSI_AIC94XX=m # CONFIG_AIC94XX_DEBUG is not set CONFIG_NETDEVICES=y CONFIG_TUN=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_NET_PCI=y +CONFIG_CHELSIO_T3=m CONFIG_NATSEMI=y CONFIG_QLA3XXX=m -CONFIG_CHELSIO_T3=m CONFIG_NETXEN_NIC=m CONFIG_PPP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -182,20 +166,15 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_NLS_DEFAULT="" CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS0,115200 mem=192m ip=bootp root=/dev/nfs rw" CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -203,3 +182,5 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_FCRYPT=m +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0,115200 mem=192m ip=bootp root=/dev/nfs rw" diff --git a/arch/mips/configs/mips_paravirt_defconfig b/arch/mips/configs/mips_paravirt_defconfig index accf0db1dc6f..8dc5d96a08de 100644 --- a/arch/mips/configs/mips_paravirt_defconfig +++ b/arch/mips/configs/mips_paravirt_defconfig @@ -1,11 +1,5 @@ -CONFIG_MIPS_PARAVIRT=y -CONFIG_CPU_MIPS64_R2=y -CONFIG_64BIT=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_SMP=y -CONFIG_HZ_1000=y -CONFIG_PREEMPT=y CONFIG_SYSVIPC=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_IKCONFIG=y @@ -15,13 +9,18 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_MIPS_PARAVIRT=y +CONFIG_CPU_MIPS64_R2=y +CONFIG_64BIT=y +CONFIG_SMP=y +CONFIG_HZ_1000=y CONFIG_PCI=y -CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -39,7 +38,6 @@ CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set @@ -58,9 +56,9 @@ CONFIG_VIRTIO_NET=y # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PHYLIB=y -CONFIG_MARVELL_PHY=y -CONFIG_BROADCOM_PHY=y CONFIG_BCM87XX_PHY=y +CONFIG_BROADCOM_PHY=y +CONFIG_MARVELL_PHY=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -90,13 +88,12 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_DES=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set # CONFIG_FTRACE is not set -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y -# 
CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig index 3486b034f726..d4e038802510 100644 --- a/arch/mips/configs/mpc30x_defconfig +++ b/arch/mips/configs/mpc30x_defconfig @@ -1,11 +1,10 @@ -CONFIG_MACH_VR41XX=y -CONFIG_VICTOR_MPC30X=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_VICTOR_MPC30X=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y @@ -31,7 +30,6 @@ CONFIG_ATA=y CONFIG_PATA_LEGACY=y CONFIG_NETDEVICES=y CONFIG_USB_PEGASUS=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -53,4 +51,3 @@ CONFIG_CONFIGFS_FS=m CONFIG_NFS_FS=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="mem=32M console=ttyVR0,19200 ide0=0x170,0x376,73" -# CONFIG_CRC32 is not set diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig index 3c8c16b10732..0fdc03fda12e 100644 --- a/arch/mips/configs/msp71xx_defconfig +++ b/arch/mips/configs/msp71xx_defconfig @@ -1,21 +1,21 @@ -CONFIG_PMC_MSP=y -CONFIG_PMC_MSP7120_GW=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_PREEMPT=y CONFIG_LOCALVERSION="-pmc" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y +CONFIG_PREEMPT=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_SHMEM is not set CONFIG_SLAB=y +CONFIG_PMC_MSP=y +CONFIG_PMC_MSP7120_GW=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_NR_CPUS=2 +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set -CONFIG_PCI=y CONFIG_NET=y CONFIG_UNIX=y CONFIG_XFRM_USER=y @@ -47,18 +47,15 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y CONFIG_PPP=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -# CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_CHARDEV=y @@ -80,6 +77,3 @@ CONFIG_SQUASHFS_EMBEDDED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_AES=y diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index c3d0d0a6e044..16bef819fe98 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -1,31 +1,45 @@ -CONFIG_MIPS_ALCHEMY=y -CONFIG_MIPS_MTX1=y -CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_AUDIT=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_MIPS_ALCHEMY=y +CONFIG_MIPS_MTX1=y CONFIG_PCI=y CONFIG_PCCARD=m CONFIG_YENTA=m CONFIG_PD6729=m CONFIG_I82092=m +CONFIG_OPROFILE=m +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_ICS=y 
+CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y CONFIG_BINFMT_MISC=m -CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y @@ -38,8 +52,6 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -57,7 +69,6 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_TUNNEL=m CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_DSCP=m @@ -81,13 +92,11 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -128,7 +137,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_IP_DCCP=m CONFIG_IP_SCTP=m CONFIG_TIPC=m @@ -141,14 +149,12 @@ CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_DECNET=m CONFIG_LLC2=m -CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m @@ -191,30 +197,6 @@ CONFIG_BPQETHER=m CONFIG_BAYCOM_SER_FDX=m CONFIG_BAYCOM_SER_HDX=m CONFIG_YAM=m -CONFIG_IRDA=m -CONFIG_IRLAN=m -CONFIG_IRNET=m -CONFIG_IRCOMM=m -CONFIG_IRDA_ULTRA=y -CONFIG_IRDA_CACHE_LAST_LSAP=y -CONFIG_IRDA_FAST_RR=y -CONFIG_IRDA_DEBUG=y -CONFIG_IRTTY_SIR=m -CONFIG_DONGLE=y -CONFIG_ESI_DONGLE=m -CONFIG_ACTISYS_DONGLE=m -CONFIG_TEKRAM_DONGLE=m -CONFIG_LITELINK_DONGLE=m -CONFIG_MA600_DONGLE=m -CONFIG_GIRBIL_DONGLE=m -CONFIG_MCP2120_DONGLE=m -CONFIG_OLD_BELKIN_DONGLE=m -CONFIG_ACT200L_DONGLE=m -CONFIG_USB_IRDA=m -CONFIG_SIGMATEL_FIR=m -CONFIG_TOSHIBA_FIR=m -CONFIG_VLSI_FIR=m -CONFIG_MCS_FIR=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -231,7 +213,6 @@ CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIDTL1=m CONFIG_BT_HCIBT3C=m CONFIG_BT_HCIBLUECARD=m -CONFIG_BT_HCIBTUART=m CONFIG_BT_HCIVHCI=m CONFIG_CONNECTOR=m CONFIG_MTD=y @@ -248,18 +229,18 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_FC_ATTRS=m CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m +CONFIG_NET_FC=y +CONFIG_NETCONSOLE=m CONFIG_TUN=m CONFIG_ARCNET=m CONFIG_ARCNET_1201=m @@ -271,20 +252,33 @@ CONFIG_ARCNET_COM90xxIO=m CONFIG_ARCNET_RIM_I=m CONFIG_ARCNET_COM20020=m CONFIG_ARCNET_COM20020_PCI=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_CASSINI=m 
-CONFIG_NET_VENDOR_3COM=y +CONFIG_ARCNET_COM20020_CS=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +CONFIG_ATM_FIRESTREAM=m +CONFIG_ATM_ZATM=m +CONFIG_ATM_NICSTAR=m +CONFIG_ATM_IDT77252=m +CONFIG_ATM_AMBASSADOR=m +CONFIG_ATM_HORIZON=m +CONFIG_ATM_IA=m +CONFIG_ATM_FORE200E=m +CONFIG_ATM_HE=m +CONFIG_ATM_HE_USE_SUNI=y +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m CONFIG_VORTEX=m CONFIG_TYPHOON=m +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_ACENIC=m +CONFIG_AMD8111_ETH=m +CONFIG_PCNET32=m +CONFIG_PCMCIA_NMCLAN=m +CONFIG_B44=m +CONFIG_BNX2=m +CONFIG_TIGON3=m +CONFIG_CHELSIO_T1=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=m @@ -293,49 +287,69 @@ CONFIG_WINBOND_840=m CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +CONFIG_PCMCIA_FMVJ18X=m CONFIG_HP100=m -CONFIG_NET_PCI=y -CONFIG_PCNET32=m -CONFIG_AMD8111_ETH=m -CONFIG_ADAPTEC_STARFIRE=m -CONFIG_B44=m -CONFIG_FORCEDETH=m CONFIG_E100=m +CONFIG_E1000=m +CONFIG_IXGB=m +CONFIG_SKGE=m +CONFIG_SKY2=m +CONFIG_MYRI10GE=m CONFIG_FEALNX=m CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_S2IO=m +CONFIG_PCMCIA_AXNET=m CONFIG_NE2K_PCI=m +CONFIG_PCMCIA_PCNET=m +CONFIG_FORCEDETH=m +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_QLA3XXX=m CONFIG_8139CP=m CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set CONFIG_8139TOO_8129=y +CONFIG_R8169=m CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_PCMCIA_SMC91C92=m CONFIG_EPIC100=m -CONFIG_SUNDANCE=m +CONFIG_HAPPYMEAL=m +CONFIG_SUNGEM=m +CONFIG_CASSINI=m CONFIG_TLAN=m CONFIG_VIA_RHINE=m -CONFIG_ACENIC=m -CONFIG_DL2K=m -CONFIG_E1000=m -CONFIG_NS83820=m -CONFIG_HAMACHI=m -CONFIG_YELLOWFIN=m -CONFIG_R8169=m -CONFIG_R8169_VLAN=y -CONFIG_SIS190=m -CONFIG_SKGE=m -CONFIG_SKY2=m CONFIG_VIA_VELOCITY=m -CONFIG_TIGON3=m -CONFIG_BNX2=m -CONFIG_QLA3XXX=m -CONFIG_CHELSIO_T1=m -CONFIG_IXGB=m -CONFIG_S2IO=m -CONFIG_MYRI10GE=m -CONFIG_IBMOL=m -CONFIG_IBMLS=m -CONFIG_TMSPCI=m -CONFIG_ABYSS=m +CONFIG_PCMCIA_XIRC2PS=m +CONFIG_FDDI=y +CONFIG_DEFXX=m +CONFIG_SKFP=m +CONFIG_HIPPI=y +CONFIG_ROADRUNNER=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m @@ -349,16 +363,6 @@ CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y CONFIG_USB_EPSON2888=y CONFIG_USB_SIERRA_NET=m -CONFIG_NET_PCMCIA=y -CONFIG_PCMCIA_3C589=m -CONFIG_PCMCIA_3C574=m -CONFIG_PCMCIA_FMVJ18X=m -CONFIG_PCMCIA_PCNET=m -CONFIG_PCMCIA_NMCLAN=m -CONFIG_PCMCIA_SMC91C92=m -CONFIG_PCMCIA_XIRC2PS=m -CONFIG_PCMCIA_AXNET=m -CONFIG_ARCNET_COM20020_CS=m CONFIG_WAN=y CONFIG_LANMEDIA=m CONFIG_HDLC=m @@ -375,46 +379,8 @@ CONFIG_DSCC4=m CONFIG_DSCC4_PCISYNC=y CONFIG_DSCC4_PCI_RST=y CONFIG_DLCI=m -CONFIG_WAN_ROUTER_DRIVERS=m -CONFIG_CYCLADES_SYNC=m -CONFIG_CYCLOMX_X25=y CONFIG_LAPBETHER=m CONFIG_X25_ASY=m -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -CONFIG_ATM_FIRESTREAM=m -CONFIG_ATM_ZATM=m -CONFIG_ATM_NICSTAR=m -CONFIG_ATM_IDT77252=m -CONFIG_ATM_AMBASSADOR=m -CONFIG_ATM_HORIZON=m -CONFIG_ATM_IA=m -CONFIG_ATM_FORE200E=m -CONFIG_ATM_HE=m -CONFIG_ATM_HE_USE_SUNI=y -CONFIG_FDDI=y -CONFIG_DEFXX=m -CONFIG_SKFP=m -CONFIG_HIPPI=y -CONFIG_ROADRUNNER=m -CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m 
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -440,7 +406,6 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
-CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_ADM1021=m
CONFIG_SENSORS_ADM1025=m
@@ -453,6 +418,7 @@ CONFIG_SENSORS_F71805F=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_LM63=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
@@ -463,7 +429,6 @@ CONFIG_SENSORS_LM85=m
CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_PC87360=m
CONFIG_SENSORS_PCF8591=m
CONFIG_SENSORS_SIS5595=m
@@ -491,23 +456,17 @@ CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
CONFIG_SND_AD1889=m
-CONFIG_SND_ALS300=m
-CONFIG_SND_ALI5451=m
CONFIG_SND_ATIIXP=m
CONFIG_SND_ATIIXP_MODEM=m
CONFIG_SND_AU8810=m
CONFIG_SND_AU8820=m
CONFIG_SND_AU8830=m
-CONFIG_SND_AZT3328=m
CONFIG_SND_BT87X=m
CONFIG_SND_CA0106=m
CONFIG_SND_CMIPCI=m
@@ -525,22 +484,15 @@ CONFIG_SND_ECHO3G=m
CONFIG_SND_INDIGO=m
CONFIG_SND_INDIGOIO=m
CONFIG_SND_INDIGODJ=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_EMU10K1X=m
CONFIG_SND_ENS1370=m
CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
CONFIG_SND_FM801=m
-CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
-CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_INTEL8X0=m
CONFIG_SND_INTEL8X0M=m
CONFIG_SND_KORG1212=m
-CONFIG_SND_MAESTRO3=m
CONFIG_SND_MIXART=m
CONFIG_SND_NM256=m
CONFIG_SND_PCXHR=m
@@ -548,16 +500,14 @@ CONFIG_SND_RIPTIDE=m
CONFIG_SND_RME32=m
CONFIG_SND_RME96=m
CONFIG_SND_RME9652=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_TRIDENT=m
CONFIG_SND_VIA82XX=m
CONFIG_SND_VIA82XX_MODEM=m
CONFIG_SND_VX222=m
CONFIG_SND_YMFPCI=m
+CONFIG_SND_HDA_INTEL=m
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_VXPOCKET=m
CONFIG_SND_PDAUDIOCF=m
-CONFIG_SOUND_PRIME=m
CONFIG_USB_HIDDEV=y
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
@@ -566,7 +516,7 @@ CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD_PLATFORM=m
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
@@ -595,7 +545,6 @@ CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_FUNSOFT=m
CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
CONFIG_USB_SERIAL_IR=m
@@ -612,7 +561,6 @@ CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
@@ -641,7 +589,6 @@ CONFIG_USB_CXACRU=m
CONFIG_USB_UEAGLEATM=m
CONFIG_USB_XUSBATM=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_NET2280=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
@@ -677,7 +624,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
@@ -692,48 +638,18 @@ CONFIG_VFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_XATTR=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=y
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_ACORN_PARTITION=y
-CONFIG_ACORN_PARTITION_ICS=y
-CONFIG_ACORN_PARTITION_RISCIX=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_ATARI_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_SGI_PARTITION=y
-CONFIG_ULTRIX_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=m
@@ -774,18 +690,11 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -795,3 +704,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index e8e1dd8e0e99..72a211d2d556 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -1,32 +1,35 @@
-CONFIG_NLM_XLP_BOARD=y
-CONFIG_64BIT=y
-CONFIG_PAGE_SIZE_16KB=y
-# CONFIG_HW_PERF_EVENTS is not set
-CONFIG_KSM=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_SMP=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_NLM_XLP_BOARD=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_HW_PERF_EVENTS is not set
+CONFIG_SMP=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PCI_STUB=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -49,19 +52,11 @@ CONFIG_SGI_PARTITION=y
CONFIG_ULTRIX_PARTITION=y
CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SYSV68_PARTITION=y
-CONFIG_PCI=y
-CONFIG_PCI_DEBUG=y
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
-CONFIG_PCI_STUB=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
-CONFIG_PM=y
-CONFIG_PM_DEBUG=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_NET=y
CONFIG_PACKET=y CONFIG_UNIX=y @@ -93,7 +88,6 @@ CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m @@ -104,12 +98,10 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_SIT=m CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_NETLABEL=y CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -120,7 +112,6 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m @@ -180,18 +171,12 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -201,8 +186,6 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -238,7 +221,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_DCCP=m CONFIG_RDS=m @@ -254,14 +236,12 @@ CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_DECNET=m CONFIG_LLC2=m -CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_WAN_ROUTER=m CONFIG_PHONET=m CONFIG_IEEE802154=m CONFIG_NET_SCHED=y @@ -324,7 +304,6 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=y @@ -335,7 +314,6 @@ CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -343,7 +321,6 @@ CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m @@ -368,10 +345,9 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set # CONFIG_NET_VENDOR_HP is not set -CONFIG_E1000E=y # CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000E=y CONFIG_SKY2=y # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -379,10 +355,9 @@ CONFIG_SKY2=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -394,7 +369,6 @@ CONFIG_SKY2=y # 
CONFIG_NET_VENDOR_TOSHIBA is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m # CONFIG_INPUT_KEYBOARD is not set @@ -403,12 +377,9 @@ CONFIG_SERIO_SERPORT=m CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_LEGACY_PTY_COUNT=0 CONFIG_SERIAL_NONSTANDARD=y CONFIG_N_HDLC=m -# CONFIG_DEVKMEM is not set -CONFIG_STALDRV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=48 @@ -430,7 +401,6 @@ CONFIG_THERMAL=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1374=y CONFIG_UIO=y -CONFIG_UIO_PDRV=m CONFIG_UIO_PDRV_GENIRQ=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y @@ -440,9 +410,6 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_GFS2_FS=m CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y @@ -487,7 +454,7 @@ CONFIG_UFS_FS=m CONFIG_EXOFS_FS=m CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_FSCACHE=y CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y @@ -498,14 +465,6 @@ CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_CIFS_DFS_UPCALL=y -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m CONFIG_AFS_FS=m CONFIG_NLS=y @@ -547,19 +506,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=1024 -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_SCHED_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KGDB=y CONFIG_SECURITY=y CONFIG_LSM_MMAP_MIN_ADDR=0 CONFIG_SECURITY_SELINUX=y @@ -568,10 +514,8 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SMACK=y CONFIG_SECURITY_TOMOYO=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m @@ -585,8 +529,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -602,5 +544,15 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -CONFIG_CRC_CCITT=m CONFIG_CRC7=m +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KGDB=y diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index c4477a4d40c1..4ecb157e56d4 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -1,47 +1,60 @@ -CONFIG_NLM_XLR_BOARD=y -CONFIG_HIGHMEM=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_KEXEC=y -CONFIG_CROSS_COMPILE="" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y 
+CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y CONFIG_NAMESPACES=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_INITRAMFS_COMPRESSION_GZIP=y CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y # CONFIG_ELF_CORE is not set +CONFIG_KALLSYMS_ALL=y # CONFIG_PERF_EVENTS is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y +CONFIG_NLM_XLR_BOARD=y +CONFIG_HIGHMEM=y +CONFIG_SMP=y +CONFIG_KEXEC=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCI_DEBUG=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_PCI=y -CONFIG_PCI_MSI=y -CONFIG_PCI_DEBUG=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_ICS=y +CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SYSV68_PARTITION=y CONFIG_BINFMT_MISC=m -CONFIG_PM=y -CONFIG_PM_DEBUG=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -73,7 +86,6 @@ CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m @@ -84,12 +96,10 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_SIT=m CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_NETLABEL=y CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -100,7 +110,6 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m @@ -160,19 +169,12 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -182,8 +184,6 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -219,7 +219,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_DCCP=m CONFIG_RDS=m @@ -235,14 +234,12 @@ CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_DECNET=m CONFIG_LLC2=m -CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_WAN_ROUTER=m CONFIG_PHONET=m 
CONFIG_IEEE802154=m CONFIG_NET_SCHED=y @@ -295,7 +292,6 @@ CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=y @@ -307,7 +303,6 @@ CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -315,7 +310,6 @@ CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m @@ -327,7 +321,6 @@ CONFIG_SCSI_OSD_ULD=m CONFIG_NETDEVICES=y CONFIG_E1000E=y CONFIG_SKY2=y -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m # CONFIG_INPUT_KEYBOARD is not set @@ -336,12 +329,9 @@ CONFIG_SERIO_SERPORT=m CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_LEGACY_PTY_COUNT=0 CONFIG_SERIAL_NONSTANDARD=y CONFIG_N_HDLC=m -# CONFIG_DEVKMEM is not set -CONFIG_STALDRV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=48 @@ -354,13 +344,12 @@ CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_RAW_DRIVER=m CONFIG_I2C=y CONFIG_I2C_XLR=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1374=y # CONFIG_HWMON is not set # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_DS1374=y CONFIG_UIO=y -CONFIG_UIO_PDRV=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y @@ -369,11 +358,7 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y @@ -420,9 +405,8 @@ CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EXOFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_FSCACHE=y CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y @@ -433,35 +417,8 @@ CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_CIFS_DFS_UPCALL=y -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m CONFIG_AFS_FS=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_RISCIX=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y CONFIG_NLS=y CONFIG_NLS_DEFAULT="cp437" CONFIG_NLS_CODEPAGE_437=m @@ -501,20 +458,7 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_SCHED_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KGDB=y CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y CONFIG_LSM_MMAP_MIN_ADDR=0 CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y @@ -522,10 
+466,8 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SMACK=y CONFIG_SECURITY_TOMOYO=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m @@ -539,8 +481,6 @@ CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -556,5 +496,14 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -CONFIG_CRC_CCITT=m CONFIG_CRC7=m +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KGDB=y diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig index e2731c3cc7e7..0649b8f06b7c 100644 --- a/arch/mips/configs/omega2p_defconfig +++ b/arch/mips/configs/omega2p_defconfig @@ -1,17 +1,9 @@ -CONFIG_RALINK=y -CONFIG_SOC_MT7620=y -CONFIG_DTB_OMEGA2P=y -CONFIG_CPU_MIPS32_R2=y -# CONFIG_COMPACTION is not set -CONFIG_HZ_100=y -CONFIG_PREEMPT=y -# CONFIG_SECCOMP is not set -CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -30,8 +22,16 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_OMEGA2P=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y # CONFIG_SUSPEND is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -113,6 +113,10 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=y CONFIG_NLS_KOI8_U=y CONFIG_NLS_UTF8=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y @@ -123,7 +127,3 @@ CONFIG_PANIC_TIMEOUT=10 # CONFIG_DEBUG_PREEMPT is not set CONFIG_STACKTRACE=y # CONFIG_FTRACE is not set -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRC16=y -CONFIG_XZ_DEC=y diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig index 41190c2036e6..63fe2da1b37f 100644 --- a/arch/mips/configs/pic32mzda_defconfig +++ b/arch/mips/configs/pic32mzda_defconfig @@ -1,11 +1,7 @@ -CONFIG_MACH_PIC32=y -CONFIG_DTB_PIC32_MZDA_SK=y -CONFIG_HZ_100=y -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_SECCOMP is not set CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -14,6 +10,11 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_MACH_PIC32=y +CONFIG_DTB_PIC32_MZDA_SK=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +# CONFIG_SUSPEND is not set CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -23,7 +24,6 @@ CONFIG_BLK_DEV_BSGLIB=y CONFIG_PARTITION_ADVANCED=y CONFIG_SGI_PARTITION=y CONFIG_BINFMT_MISC=m -# CONFIG_SUSPEND is not set CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_ALLOW_DEV_COREDUMP is not set diff --git 
a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig index b22a3cf149b6..2f08d071ada6 100644 --- a/arch/mips/configs/pistachio_defconfig +++ b/arch/mips/configs/pistachio_defconfig @@ -1,23 +1,16 @@ -CONFIG_MACH_PISTACHIO=y -CONFIG_MIPS_MT_SMP=y -CONFIG_MIPS_CPS=y -# CONFIG_COMPACTION is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 -CONFIG_ZSMALLOC=y -CONFIG_NR_CPUS=4 -CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="localhost" CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_IKCONFIG=m CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_FREEZER=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y @@ -29,14 +22,20 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y +CONFIG_MACH_PISTACHIO=y +CONFIG_MIPS_CPS=y +CONFIG_NR_CPUS=4 CONFIG_PM_DEBUG=y CONFIG_PM_ADVANCED_DEBUG=y CONFIG_CPU_IDLE=y # CONFIG_MIPS_CPS_CPUIDLE is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +# CONFIG_COMPACTION is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_ZSMALLOC=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -66,7 +65,6 @@ CONFIG_TCP_CONG_ADVANCED=y # CONFIG_TCP_CONG_HTCP is not set CONFIG_TCP_CONG_LP=m CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_XFRM_MODE_TRANSPORT=m @@ -89,13 +87,11 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y CONFIG_NETFILTER_XT_MATCH_DSCP=y CONFIG_NETFILTER_XT_MATCH_POLICY=y CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NF_CONNTRACK_IPV4=y CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_TARGET_REJECT=y CONFIG_IP_NF_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m @@ -167,15 +163,14 @@ CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_MCS7830=m # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set -CONFIG_LIBERTAS_THINFIRM=m -CONFIG_USB_NET_RNDIS_WLAN=m -CONFIG_MAC80211_HWSIM=m CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_LIBERTAS_THINFIRM=m CONFIG_RT2X00=m CONFIG_RT2800USB=m -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_MAC80211_HWSIM=m +CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y @@ -183,7 +178,6 @@ CONFIG_KEYBOARD_GPIO=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -204,13 +198,10 @@ CONFIG_GPIO_SYSFS=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y CONFIG_IMGPDC_WDT=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y -CONFIG_MEDIA_SUPPORT=y CONFIG_RC_CORE=y -# CONFIG_RC_DECODERS is not set CONFIG_RC_DEVICES=y CONFIG_IR_IMG=y CONFIG_IR_IMG_NEC=y @@ -220,8 +211,7 @@ CONFIG_IR_IMG_SHARP=y CONFIG_IR_IMG_SANYO=y CONFIG_IR_IMG_RC5=y CONFIG_IR_IMG_RC6=y -# CONFIG_DVB_TUNER_DIB0070 is not set -# CONFIG_DVB_TUNER_DIB0090 is not set +CONFIG_MEDIA_SUPPORT=y CONFIG_FB=y CONFIG_FB_MODE_HELPERS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y @@ -229,10 +219,10 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_SOUND=y 
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_HRTIMER=m
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
# CONFIG_SND_SPI is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_USB=y
@@ -300,27 +290,9 @@ CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_CREDENTIALS=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LKDTM=y
-CONFIG_TEST_UDELAY=m
-CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_YAMA=y
-CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
@@ -328,9 +300,19 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
-CONFIG_LIBCRC32C=m
# CONFIG_XZ_DEC_X86 is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=y
+CONFIG_TEST_UDELAY=m
diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
index e73cdb08fc6e..aa0b169800e0 100644
--- a/arch/mips/configs/pnx8335_stb225_defconfig
+++ b/arch/mips/configs/pnx8335_stb225_defconfig
@@ -1,23 +1,21 @@
-CONFIG_NXP_STB225=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_128=y
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_NXP_STB225=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HZ_128=y
+# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -42,17 +40,14 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_VT_CONSOLE is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_PNX8XXX=y
CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -61,12 +56,9 @@ CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
+CONFIG_SND_SEQUENCER=m
CONFIG_EXT2_FS=m
# CONFIG_DNOTIFY is not set
CONFIG_MSDOS_FS=m
@@ -75,7 +67,6 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index d8b7211a7b0f..7671fe6a8042 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -1,11 +1,7 @@
-CONFIG_MACH_INGENIC=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_PREEMPT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
@@ -13,6 +9,9 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MACH_INGENIC=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -20,6 +19,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -49,7 +49,6 @@ CONFIG_MTD_NAND_JZ4740=y
CONFIG_MTD_UBI=y
CONFIG_NETDEVICES=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -58,7 +57,6 @@ CONFIG_KEYBOARD_MATRIX=y
CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=2
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_DMA is not set
@@ -109,7 +107,6 @@ CONFIG_USB_GADGET_DEBUG=y
CONFIG_USB_ETH=y
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_JZ4740=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_JZ4740=y
@@ -119,8 +116,6 @@ CONFIG_PWM=y
CONFIG_PWM_JZ4740=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -171,6 +166,8 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
+CONFIG_FONTS=y
+CONFIG_FONT_SUN8x16=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_STRIP_ASM_SYMS=y
@@ -181,7 +178,3 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_FTRACE is not set
CONFIG_KGDB=y
-CONFIG_RUNTIME_DEBUG=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_FONTS=y
-CONFIG_FONT_SUN8x16=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 6fa56c6e53f5..7befe05fd813 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -1,29 +1,30 @@
-CONFIG_MIKROTIK_RB532=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_TINY_RCU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
+# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_PCI_QUIRKS is not set
CONFIG_SLAB=y
+CONFIG_MIKROTIK_RB532=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+# CONFIG_PCI_QUIRKS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -33,7 +34,6 @@ CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_ARPD=y CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set @@ -70,13 +70,9 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_STATE=y CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NF_CONNTRACK_IPV4=y CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_RAW=m CONFIG_BRIDGE=y @@ -122,31 +118,27 @@ CONFIG_ATA=y CONFIG_PATA_RB532=y CONFIG_NETDEVICES=y CONFIG_IFB=m -CONFIG_NET_ETHERNET=y CONFIG_KORINA=y -CONFIG_NET_PCI=y CONFIG_VIA_RHINE=y -CONFIG_ATMEL=m CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_PPP_ASYNC=m # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_MISC=y CONFIG_INPUT_RB532_BUTTON=y # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -# CONFIG_LEGACY_PTYS is not set CONFIG_HW_RANDOM=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set @@ -171,13 +163,8 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_SUMMARY=y CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_SQUASHFS=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_STRIP_ASM_SYMS=y -CONFIG_CRYPTO=y CONFIG_CRYPTO_TEST=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC16=m -CONFIG_LIBCRC32C=m +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_STRIP_ASM_SYMS=y diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index fb195e29e449..50a2c9ad583f 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -1,27 +1,24 @@ -CONFIG_MACH_TX49XX=y -CONFIG_TOSHIBA_RBTX4927=y -CONFIG_TOSHIBA_RBTX4938=y -CONFIG_TOSHIBA_RBTX4939=y -CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y +CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y -# CONFIG_SECCOMP is not set -CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -# CONFIG_HOTPLUG is not set -# CONFIG_PCSPKR_PLATFORM is not set # CONFIG_EPOLL is not set CONFIG_SLAB=y +CONFIG_MACH_TX49XX=y +CONFIG_TOSHIBA_RBTX4927=y +CONFIG_TOSHIBA_RBTX4938=y +CONFIG_TOSHIBA_RBTX4939=y +CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set -CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -52,10 +49,8 @@ CONFIG_IDE=y CONFIG_BLK_DEV_IDE_TX4938=y CONFIG_BLK_DEV_IDE_TX4939=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_SMC91X=y CONFIG_NE2000=y -CONFIG_NET_PCI=y +CONFIG_SMC91X=y CONFIG_TC35815=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set @@ -99,7 +94,6 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_JFFS2_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 5f71aa598b06..0f4b09f8a0ee 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -1,24 +1,23 @@ 
-CONFIG_SNI_RM=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_ARC_CONSOLE=y -CONFIG_HZ_1000=y -CONFIG_PREEMPT_VOLUNTARY=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y +CONFIG_SNI_RM=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_ARC_CONSOLE=y +CONFIG_HZ_1000=y +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_PCI=y +CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m -CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y @@ -27,8 +26,6 @@ CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -48,7 +45,6 @@ CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -92,20 +88,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -114,7 +102,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -149,7 +136,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE=m CONFIG_DECNET=m CONFIG_NET_SCHED=y @@ -222,7 +208,6 @@ CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=y -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set CONFIG_ISCSI_TCP=m CONFIG_SCSI_AIC94XX=m # CONFIG_AIC94XX_DEBUG is not set @@ -247,34 +232,30 @@ CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m CONFIG_TUN=m -CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_ISA=y -CONFIG_NE2000=m -CONFIG_NET_PCI=y CONFIG_PCNET32=y -CONFIG_VIA_VELOCITY=m -CONFIG_QLA3XXX=m CONFIG_CHELSIO_T3=m +CONFIG_NE2000=m +CONFIG_QLA3XXX=m CONFIG_NETXEN_NIC=m +CONFIG_VIA_VELOCITY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_PLIP=m CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_USBNET=m # CONFIG_USB_NET_CDC_SUBSET is not set -CONFIG_PLIP=m CONFIG_INPUT_FF_MEMLESS=m CONFIG_SERIO_PARKBD=m CONFIG_SERIO_RAW=m @@ -329,7 +310,6 @@ CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_HP4X=m CONFIG_USB_SERIAL_SAFE=m CONFIG_USB_SERIAL_SAFE_PADDED=y CONFIG_USB_SERIAL_CYBERJACK=m @@ 
-377,25 +357,11 @@ CONFIG_ROMFS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_SPKM3=m -CONFIG_SMB_FS=m CONFIG_CIFS=m -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_SMALLDOS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m CONFIG_AFS_FS=m -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m CONFIG_NLS_CODEPAGE_775=m @@ -434,21 +400,14 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST6=m diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig index dbe6a4639d05..0392e38010e6 100644 --- a/arch/mips/configs/rt305x_defconfig +++ b/arch/mips/configs/rt305x_defconfig @@ -1,32 +1,29 @@ -CONFIG_RALINK=y -CONFIG_DTB_RT305X_EVAL=y -CONFIG_CPU_MIPS32_R2=y -# CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set -CONFIG_HZ_100=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_INITRAMFS_ROOT_UID=1000 -CONFIG_INITRAMFS_ROOT_GID=1000 # CONFIG_RD_GZIP is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set +CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_RALINK=y +CONFIG_DTB_RT305X_EVAL=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +# CONFIG_SUSPEND is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set # CONFIG_COREDUMP is not set -# CONFIG_SUSPEND is not set +# CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -38,7 +35,6 @@ CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_ARPD=y CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set @@ -63,8 +59,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m @@ -100,14 +94,12 @@ CONFIG_PPP_ASYNC=m CONFIG_ISDN=y CONFIG_INPUT=m CONFIG_INPUT_POLLDEV=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_MISC=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 @@ -142,17 +134,7 @@ CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_SQUASHFS=y # CONFIG_SQUASHFS_ZLIB is not set CONFIG_SQUASHFS_XZ=y -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_MAGIC_SYSRQ=y 
-CONFIG_STRIP_ASM_SYMS=y -CONFIG_DEBUG_FS=y -# CONFIG_SCHED_DEBUG is not set -# CONFIG_FTRACE is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CRYPTO_MANAGER=m CONFIG_CRYPTO_ARC4=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_ITU_T=m CONFIG_CRC32_SARWATE=y # CONFIG_XZ_DEC_X86 is not set @@ -161,4 +143,11 @@ CONFIG_CRC32_SARWATE=y # CONFIG_XZ_DEC_ARM is not set # CONFIG_XZ_DEC_ARMTHUMB is not set # CONFIG_XZ_DEC_SPARC is not set -CONFIG_AVERAGE=y +CONFIG_PRINTK_TIME=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_FTRACE is not set +CONFIG_CMDLINE_BOOL=y diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig index 1edd8430ad61..ad8981666ee4 100644 --- a/arch/mips/configs/sb1250_swarm_defconfig +++ b/arch/mips/configs/sb1250_swarm_defconfig @@ -1,30 +1,29 @@ -CONFIG_SIBYTE_SWARM=y -CONFIG_CPU_SB1_PASS_2_2=y -CONFIG_64BIT=y -CONFIG_SMP=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_1000=y CONFIG_SYSVIPC=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_CGROUPS=y CONFIG_CPUSETS=y # CONFIG_PROC_PID_CPUSET is not set CONFIG_CGROUP_CPUACCT=y -CONFIG_RELAY=y CONFIG_NAMESPACES=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_SIBYTE_SWARM=y +CONFIG_CPU_SB1_PASS_2_2=y +CONFIG_64BIT=y +CONFIG_SMP=y +CONFIG_NR_CPUS=2 +CONFIG_HZ_1000=y +CONFIG_PCI=y +CONFIG_MIPS32_O32=y +CONFIG_PM=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y -CONFIG_MIPS32_COMPAT=y -CONFIG_MIPS32_O32=y -CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -43,8 +42,6 @@ CONFIG_TCP_MD5SIG=y CONFIG_NETWORK_SECMARK=y CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m @@ -60,10 +57,8 @@ CONFIG_BLK_DEV_IDETAPE=y CONFIG_RAID_ATTRS=m CONFIG_NETDEVICES=y CONFIG_MACVLAN=m -CONFIG_BROADCOM_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y CONFIG_SB1250_MAC=y +CONFIG_BROADCOM_PHY=y # CONFIG_INPUT is not set CONFIG_SERIO_RAW=m # CONFIG_VT is not set @@ -81,15 +76,9 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y -CONFIG_DLM=m -CONFIG_KEYS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -98,7 +87,6 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig index 4041597e3170..f0a11a72307e 100644 --- a/arch/mips/configs/tb0219_defconfig +++ b/arch/mips/configs/tb0219_defconfig @@ -1,12 +1,9 @@ -CONFIG_MACH_VR41XX=y -CONFIG_TANBAC_TB0219=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_TANBAC_TB0219=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -25,7 +22,6 @@ CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # 
CONFIG_INET_XFRM_MODE_TUNNEL is not set @@ -33,33 +29,26 @@ CONFIG_SYN_COOKIES=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_FW_LOADER=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_XIP=y CONFIG_NETDEVICES=y -CONFIG_PHYLIB=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_8139TOO=y +CONFIG_R8169=y CONFIG_VIA_RHINE=y CONFIG_VIA_RHINE_MMIO=y -CONFIG_R8169=y CONFIG_VIA_VELOCITY=y -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_VR41XX=y CONFIG_SERIAL_VR41XX_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -82,7 +71,6 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_CRAMFS=m CONFIG_ROMFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig index 565f0441c50d..025e45656359 100644 --- a/arch/mips/configs/tb0226_defconfig +++ b/arch/mips/configs/tb0226_defconfig @@ -1,18 +1,14 @@ -CONFIG_MACH_VR41XX=y -CONFIG_TANBAC_TB0226=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_TANBAC_TB0226=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -34,28 +30,21 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_XIP=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SAS_LIBSAS=m -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_E100=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_VR41XX=y CONFIG_SERIAL_VR41XX_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -77,10 +66,8 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_CRAMFS=m CONFIG_ROMFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200" -CONFIG_CRC32=m diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig index a702be602fb9..68490248e3f1 100644 --- a/arch/mips/configs/tb0287_defconfig +++ b/arch/mips/configs/tb0287_defconfig @@ -1,12 +1,8 @@ -CONFIG_MACH_VR41XX=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_PCSPKR_PLATFORM is not set CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -25,7 +21,6 @@ CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m 
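The same pattern explains the VR41xx board hunks (tb0219, tb0226, tb0287): CONFIG_BLK_DEV_XIP and CONFIG_SCSI_MULTI_LUN left Kconfig when those features were removed or made unconditional, and the CONFIG_IEEE1394 options disappear because the old ieee1394 drivers were replaced by the firewire stack well before this cleanup.
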
-CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set @@ -36,36 +31,23 @@ CONFIG_TCP_CONG_CUBIC=m # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_FW_LOADER=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_XIP=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SCAN_ASYNC=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y CONFIG_PATA_SIL680=y -CONFIG_IEEE1394=m -CONFIG_IEEE1394_OHCI1394=m -CONFIG_IEEE1394_SBP2=m -CONFIG_IEEE1394_ETH1394=m -CONFIG_IEEE1394_RAWIO=m -CONFIG_IEEE1394_VIDEO1394=m -CONFIG_IEEE1394_DV1394=m CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y CONFIG_8139TOO=y +CONFIG_R8169=y CONFIG_VIA_RHINE=y CONFIG_VIA_RHINE_MMIO=y -CONFIG_R8169=y CONFIG_VIA_VELOCITY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_VR41XX=y CONFIG_SERIAL_VR41XX_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -76,9 +58,6 @@ CONFIG_FB=y CONFIG_FB_SM501=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y CONFIG_USB=m CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=m @@ -97,9 +76,11 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_CRAMFS=m CONFIG_ROMFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs" diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig index 9121e4194a63..ded3dce911d5 100644 --- a/arch/mips/configs/vocore2_defconfig +++ b/arch/mips/configs/vocore2_defconfig @@ -1,17 +1,9 @@ -CONFIG_RALINK=y -CONFIG_SOC_MT7620=y -CONFIG_DTB_VOCORE2=y -CONFIG_CPU_MIPS32_R2=y -# CONFIG_COMPACTION is not set -CONFIG_HZ_100=y -CONFIG_PREEMPT=y -# CONFIG_SECCOMP is not set -CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -30,8 +22,16 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_VOCORE2=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y # CONFIG_SUSPEND is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -113,6 +113,10 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=y CONFIG_NLS_KOI8_U=y CONFIG_NLS_UTF8=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y @@ -123,7 +127,3 @@ CONFIG_PANIC_TIMEOUT=10 # CONFIG_DEBUG_PREEMPT is not set CONFIG_STACKTRACE=y # CONFIG_FTRACE is not set -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRC16=y -CONFIG_XZ_DEC=y diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig index a84eac409c9c..891a5f77305d 100644 --- a/arch/mips/configs/workpad_defconfig +++ b/arch/mips/configs/workpad_defconfig @@ -1,18 +1,17 @@ -CONFIG_MACH_VR41XX=y -CONFIG_IBM_WORKPAD=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# 
CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y +CONFIG_MACH_VR41XX=y +CONFIG_IBM_WORKPAD=y +CONFIG_PCCARD=y +CONFIG_PCMCIA_VRC4171=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_PCCARD=y -CONFIG_PCMCIA_VRC4171=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -31,16 +30,14 @@ CONFIG_IDE=y CONFIG_BLK_DEV_IDECS=m CONFIG_IDE_GENERIC=y CONFIG_NETDEVICES=y -CONFIG_NET_PCMCIA=y -CONFIG_PCMCIA_3C589=m CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m +CONFIG_PCMCIA_NMCLAN=m CONFIG_PCMCIA_FMVJ18X=m +CONFIG_PCMCIA_AXNET=m CONFIG_PCMCIA_PCNET=m -CONFIG_PCMCIA_NMCLAN=m CONFIG_PCMCIA_SMC91C92=m CONFIG_PCMCIA_XIRC2PS=m -CONFIG_PCMCIA_AXNET=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -62,7 +59,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=m -CONFIG_NFS_V3=y CONFIG_NFSD=m CONFIG_NFSD_V3=y CONFIG_CMDLINE_BOOL=y diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig index fa750d501c11..c3cac29e8414 100644 --- a/arch/mips/configs/xway_defconfig +++ b/arch/mips/configs/xway_defconfig @@ -1,13 +1,3 @@ -CONFIG_LANTIQ=y -CONFIG_PCI_LANTIQ=y -CONFIG_XRX200_PHY_FW=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_MIPS_MT_SMP=y -CONFIG_MIPS_VPE_LOADER=y -# CONFIG_COMPACTION is not set -CONFIG_NR_CPUS=2 -CONFIG_HZ_100=y -# CONFIG_SECCOMP is not set # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set @@ -15,19 +5,28 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set +CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_LANTIQ=y +CONFIG_PCI_LANTIQ=y +CONFIG_XRX200_PHY_FW=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_VPE_LOADER=y +CONFIG_NR_CPUS=2 +CONFIG_HZ_100=y +# CONFIG_SECCOMP is not set +CONFIG_PCI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set -CONFIG_PCI=y # CONFIG_COREDUMP is not set +# CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -63,7 +62,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m @@ -102,14 +100,12 @@ CONFIG_PPP_ASYNC=m CONFIG_ISDN=y CONFIG_INPUT=m CONFIG_INPUT_POLLDEV=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_MISC=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 @@ -150,6 +146,9 @@ CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_SQUASHFS=y # CONFIG_SQUASHFS_ZLIB is not set CONFIG_SQUASHFS_XZ=y +CONFIG_CRYPTO_ARC4=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32_SARWATE=y CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y @@ -158,6 +157,3 @@ CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set # CONFIG_FTRACE is not set CONFIG_CMDLINE_BOOL=y -CONFIG_CRYPTO_ARC4=m -CONFIG_CRC_ITU_T=m -CONFIG_CRC32_SARWATE=y diff --git a/arch/mips/include/asm/Kbuild 
b/arch/mips/include/asm/Kbuild index 9a81e72119da..f15d5db5dd67 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,4 +1,8 @@ # MIPS headers +generated-y += syscall_table_32_o32.h +generated-y += syscall_table_64_n32.h +generated-y += syscall_table_64_n64.h +generated-y += syscall_table_64_o32.h generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += current.h generic-y += device.h diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index d4ea7a5b60cf..e8fbfd419151 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -59,12 +59,13 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ int temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: ll %0, %1 # atomic_" #op " \n" \ " " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ "\t" __scbeqz " %0, 1b \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else { \ @@ -85,13 +86,14 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ int temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: ll %1, %2 # atomic_" #op "_return \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ "\t" __scbeqz " %0, 1b \n" \ " " #asm_op " %0, %1, %3 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ @@ -117,12 +119,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ int temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: ll %1, %2 # atomic_fetch_" #op " \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ "\t" __scbeqz " %0, 1b \n" \ - " .set mips0 \n" \ + " .set pop \n" \ " move %0, %1 \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ @@ -188,17 +191,19 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) int temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_LEVEL" \n" "1: ll %1, %2 # atomic_sub_if_positive\n" - " .set mips0 \n" + " .set pop \n" " subu %0, %1, %3 \n" " move %1, %0 \n" " bltz %0, 1f \n" + " .set push \n" " .set "MIPS_ISA_LEVEL" \n" " sc %1, %2 \n" "\t" __scbeqz " %1, 1b \n" "1: \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); @@ -252,12 +257,13 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ long temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: lld %0, %1 # atomic64_" #op " \n" \ " " #asm_op " %0, %2 \n" \ " scd %0, %1 \n" \ "\t" __scbeqz " %0, 1b \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else { \ @@ -278,13 +284,14 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ long temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: lld %1, %2 # atomic64_" #op "_return\n" \ " " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ "\t" __scbeqz " %0, 1b \n" \ " " #asm_op " %0, %1, %3 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ @@ -310,13 +317,14 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ long temp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \ " " #asm_op 
" %0, %1, %3 \n" \ " scd %0, %2 \n" \ "\t" __scbeqz " %0, 1b \n" \ " move %0, %1 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ @@ -382,6 +390,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_LEVEL" \n" "1: lld %1, %2 # atomic64_sub_if_positive\n" " dsubu %0, %1, %3 \n" @@ -390,7 +399,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " scd %1, %2 \n" "\t" __scbeqz " %1, 1b \n" "1: \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index da1b8718861e..f2a840fb6a9a 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -58,12 +58,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) if (kernel_uses_llsc && R10000_LLSC_WAR) { __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # set_bit \n" " or %0, %2 \n" " " __SC "%0, %1 \n" " beqzl %0, 1b \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) @@ -80,11 +81,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } else if (kernel_uses_llsc) { do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # set_bit \n" " or %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); @@ -110,12 +112,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) if (kernel_uses_llsc && R10000_LLSC_WAR) { __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # clear_bit \n" " and %0, %2 \n" " " __SC "%0, %1 \n" " beqzl %0, 1b \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) @@ -132,11 +135,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } else if (kernel_uses_llsc) { do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # clear_bit \n" " and %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); @@ -176,12 +180,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # change_bit \n" " xor %0, %2 \n" " " __SC "%0, %1 \n" " beqzl %0, 1b \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } else if (kernel_uses_llsc) { @@ -190,11 +195,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # change_bit \n" " xor %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); @@ -223,13 +229,14 @@ static inline int test_and_set_bit(unsigned long nr, unsigned long temp; __asm__ 
__volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -239,11 +246,12 @@ static inline int test_and_set_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -277,13 +285,14 @@ static inline int test_and_set_bit_lock(unsigned long nr, unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+m" (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -293,11 +302,12 @@ static inline int test_and_set_bit_lock(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -332,6 +342,7 @@ static inline int test_and_clear_bit(unsigned long nr, unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # test_and_clear_bit \n" " or %2, %0, %3 \n" @@ -339,7 +350,7 @@ static inline int test_and_clear_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -365,12 +376,13 @@ static inline int test_and_clear_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_clear_bit \n" " or %2, %0, %3 \n" " xor %2, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -406,13 +418,14 @@ static inline int test_and_change_bit(unsigned long nr, unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1: " __LL "%0, %1 # test_and_change_bit \n" " xor %2, %0, %3 \n" " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -422,11 +435,12 @@ static inline int test_and_change_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_change_bit \n" " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 89e9fb7976fe..638de0c25249 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -47,9 +47,10 @@ extern unsigned long __xchg_called_with_bad_pointer(void) __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ + " .set push \n" \ " .set " MIPS_ISA_ARCH_LEVEL " \n" \ "1: " ld " %0, %2 # __xchg_asm \n" \ - " .set mips0 \n" \ + " .set pop \n" \ " move $1, %z3 \n" \ " 
.set " MIPS_ISA_ARCH_LEVEL " \n" \ " " st " $1, %1 \n" \ @@ -117,10 +118,11 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ + " .set push \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: " ld " %0, %2 # __cmpxchg_asm \n" \ " bne %0, %z3, 2f \n" \ - " .set mips0 \n" \ + " .set pop \n" \ " move $1, %z4 \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ " " st " $1, %1 \n" \ diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index cc2eb1b06050..f77e99f1722e 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -43,28 +43,16 @@ #undef barrier_before_unreachable #define barrier_before_unreachable() asm volatile(".insn") -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -#define GCC_IMM_ASM() "n" -#define GCC_REG_ACCUM "$0" +#if !defined(CONFIG_CC_IS_GCC) || \ + (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) +# define GCC_OFF_SMALL_ASM() "ZC" +#elif defined(CONFIG_CPU_MICROMIPS) +# error "microMIPS compilation unsupported with GCC older than 4.9" #else -#define GCC_IMM_ASM() "rn" -#define GCC_REG_ACCUM "accum" +# define GCC_OFF_SMALL_ASM() "R" #endif #ifdef CONFIG_CPU_MIPSR6 -/* All MIPS R6 toolchains support the ZC constrain */ -#define GCC_OFF_SMALL_ASM() "ZC" -#else -#ifndef CONFIG_CPU_MICROMIPS -#define GCC_OFF_SMALL_ASM() "R" -#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) -#define GCC_OFF_SMALL_ASM() "ZC" -#else -#error "microMIPS compilation unsupported with GCC older than 4.9" -#endif /* CONFIG_CPU_MICROMIPS */ -#endif /* CONFIG_CPU_MIPSR6 */ - -#ifdef CONFIG_CPU_MIPSR6 #define MIPS_ISA_LEVEL "mips64r6" #define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL #define MIPS_ISA_LEVEL_RAW mips64r6 diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 0edba3e75747..701e525641b8 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -15,6 +15,7 @@ #include <cpu-feature-overrides.h> #define __ase(ase) (cpu_data[0].ases & (ase)) +#define __isa(isa) (cpu_data[0].isa_level & (isa)) #define __opt(opt) (cpu_data[0].options & (opt)) /* @@ -53,6 +54,18 @@ #define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt)) /* + * Similarly allow for ISA level checks that take into account knowledge of the + * ISA targeted by the kernel build, provided by MIPS_ISA_REV. + */ +#define __isa_ge_and_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) && __isa(flag)) +#define __isa_ge_or_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) || __isa(flag)) +#define __isa_lt_and_flag(isa, flag) ((MIPS_ISA_REV < (isa)) && __isa(flag)) +#define __isa_range(ge, lt) \ + ((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt))) +#define __isa_range_or_flag(ge, lt, flag) \ + (__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag))) + +/* * SMP assumption: Options of CPU 0 are a superset of all processors. * This is true for all known MIPS systems. */ @@ -115,10 +128,15 @@ #endif /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. 
*/ #ifndef cpu_has_fpu -#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) -#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU) +# ifdef CONFIG_MIPS_FP_SUPPORT +# define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) +# define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU) +# else +# define cpu_has_fpu 0 +# define raw_cpu_has_fpu 0 +# endif #else -#define raw_cpu_has_fpu cpu_has_fpu +# define raw_cpu_has_fpu cpu_has_fpu #endif #ifndef cpu_has_32fpr #define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR) @@ -195,7 +213,9 @@ #endif #ifndef cpu_has_mmips -# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS +# if defined(__mips_micromips) +# define cpu_has_mmips 1 +# elif defined(CONFIG_SYS_SUPPORTS_MICROMIPS) # define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS) # else # define cpu_has_mmips 0 @@ -246,48 +266,38 @@ #endif #endif -/* __builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r */ -#if !((defined(cpu_has_mips32r1) && cpu_has_mips32r1) || \ - (defined(cpu_has_mips32r2) && cpu_has_mips32r2) || \ - (defined(cpu_has_mips32r6) && cpu_has_mips32r6) || \ - (defined(cpu_has_mips64r1) && cpu_has_mips64r1) || \ - (defined(cpu_has_mips64r2) && cpu_has_mips64r2) || \ - (defined(cpu_has_mips64r6) && cpu_has_mips64r6)) -#define CPU_NO_EFFICIENT_FFS 1 -#endif - #ifndef cpu_has_mips_1 -# define cpu_has_mips_1 (!cpu_has_mips_r6) +# define cpu_has_mips_1 (MIPS_ISA_REV < 6) #endif #ifndef cpu_has_mips_2 -# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) +# define cpu_has_mips_2 __isa_lt_and_flag(6, MIPS_CPU_ISA_II) #endif #ifndef cpu_has_mips_3 -# define cpu_has_mips_3 (cpu_data[0].isa_level & MIPS_CPU_ISA_III) +# define cpu_has_mips_3 __isa_lt_and_flag(6, MIPS_CPU_ISA_III) #endif #ifndef cpu_has_mips_4 -# define cpu_has_mips_4 (cpu_data[0].isa_level & MIPS_CPU_ISA_IV) +# define cpu_has_mips_4 __isa_lt_and_flag(6, MIPS_CPU_ISA_IV) #endif #ifndef cpu_has_mips_5 -# define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V) +# define cpu_has_mips_5 __isa_lt_and_flag(6, MIPS_CPU_ISA_V) #endif #ifndef cpu_has_mips32r1 -# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1) +# define cpu_has_mips32r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M32R1) #endif #ifndef cpu_has_mips32r2 -# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) +# define cpu_has_mips32r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M32R2) #endif #ifndef cpu_has_mips32r6 -# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) +# define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6) #endif #ifndef cpu_has_mips64r1 -# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) +# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1) #endif #ifndef cpu_has_mips64r2 -# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) +# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2) #endif #ifndef cpu_has_mips64r6 -# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) +# define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6) #endif /* diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a41059d47d31..ed7ffe4e63a3 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -50,7 +50,7 @@ struct guest_info { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned long asid_cache; + u64 asid_cache; #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE unsigned long asid_mask; 
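Two related threads run through the cpu-features.h and cpu-info.h hunks above. First, asid_cache grows from unsigned long to u64 so that the ASID version counter held in its upper bits cannot plausibly wrap on 32-bit kernels; the mmu_context.h hunk further down drops the "fix version if needed" test for exactly that reason. Second, the new __isa_ge_and_flag()/__isa_range_or_flag() helpers let ISA feature tests constant-fold whenever the ISA the kernel is compiled for (MIPS_ISA_REV) already decides the answer, leaving a runtime isa_level check only where it cannot. A standalone sketch of the folding, with the kernel macros pared down and the names reused purely for illustration:

    #include <stdio.h>

    /* Stand-ins for the kernel macros: MIPS_ISA_REV == 2 models a kernel
     * built for MIPS32r2, isa_level models cpu_data[0].isa_level. */
    #define MIPS_ISA_REV 2

    static unsigned int isa_level;
    #define __isa(flag) (isa_level & (flag))

    #define __isa_range(ge, lt) \
        ((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
    #define __isa_range_or_flag(ge, lt, flag) \
        (__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))

    int main(void)
    {
        /* (2 >= 1 && 2 < 6) is true at compile time, so this folds to 1
         * and isa_level is never read at runtime. */
        printf("mips32r1: %d\n", __isa_range_or_flag(1, 6, 0x04));

        /* MIPS_ISA_REV >= 6 is false, so an r6-only check folds to 0. */
        printf("mips64r6: %d\n", MIPS_ISA_REV >= 6);
        return 0;
    }
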
#endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index dacbdb84516a..532b49b1dbb3 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -248,8 +248,9 @@ #define PRID_REV_LOONGSON3A_R1 0x0005 #define PRID_REV_LOONGSON3B_R1 0x0006 #define PRID_REV_LOONGSON3B_R2 0x0007 -#define PRID_REV_LOONGSON3A_R2 0x0008 +#define PRID_REV_LOONGSON3A_R2_0 0x0008 #define PRID_REV_LOONGSON3A_R3_0 0x0009 +#define PRID_REV_LOONGSON3A_R2_1 0x000c #define PRID_REV_LOONGSON3A_R3_1 0x000d /* diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index b4c477eb46ce..20dfaad3a55d 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -10,10 +10,8 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #if defined(CONFIG_MACH_JAZZ) return &jazz_dma_ops; -#elif defined(CONFIG_SWIOTLB) - return &swiotlb_dma_ops; #else - return &dma_direct_ops; + return NULL; #endif } diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h index b47a97527673..6d5b781ad518 100644 --- a/arch/mips/include/asm/dsemul.h +++ b/arch/mips/include/asm/dsemul.h @@ -52,7 +52,14 @@ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, * * Return: True if an emulation frame was returned from, else false. */ +#ifdef CONFIG_MIPS_FP_SUPPORT extern bool do_dsemulret(struct pt_regs *xcp); +#else +static inline bool do_dsemulret(struct pt_regs *xcp) +{ + return false; +} +#endif /** * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame @@ -63,8 +70,14 @@ extern bool do_dsemulret(struct pt_regs *xcp); * * Return: True if a frame was freed, else false. */ +#ifdef CONFIG_MIPS_FP_SUPPORT extern bool dsemul_thread_cleanup(struct task_struct *tsk); - +#else +static inline bool dsemul_thread_cleanup(struct task_struct *tsk) +{ + return false; +} +#endif /** * dsemul_thread_rollback() - Rollback from an 'emulation' frame * @regs: User thread register context. @@ -77,7 +90,14 @@ extern bool dsemul_thread_cleanup(struct task_struct *tsk); * * Return: True if a frame was exited, else false. */ +#ifdef CONFIG_MIPS_FP_SUPPORT extern bool dsemul_thread_rollback(struct pt_regs *regs); +#else +static inline bool dsemul_thread_rollback(struct pt_regs *regs) +{ + return false; +} +#endif /** * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state @@ -87,6 +107,13 @@ extern bool dsemul_thread_rollback(struct pt_regs *regs); * for delay slot 'emulation' book-keeping is freed. This is to be called * before @mm is freed in order to avoid memory leaks. 
*/ +#ifdef CONFIG_MIPS_FP_SUPPORT extern void dsemul_mm_cleanup(struct mm_struct *mm); +#else +static inline void dsemul_mm_cleanup(struct mm_struct *mm) +{ + /* no-op */ +} +#endif #endif /* __MIPS_ASM_DSEMUL_H__ */ diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h index fc467767329b..c5d147744423 100644 --- a/arch/mips/include/asm/edac.h +++ b/arch/mips/include/asm/edac.h @@ -21,12 +21,13 @@ static inline void edac_atomic_scrub(void *va, u32 size) */ __asm__ __volatile__ ( + " .set push \n" " .set mips2 \n" "1: ll %0, %1 # edac_atomic_scrub \n" " addu %0, $0 \n" " sc %0, %1 \n" " beqz %0, 1b \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) : GCC_OFF_SMALL_ASM() (*virt_addr)); diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 0eb1a75be105..f8f44b1a6cbb 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -481,6 +481,8 @@ struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); +#ifdef CONFIG_MIPS_FP_SUPPORT + struct arch_elf_state { int nan_2008; int fp_abi; @@ -497,19 +499,35 @@ struct arch_elf_state { .overall_fp_mode = -1, \ } -/* Whether to accept legacy-NaN and 2008-NaN user binaries. */ -extern bool mips_use_nan_legacy; -extern bool mips_use_nan_2008; - extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, bool is_interp, struct arch_elf_state *state); extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr, struct arch_elf_state *state); +/* Whether to accept legacy-NaN and 2008-NaN user binaries. */ +extern bool mips_use_nan_legacy; +extern bool mips_use_nan_2008; + extern void mips_set_personality_nan(struct arch_elf_state *state); extern void mips_set_personality_fp(struct arch_elf_state *state); +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +struct arch_elf_state; + +static inline void mips_set_personality_nan(struct arch_elf_state *state) +{ + /* no-op */ +} + +static inline void mips_set_personality_fp(struct arch_elf_state *state) +{ + /* no-op */ +} + +#endif /* !CONFIG_MIPS_FP_SUPPORT */ + #define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk) extern int mips_elf_read_implies_exec(void *elf_ex, int exstack); diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index a2813fe381cf..42bc2bbbd3d7 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -30,13 +30,6 @@ #include <asm/mips_mt.h> #endif -struct sigcontext; -struct sigcontext32; - -extern void _init_fpu(unsigned int); -extern void _save_fp(struct task_struct *); -extern void _restore_fp(struct task_struct *); - /* * This enum specifies a mode in which we want the FPU to operate, for cores * which implement the Status.FR bit. Note that the bottom bit of the value @@ -51,6 +44,11 @@ enum fpu_mode { #define FPU_FR_MASK 0x1 }; +#ifdef CONFIG_MIPS_FP_SUPPORT + +extern void _save_fp(struct task_struct *); +extern void _restore_fp(struct task_struct *); + #define __disable_fpu() \ do { \ clear_c0_status(ST0_CU1); \ @@ -198,42 +196,36 @@ static inline void lose_fpu(int save) preempt_enable(); } -static inline int init_fpu(void) +/** + * init_fp_ctx() - Initialize task FP context + * @target: The task whose FP context should be initialized. + * + * Initializes the FP context of the target task to sane default values if that + * target task does not already have valid FP context. 
Once the context has + * been initialized, the task will be marked as having used FP & thus having + * valid FP context. + * + * Returns: true if context is initialized, else false. + */ +static inline bool init_fp_ctx(struct task_struct *target) { - unsigned int fcr31 = current->thread.fpu.fcr31; - int ret = 0; + /* If FP has been used then the target already has context */ + if (tsk_used_math(target)) + return false; - if (cpu_has_fpu) { - unsigned int config5; - - ret = __own_fpu(); - if (ret) - return ret; + /* Begin with data registers set to all 1s... */ + memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); - if (!cpu_has_fre) { - _init_fpu(fcr31); + /* FCSR has been preset by `mips_set_personality_nan'. */ - return 0; - } - - /* - * Ensure FRE is clear whilst running _init_fpu, since - * single precision FP instructions are used. If FRE - * was set then we'll just end up initialising all 32 - * 64b registers. - */ - config5 = clear_c0_config5(MIPS_CONF5_FRE); - enable_fpu_hazard(); + /* + * Record that the target has "used" math, such that the context + * just initialised, and any modifications made by the caller, + * aren't discarded. + */ + set_stopped_child_used_math(target); - _init_fpu(fcr31); - - /* Restore FRE */ - write_c0_config5(config5); - enable_fpu_hazard(); - } else - fpu_emulator_init_fpu(); - - return ret; + return true; } static inline void save_fp(struct task_struct *tsk) @@ -260,4 +252,81 @@ static inline union fpureg *get_fpu_regs(struct task_struct *tsk) return tsk->thread.fpu.fpr; } +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +/* + * When FP support is disabled we provide only a minimal set of stub functions + * to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT. + */ + +static inline int __enable_fpu(enum fpu_mode mode) +{ + return SIGILL; +} + +static inline void __disable_fpu(void) +{ + /* no-op */ +} + + +static inline int is_fpu_owner(void) +{ + return 0; +} + +static inline void clear_fpu_owner(void) +{ + /* no-op */ +} + +static inline int own_fpu_inatomic(int restore) +{ + return SIGILL; +} + +static inline int own_fpu(int restore) +{ + return SIGILL; +} + +static inline void lose_fpu_inatomic(int save, struct task_struct *tsk) +{ + /* no-op */ +} + +static inline void lose_fpu(int save) +{ + /* no-op */ +} + +static inline bool init_fp_ctx(struct task_struct *target) +{ + return false; +} + +/* + * The following functions should only be called in paths where we know that FP + * support is enabled, typically a path where own_fpu() or __enable_fpu() have + * returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile + * time that this should never happen, so calls to these functions should be + * optimized away & never actually be emitted. 
+ */ + +extern void save_fp(struct task_struct *tsk) + __compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n"); + +extern void _save_fp(struct task_struct *) + __compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n"); + +extern void restore_fp(struct task_struct *tsk) + __compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n"); + +extern void _restore_fp(struct task_struct *) + __compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n"); + +extern union fpureg *get_fpu_regs(struct task_struct *tsk) + __compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n"); + +#endif /* !CONFIG_MIPS_FP_SUPPORT */ #endif /* _ASM_FPU_H */ diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index b36097d3cbf4..7e233055f7b4 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -188,17 +188,6 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, unsigned long *contpc); -#define SIGNALLING_NAN 0x7ff800007ff80000LL - -static inline void fpu_emulator_init_fpu(void) -{ - struct task_struct *t = current; - int i; - - for (i = 0; i < 32; i++) - set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN); -} - /* * Mask the FCSR Cause bits according to the Enable bits, observing * that Unimplemented is always enabled. diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index a9e61ea54ca9..8eff134b3a43 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -24,9 +24,10 @@ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ + " .set push \n" \ " .set arch=r4000 \n" \ "1: ll %1, %4 # __futex_atomic_op \n" \ - " .set mips0 \n" \ + " .set pop \n" \ " " insn " \n" \ " .set arch=r4000 \n" \ "2: sc $1, %2 \n" \ @@ -35,7 +36,6 @@ "3: \n" \ " .insn \n" \ " .set pop \n" \ - " .set mips0 \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %6 \n" \ " j 3b \n" \ @@ -53,9 +53,10 @@ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ + " .set push \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ - " .set mips0 \n" \ + " .set pop \n" \ " " insn " \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "2: "user_sc("$1", "%2")" \n" \ @@ -64,7 +65,6 @@ "3: \n" \ " .insn \n" \ " .set pop \n" \ - " .set mips0 \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %6 \n" \ " j 3b \n" \ @@ -137,10 +137,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" " .set noat \n" + " .set push \n" " .set arch=r4000 \n" "1: ll %1, %3 \n" " bne %1, %z4, 3f \n" - " .set mips0 \n" + " .set pop \n" " move $1, %z5 \n" " .set arch=r4000 \n" "2: sc $1, %2 \n" @@ -166,10 +167,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" " .set noat \n" + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1: "user_ll("%1", "%3")" \n" " bne %1, %z4, 3f \n" - " .set mips0 \n" + " .set pop \n" " move $1, %z5 \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "2: "user_sc("$1", "%2")" \n" diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index e0fecf206f2c..0fa27446869a 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -66,10 +66,11 @@ do { \ unsigned long tmp; \ \ __asm__ __volatile__( \ + " .set push 
\n" \ " .set "MIPS_ISA_LEVEL" \n" \ " dla %0, 1f \n" \ " jr.hb %0 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ "1: \n" \ : "=r" (tmp)); \ } while (0) @@ -141,10 +142,11 @@ do { \ unsigned long tmp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set mips64r2 \n" \ " dla %0, 1f \n" \ " jr.hb %0 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ "1: \n" \ : "=r" (tmp)); \ } while (0) diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 266257d56fb6..845fbbc7a2e3 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -218,6 +218,18 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si } /* + * ioremap_prot - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + + * ioremap_prot gives the caller control over cache coherency attributes (CCA) + */ +static inline void __iomem *ioremap_prot(phys_addr_t offset, + unsigned long size, unsigned long prot_val) { + return __ioremap_mode(offset, size, prot_val & _CACHE_MASK); +} + +/* * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map @@ -342,13 +354,14 @@ static inline void pfx##write##bwlq(type val, \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ - ".set arch=r4000" "\t\t# __writeq""\n\t" \ + ".set push" "\t\t# __writeq""\n\t" \ + ".set arch=r4000" "\n\t" \ "dsll32 %L0, %L0, 0" "\n\t" \ "dsrl32 %L0, %L0, 0" "\n\t" \ "dsll32 %M0, %M0, 0" "\n\t" \ "or %L0, %L0, %M0" "\n\t" \ "sd %L0, %2" "\n\t" \ - ".set mips0" "\n" \ + ".set pop" "\n" \ : "=r" (__tmp) \ : "0" (__val), "m" (*__mem)); \ if (irq) \ @@ -375,11 +388,12 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ - ".set arch=r4000" "\t\t# __readq" "\n\t" \ + ".set push" "\t\t# __readq" "\n\t" \ + ".set arch=r4000" "\n\t" \ "ld %L0, %1" "\n\t" \ "dsra32 %M0, %L0, 0" "\n\t" \ "sll %L0, %L0, 0" "\n\t" \ - ".set mips0" "\n" \ + ".set pop" "\n" \ : "=r" (__val) \ : "m" (*__mem)); \ if (irq) \ diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h index d913439c738c..d13f940022d5 100644 --- a/arch/mips/include/asm/jazzdma.h +++ b/arch/mips/include/asm/jazzdma.h @@ -40,12 +40,6 @@ extern int vdma_get_enable(int channel); #define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1)) /* - * error code returned by vdma_alloc() - * (See also arch/mips/kernel/jazzdma.c) - */ -#define VDMA_ERROR 0xffffffff - -/* * VDMA pagetable entry description */ typedef volatile struct VDMA_PGTBL_ENTRY { diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2c1c53d12179..d2abd98471e8 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -411,11 +411,12 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg, unsigned long temp; do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 \n" " or %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+m" (*reg) : "r" (val)); } while (unlikely(!temp)); @@ -427,11 +428,12 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg, unsigned long temp; do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 \n" " and %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+m" (*reg) : "r" (~val)); } while (unlikely(!temp)); @@ -444,12 +446,13 @@ 
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, unsigned long temp; do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 \n" " and %0, %2 \n" " or %0, %3 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+m" (*reg) : "r" (~change), "r" (val & change)); } while (unlikely(!temp)); @@ -933,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index ac8264eca1e9..02783e141c32 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -35,13 +35,14 @@ static __inline__ long local_add_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1:" __LL "%1, %2 # local_add_return \n" " addu %0, %1, %3 \n" __SC "%0, %2 \n" " beqzl %0, 1b \n" " addu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); @@ -49,13 +50,14 @@ static __inline__ long local_add_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_add_return \n" " addu %0, %1, %3 \n" __SC "%0, %2 \n" " beqz %0, 1b \n" " addu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); @@ -80,13 +82,14 @@ static __inline__ long local_sub_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set arch=r4000 \n" "1:" __LL "%1, %2 # local_sub_return \n" " subu %0, %1, %3 \n" __SC "%0, %2 \n" " beqzl %0, 1b \n" " subu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); @@ -94,13 +97,14 @@ static __inline__ long local_sub_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_sub_return \n" " subu %0, %1, %3 \n" __SC "%0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index cbac603ced19..b5e288a12dfe 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -31,7 +31,7 @@ /* Enable STFill Buffer */ mfc0 t0, CP0_PRID andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2) + slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0) bnez t0, 1f mfc0 t0, CP0_CONFIG6 or t0, 0x100 @@ -60,7 +60,7 @@ /* Enable STFill Buffer */ mfc0 t0, CP0_PRID andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2) + slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0) bnez t0, 1f mfc0 t0, CP0_CONFIG6 
or t0, 0x100 diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index c9f7e231e66b..59c8b11c090e 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -21,6 +21,7 @@ #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT) #define LEVELS_PER_SLICE 128 diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h index aac8ce8902e7..5dfd4d66d6fc 100644 --- a/arch/mips/include/asm/mach-rc32434/rb.h +++ b/arch/mips/include/asm/mach-rc32434/rb.h @@ -71,12 +71,6 @@ struct korina_device { struct net_device *dev; }; -struct cf_device { - int gpio_pin; - void *dev; - struct gendisk *gd; -}; - struct mpmc_device { unsigned char state; spinlock_t lock; diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h index 212336b7c0f4..be4cf9d477be 100644 --- a/arch/mips/include/asm/mipsmtregs.h +++ b/arch/mips/include/asm/mipsmtregs.h @@ -255,12 +255,12 @@ static inline unsigned int dmt(void) static inline void __raw_emt(void) { __asm__ __volatile__( + " .set push \n" " .set noreorder \n" " .set mips32r2 \n" " .word 0x41600be1 # emt \n" " ehb \n" - " .set mips0 \n" - " .set reorder"); + " .set pop"); } /* enable multi-threaded execution if previous suggested it should be. @@ -277,9 +277,10 @@ static inline void emt(int previous) static inline void ehb(void) { __asm__ __volatile__( + " .set push \n" " .set mips32r2 \n" " ehb \n" - " .set mips0 \n"); + " .set pop \n"); } #define mftc0(rt,sel) \ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 341a02c92985..402b80af91aa 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1345,9 +1345,10 @@ do { \ : "=r" (__res)); \ else \ __asm__ vol( \ + ".set\tpush\n\t" \ ".set\tmips32\n\t" \ "mfc0\t%0, " #source ", " #sel "\n\t" \ - ".set\tmips0\n\t" \ + ".set\tpop\n\t" \ : "=r" (__res)); \ __res; \ }) @@ -1358,15 +1359,17 @@ do { \ __res = __read_64bit_c0_split(source, sel, vol); \ else if (sel == 0) \ __asm__ vol( \ + ".set\tpush\n\t" \ ".set\tmips3\n\t" \ "dmfc0\t%0, " #source "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__res)); \ else \ __asm__ vol( \ + ".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dmfc0\t%0, " #source ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__res)); \ __res; \ }) @@ -1391,9 +1394,10 @@ do { \ : : "Jr" ((unsigned int)(value))); \ else \ __asm__ __volatile__( \ + ".set\tpush\n\t" \ ".set\tmips32\n\t" \ "mtc0\t%z0, " #register ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" ((unsigned int)(value))); \ } while (0) @@ -1403,15 +1407,17 @@ do { \ __write_64bit_c0_split(register, sel, value); \ else if (sel == 0) \ __asm__ __volatile__( \ + ".set\tpush\n\t" \ ".set\tmips3\n\t" \ "dmtc0\t%z0, " #register "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" (value)); \ else \ __asm__ __volatile__( \ + ".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dmtc0\t%z0, " #register ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" (value)); \ } while (0) @@ -1463,19 +1469,21 @@ do { \ local_irq_save(__flags); \ if (sel == 0) \ __asm__ vol( \ + ".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dmfc0\t%L0, " #source "\n\t" \ "dsra\t%M0, %L0, 32\n\t" \ "sll\t%L0, %L0, 0\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__val)); \ else \ __asm__ vol( \ + 
".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dmfc0\t%L0, " #source ", " #sel "\n\t" \ "dsra\t%M0, %L0, 32\n\t" \ "sll\t%L0, %L0, 0\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__val)); \ local_irq_restore(__flags); \ \ @@ -1498,23 +1506,25 @@ do { \ : "+r" (__tmp)); \ else if (sel == 0) \ __asm__ __volatile__( \ + ".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dsll\t%L0, %L0, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ "dsll\t%M0, %M0, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "+r" (__tmp)); \ else \ __asm__ __volatile__( \ + ".set\tpush\n\t" \ ".set\tmips64\n\t" \ "dsll\t%L0, %L0, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ "dsll\t%M0, %M0, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "+r" (__tmp)); \ local_irq_restore(__flags); \ } while (0) diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 0740be7d5d4a..88a108ce62c1 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,9 +7,8 @@ #include <linux/wait.h> typedef struct { - unsigned long asid[NR_CPUS]; + u64 asid[NR_CPUS]; void *vdso; - atomic_t fp_mode_switching; /* lock to be held whilst modifying fp_bd_emupage_allocmap */ spinlock_t bd_emupage_lock; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 94414561de0e..a589585be21b 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -76,14 +76,14 @@ extern unsigned long pgd_current[]; * All unused by hardware upper bits will be considered * as a software asid extension. */ -static unsigned long asid_version_mask(unsigned int cpu) +static inline u64 asid_version_mask(unsigned int cpu) { unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); - return ~(asid_mask | (asid_mask - 1)); + return ~(u64)(asid_mask | (asid_mask - 1)); } -static unsigned long asid_first_version(unsigned int cpu) +static inline u64 asid_first_version(unsigned int cpu) { return ~asid_version_mask(cpu) + 1; } @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { - unsigned long asid = asid_cache(cpu); + u64 asid = asid_cache(cpu); if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { if (cpu_has_vtag_icache) flush_icache_all(); local_flush_tlb_all(); /* start new asid cycle */ - if (!asid) /* fix version if needed */ - asid = asid_first_version(cpu); } cpu_context(cpu, mm) = asid_cache(cpu) = asid; diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index f085fba41da5..b826b8473e95 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -7,7 +7,18 @@ #define _ASM_MMZONE_H_ #include <asm/page.h> -#include <mmzone.h> + +#ifdef CONFIG_NEED_MULTIPLE_NODES +# include <mmzone.h> +#endif + +#ifndef pa_to_nid +#define pa_to_nid(addr) 0 +#endif + +#ifndef nid_to_addrbase +#define nid_to_addrbase(nid) 0 +#endif #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h index 542ee09510b3..3635ab384447 100644 --- a/arch/mips/include/asm/octeon/cvmx-agl-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h @@ -171,7 +171,6 @@ union cvmx_agl_gmx_bad_reg { uint64_t reserved_38_63:26; #endif } cn52xx; - struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1; struct cvmx_agl_gmx_bad_reg_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t 
reserved_35_63:29; @@ -199,13 +198,6 @@ union cvmx_agl_gmx_bad_reg { uint64_t reserved_35_63:29; #endif } cn56xx; - struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1; - struct cvmx_agl_gmx_bad_reg_s cn61xx; - struct cvmx_agl_gmx_bad_reg_s cn63xx; - struct cvmx_agl_gmx_bad_reg_s cn63xxp1; - struct cvmx_agl_gmx_bad_reg_s cn66xx; - struct cvmx_agl_gmx_bad_reg_s cn68xx; - struct cvmx_agl_gmx_bad_reg_s cn68xxp1; }; union cvmx_agl_gmx_bist { @@ -228,15 +220,6 @@ union cvmx_agl_gmx_bist { uint64_t reserved_10_63:54; #endif } cn52xx; - struct cvmx_agl_gmx_bist_cn52xx cn52xxp1; - struct cvmx_agl_gmx_bist_cn52xx cn56xx; - struct cvmx_agl_gmx_bist_cn52xx cn56xxp1; - struct cvmx_agl_gmx_bist_s cn61xx; - struct cvmx_agl_gmx_bist_s cn63xx; - struct cvmx_agl_gmx_bist_s cn63xxp1; - struct cvmx_agl_gmx_bist_s cn66xx; - struct cvmx_agl_gmx_bist_s cn68xx; - struct cvmx_agl_gmx_bist_s cn68xxp1; }; union cvmx_agl_gmx_drv_ctl { @@ -270,8 +253,6 @@ union cvmx_agl_gmx_drv_ctl { uint64_t reserved_49_63:15; #endif } s; - struct cvmx_agl_gmx_drv_ctl_s cn52xx; - struct cvmx_agl_gmx_drv_ctl_s cn52xxp1; struct cvmx_agl_gmx_drv_ctl_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; @@ -289,7 +270,6 @@ union cvmx_agl_gmx_drv_ctl { uint64_t reserved_17_63:47; #endif } cn56xx; - struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1; }; union cvmx_agl_gmx_inf_mode { @@ -305,10 +285,6 @@ union cvmx_agl_gmx_inf_mode { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_agl_gmx_inf_mode_s cn52xx; - struct cvmx_agl_gmx_inf_mode_s cn52xxp1; - struct cvmx_agl_gmx_inf_mode_s cn56xx; - struct cvmx_agl_gmx_inf_mode_s cn56xxp1; }; union cvmx_agl_gmx_prtx_cfg { @@ -363,15 +339,6 @@ union cvmx_agl_gmx_prtx_cfg { uint64_t reserved_6_63:58; #endif } cn52xx; - struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1; - struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx; - struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1; - struct cvmx_agl_gmx_prtx_cfg_s cn61xx; - struct cvmx_agl_gmx_prtx_cfg_s cn63xx; - struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1; - struct cvmx_agl_gmx_prtx_cfg_s cn66xx; - struct cvmx_agl_gmx_prtx_cfg_s cn68xx; - struct cvmx_agl_gmx_prtx_cfg_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam0 { @@ -383,16 +350,6 @@ union cvmx_agl_gmx_rxx_adr_cam0 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam1 { @@ -404,16 +361,6 @@ union cvmx_agl_gmx_rxx_adr_cam1 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam2 { @@ -425,16 +372,6 @@ union cvmx_agl_gmx_rxx_adr_cam2 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx; - 
struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam3 { @@ -446,16 +383,6 @@ union cvmx_agl_gmx_rxx_adr_cam3 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam4 { @@ -467,16 +394,6 @@ union cvmx_agl_gmx_rxx_adr_cam4 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam5 { @@ -488,16 +405,6 @@ union cvmx_agl_gmx_rxx_adr_cam5 { uint64_t adr:64; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_cam_en { @@ -511,16 +418,6 @@ union cvmx_agl_gmx_rxx_adr_cam_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xxp1; }; union cvmx_agl_gmx_rxx_adr_ctl { @@ -538,16 +435,6 @@ union cvmx_agl_gmx_rxx_adr_ctl { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn61xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn66xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xx; - struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xxp1; }; union cvmx_agl_gmx_rxx_decision { @@ -561,16 +448,6 @@ union cvmx_agl_gmx_rxx_decision { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_agl_gmx_rxx_decision_s cn52xx; - struct cvmx_agl_gmx_rxx_decision_s cn52xxp1; - struct cvmx_agl_gmx_rxx_decision_s cn56xx; - struct cvmx_agl_gmx_rxx_decision_s 
cn56xxp1; - struct cvmx_agl_gmx_rxx_decision_s cn61xx; - struct cvmx_agl_gmx_rxx_decision_s cn63xx; - struct cvmx_agl_gmx_rxx_decision_s cn63xxp1; - struct cvmx_agl_gmx_rxx_decision_s cn66xx; - struct cvmx_agl_gmx_rxx_decision_s cn68xx; - struct cvmx_agl_gmx_rxx_decision_s cn68xxp1; }; union cvmx_agl_gmx_rxx_frm_chk { @@ -627,15 +504,6 @@ union cvmx_agl_gmx_rxx_frm_chk { uint64_t reserved_9_63:55; #endif } cn52xx; - struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1; - struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx; - struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1; - struct cvmx_agl_gmx_rxx_frm_chk_s cn61xx; - struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx; - struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1; - struct cvmx_agl_gmx_rxx_frm_chk_s cn66xx; - struct cvmx_agl_gmx_rxx_frm_chk_s cn68xx; - struct cvmx_agl_gmx_rxx_frm_chk_s cn68xxp1; }; union cvmx_agl_gmx_rxx_frm_ctl { @@ -700,15 +568,6 @@ union cvmx_agl_gmx_rxx_frm_ctl { uint64_t reserved_10_63:54; #endif } cn52xx; - struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1; - struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx; - struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn61xx; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn66xx; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xx; - struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xxp1; }; union cvmx_agl_gmx_rxx_frm_max { @@ -722,16 +581,6 @@ union cvmx_agl_gmx_rxx_frm_max { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_rxx_frm_max_s cn52xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1; - struct cvmx_agl_gmx_rxx_frm_max_s cn56xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1; - struct cvmx_agl_gmx_rxx_frm_max_s cn61xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn63xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1; - struct cvmx_agl_gmx_rxx_frm_max_s cn66xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn68xx; - struct cvmx_agl_gmx_rxx_frm_max_s cn68xxp1; }; union cvmx_agl_gmx_rxx_frm_min { @@ -745,16 +594,6 @@ union cvmx_agl_gmx_rxx_frm_min { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_rxx_frm_min_s cn52xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1; - struct cvmx_agl_gmx_rxx_frm_min_s cn56xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1; - struct cvmx_agl_gmx_rxx_frm_min_s cn61xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn63xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1; - struct cvmx_agl_gmx_rxx_frm_min_s cn66xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn68xx; - struct cvmx_agl_gmx_rxx_frm_min_s cn68xxp1; }; union cvmx_agl_gmx_rxx_ifg { @@ -768,16 +607,6 @@ union cvmx_agl_gmx_rxx_ifg { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_agl_gmx_rxx_ifg_s cn52xx; - struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1; - struct cvmx_agl_gmx_rxx_ifg_s cn56xx; - struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1; - struct cvmx_agl_gmx_rxx_ifg_s cn61xx; - struct cvmx_agl_gmx_rxx_ifg_s cn63xx; - struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1; - struct cvmx_agl_gmx_rxx_ifg_s cn66xx; - struct cvmx_agl_gmx_rxx_ifg_s cn68xx; - struct cvmx_agl_gmx_rxx_ifg_s cn68xxp1; }; union cvmx_agl_gmx_rxx_int_en { @@ -872,15 +701,6 @@ union cvmx_agl_gmx_rxx_int_en { uint64_t reserved_20_63:44; #endif } cn52xx; - struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1; - struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx; - struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1; - struct cvmx_agl_gmx_rxx_int_en_s cn61xx; - struct cvmx_agl_gmx_rxx_int_en_s cn63xx; - struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1; - struct cvmx_agl_gmx_rxx_int_en_s cn66xx; - struct 
cvmx_agl_gmx_rxx_int_en_s cn68xx; - struct cvmx_agl_gmx_rxx_int_en_s cn68xxp1; }; union cvmx_agl_gmx_rxx_int_reg { @@ -975,15 +795,6 @@ union cvmx_agl_gmx_rxx_int_reg { uint64_t reserved_20_63:44; #endif } cn52xx; - struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1; - struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx; - struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1; - struct cvmx_agl_gmx_rxx_int_reg_s cn61xx; - struct cvmx_agl_gmx_rxx_int_reg_s cn63xx; - struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1; - struct cvmx_agl_gmx_rxx_int_reg_s cn66xx; - struct cvmx_agl_gmx_rxx_int_reg_s cn68xx; - struct cvmx_agl_gmx_rxx_int_reg_s cn68xxp1; }; union cvmx_agl_gmx_rxx_jabber { @@ -997,16 +808,6 @@ union cvmx_agl_gmx_rxx_jabber { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_rxx_jabber_s cn52xx; - struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1; - struct cvmx_agl_gmx_rxx_jabber_s cn56xx; - struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1; - struct cvmx_agl_gmx_rxx_jabber_s cn61xx; - struct cvmx_agl_gmx_rxx_jabber_s cn63xx; - struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1; - struct cvmx_agl_gmx_rxx_jabber_s cn66xx; - struct cvmx_agl_gmx_rxx_jabber_s cn68xx; - struct cvmx_agl_gmx_rxx_jabber_s cn68xxp1; }; union cvmx_agl_gmx_rxx_pause_drop_time { @@ -1020,16 +821,6 @@ union cvmx_agl_gmx_rxx_pause_drop_time { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn61xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn66xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xx; - struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xxp1; }; union cvmx_agl_gmx_rxx_rx_inbnd { @@ -1047,12 +838,6 @@ union cvmx_agl_gmx_rxx_rx_inbnd { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn61xx; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn66xx; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xx; - struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_ctl { @@ -1066,16 +851,6 @@ union cvmx_agl_gmx_rxx_stats_ctl { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_octs { @@ -1089,16 +864,6 @@ union cvmx_agl_gmx_rxx_stats_octs { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_octs_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_octs_ctl { @@ -1112,16 
+877,6 @@ union cvmx_agl_gmx_rxx_stats_octs_ctl { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_octs_dmac { @@ -1135,16 +890,6 @@ union cvmx_agl_gmx_rxx_stats_octs_dmac { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_octs_drp { @@ -1158,16 +903,6 @@ union cvmx_agl_gmx_rxx_stats_octs_drp { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_pkts { @@ -1181,16 +916,6 @@ union cvmx_agl_gmx_rxx_stats_pkts { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_pkts_bad { @@ -1204,16 +929,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_bad { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_pkts_ctl { @@ -1227,16 +942,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_ctl { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx; - struct 
cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_pkts_dmac { @@ -1250,16 +955,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_dmac { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xxp1; }; union cvmx_agl_gmx_rxx_stats_pkts_drp { @@ -1273,16 +968,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_drp { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn61xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn66xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xx; - struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xxp1; }; union cvmx_agl_gmx_rxx_udd_skp { @@ -1300,16 +985,6 @@ union cvmx_agl_gmx_rxx_udd_skp { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1; - struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1; - struct cvmx_agl_gmx_rxx_udd_skp_s cn61xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1; - struct cvmx_agl_gmx_rxx_udd_skp_s cn66xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn68xx; - struct cvmx_agl_gmx_rxx_udd_skp_s cn68xxp1; }; union cvmx_agl_gmx_rx_bp_dropx { @@ -1323,16 +998,6 @@ union cvmx_agl_gmx_rx_bp_dropx { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1; - struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1; - struct cvmx_agl_gmx_rx_bp_dropx_s cn61xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1; - struct cvmx_agl_gmx_rx_bp_dropx_s cn66xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn68xx; - struct cvmx_agl_gmx_rx_bp_dropx_s cn68xxp1; }; union cvmx_agl_gmx_rx_bp_offx { @@ -1346,16 +1011,6 @@ union cvmx_agl_gmx_rx_bp_offx { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_agl_gmx_rx_bp_offx_s cn52xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1; - struct cvmx_agl_gmx_rx_bp_offx_s cn56xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1; - struct cvmx_agl_gmx_rx_bp_offx_s cn61xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn63xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1; - struct cvmx_agl_gmx_rx_bp_offx_s cn66xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn68xx; - struct cvmx_agl_gmx_rx_bp_offx_s cn68xxp1; }; union cvmx_agl_gmx_rx_bp_onx { @@ -1369,16 +1024,6 @@ union cvmx_agl_gmx_rx_bp_onx { uint64_t reserved_9_63:55; #endif } s; - struct 
cvmx_agl_gmx_rx_bp_onx_s cn52xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1; - struct cvmx_agl_gmx_rx_bp_onx_s cn56xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1; - struct cvmx_agl_gmx_rx_bp_onx_s cn61xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn63xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1; - struct cvmx_agl_gmx_rx_bp_onx_s cn66xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn68xx; - struct cvmx_agl_gmx_rx_bp_onx_s cn68xxp1; }; union cvmx_agl_gmx_rx_prt_info { @@ -1396,8 +1041,6 @@ union cvmx_agl_gmx_rx_prt_info { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_agl_gmx_rx_prt_info_s cn52xx; - struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1; struct cvmx_agl_gmx_rx_prt_info_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; @@ -1411,13 +1054,6 @@ union cvmx_agl_gmx_rx_prt_info { uint64_t reserved_17_63:47; #endif } cn56xx; - struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1; - struct cvmx_agl_gmx_rx_prt_info_s cn61xx; - struct cvmx_agl_gmx_rx_prt_info_s cn63xx; - struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1; - struct cvmx_agl_gmx_rx_prt_info_s cn66xx; - struct cvmx_agl_gmx_rx_prt_info_s cn68xx; - struct cvmx_agl_gmx_rx_prt_info_s cn68xxp1; }; union cvmx_agl_gmx_rx_tx_status { @@ -1435,8 +1071,6 @@ union cvmx_agl_gmx_rx_tx_status { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_agl_gmx_rx_tx_status_s cn52xx; - struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1; struct cvmx_agl_gmx_rx_tx_status_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_5_63:59; @@ -1450,13 +1084,6 @@ union cvmx_agl_gmx_rx_tx_status { uint64_t reserved_5_63:59; #endif } cn56xx; - struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1; - struct cvmx_agl_gmx_rx_tx_status_s cn61xx; - struct cvmx_agl_gmx_rx_tx_status_s cn63xx; - struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1; - struct cvmx_agl_gmx_rx_tx_status_s cn66xx; - struct cvmx_agl_gmx_rx_tx_status_s cn68xx; - struct cvmx_agl_gmx_rx_tx_status_s cn68xxp1; }; union cvmx_agl_gmx_smacx { @@ -1470,16 +1097,6 @@ union cvmx_agl_gmx_smacx { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_smacx_s cn52xx; - struct cvmx_agl_gmx_smacx_s cn52xxp1; - struct cvmx_agl_gmx_smacx_s cn56xx; - struct cvmx_agl_gmx_smacx_s cn56xxp1; - struct cvmx_agl_gmx_smacx_s cn61xx; - struct cvmx_agl_gmx_smacx_s cn63xx; - struct cvmx_agl_gmx_smacx_s cn63xxp1; - struct cvmx_agl_gmx_smacx_s cn66xx; - struct cvmx_agl_gmx_smacx_s cn68xx; - struct cvmx_agl_gmx_smacx_s cn68xxp1; }; union cvmx_agl_gmx_stat_bp { @@ -1495,16 +1112,6 @@ union cvmx_agl_gmx_stat_bp { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_agl_gmx_stat_bp_s cn52xx; - struct cvmx_agl_gmx_stat_bp_s cn52xxp1; - struct cvmx_agl_gmx_stat_bp_s cn56xx; - struct cvmx_agl_gmx_stat_bp_s cn56xxp1; - struct cvmx_agl_gmx_stat_bp_s cn61xx; - struct cvmx_agl_gmx_stat_bp_s cn63xx; - struct cvmx_agl_gmx_stat_bp_s cn63xxp1; - struct cvmx_agl_gmx_stat_bp_s cn66xx; - struct cvmx_agl_gmx_stat_bp_s cn68xx; - struct cvmx_agl_gmx_stat_bp_s cn68xxp1; }; union cvmx_agl_gmx_txx_append { @@ -1524,16 +1131,6 @@ union cvmx_agl_gmx_txx_append { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_agl_gmx_txx_append_s cn52xx; - struct cvmx_agl_gmx_txx_append_s cn52xxp1; - struct cvmx_agl_gmx_txx_append_s cn56xx; - struct cvmx_agl_gmx_txx_append_s cn56xxp1; - struct cvmx_agl_gmx_txx_append_s cn61xx; - struct cvmx_agl_gmx_txx_append_s cn63xx; - struct cvmx_agl_gmx_txx_append_s cn63xxp1; - struct cvmx_agl_gmx_txx_append_s cn66xx; - struct cvmx_agl_gmx_txx_append_s cn68xx; - struct cvmx_agl_gmx_txx_append_s cn68xxp1; }; union cvmx_agl_gmx_txx_clk { @@ 
-1547,12 +1144,6 @@ union cvmx_agl_gmx_txx_clk { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_agl_gmx_txx_clk_s cn61xx; - struct cvmx_agl_gmx_txx_clk_s cn63xx; - struct cvmx_agl_gmx_txx_clk_s cn63xxp1; - struct cvmx_agl_gmx_txx_clk_s cn66xx; - struct cvmx_agl_gmx_txx_clk_s cn68xx; - struct cvmx_agl_gmx_txx_clk_s cn68xxp1; }; union cvmx_agl_gmx_txx_ctl { @@ -1568,16 +1159,6 @@ union cvmx_agl_gmx_txx_ctl { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_agl_gmx_txx_ctl_s cn52xx; - struct cvmx_agl_gmx_txx_ctl_s cn52xxp1; - struct cvmx_agl_gmx_txx_ctl_s cn56xx; - struct cvmx_agl_gmx_txx_ctl_s cn56xxp1; - struct cvmx_agl_gmx_txx_ctl_s cn61xx; - struct cvmx_agl_gmx_txx_ctl_s cn63xx; - struct cvmx_agl_gmx_txx_ctl_s cn63xxp1; - struct cvmx_agl_gmx_txx_ctl_s cn66xx; - struct cvmx_agl_gmx_txx_ctl_s cn68xx; - struct cvmx_agl_gmx_txx_ctl_s cn68xxp1; }; union cvmx_agl_gmx_txx_min_pkt { @@ -1591,16 +1172,6 @@ union cvmx_agl_gmx_txx_min_pkt { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_agl_gmx_txx_min_pkt_s cn52xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1; - struct cvmx_agl_gmx_txx_min_pkt_s cn56xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1; - struct cvmx_agl_gmx_txx_min_pkt_s cn61xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn63xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1; - struct cvmx_agl_gmx_txx_min_pkt_s cn66xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn68xx; - struct cvmx_agl_gmx_txx_min_pkt_s cn68xxp1; }; union cvmx_agl_gmx_txx_pause_pkt_interval { @@ -1614,16 +1185,6 @@ union cvmx_agl_gmx_txx_pause_pkt_interval { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn61xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn66xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xx; - struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xxp1; }; union cvmx_agl_gmx_txx_pause_pkt_time { @@ -1637,16 +1198,6 @@ union cvmx_agl_gmx_txx_pause_pkt_time { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn61xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn66xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xx; - struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xxp1; }; union cvmx_agl_gmx_txx_pause_togo { @@ -1660,16 +1211,6 @@ union cvmx_agl_gmx_txx_pause_togo { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_txx_pause_togo_s cn52xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1; - struct cvmx_agl_gmx_txx_pause_togo_s cn56xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1; - struct cvmx_agl_gmx_txx_pause_togo_s cn61xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn63xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1; - struct cvmx_agl_gmx_txx_pause_togo_s cn66xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn68xx; - struct cvmx_agl_gmx_txx_pause_togo_s cn68xxp1; }; union cvmx_agl_gmx_txx_pause_zero { @@ -1683,16 +1224,6 @@ union cvmx_agl_gmx_txx_pause_zero { uint64_t 
reserved_1_63:63; #endif } s; - struct cvmx_agl_gmx_txx_pause_zero_s cn52xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1; - struct cvmx_agl_gmx_txx_pause_zero_s cn56xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1; - struct cvmx_agl_gmx_txx_pause_zero_s cn61xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn63xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1; - struct cvmx_agl_gmx_txx_pause_zero_s cn66xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn68xx; - struct cvmx_agl_gmx_txx_pause_zero_s cn68xxp1; }; union cvmx_agl_gmx_txx_soft_pause { @@ -1706,16 +1237,6 @@ union cvmx_agl_gmx_txx_soft_pause { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_txx_soft_pause_s cn52xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1; - struct cvmx_agl_gmx_txx_soft_pause_s cn56xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1; - struct cvmx_agl_gmx_txx_soft_pause_s cn61xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn63xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1; - struct cvmx_agl_gmx_txx_soft_pause_s cn66xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn68xx; - struct cvmx_agl_gmx_txx_soft_pause_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat0 { @@ -1729,16 +1250,6 @@ union cvmx_agl_gmx_txx_stat0 { uint64_t xsdef:32; #endif } s; - struct cvmx_agl_gmx_txx_stat0_s cn52xx; - struct cvmx_agl_gmx_txx_stat0_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat0_s cn56xx; - struct cvmx_agl_gmx_txx_stat0_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat0_s cn61xx; - struct cvmx_agl_gmx_txx_stat0_s cn63xx; - struct cvmx_agl_gmx_txx_stat0_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat0_s cn66xx; - struct cvmx_agl_gmx_txx_stat0_s cn68xx; - struct cvmx_agl_gmx_txx_stat0_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat1 { @@ -1752,16 +1263,6 @@ union cvmx_agl_gmx_txx_stat1 { uint64_t scol:32; #endif } s; - struct cvmx_agl_gmx_txx_stat1_s cn52xx; - struct cvmx_agl_gmx_txx_stat1_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat1_s cn56xx; - struct cvmx_agl_gmx_txx_stat1_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat1_s cn61xx; - struct cvmx_agl_gmx_txx_stat1_s cn63xx; - struct cvmx_agl_gmx_txx_stat1_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat1_s cn66xx; - struct cvmx_agl_gmx_txx_stat1_s cn68xx; - struct cvmx_agl_gmx_txx_stat1_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat2 { @@ -1775,16 +1276,6 @@ union cvmx_agl_gmx_txx_stat2 { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_txx_stat2_s cn52xx; - struct cvmx_agl_gmx_txx_stat2_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat2_s cn56xx; - struct cvmx_agl_gmx_txx_stat2_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat2_s cn61xx; - struct cvmx_agl_gmx_txx_stat2_s cn63xx; - struct cvmx_agl_gmx_txx_stat2_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat2_s cn66xx; - struct cvmx_agl_gmx_txx_stat2_s cn68xx; - struct cvmx_agl_gmx_txx_stat2_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat3 { @@ -1798,16 +1289,6 @@ union cvmx_agl_gmx_txx_stat3 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_agl_gmx_txx_stat3_s cn52xx; - struct cvmx_agl_gmx_txx_stat3_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat3_s cn56xx; - struct cvmx_agl_gmx_txx_stat3_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat3_s cn61xx; - struct cvmx_agl_gmx_txx_stat3_s cn63xx; - struct cvmx_agl_gmx_txx_stat3_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat3_s cn66xx; - struct cvmx_agl_gmx_txx_stat3_s cn68xx; - struct cvmx_agl_gmx_txx_stat3_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat4 { @@ -1821,16 +1302,6 @@ union cvmx_agl_gmx_txx_stat4 { uint64_t hist1:32; #endif } s; - struct cvmx_agl_gmx_txx_stat4_s cn52xx; - struct cvmx_agl_gmx_txx_stat4_s cn52xxp1; - struct 
cvmx_agl_gmx_txx_stat4_s cn56xx; - struct cvmx_agl_gmx_txx_stat4_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat4_s cn61xx; - struct cvmx_agl_gmx_txx_stat4_s cn63xx; - struct cvmx_agl_gmx_txx_stat4_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat4_s cn66xx; - struct cvmx_agl_gmx_txx_stat4_s cn68xx; - struct cvmx_agl_gmx_txx_stat4_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat5 { @@ -1844,16 +1315,6 @@ union cvmx_agl_gmx_txx_stat5 { uint64_t hist3:32; #endif } s; - struct cvmx_agl_gmx_txx_stat5_s cn52xx; - struct cvmx_agl_gmx_txx_stat5_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat5_s cn56xx; - struct cvmx_agl_gmx_txx_stat5_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat5_s cn61xx; - struct cvmx_agl_gmx_txx_stat5_s cn63xx; - struct cvmx_agl_gmx_txx_stat5_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat5_s cn66xx; - struct cvmx_agl_gmx_txx_stat5_s cn68xx; - struct cvmx_agl_gmx_txx_stat5_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat6 { @@ -1867,16 +1328,6 @@ union cvmx_agl_gmx_txx_stat6 { uint64_t hist5:32; #endif } s; - struct cvmx_agl_gmx_txx_stat6_s cn52xx; - struct cvmx_agl_gmx_txx_stat6_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat6_s cn56xx; - struct cvmx_agl_gmx_txx_stat6_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat6_s cn61xx; - struct cvmx_agl_gmx_txx_stat6_s cn63xx; - struct cvmx_agl_gmx_txx_stat6_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat6_s cn66xx; - struct cvmx_agl_gmx_txx_stat6_s cn68xx; - struct cvmx_agl_gmx_txx_stat6_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat7 { @@ -1890,16 +1341,6 @@ union cvmx_agl_gmx_txx_stat7 { uint64_t hist7:32; #endif } s; - struct cvmx_agl_gmx_txx_stat7_s cn52xx; - struct cvmx_agl_gmx_txx_stat7_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat7_s cn56xx; - struct cvmx_agl_gmx_txx_stat7_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat7_s cn61xx; - struct cvmx_agl_gmx_txx_stat7_s cn63xx; - struct cvmx_agl_gmx_txx_stat7_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat7_s cn66xx; - struct cvmx_agl_gmx_txx_stat7_s cn68xx; - struct cvmx_agl_gmx_txx_stat7_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat8 { @@ -1913,16 +1354,6 @@ union cvmx_agl_gmx_txx_stat8 { uint64_t mcst:32; #endif } s; - struct cvmx_agl_gmx_txx_stat8_s cn52xx; - struct cvmx_agl_gmx_txx_stat8_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat8_s cn56xx; - struct cvmx_agl_gmx_txx_stat8_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat8_s cn61xx; - struct cvmx_agl_gmx_txx_stat8_s cn63xx; - struct cvmx_agl_gmx_txx_stat8_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat8_s cn66xx; - struct cvmx_agl_gmx_txx_stat8_s cn68xx; - struct cvmx_agl_gmx_txx_stat8_s cn68xxp1; }; union cvmx_agl_gmx_txx_stat9 { @@ -1936,16 +1367,6 @@ union cvmx_agl_gmx_txx_stat9 { uint64_t undflw:32; #endif } s; - struct cvmx_agl_gmx_txx_stat9_s cn52xx; - struct cvmx_agl_gmx_txx_stat9_s cn52xxp1; - struct cvmx_agl_gmx_txx_stat9_s cn56xx; - struct cvmx_agl_gmx_txx_stat9_s cn56xxp1; - struct cvmx_agl_gmx_txx_stat9_s cn61xx; - struct cvmx_agl_gmx_txx_stat9_s cn63xx; - struct cvmx_agl_gmx_txx_stat9_s cn63xxp1; - struct cvmx_agl_gmx_txx_stat9_s cn66xx; - struct cvmx_agl_gmx_txx_stat9_s cn68xx; - struct cvmx_agl_gmx_txx_stat9_s cn68xxp1; }; union cvmx_agl_gmx_txx_stats_ctl { @@ -1959,16 +1380,6 @@ union cvmx_agl_gmx_txx_stats_ctl { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1; - struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1; - struct cvmx_agl_gmx_txx_stats_ctl_s cn61xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1; - struct 
cvmx_agl_gmx_txx_stats_ctl_s cn66xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn68xx; - struct cvmx_agl_gmx_txx_stats_ctl_s cn68xxp1; }; union cvmx_agl_gmx_txx_thresh { @@ -1982,16 +1393,6 @@ union cvmx_agl_gmx_txx_thresh { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_agl_gmx_txx_thresh_s cn52xx; - struct cvmx_agl_gmx_txx_thresh_s cn52xxp1; - struct cvmx_agl_gmx_txx_thresh_s cn56xx; - struct cvmx_agl_gmx_txx_thresh_s cn56xxp1; - struct cvmx_agl_gmx_txx_thresh_s cn61xx; - struct cvmx_agl_gmx_txx_thresh_s cn63xx; - struct cvmx_agl_gmx_txx_thresh_s cn63xxp1; - struct cvmx_agl_gmx_txx_thresh_s cn66xx; - struct cvmx_agl_gmx_txx_thresh_s cn68xx; - struct cvmx_agl_gmx_txx_thresh_s cn68xxp1; }; union cvmx_agl_gmx_tx_bp { @@ -2005,8 +1406,6 @@ union cvmx_agl_gmx_tx_bp { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_agl_gmx_tx_bp_s cn52xx; - struct cvmx_agl_gmx_tx_bp_s cn52xxp1; struct cvmx_agl_gmx_tx_bp_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_1_63:63; @@ -2016,13 +1415,6 @@ union cvmx_agl_gmx_tx_bp { uint64_t reserved_1_63:63; #endif } cn56xx; - struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1; - struct cvmx_agl_gmx_tx_bp_s cn61xx; - struct cvmx_agl_gmx_tx_bp_s cn63xx; - struct cvmx_agl_gmx_tx_bp_s cn63xxp1; - struct cvmx_agl_gmx_tx_bp_s cn66xx; - struct cvmx_agl_gmx_tx_bp_s cn68xx; - struct cvmx_agl_gmx_tx_bp_s cn68xxp1; }; union cvmx_agl_gmx_tx_col_attempt { @@ -2036,16 +1428,6 @@ union cvmx_agl_gmx_tx_col_attempt { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_agl_gmx_tx_col_attempt_s cn52xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1; - struct cvmx_agl_gmx_tx_col_attempt_s cn56xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1; - struct cvmx_agl_gmx_tx_col_attempt_s cn61xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn63xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1; - struct cvmx_agl_gmx_tx_col_attempt_s cn66xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn68xx; - struct cvmx_agl_gmx_tx_col_attempt_s cn68xxp1; }; union cvmx_agl_gmx_tx_ifg { @@ -2061,16 +1443,6 @@ union cvmx_agl_gmx_tx_ifg { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_agl_gmx_tx_ifg_s cn52xx; - struct cvmx_agl_gmx_tx_ifg_s cn52xxp1; - struct cvmx_agl_gmx_tx_ifg_s cn56xx; - struct cvmx_agl_gmx_tx_ifg_s cn56xxp1; - struct cvmx_agl_gmx_tx_ifg_s cn61xx; - struct cvmx_agl_gmx_tx_ifg_s cn63xx; - struct cvmx_agl_gmx_tx_ifg_s cn63xxp1; - struct cvmx_agl_gmx_tx_ifg_s cn66xx; - struct cvmx_agl_gmx_tx_ifg_s cn68xx; - struct cvmx_agl_gmx_tx_ifg_s cn68xxp1; }; union cvmx_agl_gmx_tx_int_en { @@ -2129,7 +1501,6 @@ union cvmx_agl_gmx_tx_int_en { uint64_t reserved_18_63:46; #endif } cn52xx; - struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1; struct cvmx_agl_gmx_tx_int_en_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; @@ -2155,13 +1526,6 @@ union cvmx_agl_gmx_tx_int_en { uint64_t reserved_17_63:47; #endif } cn56xx; - struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1; - struct cvmx_agl_gmx_tx_int_en_s cn61xx; - struct cvmx_agl_gmx_tx_int_en_s cn63xx; - struct cvmx_agl_gmx_tx_int_en_s cn63xxp1; - struct cvmx_agl_gmx_tx_int_en_s cn66xx; - struct cvmx_agl_gmx_tx_int_en_s cn68xx; - struct cvmx_agl_gmx_tx_int_en_s cn68xxp1; }; union cvmx_agl_gmx_tx_int_reg { @@ -2220,7 +1584,6 @@ union cvmx_agl_gmx_tx_int_reg { uint64_t reserved_18_63:46; #endif } cn52xx; - struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1; struct cvmx_agl_gmx_tx_int_reg_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; @@ -2246,13 +1609,6 @@ union cvmx_agl_gmx_tx_int_reg { uint64_t reserved_17_63:47; #endif } cn56xx; - 
struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1; - struct cvmx_agl_gmx_tx_int_reg_s cn61xx; - struct cvmx_agl_gmx_tx_int_reg_s cn63xx; - struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1; - struct cvmx_agl_gmx_tx_int_reg_s cn66xx; - struct cvmx_agl_gmx_tx_int_reg_s cn68xx; - struct cvmx_agl_gmx_tx_int_reg_s cn68xxp1; }; union cvmx_agl_gmx_tx_jam { @@ -2266,16 +1622,6 @@ union cvmx_agl_gmx_tx_jam { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_agl_gmx_tx_jam_s cn52xx; - struct cvmx_agl_gmx_tx_jam_s cn52xxp1; - struct cvmx_agl_gmx_tx_jam_s cn56xx; - struct cvmx_agl_gmx_tx_jam_s cn56xxp1; - struct cvmx_agl_gmx_tx_jam_s cn61xx; - struct cvmx_agl_gmx_tx_jam_s cn63xx; - struct cvmx_agl_gmx_tx_jam_s cn63xxp1; - struct cvmx_agl_gmx_tx_jam_s cn66xx; - struct cvmx_agl_gmx_tx_jam_s cn68xx; - struct cvmx_agl_gmx_tx_jam_s cn68xxp1; }; union cvmx_agl_gmx_tx_lfsr { @@ -2289,16 +1635,6 @@ union cvmx_agl_gmx_tx_lfsr { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_tx_lfsr_s cn52xx; - struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1; - struct cvmx_agl_gmx_tx_lfsr_s cn56xx; - struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1; - struct cvmx_agl_gmx_tx_lfsr_s cn61xx; - struct cvmx_agl_gmx_tx_lfsr_s cn63xx; - struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1; - struct cvmx_agl_gmx_tx_lfsr_s cn66xx; - struct cvmx_agl_gmx_tx_lfsr_s cn68xx; - struct cvmx_agl_gmx_tx_lfsr_s cn68xxp1; }; union cvmx_agl_gmx_tx_ovr_bp { @@ -2320,8 +1656,6 @@ union cvmx_agl_gmx_tx_ovr_bp { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx; - struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1; struct cvmx_agl_gmx_tx_ovr_bp_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -2339,13 +1673,6 @@ union cvmx_agl_gmx_tx_ovr_bp { uint64_t reserved_9_63:55; #endif } cn56xx; - struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1; - struct cvmx_agl_gmx_tx_ovr_bp_s cn61xx; - struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx; - struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1; - struct cvmx_agl_gmx_tx_ovr_bp_s cn66xx; - struct cvmx_agl_gmx_tx_ovr_bp_s cn68xx; - struct cvmx_agl_gmx_tx_ovr_bp_s cn68xxp1; }; union cvmx_agl_gmx_tx_pause_pkt_dmac { @@ -2359,16 +1686,6 @@ union cvmx_agl_gmx_tx_pause_pkt_dmac { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn61xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn66xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xx; - struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xxp1; }; union cvmx_agl_gmx_tx_pause_pkt_type { @@ -2382,16 +1699,6 @@ union cvmx_agl_gmx_tx_pause_pkt_type { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn61xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn66xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xx; - struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xxp1; }; union cvmx_agl_prtx_ctl { @@ -2447,12 +1754,6 @@ union cvmx_agl_prtx_ctl { uint64_t drv_byp:1; #endif } s; - struct cvmx_agl_prtx_ctl_s cn61xx; - struct cvmx_agl_prtx_ctl_s 
cn63xx; - struct cvmx_agl_prtx_ctl_s cn63xxp1; - struct cvmx_agl_prtx_ctl_s cn66xx; - struct cvmx_agl_prtx_ctl_s cn68xx; - struct cvmx_agl_prtx_ctl_s cn68xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h index 1eef155979f3..70f4a5729581 100644 --- a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h @@ -68,9 +68,6 @@ union cvmx_asxx_gmii_rx_clk_set { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_gmii_rx_clk_set_s cn30xx; - struct cvmx_asxx_gmii_rx_clk_set_s cn31xx; - struct cvmx_asxx_gmii_rx_clk_set_s cn50xx; }; union cvmx_asxx_gmii_rx_dat_set { @@ -84,9 +81,6 @@ union cvmx_asxx_gmii_rx_dat_set { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_gmii_rx_dat_set_s cn30xx; - struct cvmx_asxx_gmii_rx_dat_set_s cn31xx; - struct cvmx_asxx_gmii_rx_dat_set_s cn50xx; }; union cvmx_asxx_int_en { @@ -121,12 +115,6 @@ union cvmx_asxx_int_en { uint64_t reserved_11_63:53; #endif } cn30xx; - struct cvmx_asxx_int_en_cn30xx cn31xx; - struct cvmx_asxx_int_en_s cn38xx; - struct cvmx_asxx_int_en_s cn38xxp2; - struct cvmx_asxx_int_en_cn30xx cn50xx; - struct cvmx_asxx_int_en_s cn58xx; - struct cvmx_asxx_int_en_s cn58xxp1; }; union cvmx_asxx_int_reg { @@ -161,12 +149,6 @@ union cvmx_asxx_int_reg { uint64_t reserved_11_63:53; #endif } cn30xx; - struct cvmx_asxx_int_reg_cn30xx cn31xx; - struct cvmx_asxx_int_reg_s cn38xx; - struct cvmx_asxx_int_reg_s cn38xxp2; - struct cvmx_asxx_int_reg_cn30xx cn50xx; - struct cvmx_asxx_int_reg_s cn58xx; - struct cvmx_asxx_int_reg_s cn58xxp1; }; union cvmx_asxx_mii_rx_dat_set { @@ -180,8 +162,6 @@ union cvmx_asxx_mii_rx_dat_set { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_mii_rx_dat_set_s cn30xx; - struct cvmx_asxx_mii_rx_dat_set_s cn50xx; }; union cvmx_asxx_prt_loop { @@ -210,12 +190,6 @@ union cvmx_asxx_prt_loop { uint64_t reserved_7_63:57; #endif } cn30xx; - struct cvmx_asxx_prt_loop_cn30xx cn31xx; - struct cvmx_asxx_prt_loop_s cn38xx; - struct cvmx_asxx_prt_loop_s cn38xxp2; - struct cvmx_asxx_prt_loop_cn30xx cn50xx; - struct cvmx_asxx_prt_loop_s cn58xx; - struct cvmx_asxx_prt_loop_s cn58xxp1; }; union cvmx_asxx_rld_bypass { @@ -229,10 +203,6 @@ union cvmx_asxx_rld_bypass { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_asxx_rld_bypass_s cn38xx; - struct cvmx_asxx_rld_bypass_s cn38xxp2; - struct cvmx_asxx_rld_bypass_s cn58xx; - struct cvmx_asxx_rld_bypass_s cn58xxp1; }; union cvmx_asxx_rld_bypass_setting { @@ -246,10 +216,6 @@ union cvmx_asxx_rld_bypass_setting { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rld_bypass_setting_s cn38xx; - struct cvmx_asxx_rld_bypass_setting_s cn38xxp2; - struct cvmx_asxx_rld_bypass_setting_s cn58xx; - struct cvmx_asxx_rld_bypass_setting_s cn58xxp1; }; union cvmx_asxx_rld_comp { @@ -276,9 +242,6 @@ union cvmx_asxx_rld_comp { uint64_t reserved_8_63:56; #endif } cn38xx; - struct cvmx_asxx_rld_comp_cn38xx cn38xxp2; - struct cvmx_asxx_rld_comp_s cn58xx; - struct cvmx_asxx_rld_comp_s cn58xxp1; }; union cvmx_asxx_rld_data_drv { @@ -294,10 +257,6 @@ union cvmx_asxx_rld_data_drv { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_asxx_rld_data_drv_s cn38xx; - struct cvmx_asxx_rld_data_drv_s cn38xxp2; - struct cvmx_asxx_rld_data_drv_s cn58xx; - struct cvmx_asxx_rld_data_drv_s cn58xxp1; }; union cvmx_asxx_rld_fcram_mode { @@ -311,8 +270,6 @@ union cvmx_asxx_rld_fcram_mode { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_asxx_rld_fcram_mode_s cn38xx; - struct 
cvmx_asxx_rld_fcram_mode_s cn38xxp2; }; union cvmx_asxx_rld_nctl_strong { @@ -326,10 +283,6 @@ union cvmx_asxx_rld_nctl_strong { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rld_nctl_strong_s cn38xx; - struct cvmx_asxx_rld_nctl_strong_s cn38xxp2; - struct cvmx_asxx_rld_nctl_strong_s cn58xx; - struct cvmx_asxx_rld_nctl_strong_s cn58xxp1; }; union cvmx_asxx_rld_nctl_weak { @@ -343,10 +296,6 @@ union cvmx_asxx_rld_nctl_weak { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rld_nctl_weak_s cn38xx; - struct cvmx_asxx_rld_nctl_weak_s cn38xxp2; - struct cvmx_asxx_rld_nctl_weak_s cn58xx; - struct cvmx_asxx_rld_nctl_weak_s cn58xxp1; }; union cvmx_asxx_rld_pctl_strong { @@ -360,10 +309,6 @@ union cvmx_asxx_rld_pctl_strong { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rld_pctl_strong_s cn38xx; - struct cvmx_asxx_rld_pctl_strong_s cn38xxp2; - struct cvmx_asxx_rld_pctl_strong_s cn58xx; - struct cvmx_asxx_rld_pctl_strong_s cn58xxp1; }; union cvmx_asxx_rld_pctl_weak { @@ -377,10 +322,6 @@ union cvmx_asxx_rld_pctl_weak { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rld_pctl_weak_s cn38xx; - struct cvmx_asxx_rld_pctl_weak_s cn38xxp2; - struct cvmx_asxx_rld_pctl_weak_s cn58xx; - struct cvmx_asxx_rld_pctl_weak_s cn58xxp1; }; union cvmx_asxx_rld_setting { @@ -411,9 +352,6 @@ union cvmx_asxx_rld_setting { uint64_t reserved_5_63:59; #endif } cn38xx; - struct cvmx_asxx_rld_setting_cn38xx cn38xxp2; - struct cvmx_asxx_rld_setting_s cn58xx; - struct cvmx_asxx_rld_setting_s cn58xxp1; }; union cvmx_asxx_rx_clk_setx { @@ -427,13 +365,6 @@ union cvmx_asxx_rx_clk_setx { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_rx_clk_setx_s cn30xx; - struct cvmx_asxx_rx_clk_setx_s cn31xx; - struct cvmx_asxx_rx_clk_setx_s cn38xx; - struct cvmx_asxx_rx_clk_setx_s cn38xxp2; - struct cvmx_asxx_rx_clk_setx_s cn50xx; - struct cvmx_asxx_rx_clk_setx_s cn58xx; - struct cvmx_asxx_rx_clk_setx_s cn58xxp1; }; union cvmx_asxx_rx_prt_en { @@ -456,12 +387,6 @@ union cvmx_asxx_rx_prt_en { uint64_t reserved_3_63:61; #endif } cn30xx; - struct cvmx_asxx_rx_prt_en_cn30xx cn31xx; - struct cvmx_asxx_rx_prt_en_s cn38xx; - struct cvmx_asxx_rx_prt_en_s cn38xxp2; - struct cvmx_asxx_rx_prt_en_cn30xx cn50xx; - struct cvmx_asxx_rx_prt_en_s cn58xx; - struct cvmx_asxx_rx_prt_en_s cn58xxp1; }; union cvmx_asxx_rx_wol { @@ -477,8 +402,6 @@ union cvmx_asxx_rx_wol { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_asxx_rx_wol_s cn38xx; - struct cvmx_asxx_rx_wol_s cn38xxp2; }; union cvmx_asxx_rx_wol_msk { @@ -490,8 +413,6 @@ union cvmx_asxx_rx_wol_msk { uint64_t msk:64; #endif } s; - struct cvmx_asxx_rx_wol_msk_s cn38xx; - struct cvmx_asxx_rx_wol_msk_s cn38xxp2; }; union cvmx_asxx_rx_wol_powok { @@ -505,8 +426,6 @@ union cvmx_asxx_rx_wol_powok { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_asxx_rx_wol_powok_s cn38xx; - struct cvmx_asxx_rx_wol_powok_s cn38xxp2; }; union cvmx_asxx_rx_wol_sig { @@ -520,8 +439,6 @@ union cvmx_asxx_rx_wol_sig { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_asxx_rx_wol_sig_s cn38xx; - struct cvmx_asxx_rx_wol_sig_s cn38xxp2; }; union cvmx_asxx_tx_clk_setx { @@ -535,13 +452,6 @@ union cvmx_asxx_tx_clk_setx { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_asxx_tx_clk_setx_s cn30xx; - struct cvmx_asxx_tx_clk_setx_s cn31xx; - struct cvmx_asxx_tx_clk_setx_s cn38xx; - struct cvmx_asxx_tx_clk_setx_s cn38xxp2; - struct cvmx_asxx_tx_clk_setx_s cn50xx; - struct cvmx_asxx_tx_clk_setx_s cn58xx; - struct cvmx_asxx_tx_clk_setx_s cn58xxp1; }; union cvmx_asxx_tx_comp_byp 
{ @@ -566,7 +476,6 @@ union cvmx_asxx_tx_comp_byp { uint64_t reserved_9_63:55; #endif } cn30xx; - struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx; struct cvmx_asxx_tx_comp_byp_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; @@ -578,7 +487,6 @@ union cvmx_asxx_tx_comp_byp { uint64_t reserved_8_63:56; #endif } cn38xx; - struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2; struct cvmx_asxx_tx_comp_byp_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; @@ -609,7 +517,6 @@ union cvmx_asxx_tx_comp_byp { uint64_t reserved_13_63:51; #endif } cn58xx; - struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1; }; union cvmx_asxx_tx_hi_waterx { @@ -632,12 +539,6 @@ union cvmx_asxx_tx_hi_waterx { uint64_t reserved_3_63:61; #endif } cn30xx; - struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx; - struct cvmx_asxx_tx_hi_waterx_s cn38xx; - struct cvmx_asxx_tx_hi_waterx_s cn38xxp2; - struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx; - struct cvmx_asxx_tx_hi_waterx_s cn58xx; - struct cvmx_asxx_tx_hi_waterx_s cn58xxp1; }; union cvmx_asxx_tx_prt_en { @@ -660,12 +561,6 @@ union cvmx_asxx_tx_prt_en { uint64_t reserved_3_63:61; #endif } cn30xx; - struct cvmx_asxx_tx_prt_en_cn30xx cn31xx; - struct cvmx_asxx_tx_prt_en_s cn38xx; - struct cvmx_asxx_tx_prt_en_s cn38xxp2; - struct cvmx_asxx_tx_prt_en_cn30xx cn50xx; - struct cvmx_asxx_tx_prt_en_s cn58xx; - struct cvmx_asxx_tx_prt_en_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 72d2e403a6e4..689a82cac740 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -146,18 +146,6 @@ struct cvmx_bootmem_desc { extern int cvmx_bootmem_init(void *mem_desc_ptr); /** - * Allocate a block of memory from the free list that was passed - * to the application by the bootloader. - * This is an allocate-only algorithm, so freeing memory is not possible. - * - * @size: Size in bytes of block to allocate - * @alignment: Alignment required - must be power of 2 - * - * Returns pointer to block of memory, NULL on error - */ -extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment); - -/** * Allocate a block of memory from the free list that was * passed to the application by the bootloader at a specific * address. This is an allocate-only algorithm, so @@ -174,22 +162,6 @@ extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment); /** - * Allocate a block of memory from the free list that was - * passed to the application by the bootloader within a specified - * address range. This is an allocate-only algorithm, so - * freeing memory is not possible. Allocation will fail if - * memory cannot be allocated in the requested range. - * - * @size: Size in bytes of block to allocate - * @min_addr: defines the minimum address of the range - * @max_addr: defines the maximum address of the range - * @alignment: Alignment required - must be power of 2 - * Returns pointer to block of memory, NULL on error - */ -extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, - uint64_t min_addr, uint64_t max_addr); - -/** * Frees a previously allocated named bootmem block. 
* * @name: name of block to free @@ -214,27 +186,6 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name); - - -/** - * Allocate a block of memory from the free list that was passed - * to the application by the bootloader, and assign it a name in the - * global named block table. (part of the cvmx_bootmem_descriptor_t structure) - * Named blocks can later be freed. - * - * @size: Size in bytes of block to allocate - * @address: Physical address to allocate memory at. If this - * memory is not available, the allocation fails. - * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN - * bytes - * - * Returns a pointer to block of memory, NULL on error - */ -extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, - char *name); - - - /** * Allocate a block of memory from a specific range of the free list * that was passed to the application by the bootloader, and assign it @@ -351,33 +302,6 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, char *name, uint32_t flags); /** - * Finds a named memory block by name. - * Also used for finding an unused entry in the named block table. - * - * @name: Name of memory block to find. If NULL pointer given, then - * finds unused descriptor, if available. - * - * @flags: Flags to control options for the allocation. - * - * Returns Pointer to memory block descriptor, NULL if not found. - * If NULL returned when name parameter is NULL, then no memory - * block descriptors are available. - */ -struct cvmx_bootmem_named_block_desc * -cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags); - -/** - * Frees a named block. - * - * @name: name of block to free - * @flags: flags for passing options - * - * Returns 0 on failure - * 1 on success - */ -int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags); - -/** * Frees a block to the bootmem allocator list. 
This must * be used with care, as the size provided must match the size * of the block that was allocated, or the list will become diff --git a/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h index 148bc9a0085d..5babd88d4110 100644 --- a/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h @@ -28,7081 +28,21 @@ #ifndef __CVMX_CIU2_DEFS_H__ #define __CVMX_CIU2_DEFS_H__ -#define CVMX_CIU2_ACK_IOX_INT(block_id) (CVMX_ADD_IO_SEG(0x00010701080C0800ull) + ((block_id) & 1) * 0x200000ull) #define CVMX_CIU2_ACK_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_ACK_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_ACK_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108097800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B7800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A7800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108094800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B4800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A4800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108098800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B8800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A8800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108095800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B5800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A5800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108093800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B3800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A3800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108096800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B6800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A6800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108092800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B2800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A2800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108091800ull) + 
((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B1800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A1800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108090800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B0800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A0800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1000ull) + ((block_id) & 31) * 
0x200000ull) #define CVMX_CIU2_EN_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098200ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8200ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP3_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0200ull) + ((block_id) & 31) * 0x200000ull) -#define 
CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0400ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_INTR_CIU_READY (CVMX_ADD_IO_SEG(0x0001070100102008ull)) -#define CVMX_CIU2_INTR_RAM_ECC_CTL 
(CVMX_ADD_IO_SEG(0x0001070100102010ull)) -#define CVMX_CIU2_INTR_RAM_ECC_ST (CVMX_ADD_IO_SEG(0x0001070100102018ull)) -#define CVMX_CIU2_INTR_SLOWDOWN (CVMX_ADD_IO_SEG(0x0001070100102000ull)) -#define CVMX_CIU2_MSIRED_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_MSIRED_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_MSIRED_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_MSI_RCVX(offset) (CVMX_ADD_IO_SEG(0x00010701000C2000ull) + ((offset) & 255) * 8) -#define CVMX_CIU2_MSI_SELX(offset) (CVMX_ADD_IO_SEG(0x00010701000C3000ull) + ((offset) & 255) * 8) -#define CVMX_CIU2_RAW_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108047800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108044800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108045800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108043800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108046800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108042800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108041800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108040800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_RAW_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP3_WRKQ(block_id) 
(CVMX_ADD_IO_SEG(0x0001070100040200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_RAW_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108087800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108084800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108088800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108085800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108083800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108086800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108082800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108081800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108080800ull) + ((block_id) & 1) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_SRC_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_SRC_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081000ull) + ((block_id) & 31) * 0x200000ull) #define CVMX_CIU2_SRC_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080000ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088200ull) + ((block_id) & 31) * 0x200000ull) 
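[Illustrative aside, not part of the patch.] The defines kept above all share one addressing scheme: CVMX_ADD_IO_SEG() lifts the physical register offset into the uncached 64-bit I/O segment, each core (PP) gets its own copy of a per-PP enable/raw/source CSR at a 0x200000-byte stride with the index masked to the 32 possible PPs (the IOX copies use the same stride masked to 2), while the summary CSRs are packed at an 8-byte stride. A minimal sketch of a read through the surviving CVMX_CIU2_SUM_PPX_IP2() define, assuming the usual cvmx_read_csr() accessor from asm/octeon/cvmx.h; "core" is a hypothetical caller-supplied index:

/* Illustrative only: read the IP2 interrupt summary CSR for one core.
 * The macro masks 'core' to the 32 possible PPs and selects that core's
 * 8-byte summary slot; cvmx_read_csr() performs the uncached 64-bit load.
 */
static inline uint64_t ciu2_ip2_sum(unsigned int core)
{
	return cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core));
}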
-#define CVMX_CIU2_SRC_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080200ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SRC_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080400ull) + ((block_id) & 31) * 0x200000ull) -#define CVMX_CIU2_SUM_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070100000800ull) + ((offset) & 1) * 8) #define CVMX_CIU2_SUM_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070100000000ull) + ((offset) & 31) * 8) #define CVMX_CIU2_SUM_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070100000200ull) + ((offset) & 31) * 8) -#define CVMX_CIU2_SUM_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070100000400ull) + ((offset) & 31) * 8) - -union cvmx_ciu2_ack_iox_int { - uint64_t u64; - struct cvmx_ciu2_ack_iox_int_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t ack:1; -#else - uint64_t ack:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_ack_iox_int_s cn68xx; - struct cvmx_ciu2_ack_iox_int_s cn68xxp1; -}; - -union cvmx_ciu2_ack_ppx_ip2 { - uint64_t u64; - struct cvmx_ciu2_ack_ppx_ip2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t ack:1; -#else - uint64_t ack:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_ack_ppx_ip2_s cn68xx; - struct cvmx_ciu2_ack_ppx_ip2_s cn68xxp1; -}; - -union cvmx_ciu2_ack_ppx_ip3 { - uint64_t u64; - struct cvmx_ciu2_ack_ppx_ip3_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t ack:1; -#else - uint64_t ack:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_ack_ppx_ip3_s cn68xx; - struct cvmx_ciu2_ack_ppx_ip3_s cn68xxp1; -}; - -union cvmx_ciu2_ack_ppx_ip4 { - uint64_t u64; - struct cvmx_ciu2_ack_ppx_ip4_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t ack:1; -#else - uint64_t ack:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_ack_ppx_ip4_s cn68xx; - struct cvmx_ciu2_ack_ppx_ip4_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_gpio { - uint64_t u64; - struct 
cvmx_ciu2_en_iox_int_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_iox_int_gpio_s cn68xx; - struct cvmx_ciu2_en_iox_int_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_gpio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_gpio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_gpio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_gpio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_io { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_iox_int_io_s cn68xx; - struct cvmx_ciu2_en_iox_int_io_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_io_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_io_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_io_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_io_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mbox { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mbox_s cn68xx; - struct cvmx_ciu2_en_iox_int_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mbox_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mbox_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - 
uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mbox_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mbox_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mem { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mem_s cn68xx; - struct cvmx_ciu2_en_iox_int_mem_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mem_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mem_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mem_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mem_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mio { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mio_s cn68xx; - struct cvmx_ciu2_en_iox_int_mio_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t 
reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_mio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_mio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_pkt { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_iox_int_pkt_s cn68xx; - struct cvmx_ciu2_en_iox_int_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_pkt_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_pkt_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - 
uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_iox_int_pkt_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_pkt_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_pkt_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_pkt_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_iox_int_pkt_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_pkt_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_rml { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; 
-#endif - } s; - struct cvmx_ciu2_en_iox_int_rml_s cn68xx; - struct cvmx_ciu2_en_iox_int_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_rml_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_rml_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_iox_int_rml_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_rml_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t 
dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_rml_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_rml_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_iox_int_rml_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_rml_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wdog { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wdog_s cn68xx; - struct cvmx_ciu2_en_iox_int_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wdog_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wdog_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wdog_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wdog_w1s_s { -#ifdef 
__BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wrkq { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wrkq_s cn68xx; - struct cvmx_ciu2_en_iox_int_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wrkq_w1c { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wrkq_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xx; - struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_iox_int_wrkq_w1s { - uint64_t u64; - struct cvmx_ciu2_en_iox_int_wrkq_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xx; - struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_gpio { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_gpio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_gpio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_io { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_io_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_io_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_io_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_io_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xxp1; -}; - 
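[Illustrative aside, not part of the patch.] Every CSR in this header is wrapped in a union pairing a raw u64 with endian-guarded bitfield views; the removals here drop only the per-chip members (cn68xx, cn68xxp1) that duplicated the generic "s" layout, so the registers this patch keeps are still accessed the same way. A sketch of driving the surviving write-one-to-set enable alias, assuming the kept cvmx_ciu2_en_ppx_ip2_wrkq_w1s union mirrors the 64-bit workq field of the IOX variant removed above and using the standard cvmx_write_csr() accessor; "core" and "group" are hypothetical parameters:

/* Illustrative only: set a single SSO work-queue interrupt enable bit
 * for one core via the W1S alias, leaving all other enable bits alone.
 */
static inline void ciu2_enable_wrkq(unsigned int core, unsigned int group)
{
	union cvmx_ciu2_en_ppx_ip2_wrkq_w1s en;

	en.u64 = 0;
	en.s.workq = 1ull << (group & 63);	/* one enable bit per group */
	cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(core), en.u64);
}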
-union cvmx_ciu2_en_ppx_ip2_io_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_io_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mbox { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mbox_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mbox_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mem { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mem_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mem_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mio { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; 
- uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_mio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_pkt { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t 
reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_pkt_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_pkt_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_pkt_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_rml { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_rml_s { -#ifdef 
__BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_rml_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_rml_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t 
reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_rml_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_rml_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_rml_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t 
reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wdog { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wdog_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wdog_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wrkq { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wrkq_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip2_wrkq_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_gpio { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_gpio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_gpio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_io { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t 
reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_io_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_io_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_io_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_io_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_io_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_io_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mbox { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mbox_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mbox_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mem { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mem_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xx; - 
struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mem_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mio { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_mio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t 
ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_pkt { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_pkt_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_pkt_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_pkt_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - 
uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_rml { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_rml_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - 
uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_rml_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_rml_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_rml_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t 
tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_rml_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wdog { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wdog_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wdog_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wrkq { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wrkq_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip3_wrkq_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_gpio { - uint64_t u64; - 
struct cvmx_ciu2_en_ppx_ip4_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_gpio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_gpio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_io { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_io_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_io_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_io_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_io_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_io_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_io_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mbox { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mbox_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t 
reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mbox_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mem { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mem_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mem_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mio { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mio_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - 
uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_mio_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_pkt { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_pkt_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_pkt_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t 
reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_pkt_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_rml { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t 
reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_rml_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_rml_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_rml_w1c_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t 
sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_rml_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_rml_w1s_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wdog { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wdog_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wdog_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s { 
-#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wrkq { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wrkq_w1c { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xxp1; -}; - -union cvmx_ciu2_en_ppx_ip4_wrkq_w1s { - uint64_t u64; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xx; - struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xxp1; -}; - -union cvmx_ciu2_intr_ciu_ready { - uint64_t u64; - struct cvmx_ciu2_intr_ciu_ready_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t ready:1; -#else - uint64_t ready:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_intr_ciu_ready_s cn68xx; - struct cvmx_ciu2_intr_ciu_ready_s cn68xxp1; -}; - -union cvmx_ciu2_intr_ram_ecc_ctl { - uint64_t u64; - struct cvmx_ciu2_intr_ram_ecc_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_3_63:61; - uint64_t flip_synd:2; - uint64_t ecc_ena:1; -#else - uint64_t ecc_ena:1; - uint64_t flip_synd:2; - uint64_t reserved_3_63:61; -#endif - } s; - struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xx; - struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xxp1; -}; - -union cvmx_ciu2_intr_ram_ecc_st { - uint64_t u64; - struct cvmx_ciu2_intr_ram_ecc_st_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_23_63:41; - uint64_t addr:7; - uint64_t reserved_13_15:3; - uint64_t syndrom:9; - uint64_t reserved_2_3:2; - uint64_t dbe:1; - uint64_t sbe:1; -#else - uint64_t sbe:1; - uint64_t dbe:1; - uint64_t reserved_2_3:2; - uint64_t syndrom:9; - uint64_t reserved_13_15:3; - uint64_t addr:7; - uint64_t reserved_23_63:41; -#endif - } s; - struct cvmx_ciu2_intr_ram_ecc_st_s cn68xx; - struct cvmx_ciu2_intr_ram_ecc_st_s cn68xxp1; -}; - -union cvmx_ciu2_intr_slowdown { - uint64_t u64; - struct cvmx_ciu2_intr_slowdown_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_3_63:61; - uint64_t ctl:3; -#else - uint64_t ctl:3; - uint64_t reserved_3_63:61; -#endif - } s; - struct cvmx_ciu2_intr_slowdown_s cn68xx; - struct cvmx_ciu2_intr_slowdown_s cn68xxp1; -}; - -union cvmx_ciu2_msi_rcvx { - uint64_t u64; - struct cvmx_ciu2_msi_rcvx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t msi_rcv:1; -#else - uint64_t msi_rcv:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_ciu2_msi_rcvx_s cn68xx; - struct cvmx_ciu2_msi_rcvx_s cn68xxp1; -}; - -union cvmx_ciu2_msi_selx { - uint64_t u64; - struct cvmx_ciu2_msi_selx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_13_63:51; - uint64_t pp_num:5; - uint64_t reserved_6_7:2; - uint64_t ip_num:2; - uint64_t reserved_1_3:3; - uint64_t en:1; -#else - uint64_t en:1; - uint64_t reserved_1_3:3; - uint64_t ip_num:2; - uint64_t reserved_6_7:2; - uint64_t pp_num:5; - uint64_t reserved_13_63:51; -#endif - } s; - struct cvmx_ciu2_msi_selx_s cn68xx; - struct cvmx_ciu2_msi_selx_s 
cn68xxp1; -}; - -union cvmx_ciu2_msired_ppx_ip2 { - uint64_t u64; - struct cvmx_ciu2_msired_ppx_ip2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_21_63:43; - uint64_t intr:1; - uint64_t reserved_17_19:3; - uint64_t newint:1; - uint64_t reserved_8_15:8; - uint64_t msi_num:8; -#else - uint64_t msi_num:8; - uint64_t reserved_8_15:8; - uint64_t newint:1; - uint64_t reserved_17_19:3; - uint64_t intr:1; - uint64_t reserved_21_63:43; -#endif - } s; - struct cvmx_ciu2_msired_ppx_ip2_s cn68xx; - struct cvmx_ciu2_msired_ppx_ip2_s cn68xxp1; -}; - -union cvmx_ciu2_msired_ppx_ip3 { - uint64_t u64; - struct cvmx_ciu2_msired_ppx_ip3_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_21_63:43; - uint64_t intr:1; - uint64_t reserved_17_19:3; - uint64_t newint:1; - uint64_t reserved_8_15:8; - uint64_t msi_num:8; -#else - uint64_t msi_num:8; - uint64_t reserved_8_15:8; - uint64_t newint:1; - uint64_t reserved_17_19:3; - uint64_t intr:1; - uint64_t reserved_21_63:43; -#endif - } s; - struct cvmx_ciu2_msired_ppx_ip3_s cn68xx; - struct cvmx_ciu2_msired_ppx_ip3_s cn68xxp1; -}; - -union cvmx_ciu2_msired_ppx_ip4 { - uint64_t u64; - struct cvmx_ciu2_msired_ppx_ip4_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_21_63:43; - uint64_t intr:1; - uint64_t reserved_17_19:3; - uint64_t newint:1; - uint64_t reserved_8_15:8; - uint64_t msi_num:8; -#else - uint64_t msi_num:8; - uint64_t reserved_8_15:8; - uint64_t newint:1; - uint64_t reserved_17_19:3; - uint64_t intr:1; - uint64_t reserved_21_63:43; -#endif - } s; - struct cvmx_ciu2_msired_ppx_ip4_s cn68xx; - struct cvmx_ciu2_msired_ppx_ip4_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_gpio { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_gpio_s cn68xx; - struct cvmx_ciu2_raw_iox_int_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_io { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_io_s cn68xx; - struct cvmx_ciu2_raw_iox_int_io_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_mem { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_mem_s cn68xx; - struct cvmx_ciu2_raw_iox_int_mem_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_mio { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - 
uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_mio_s cn68xx; - struct cvmx_ciu2_raw_iox_int_mio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_pkt { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_pkt_s cn68xx; - struct cvmx_ciu2_raw_iox_int_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_rml { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_rml_s cn68xx; - struct cvmx_ciu2_raw_iox_int_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t 
reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_wdog { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_wdog_s cn68xx; - struct cvmx_ciu2_raw_iox_int_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_raw_iox_int_wrkq { - uint64_t u64; - struct cvmx_ciu2_raw_iox_int_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xx; - struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_gpio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_io { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_mem { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_mio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - 
uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_pkt { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_pkt_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_rml { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_rml_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t 
reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_wdog { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip2_wrkq { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip2_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_gpio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_io { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_mem { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_mio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - 
uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_pkt { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_pkt_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_rml { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_rml_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t 
reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_wdog { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip3_wrkq { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip3_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_gpio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_io { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_mem { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_mio { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - 
uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_pkt { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_pkt_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_rml { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_rml_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t 
reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_wdog { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_raw_ppx_ip4_wrkq { - uint64_t u64; - struct cvmx_ciu2_raw_ppx_ip4_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xx; - struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_gpio { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_src_iox_int_gpio_s cn68xx; - struct cvmx_ciu2_src_iox_int_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_io { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_src_iox_int_io_s cn68xx; - struct cvmx_ciu2_src_iox_int_io_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_mbox { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_iox_int_mbox_s cn68xx; - struct cvmx_ciu2_src_iox_int_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_mem { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_iox_int_mem_s cn68xx; - struct cvmx_ciu2_src_iox_int_mem_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_mio { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; 
- uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_src_iox_int_mio_s cn68xx; - struct cvmx_ciu2_src_iox_int_mio_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_pkt { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_src_iox_int_pkt_s cn68xx; - struct cvmx_ciu2_src_iox_int_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_rml { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t 
l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_src_iox_int_rml_s cn68xx; - struct cvmx_ciu2_src_iox_int_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_wdog { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_src_iox_int_wdog_s cn68xx; - struct cvmx_ciu2_src_iox_int_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_src_iox_int_wrkq { - uint64_t u64; - struct cvmx_ciu2_src_iox_int_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_src_iox_int_wrkq_s cn68xx; - struct cvmx_ciu2_src_iox_int_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_gpio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_io { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_io_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_io_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_mbox { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_mem { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - 
struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_mio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_pkt { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_pkt_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_rml { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - 
uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_rml_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_wdog { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip2_wrkq { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip2_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xx; - struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_gpio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_io { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_io_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_io_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_mbox { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct 
cvmx_ciu2_src_ppx_ip3_mbox_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_mem { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_mio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_pkt { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_pkt_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_rml { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - 
uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_rml_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_wdog { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip3_wrkq { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip3_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xx; - struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_gpio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_gpio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t gpio:16; -#else - uint64_t gpio:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_io { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_io_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_34_63:30; - uint64_t pem:2; - uint64_t reserved_18_31:14; - uint64_t pci_inta:2; - uint64_t reserved_13_15:3; - uint64_t msired:1; - uint64_t pci_msi:4; - uint64_t reserved_4_7:4; - uint64_t pci_intr:4; -#else - uint64_t pci_intr:4; - uint64_t reserved_4_7:4; - uint64_t pci_msi:4; - uint64_t msired:1; - uint64_t reserved_13_15:3; - uint64_t pci_inta:2; - uint64_t reserved_18_31:14; - uint64_t pem:2; - uint64_t reserved_34_63:30; -#endif - } s; - struct 
cvmx_ciu2_src_ppx_ip4_io_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_io_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_mbox { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_mbox_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t mbox:4; -#else - uint64_t mbox:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_mem { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_mem_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t lmc:4; -#else - uint64_t lmc:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_mio { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_mio_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t rst:1; - uint64_t reserved_49_62:14; - uint64_t ptp:1; - uint64_t reserved_45_47:3; - uint64_t usb_hci:1; - uint64_t reserved_41_43:3; - uint64_t usb_uctl:1; - uint64_t reserved_38_39:2; - uint64_t uart:2; - uint64_t reserved_34_35:2; - uint64_t twsi:2; - uint64_t reserved_19_31:13; - uint64_t bootdma:1; - uint64_t mio:1; - uint64_t nand:1; - uint64_t reserved_12_15:4; - uint64_t timer:4; - uint64_t reserved_3_7:5; - uint64_t ipd_drp:1; - uint64_t ssoiq:1; - uint64_t ipdppthr:1; -#else - uint64_t ipdppthr:1; - uint64_t ssoiq:1; - uint64_t ipd_drp:1; - uint64_t reserved_3_7:5; - uint64_t timer:4; - uint64_t reserved_12_15:4; - uint64_t nand:1; - uint64_t mio:1; - uint64_t bootdma:1; - uint64_t reserved_19_31:13; - uint64_t twsi:2; - uint64_t reserved_34_35:2; - uint64_t uart:2; - uint64_t reserved_38_39:2; - uint64_t usb_uctl:1; - uint64_t reserved_41_43:3; - uint64_t usb_hci:1; - uint64_t reserved_45_47:3; - uint64_t ptp:1; - uint64_t reserved_49_62:14; - uint64_t rst:1; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_pkt { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_54_63:10; - uint64_t ilk_drp:2; - uint64_t reserved_49_51:3; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_51:3; - uint64_t ilk_drp:2; - uint64_t reserved_54_63:10; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_pkt_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_pkt_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_49_63:15; - uint64_t ilk:1; - uint64_t reserved_41_47:7; - uint64_t mii:1; - uint64_t reserved_33_39:7; - uint64_t agl:1; - uint64_t reserved_13_31:19; - uint64_t gmx_drp:5; - uint64_t reserved_5_7:3; - uint64_t agx:5; -#else - uint64_t agx:5; - uint64_t reserved_5_7:3; - uint64_t gmx_drp:5; - uint64_t reserved_13_31:19; - uint64_t agl:1; - uint64_t reserved_33_39:7; - uint64_t mii:1; - uint64_t reserved_41_47:7; - uint64_t ilk:1; - uint64_t reserved_49_63:15; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_rml { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_rml_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - 
uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_37_39:3; - uint64_t dpi_dma:1; - uint64_t reserved_34_35:2; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_35:2; - uint64_t dpi_dma:1; - uint64_t reserved_37_39:3; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_rml_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_rml_cn68xxp1 { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_56_63:8; - uint64_t trace:4; - uint64_t reserved_49_51:3; - uint64_t l2c:1; - uint64_t reserved_41_47:7; - uint64_t dfa:1; - uint64_t reserved_34_39:6; - uint64_t dpi:1; - uint64_t sli:1; - uint64_t reserved_31_31:1; - uint64_t key:1; - uint64_t rad:1; - uint64_t tim:1; - uint64_t reserved_25_27:3; - uint64_t zip:1; - uint64_t reserved_17_23:7; - uint64_t sso:1; - uint64_t reserved_8_15:8; - uint64_t pko:1; - uint64_t pip:1; - uint64_t ipd:1; - uint64_t fpa:1; - uint64_t reserved_1_3:3; - uint64_t iob:1; -#else - uint64_t iob:1; - uint64_t reserved_1_3:3; - uint64_t fpa:1; - uint64_t ipd:1; - uint64_t pip:1; - uint64_t pko:1; - uint64_t reserved_8_15:8; - uint64_t sso:1; - uint64_t reserved_17_23:7; - uint64_t zip:1; - uint64_t reserved_25_27:3; - uint64_t tim:1; - uint64_t rad:1; - uint64_t key:1; - uint64_t reserved_31_31:1; - uint64_t sli:1; - uint64_t dpi:1; - uint64_t reserved_34_39:6; - uint64_t dfa:1; - uint64_t reserved_41_47:7; - uint64_t l2c:1; - uint64_t reserved_49_51:3; - uint64_t trace:4; - uint64_t reserved_56_63:8; -#endif - } cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_wdog { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_wdog_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t wdog:32; -#else - uint64_t wdog:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xxp1; -}; - -union cvmx_ciu2_src_ppx_ip4_wrkq { - uint64_t u64; - struct cvmx_ciu2_src_ppx_ip4_wrkq_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t workq:64; -#else - uint64_t workq:64; -#endif - } s; - struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xx; - struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xxp1; -}; - -union cvmx_ciu2_sum_iox_int { - uint64_t u64; - struct cvmx_ciu2_sum_iox_int_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t mbox:4; - uint64_t reserved_8_59:52; - uint64_t gpio:1; - uint64_t pkt:1; - uint64_t mem:1; - uint64_t io:1; - uint64_t mio:1; - uint64_t rml:1; - uint64_t wdog:1; - uint64_t workq:1; -#else - uint64_t workq:1; - uint64_t wdog:1; - uint64_t rml:1; - uint64_t mio:1; - uint64_t io:1; - uint64_t mem:1; - uint64_t pkt:1; - uint64_t gpio:1; - uint64_t reserved_8_59:52; - uint64_t mbox:4; -#endif - } s; - struct cvmx_ciu2_sum_iox_int_s cn68xx; - struct cvmx_ciu2_sum_iox_int_s 
cn68xxp1; -}; - -union cvmx_ciu2_sum_ppx_ip2 { - uint64_t u64; - struct cvmx_ciu2_sum_ppx_ip2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t mbox:4; - uint64_t reserved_8_59:52; - uint64_t gpio:1; - uint64_t pkt:1; - uint64_t mem:1; - uint64_t io:1; - uint64_t mio:1; - uint64_t rml:1; - uint64_t wdog:1; - uint64_t workq:1; -#else - uint64_t workq:1; - uint64_t wdog:1; - uint64_t rml:1; - uint64_t mio:1; - uint64_t io:1; - uint64_t mem:1; - uint64_t pkt:1; - uint64_t gpio:1; - uint64_t reserved_8_59:52; - uint64_t mbox:4; -#endif - } s; - struct cvmx_ciu2_sum_ppx_ip2_s cn68xx; - struct cvmx_ciu2_sum_ppx_ip2_s cn68xxp1; -}; - -union cvmx_ciu2_sum_ppx_ip3 { - uint64_t u64; - struct cvmx_ciu2_sum_ppx_ip3_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t mbox:4; - uint64_t reserved_8_59:52; - uint64_t gpio:1; - uint64_t pkt:1; - uint64_t mem:1; - uint64_t io:1; - uint64_t mio:1; - uint64_t rml:1; - uint64_t wdog:1; - uint64_t workq:1; -#else - uint64_t workq:1; - uint64_t wdog:1; - uint64_t rml:1; - uint64_t mio:1; - uint64_t io:1; - uint64_t mem:1; - uint64_t pkt:1; - uint64_t gpio:1; - uint64_t reserved_8_59:52; - uint64_t mbox:4; -#endif - } s; - struct cvmx_ciu2_sum_ppx_ip3_s cn68xx; - struct cvmx_ciu2_sum_ppx_ip3_s cn68xxp1; -}; - -union cvmx_ciu2_sum_ppx_ip4 { - uint64_t u64; - struct cvmx_ciu2_sum_ppx_ip4_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t mbox:4; - uint64_t reserved_8_59:52; - uint64_t gpio:1; - uint64_t pkt:1; - uint64_t mem:1; - uint64_t io:1; - uint64_t mio:1; - uint64_t rml:1; - uint64_t wdog:1; - uint64_t workq:1; -#else - uint64_t workq:1; - uint64_t wdog:1; - uint64_t rml:1; - uint64_t mio:1; - uint64_t io:1; - uint64_t mem:1; - uint64_t pkt:1; - uint64_t gpio:1; - uint64_t reserved_8_59:52; - uint64_t mbox:4; -#endif - } s; - struct cvmx_ciu2_sum_ppx_ip4_s cn68xx; - struct cvmx_ciu2_sum_ppx_ip4_s cn68xxp1; -}; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-dbg-defs.h b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h index 40799cdae695..828d07d87f03 100644 --- a/arch/mips/include/asm/octeon/cvmx-dbg-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h @@ -62,7 +62,6 @@ union cvmx_dbg_data { uint64_t reserved_31_63:33; #endif } cn30xx; - struct cvmx_dbg_data_cn30xx cn31xx; struct cvmx_dbg_data_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -82,8 +81,6 @@ union cvmx_dbg_data { uint64_t reserved_29_63:35; #endif } cn38xx; - struct cvmx_dbg_data_cn38xx cn38xxp2; - struct cvmx_dbg_data_cn30xx cn50xx; struct cvmx_dbg_data_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -99,7 +96,6 @@ union cvmx_dbg_data { uint64_t reserved_29_63:35; #endif } cn58xx; - struct cvmx_dbg_data_cn58xx cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-dpi-defs.h b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h index dd5b0428de35..e8613e1f6930 100644 --- a/arch/mips/include/asm/octeon/cvmx-dpi-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h @@ -89,7 +89,6 @@ union cvmx_dpi_bist_status { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_dpi_bist_status_s cn61xx; struct cvmx_dpi_bist_status_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_45_63:19; @@ -108,10 +107,6 @@ union cvmx_dpi_bist_status { uint64_t reserved_37_63:27; #endif } cn63xxp1; - struct cvmx_dpi_bist_status_s cn66xx; - struct cvmx_dpi_bist_status_cn63xx cn68xx; - struct cvmx_dpi_bist_status_cn63xx cn68xxp1; - struct cvmx_dpi_bist_status_s cnf71xx; }; union cvmx_dpi_ctl { @@ -136,12 +131,6 @@ union cvmx_dpi_ctl { uint64_t reserved_1_63:63; #endif } cn61xx; - 
struct cvmx_dpi_ctl_s cn63xx; - struct cvmx_dpi_ctl_s cn63xxp1; - struct cvmx_dpi_ctl_s cn66xx; - struct cvmx_dpi_ctl_s cn68xx; - struct cvmx_dpi_ctl_s cn68xxp1; - struct cvmx_dpi_ctl_cn61xx cnf71xx; }; union cvmx_dpi_dmax_counts { @@ -157,13 +146,6 @@ union cvmx_dpi_dmax_counts { uint64_t reserved_39_63:25; #endif } s; - struct cvmx_dpi_dmax_counts_s cn61xx; - struct cvmx_dpi_dmax_counts_s cn63xx; - struct cvmx_dpi_dmax_counts_s cn63xxp1; - struct cvmx_dpi_dmax_counts_s cn66xx; - struct cvmx_dpi_dmax_counts_s cn68xx; - struct cvmx_dpi_dmax_counts_s cn68xxp1; - struct cvmx_dpi_dmax_counts_s cnf71xx; }; union cvmx_dpi_dmax_dbell { @@ -177,13 +159,6 @@ union cvmx_dpi_dmax_dbell { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_dpi_dmax_dbell_s cn61xx; - struct cvmx_dpi_dmax_dbell_s cn63xx; - struct cvmx_dpi_dmax_dbell_s cn63xxp1; - struct cvmx_dpi_dmax_dbell_s cn66xx; - struct cvmx_dpi_dmax_dbell_s cn68xx; - struct cvmx_dpi_dmax_dbell_s cn68xxp1; - struct cvmx_dpi_dmax_dbell_s cnf71xx; }; union cvmx_dpi_dmax_err_rsp_status { @@ -197,11 +172,6 @@ union cvmx_dpi_dmax_err_rsp_status { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_dpi_dmax_err_rsp_status_s cn61xx; - struct cvmx_dpi_dmax_err_rsp_status_s cn66xx; - struct cvmx_dpi_dmax_err_rsp_status_s cn68xx; - struct cvmx_dpi_dmax_err_rsp_status_s cn68xxp1; - struct cvmx_dpi_dmax_err_rsp_status_s cnf71xx; }; union cvmx_dpi_dmax_ibuff_saddr { @@ -242,12 +212,6 @@ union cvmx_dpi_dmax_ibuff_saddr { uint64_t reserved_62_63:2; #endif } cn61xx; - struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xx; - struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xxp1; - struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn66xx; - struct cvmx_dpi_dmax_ibuff_saddr_s cn68xx; - struct cvmx_dpi_dmax_ibuff_saddr_s cn68xxp1; - struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cnf71xx; }; union cvmx_dpi_dmax_iflight { @@ -261,11 +225,6 @@ union cvmx_dpi_dmax_iflight { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_dpi_dmax_iflight_s cn61xx; - struct cvmx_dpi_dmax_iflight_s cn66xx; - struct cvmx_dpi_dmax_iflight_s cn68xx; - struct cvmx_dpi_dmax_iflight_s cn68xxp1; - struct cvmx_dpi_dmax_iflight_s cnf71xx; }; union cvmx_dpi_dmax_naddr { @@ -288,12 +247,6 @@ union cvmx_dpi_dmax_naddr { uint64_t reserved_36_63:28; #endif } cn61xx; - struct cvmx_dpi_dmax_naddr_cn61xx cn63xx; - struct cvmx_dpi_dmax_naddr_cn61xx cn63xxp1; - struct cvmx_dpi_dmax_naddr_cn61xx cn66xx; - struct cvmx_dpi_dmax_naddr_s cn68xx; - struct cvmx_dpi_dmax_naddr_s cn68xxp1; - struct cvmx_dpi_dmax_naddr_cn61xx cnf71xx; }; union cvmx_dpi_dmax_reqbnk0 { @@ -305,13 +258,6 @@ union cvmx_dpi_dmax_reqbnk0 { uint64_t state:64; #endif } s; - struct cvmx_dpi_dmax_reqbnk0_s cn61xx; - struct cvmx_dpi_dmax_reqbnk0_s cn63xx; - struct cvmx_dpi_dmax_reqbnk0_s cn63xxp1; - struct cvmx_dpi_dmax_reqbnk0_s cn66xx; - struct cvmx_dpi_dmax_reqbnk0_s cn68xx; - struct cvmx_dpi_dmax_reqbnk0_s cn68xxp1; - struct cvmx_dpi_dmax_reqbnk0_s cnf71xx; }; union cvmx_dpi_dmax_reqbnk1 { @@ -323,13 +269,6 @@ union cvmx_dpi_dmax_reqbnk1 { uint64_t state:64; #endif } s; - struct cvmx_dpi_dmax_reqbnk1_s cn61xx; - struct cvmx_dpi_dmax_reqbnk1_s cn63xx; - struct cvmx_dpi_dmax_reqbnk1_s cn63xxp1; - struct cvmx_dpi_dmax_reqbnk1_s cn66xx; - struct cvmx_dpi_dmax_reqbnk1_s cn68xx; - struct cvmx_dpi_dmax_reqbnk1_s cn68xxp1; - struct cvmx_dpi_dmax_reqbnk1_s cnf71xx; }; union cvmx_dpi_dma_control { @@ -379,7 +318,6 @@ union cvmx_dpi_dma_control { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_dpi_dma_control_s cn61xx; struct cvmx_dpi_dma_control_cn63xx { #ifdef 
__BIG_ENDIAN_BITFIELD uint64_t reserved_61_63:3; @@ -462,10 +400,6 @@ union cvmx_dpi_dma_control { uint64_t reserved_59_63:5; #endif } cn63xxp1; - struct cvmx_dpi_dma_control_cn63xx cn66xx; - struct cvmx_dpi_dma_control_s cn68xx; - struct cvmx_dpi_dma_control_cn63xx cn68xxp1; - struct cvmx_dpi_dma_control_s cnf71xx; }; union cvmx_dpi_dma_engx_en { @@ -479,13 +413,6 @@ union cvmx_dpi_dma_engx_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_dma_engx_en_s cn61xx; - struct cvmx_dpi_dma_engx_en_s cn63xx; - struct cvmx_dpi_dma_engx_en_s cn63xxp1; - struct cvmx_dpi_dma_engx_en_s cn66xx; - struct cvmx_dpi_dma_engx_en_s cn68xx; - struct cvmx_dpi_dma_engx_en_s cn68xxp1; - struct cvmx_dpi_dma_engx_en_s cnf71xx; }; union cvmx_dpi_dma_ppx_cnt { @@ -499,9 +426,6 @@ union cvmx_dpi_dma_ppx_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_dpi_dma_ppx_cnt_s cn61xx; - struct cvmx_dpi_dma_ppx_cnt_s cn68xx; - struct cvmx_dpi_dma_ppx_cnt_s cnf71xx; }; union cvmx_dpi_engx_buf { @@ -521,7 +445,6 @@ union cvmx_dpi_engx_buf { uint64_t reserved_37_63:27; #endif } s; - struct cvmx_dpi_engx_buf_s cn61xx; struct cvmx_dpi_engx_buf_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; @@ -533,11 +456,6 @@ union cvmx_dpi_engx_buf { uint64_t reserved_8_63:56; #endif } cn63xx; - struct cvmx_dpi_engx_buf_cn63xx cn63xxp1; - struct cvmx_dpi_engx_buf_s cn66xx; - struct cvmx_dpi_engx_buf_s cn68xx; - struct cvmx_dpi_engx_buf_s cn68xxp1; - struct cvmx_dpi_engx_buf_s cnf71xx; }; union cvmx_dpi_info_reg { @@ -557,8 +475,6 @@ union cvmx_dpi_info_reg { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_info_reg_s cn61xx; - struct cvmx_dpi_info_reg_s cn63xx; struct cvmx_dpi_info_reg_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -570,10 +486,6 @@ union cvmx_dpi_info_reg { uint64_t reserved_2_63:62; #endif } cn63xxp1; - struct cvmx_dpi_info_reg_s cn66xx; - struct cvmx_dpi_info_reg_s cn68xx; - struct cvmx_dpi_info_reg_s cn68xxp1; - struct cvmx_dpi_info_reg_s cnf71xx; }; union cvmx_dpi_int_en { @@ -617,7 +529,6 @@ union cvmx_dpi_int_en { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_dpi_int_en_s cn61xx; struct cvmx_dpi_int_en_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_26_63:38; @@ -653,11 +564,6 @@ union cvmx_dpi_int_en { uint64_t reserved_26_63:38; #endif } cn63xx; - struct cvmx_dpi_int_en_cn63xx cn63xxp1; - struct cvmx_dpi_int_en_s cn66xx; - struct cvmx_dpi_int_en_cn63xx cn68xx; - struct cvmx_dpi_int_en_cn63xx cn68xxp1; - struct cvmx_dpi_int_en_s cnf71xx; }; union cvmx_dpi_int_reg { @@ -701,7 +607,6 @@ union cvmx_dpi_int_reg { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_dpi_int_reg_s cn61xx; struct cvmx_dpi_int_reg_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_26_63:38; @@ -737,11 +642,6 @@ union cvmx_dpi_int_reg { uint64_t reserved_26_63:38; #endif } cn63xx; - struct cvmx_dpi_int_reg_cn63xx cn63xxp1; - struct cvmx_dpi_int_reg_s cn66xx; - struct cvmx_dpi_int_reg_cn63xx cn68xx; - struct cvmx_dpi_int_reg_cn63xx cn68xxp1; - struct cvmx_dpi_int_reg_s cnf71xx; }; union cvmx_dpi_ncbx_cfg { @@ -755,10 +655,6 @@ union cvmx_dpi_ncbx_cfg { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_dpi_ncbx_cfg_s cn61xx; - struct cvmx_dpi_ncbx_cfg_s cn66xx; - struct cvmx_dpi_ncbx_cfg_s cn68xx; - struct cvmx_dpi_ncbx_cfg_s cnf71xx; }; union cvmx_dpi_pint_info { @@ -776,13 +672,6 @@ union cvmx_dpi_pint_info { uint64_t reserved_14_63:50; #endif } s; - struct cvmx_dpi_pint_info_s cn61xx; - struct cvmx_dpi_pint_info_s cn63xx; - struct 
cvmx_dpi_pint_info_s cn63xxp1; - struct cvmx_dpi_pint_info_s cn66xx; - struct cvmx_dpi_pint_info_s cn68xx; - struct cvmx_dpi_pint_info_s cn68xxp1; - struct cvmx_dpi_pint_info_s cnf71xx; }; union cvmx_dpi_pkt_err_rsp { @@ -796,13 +685,6 @@ union cvmx_dpi_pkt_err_rsp { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_dpi_pkt_err_rsp_s cn61xx; - struct cvmx_dpi_pkt_err_rsp_s cn63xx; - struct cvmx_dpi_pkt_err_rsp_s cn63xxp1; - struct cvmx_dpi_pkt_err_rsp_s cn66xx; - struct cvmx_dpi_pkt_err_rsp_s cn68xx; - struct cvmx_dpi_pkt_err_rsp_s cn68xxp1; - struct cvmx_dpi_pkt_err_rsp_s cnf71xx; }; union cvmx_dpi_req_err_rsp { @@ -816,13 +698,6 @@ union cvmx_dpi_req_err_rsp { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_req_err_rsp_s cn61xx; - struct cvmx_dpi_req_err_rsp_s cn63xx; - struct cvmx_dpi_req_err_rsp_s cn63xxp1; - struct cvmx_dpi_req_err_rsp_s cn66xx; - struct cvmx_dpi_req_err_rsp_s cn68xx; - struct cvmx_dpi_req_err_rsp_s cn68xxp1; - struct cvmx_dpi_req_err_rsp_s cnf71xx; }; union cvmx_dpi_req_err_rsp_en { @@ -836,13 +711,6 @@ union cvmx_dpi_req_err_rsp_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_req_err_rsp_en_s cn61xx; - struct cvmx_dpi_req_err_rsp_en_s cn63xx; - struct cvmx_dpi_req_err_rsp_en_s cn63xxp1; - struct cvmx_dpi_req_err_rsp_en_s cn66xx; - struct cvmx_dpi_req_err_rsp_en_s cn68xx; - struct cvmx_dpi_req_err_rsp_en_s cn68xxp1; - struct cvmx_dpi_req_err_rsp_en_s cnf71xx; }; union cvmx_dpi_req_err_rst { @@ -856,13 +724,6 @@ union cvmx_dpi_req_err_rst { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_req_err_rst_s cn61xx; - struct cvmx_dpi_req_err_rst_s cn63xx; - struct cvmx_dpi_req_err_rst_s cn63xxp1; - struct cvmx_dpi_req_err_rst_s cn66xx; - struct cvmx_dpi_req_err_rst_s cn68xx; - struct cvmx_dpi_req_err_rst_s cn68xxp1; - struct cvmx_dpi_req_err_rst_s cnf71xx; }; union cvmx_dpi_req_err_rst_en { @@ -876,13 +737,6 @@ union cvmx_dpi_req_err_rst_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_req_err_rst_en_s cn61xx; - struct cvmx_dpi_req_err_rst_en_s cn63xx; - struct cvmx_dpi_req_err_rst_en_s cn63xxp1; - struct cvmx_dpi_req_err_rst_en_s cn66xx; - struct cvmx_dpi_req_err_rst_en_s cn68xx; - struct cvmx_dpi_req_err_rst_en_s cn68xxp1; - struct cvmx_dpi_req_err_rst_en_s cnf71xx; }; union cvmx_dpi_req_err_skip_comp { @@ -900,11 +754,6 @@ union cvmx_dpi_req_err_skip_comp { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_dpi_req_err_skip_comp_s cn61xx; - struct cvmx_dpi_req_err_skip_comp_s cn66xx; - struct cvmx_dpi_req_err_skip_comp_s cn68xx; - struct cvmx_dpi_req_err_skip_comp_s cn68xxp1; - struct cvmx_dpi_req_err_skip_comp_s cnf71xx; }; union cvmx_dpi_req_gbl_en { @@ -918,13 +767,6 @@ union cvmx_dpi_req_gbl_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_dpi_req_gbl_en_s cn61xx; - struct cvmx_dpi_req_gbl_en_s cn63xx; - struct cvmx_dpi_req_gbl_en_s cn63xxp1; - struct cvmx_dpi_req_gbl_en_s cn66xx; - struct cvmx_dpi_req_gbl_en_s cn68xx; - struct cvmx_dpi_req_gbl_en_s cn68xxp1; - struct cvmx_dpi_req_gbl_en_s cnf71xx; }; union cvmx_dpi_sli_prtx_cfg { @@ -960,7 +802,6 @@ union cvmx_dpi_sli_prtx_cfg { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_dpi_sli_prtx_cfg_s cn61xx; struct cvmx_dpi_sli_prtx_cfg_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; @@ -994,11 +835,6 @@ union cvmx_dpi_sli_prtx_cfg { uint64_t reserved_25_63:39; #endif } cn63xx; - struct cvmx_dpi_sli_prtx_cfg_cn63xx cn63xxp1; - struct cvmx_dpi_sli_prtx_cfg_s cn66xx; - struct cvmx_dpi_sli_prtx_cfg_cn63xx cn68xx; - struct 
cvmx_dpi_sli_prtx_cfg_cn63xx cn68xxp1; - struct cvmx_dpi_sli_prtx_cfg_s cnf71xx; }; union cvmx_dpi_sli_prtx_err { @@ -1012,13 +848,6 @@ union cvmx_dpi_sli_prtx_err { uint64_t addr:61; #endif } s; - struct cvmx_dpi_sli_prtx_err_s cn61xx; - struct cvmx_dpi_sli_prtx_err_s cn63xx; - struct cvmx_dpi_sli_prtx_err_s cn63xxp1; - struct cvmx_dpi_sli_prtx_err_s cn66xx; - struct cvmx_dpi_sli_prtx_err_s cn68xx; - struct cvmx_dpi_sli_prtx_err_s cn68xxp1; - struct cvmx_dpi_sli_prtx_err_s cnf71xx; }; union cvmx_dpi_sli_prtx_err_info { @@ -1040,13 +869,6 @@ union cvmx_dpi_sli_prtx_err_info { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_dpi_sli_prtx_err_info_s cn61xx; - struct cvmx_dpi_sli_prtx_err_info_s cn63xx; - struct cvmx_dpi_sli_prtx_err_info_s cn63xxp1; - struct cvmx_dpi_sli_prtx_err_info_s cn66xx; - struct cvmx_dpi_sli_prtx_err_info_s cn68xx; - struct cvmx_dpi_sli_prtx_err_info_s cn68xxp1; - struct cvmx_dpi_sli_prtx_err_info_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h index 887ff8e1f715..322943f7c4b6 100644 --- a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h @@ -81,11 +81,6 @@ union cvmx_fpa_addr_range_error { uint64_t reserved_38_63:26; #endif } s; - struct cvmx_fpa_addr_range_error_s cn61xx; - struct cvmx_fpa_addr_range_error_s cn66xx; - struct cvmx_fpa_addr_range_error_s cn68xx; - struct cvmx_fpa_addr_range_error_s cn68xxp1; - struct cvmx_fpa_addr_range_error_s cnf71xx; }; union cvmx_fpa_bist_status { @@ -107,24 +102,6 @@ union cvmx_fpa_bist_status { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_fpa_bist_status_s cn30xx; - struct cvmx_fpa_bist_status_s cn31xx; - struct cvmx_fpa_bist_status_s cn38xx; - struct cvmx_fpa_bist_status_s cn38xxp2; - struct cvmx_fpa_bist_status_s cn50xx; - struct cvmx_fpa_bist_status_s cn52xx; - struct cvmx_fpa_bist_status_s cn52xxp1; - struct cvmx_fpa_bist_status_s cn56xx; - struct cvmx_fpa_bist_status_s cn56xxp1; - struct cvmx_fpa_bist_status_s cn58xx; - struct cvmx_fpa_bist_status_s cn58xxp1; - struct cvmx_fpa_bist_status_s cn61xx; - struct cvmx_fpa_bist_status_s cn63xx; - struct cvmx_fpa_bist_status_s cn63xxp1; - struct cvmx_fpa_bist_status_s cn66xx; - struct cvmx_fpa_bist_status_s cn68xx; - struct cvmx_fpa_bist_status_s cn68xxp1; - struct cvmx_fpa_bist_status_s cnf71xx; }; union cvmx_fpa_ctl_status { @@ -173,23 +150,6 @@ union cvmx_fpa_ctl_status { uint64_t reserved_18_63:46; #endif } cn30xx; - struct cvmx_fpa_ctl_status_cn30xx cn31xx; - struct cvmx_fpa_ctl_status_cn30xx cn38xx; - struct cvmx_fpa_ctl_status_cn30xx cn38xxp2; - struct cvmx_fpa_ctl_status_cn30xx cn50xx; - struct cvmx_fpa_ctl_status_cn30xx cn52xx; - struct cvmx_fpa_ctl_status_cn30xx cn52xxp1; - struct cvmx_fpa_ctl_status_cn30xx cn56xx; - struct cvmx_fpa_ctl_status_cn30xx cn56xxp1; - struct cvmx_fpa_ctl_status_cn30xx cn58xx; - struct cvmx_fpa_ctl_status_cn30xx cn58xxp1; - struct cvmx_fpa_ctl_status_s cn61xx; - struct cvmx_fpa_ctl_status_s cn63xx; - struct cvmx_fpa_ctl_status_cn30xx cn63xxp1; - struct cvmx_fpa_ctl_status_s cn66xx; - struct cvmx_fpa_ctl_status_s cn68xx; - struct cvmx_fpa_ctl_status_s cn68xxp1; - struct cvmx_fpa_ctl_status_s cnf71xx; }; union cvmx_fpa_fpfx_marks { @@ -205,19 +165,6 @@ union cvmx_fpa_fpfx_marks { uint64_t reserved_22_63:42; #endif } s; - struct cvmx_fpa_fpfx_marks_s cn38xx; - struct cvmx_fpa_fpfx_marks_s cn38xxp2; - struct cvmx_fpa_fpfx_marks_s cn56xx; - struct cvmx_fpa_fpfx_marks_s cn56xxp1; - struct cvmx_fpa_fpfx_marks_s 
cn58xx; - struct cvmx_fpa_fpfx_marks_s cn58xxp1; - struct cvmx_fpa_fpfx_marks_s cn61xx; - struct cvmx_fpa_fpfx_marks_s cn63xx; - struct cvmx_fpa_fpfx_marks_s cn63xxp1; - struct cvmx_fpa_fpfx_marks_s cn66xx; - struct cvmx_fpa_fpfx_marks_s cn68xx; - struct cvmx_fpa_fpfx_marks_s cn68xxp1; - struct cvmx_fpa_fpfx_marks_s cnf71xx; }; union cvmx_fpa_fpfx_size { @@ -231,19 +178,6 @@ union cvmx_fpa_fpfx_size { uint64_t reserved_11_63:53; #endif } s; - struct cvmx_fpa_fpfx_size_s cn38xx; - struct cvmx_fpa_fpfx_size_s cn38xxp2; - struct cvmx_fpa_fpfx_size_s cn56xx; - struct cvmx_fpa_fpfx_size_s cn56xxp1; - struct cvmx_fpa_fpfx_size_s cn58xx; - struct cvmx_fpa_fpfx_size_s cn58xxp1; - struct cvmx_fpa_fpfx_size_s cn61xx; - struct cvmx_fpa_fpfx_size_s cn63xx; - struct cvmx_fpa_fpfx_size_s cn63xxp1; - struct cvmx_fpa_fpfx_size_s cn66xx; - struct cvmx_fpa_fpfx_size_s cn68xx; - struct cvmx_fpa_fpfx_size_s cn68xxp1; - struct cvmx_fpa_fpfx_size_s cnf71xx; }; union cvmx_fpa_fpf0_marks { @@ -259,19 +193,6 @@ union cvmx_fpa_fpf0_marks { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_fpa_fpf0_marks_s cn38xx; - struct cvmx_fpa_fpf0_marks_s cn38xxp2; - struct cvmx_fpa_fpf0_marks_s cn56xx; - struct cvmx_fpa_fpf0_marks_s cn56xxp1; - struct cvmx_fpa_fpf0_marks_s cn58xx; - struct cvmx_fpa_fpf0_marks_s cn58xxp1; - struct cvmx_fpa_fpf0_marks_s cn61xx; - struct cvmx_fpa_fpf0_marks_s cn63xx; - struct cvmx_fpa_fpf0_marks_s cn63xxp1; - struct cvmx_fpa_fpf0_marks_s cn66xx; - struct cvmx_fpa_fpf0_marks_s cn68xx; - struct cvmx_fpa_fpf0_marks_s cn68xxp1; - struct cvmx_fpa_fpf0_marks_s cnf71xx; }; union cvmx_fpa_fpf0_size { @@ -285,19 +206,6 @@ union cvmx_fpa_fpf0_size { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_fpa_fpf0_size_s cn38xx; - struct cvmx_fpa_fpf0_size_s cn38xxp2; - struct cvmx_fpa_fpf0_size_s cn56xx; - struct cvmx_fpa_fpf0_size_s cn56xxp1; - struct cvmx_fpa_fpf0_size_s cn58xx; - struct cvmx_fpa_fpf0_size_s cn58xxp1; - struct cvmx_fpa_fpf0_size_s cn61xx; - struct cvmx_fpa_fpf0_size_s cn63xx; - struct cvmx_fpa_fpf0_size_s cn63xxp1; - struct cvmx_fpa_fpf0_size_s cn66xx; - struct cvmx_fpa_fpf0_size_s cn68xx; - struct cvmx_fpa_fpf0_size_s cn68xxp1; - struct cvmx_fpa_fpf0_size_s cnf71xx; }; union cvmx_fpa_fpf8_marks { @@ -313,8 +221,6 @@ union cvmx_fpa_fpf8_marks { uint64_t reserved_22_63:42; #endif } s; - struct cvmx_fpa_fpf8_marks_s cn68xx; - struct cvmx_fpa_fpf8_marks_s cn68xxp1; }; union cvmx_fpa_fpf8_size { @@ -328,8 +234,6 @@ union cvmx_fpa_fpf8_size { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_fpa_fpf8_size_s cn68xx; - struct cvmx_fpa_fpf8_size_s cn68xxp1; }; union cvmx_fpa_int_enb { @@ -496,16 +400,6 @@ union cvmx_fpa_int_enb { uint64_t reserved_28_63:36; #endif } cn30xx; - struct cvmx_fpa_int_enb_cn30xx cn31xx; - struct cvmx_fpa_int_enb_cn30xx cn38xx; - struct cvmx_fpa_int_enb_cn30xx cn38xxp2; - struct cvmx_fpa_int_enb_cn30xx cn50xx; - struct cvmx_fpa_int_enb_cn30xx cn52xx; - struct cvmx_fpa_int_enb_cn30xx cn52xxp1; - struct cvmx_fpa_int_enb_cn30xx cn56xx; - struct cvmx_fpa_int_enb_cn30xx cn56xxp1; - struct cvmx_fpa_int_enb_cn30xx cn58xx; - struct cvmx_fpa_int_enb_cn30xx cn58xxp1; struct cvmx_fpa_int_enb_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_50_63:14; @@ -700,8 +594,6 @@ union cvmx_fpa_int_enb { uint64_t reserved_44_63:20; #endif } cn63xx; - struct cvmx_fpa_int_enb_cn30xx cn63xxp1; - struct cvmx_fpa_int_enb_cn61xx cn66xx; struct cvmx_fpa_int_enb_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_50_63:14; @@ -809,8 +701,6 @@ union cvmx_fpa_int_enb { uint64_t 
reserved_50_63:14; #endif } cn68xx; - struct cvmx_fpa_int_enb_cn68xx cn68xxp1; - struct cvmx_fpa_int_enb_cn61xx cnf71xx; }; union cvmx_fpa_int_sum { @@ -985,16 +875,6 @@ union cvmx_fpa_int_sum { uint64_t reserved_28_63:36; #endif } cn30xx; - struct cvmx_fpa_int_sum_cn30xx cn31xx; - struct cvmx_fpa_int_sum_cn30xx cn38xx; - struct cvmx_fpa_int_sum_cn30xx cn38xxp2; - struct cvmx_fpa_int_sum_cn30xx cn50xx; - struct cvmx_fpa_int_sum_cn30xx cn52xx; - struct cvmx_fpa_int_sum_cn30xx cn52xxp1; - struct cvmx_fpa_int_sum_cn30xx cn56xx; - struct cvmx_fpa_int_sum_cn30xx cn56xxp1; - struct cvmx_fpa_int_sum_cn30xx cn58xx; - struct cvmx_fpa_int_sum_cn30xx cn58xxp1; struct cvmx_fpa_int_sum_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_50_63:14; @@ -1189,11 +1069,6 @@ union cvmx_fpa_int_sum { uint64_t reserved_44_63:20; #endif } cn63xx; - struct cvmx_fpa_int_sum_cn30xx cn63xxp1; - struct cvmx_fpa_int_sum_cn61xx cn66xx; - struct cvmx_fpa_int_sum_s cn68xx; - struct cvmx_fpa_int_sum_s cn68xxp1; - struct cvmx_fpa_int_sum_cn61xx cnf71xx; }; union cvmx_fpa_packet_threshold { @@ -1207,12 +1082,6 @@ union cvmx_fpa_packet_threshold { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_fpa_packet_threshold_s cn61xx; - struct cvmx_fpa_packet_threshold_s cn63xx; - struct cvmx_fpa_packet_threshold_s cn66xx; - struct cvmx_fpa_packet_threshold_s cn68xx; - struct cvmx_fpa_packet_threshold_s cn68xxp1; - struct cvmx_fpa_packet_threshold_s cnf71xx; }; union cvmx_fpa_poolx_end_addr { @@ -1226,11 +1095,6 @@ union cvmx_fpa_poolx_end_addr { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_fpa_poolx_end_addr_s cn61xx; - struct cvmx_fpa_poolx_end_addr_s cn66xx; - struct cvmx_fpa_poolx_end_addr_s cn68xx; - struct cvmx_fpa_poolx_end_addr_s cn68xxp1; - struct cvmx_fpa_poolx_end_addr_s cnf71xx; }; union cvmx_fpa_poolx_start_addr { @@ -1244,11 +1108,6 @@ union cvmx_fpa_poolx_start_addr { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_fpa_poolx_start_addr_s cn61xx; - struct cvmx_fpa_poolx_start_addr_s cn66xx; - struct cvmx_fpa_poolx_start_addr_s cn68xx; - struct cvmx_fpa_poolx_start_addr_s cn68xxp1; - struct cvmx_fpa_poolx_start_addr_s cnf71xx; }; union cvmx_fpa_poolx_threshold { @@ -1271,11 +1130,6 @@ union cvmx_fpa_poolx_threshold { uint64_t reserved_29_63:35; #endif } cn61xx; - struct cvmx_fpa_poolx_threshold_cn61xx cn63xx; - struct cvmx_fpa_poolx_threshold_cn61xx cn66xx; - struct cvmx_fpa_poolx_threshold_s cn68xx; - struct cvmx_fpa_poolx_threshold_s cn68xxp1; - struct cvmx_fpa_poolx_threshold_cn61xx cnf71xx; }; union cvmx_fpa_quex_available { @@ -1298,23 +1152,6 @@ union cvmx_fpa_quex_available { uint64_t reserved_29_63:35; #endif } cn30xx; - struct cvmx_fpa_quex_available_cn30xx cn31xx; - struct cvmx_fpa_quex_available_cn30xx cn38xx; - struct cvmx_fpa_quex_available_cn30xx cn38xxp2; - struct cvmx_fpa_quex_available_cn30xx cn50xx; - struct cvmx_fpa_quex_available_cn30xx cn52xx; - struct cvmx_fpa_quex_available_cn30xx cn52xxp1; - struct cvmx_fpa_quex_available_cn30xx cn56xx; - struct cvmx_fpa_quex_available_cn30xx cn56xxp1; - struct cvmx_fpa_quex_available_cn30xx cn58xx; - struct cvmx_fpa_quex_available_cn30xx cn58xxp1; - struct cvmx_fpa_quex_available_cn30xx cn61xx; - struct cvmx_fpa_quex_available_cn30xx cn63xx; - struct cvmx_fpa_quex_available_cn30xx cn63xxp1; - struct cvmx_fpa_quex_available_cn30xx cn66xx; - struct cvmx_fpa_quex_available_s cn68xx; - struct cvmx_fpa_quex_available_s cn68xxp1; - struct cvmx_fpa_quex_available_cn30xx cnf71xx; }; union cvmx_fpa_quex_page_index { @@ -1328,24 +1165,6 @@ union 
cvmx_fpa_quex_page_index { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_fpa_quex_page_index_s cn30xx; - struct cvmx_fpa_quex_page_index_s cn31xx; - struct cvmx_fpa_quex_page_index_s cn38xx; - struct cvmx_fpa_quex_page_index_s cn38xxp2; - struct cvmx_fpa_quex_page_index_s cn50xx; - struct cvmx_fpa_quex_page_index_s cn52xx; - struct cvmx_fpa_quex_page_index_s cn52xxp1; - struct cvmx_fpa_quex_page_index_s cn56xx; - struct cvmx_fpa_quex_page_index_s cn56xxp1; - struct cvmx_fpa_quex_page_index_s cn58xx; - struct cvmx_fpa_quex_page_index_s cn58xxp1; - struct cvmx_fpa_quex_page_index_s cn61xx; - struct cvmx_fpa_quex_page_index_s cn63xx; - struct cvmx_fpa_quex_page_index_s cn63xxp1; - struct cvmx_fpa_quex_page_index_s cn66xx; - struct cvmx_fpa_quex_page_index_s cn68xx; - struct cvmx_fpa_quex_page_index_s cn68xxp1; - struct cvmx_fpa_quex_page_index_s cnf71xx; }; union cvmx_fpa_que8_page_index { @@ -1359,8 +1178,6 @@ union cvmx_fpa_que8_page_index { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_fpa_que8_page_index_s cn68xx; - struct cvmx_fpa_que8_page_index_s cn68xxp1; }; union cvmx_fpa_que_act { @@ -1376,24 +1193,6 @@ union cvmx_fpa_que_act { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_fpa_que_act_s cn30xx; - struct cvmx_fpa_que_act_s cn31xx; - struct cvmx_fpa_que_act_s cn38xx; - struct cvmx_fpa_que_act_s cn38xxp2; - struct cvmx_fpa_que_act_s cn50xx; - struct cvmx_fpa_que_act_s cn52xx; - struct cvmx_fpa_que_act_s cn52xxp1; - struct cvmx_fpa_que_act_s cn56xx; - struct cvmx_fpa_que_act_s cn56xxp1; - struct cvmx_fpa_que_act_s cn58xx; - struct cvmx_fpa_que_act_s cn58xxp1; - struct cvmx_fpa_que_act_s cn61xx; - struct cvmx_fpa_que_act_s cn63xx; - struct cvmx_fpa_que_act_s cn63xxp1; - struct cvmx_fpa_que_act_s cn66xx; - struct cvmx_fpa_que_act_s cn68xx; - struct cvmx_fpa_que_act_s cn68xxp1; - struct cvmx_fpa_que_act_s cnf71xx; }; union cvmx_fpa_que_exp { @@ -1409,24 +1208,6 @@ union cvmx_fpa_que_exp { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_fpa_que_exp_s cn30xx; - struct cvmx_fpa_que_exp_s cn31xx; - struct cvmx_fpa_que_exp_s cn38xx; - struct cvmx_fpa_que_exp_s cn38xxp2; - struct cvmx_fpa_que_exp_s cn50xx; - struct cvmx_fpa_que_exp_s cn52xx; - struct cvmx_fpa_que_exp_s cn52xxp1; - struct cvmx_fpa_que_exp_s cn56xx; - struct cvmx_fpa_que_exp_s cn56xxp1; - struct cvmx_fpa_que_exp_s cn58xx; - struct cvmx_fpa_que_exp_s cn58xxp1; - struct cvmx_fpa_que_exp_s cn61xx; - struct cvmx_fpa_que_exp_s cn63xx; - struct cvmx_fpa_que_exp_s cn63xxp1; - struct cvmx_fpa_que_exp_s cn66xx; - struct cvmx_fpa_que_exp_s cn68xx; - struct cvmx_fpa_que_exp_s cn68xxp1; - struct cvmx_fpa_que_exp_s cnf71xx; }; union cvmx_fpa_wart_ctl { @@ -1440,17 +1221,6 @@ union cvmx_fpa_wart_ctl { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_fpa_wart_ctl_s cn30xx; - struct cvmx_fpa_wart_ctl_s cn31xx; - struct cvmx_fpa_wart_ctl_s cn38xx; - struct cvmx_fpa_wart_ctl_s cn38xxp2; - struct cvmx_fpa_wart_ctl_s cn50xx; - struct cvmx_fpa_wart_ctl_s cn52xx; - struct cvmx_fpa_wart_ctl_s cn52xxp1; - struct cvmx_fpa_wart_ctl_s cn56xx; - struct cvmx_fpa_wart_ctl_s cn56xxp1; - struct cvmx_fpa_wart_ctl_s cn58xx; - struct cvmx_fpa_wart_ctl_s cn58xxp1; }; union cvmx_fpa_wart_status { @@ -1464,17 +1234,6 @@ union cvmx_fpa_wart_status { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_fpa_wart_status_s cn30xx; - struct cvmx_fpa_wart_status_s cn31xx; - struct cvmx_fpa_wart_status_s cn38xx; - struct cvmx_fpa_wart_status_s cn38xxp2; - struct cvmx_fpa_wart_status_s cn50xx; - struct cvmx_fpa_wart_status_s cn52xx; - struct 
cvmx_fpa_wart_status_s cn52xxp1; - struct cvmx_fpa_wart_status_s cn56xx; - struct cvmx_fpa_wart_status_s cn56xxp1; - struct cvmx_fpa_wart_status_s cn58xx; - struct cvmx_fpa_wart_status_s cn58xxp1; }; union cvmx_fpa_wqe_threshold { @@ -1488,12 +1247,6 @@ union cvmx_fpa_wqe_threshold { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_fpa_wqe_threshold_s cn61xx; - struct cvmx_fpa_wqe_threshold_s cn63xx; - struct cvmx_fpa_wqe_threshold_s cn66xx; - struct cvmx_fpa_wqe_threshold_s cn68xx; - struct cvmx_fpa_wqe_threshold_s cn68xxp1; - struct cvmx_fpa_wqe_threshold_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h index 80e4f8358b81..bdba676f1f2c 100644 --- a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h @@ -28,82 +28,9 @@ #ifndef __CVMX_GMXX_DEFS_H__ #define __CVMX_GMXX_DEFS_H__ -static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull; -} - -#define CVMX_GMXX_BPID_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8) -#define CVMX_GMXX_BPID_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull) -static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + 
(block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x8000000ull; -} - -#define CVMX_GMXX_EBP_DIS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull) -#define CVMX_GMXX_EBP_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull) static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x1000000ull; } @@ -113,82 +40,15 @@ static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id) static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x1000000ull; } return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull; } -static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull; -} - -#define CVMX_GMXX_PIPE_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull) -static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull; -} - static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -197,23 +57,9 @@ static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long bl return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -#define CVMX_GMXX_RXAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull) static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -225,19 +71,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned lon static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & 
OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -249,19 +82,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned lon static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -273,19 +93,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned lon static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -297,19 +104,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned lon static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 
2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -321,19 +115,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned lon static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -342,37 +123,9 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned lon return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_ALL_EN(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -384,20 +137,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned l static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case 
OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -406,73 +145,9 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 
0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -483,48 +158,10 @@ static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long #define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) #define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) -static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & 
OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -536,20 +173,6 @@ static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -561,20 +184,6 @@ static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -583,471 +192,20 @@ static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case 
OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x0ull) * 2048;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
-static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x0ull) * 2048;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x0ull) * 2048;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x0ull) * 2048;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
-	}
-	return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX &
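[Editorial aside, not part of the commit: every accessor in these hunks computes the same thing — a fixed CSR base plus a per-model "block" stride, scaled by the 2048-byte register pitch and wrapped by CVMX_ADD_IO_SEG() to form the I/O address. A minimal C sketch of that shared shape, using only helpers already visible in this header (cvmx_get_octeon_family(), CVMX_ADD_IO_SEG(), the OCTEON_* family masks); the function name and the base parameter are illustrative, not from the file:]

static inline uint64_t cvmx_gmxx_csr_addr(uint64_t base, unsigned long offset,
					  unsigned long block_id)
{
	switch (cvmx_get_octeon_family()) {
	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
		/* stride 0: every block_id aliases block 0 */
		return CVMX_ADD_IO_SEG(base) + offset * 2048;
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		/* CN68XX places GMX blocks 0x2000 register indexes apart */
		return CVMX_ADD_IO_SEG(base) + (offset + block_id * 0x2000ull) * 2048;
	}
	/* every other family uses the 0x10000 stride */
	return CVMX_ADD_IO_SEG(base) + (offset + block_id * 0x10000ull) * 2048;
}

[The per-register functions being deleted here are this pattern instantiated once per CSR; the ones that survive keep only the cases that differ from the fall-through default.]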
OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} -static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - 
return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x0ull) * 8; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x200000ull) * 8; - } - return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8; -} - -static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & 
OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x0ull) * 8; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x200000ull) * 8; - } - return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8; -} - -static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x0ull) * 8; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x200000ull) * 8; - } - return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8; -} - -static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull; -} - -#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull) -#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8) static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & 
OCTEON_FAMILY_MASK:
		return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x1000000ull;
	}
	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull;
}

-static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x1000000ull;
-	}
-	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
-static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x1000000ull;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
{
	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x8000000ull;
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x1000000ull;
	}
@@ -1057,20 +215,6 @@ static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
{
	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
		return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x0ull) * 2048;
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1079,97 +223,9 @@ static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block
	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}

-static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x1000000ull;
-	}
-	return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x1000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x1000000ull;
-	}
-	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TB_REG(unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
-	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x1000000ull;
-	}
-	return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
-{
-	switch (cvmx_get_octeon_family()) {
-	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-	case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-	case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-		return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) +
(block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1178,58 +234,10 @@ static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long b return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull; -} - #define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1238,48 +246,9 @@ static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long blo return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1291,20 +260,6 @@ static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, un static inline 
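[Editorial aside — a hedged usage sketch. Accessors like CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL above only compute an address; callers pair them with the generic CSR helpers cvmx_read_csr()/cvmx_write_csr() declared in cvmx.h. The port/interface indices and the bit being set below are made-up illustration, not taken from this diff:]

	uint64_t addr = CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(0 /* port */, 0 /* interface */);
	uint64_t val = cvmx_read_csr(addr);
	cvmx_write_csr(addr, val | 0x1000ull);	/* hypothetical field update */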
uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1313,92 +268,9 @@ static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsign return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 
0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -#define CVMX_GMXX_TXX_PIPE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048) -static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1407,323 +279,9 @@ static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long bl return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & 
OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return 
CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + 
(block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return 
CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - -static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - 
case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x0ull) * 2048; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x2000ull) * 2048; - } - return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048; -} - static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x0ull) * 2048; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: @@ -1732,145 +290,9 @@ static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048; } -static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull; -} - -#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8) -static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull; -} - static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x1000000ull; } @@ -1880,151 +302,24 @@ static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id) static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x1000000ull; } return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull; } -static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull; -} - static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x1000000ull; } return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull; } -static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull; -} - -static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull; -} - static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id) { 
switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x1000000ull; } @@ -2032,286 +327,19 @@ static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id) } #define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull) -#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull) #define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull) -#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8) #define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull) static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id) { switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x1000000ull; } return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull; } -static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull; - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x1000000ull; - } - return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull; -} - void __cvmx_interrupt_gmxx_enable(int interface); -union cvmx_gmxx_bad_reg { - uint64_t u64; - struct cvmx_gmxx_bad_reg_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_31_63:33; - uint64_t inb_nxa:4; - uint64_t statovr:1; - uint64_t loststat:4; - uint64_t reserved_18_21:4; - uint64_t out_ovr:16; - uint64_t ncb_ovr:1; - uint64_t out_col:1; -#else - uint64_t out_col:1; - uint64_t ncb_ovr:1; - uint64_t out_ovr:16; - uint64_t reserved_18_21:4; - uint64_t loststat:4; - uint64_t statovr:1; - uint64_t inb_nxa:4; - uint64_t reserved_31_63:33; -#endif - } s; - struct cvmx_gmxx_bad_reg_cn30xx { -#ifdef 
__BIG_ENDIAN_BITFIELD - uint64_t reserved_31_63:33; - uint64_t inb_nxa:4; - uint64_t statovr:1; - uint64_t reserved_25_25:1; - uint64_t loststat:3; - uint64_t reserved_5_21:17; - uint64_t out_ovr:3; - uint64_t reserved_0_1:2; -#else - uint64_t reserved_0_1:2; - uint64_t out_ovr:3; - uint64_t reserved_5_21:17; - uint64_t loststat:3; - uint64_t reserved_25_25:1; - uint64_t statovr:1; - uint64_t inb_nxa:4; - uint64_t reserved_31_63:33; -#endif - } cn30xx; - struct cvmx_gmxx_bad_reg_cn30xx cn31xx; - struct cvmx_gmxx_bad_reg_s cn38xx; - struct cvmx_gmxx_bad_reg_s cn38xxp2; - struct cvmx_gmxx_bad_reg_cn30xx cn50xx; - struct cvmx_gmxx_bad_reg_cn52xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_31_63:33; - uint64_t inb_nxa:4; - uint64_t statovr:1; - uint64_t loststat:4; - uint64_t reserved_6_21:16; - uint64_t out_ovr:4; - uint64_t reserved_0_1:2; -#else - uint64_t reserved_0_1:2; - uint64_t out_ovr:4; - uint64_t reserved_6_21:16; - uint64_t loststat:4; - uint64_t statovr:1; - uint64_t inb_nxa:4; - uint64_t reserved_31_63:33; -#endif - } cn52xx; - struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1; - struct cvmx_gmxx_bad_reg_cn52xx cn56xx; - struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1; - struct cvmx_gmxx_bad_reg_s cn58xx; - struct cvmx_gmxx_bad_reg_s cn58xxp1; - struct cvmx_gmxx_bad_reg_cn52xx cn61xx; - struct cvmx_gmxx_bad_reg_cn52xx cn63xx; - struct cvmx_gmxx_bad_reg_cn52xx cn63xxp1; - struct cvmx_gmxx_bad_reg_cn52xx cn66xx; - struct cvmx_gmxx_bad_reg_cn52xx cn68xx; - struct cvmx_gmxx_bad_reg_cn52xx cn68xxp1; - struct cvmx_gmxx_bad_reg_cn52xx cnf71xx; -}; - -union cvmx_gmxx_bist { - uint64_t u64; - struct cvmx_gmxx_bist_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_25_63:39; - uint64_t status:25; -#else - uint64_t status:25; - uint64_t reserved_25_63:39; -#endif - } s; - struct cvmx_gmxx_bist_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_10_63:54; - uint64_t status:10; -#else - uint64_t status:10; - uint64_t reserved_10_63:54; -#endif - } cn30xx; - struct cvmx_gmxx_bist_cn30xx cn31xx; - struct cvmx_gmxx_bist_cn30xx cn38xx; - struct cvmx_gmxx_bist_cn30xx cn38xxp2; - struct cvmx_gmxx_bist_cn50xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_12_63:52; - uint64_t status:12; -#else - uint64_t status:12; - uint64_t reserved_12_63:52; -#endif - } cn50xx; - struct cvmx_gmxx_bist_cn52xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t status:16; -#else - uint64_t status:16; - uint64_t reserved_16_63:48; -#endif - } cn52xx; - struct cvmx_gmxx_bist_cn52xx cn52xxp1; - struct cvmx_gmxx_bist_cn52xx cn56xx; - struct cvmx_gmxx_bist_cn52xx cn56xxp1; - struct cvmx_gmxx_bist_cn58xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_17_63:47; - uint64_t status:17; -#else - uint64_t status:17; - uint64_t reserved_17_63:47; -#endif - } cn58xx; - struct cvmx_gmxx_bist_cn58xx cn58xxp1; - struct cvmx_gmxx_bist_s cn61xx; - struct cvmx_gmxx_bist_s cn63xx; - struct cvmx_gmxx_bist_s cn63xxp1; - struct cvmx_gmxx_bist_s cn66xx; - struct cvmx_gmxx_bist_s cn68xx; - struct cvmx_gmxx_bist_s cn68xxp1; - struct cvmx_gmxx_bist_s cnf71xx; -}; - -union cvmx_gmxx_bpid_mapx { - uint64_t u64; - struct cvmx_gmxx_bpid_mapx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_17_63:47; - uint64_t status:1; - uint64_t reserved_9_15:7; - uint64_t val:1; - uint64_t reserved_6_7:2; - uint64_t bpid:6; -#else - uint64_t bpid:6; - uint64_t reserved_6_7:2; - uint64_t val:1; - uint64_t reserved_9_15:7; - uint64_t status:1; - uint64_t reserved_17_63:47; -#endif - } s; - struct cvmx_gmxx_bpid_mapx_s 
cn68xx; - struct cvmx_gmxx_bpid_mapx_s cn68xxp1; -}; - -union cvmx_gmxx_bpid_msk { - uint64_t u64; - struct cvmx_gmxx_bpid_msk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t msk_or:16; - uint64_t reserved_16_31:16; - uint64_t msk_and:16; -#else - uint64_t msk_and:16; - uint64_t reserved_16_31:16; - uint64_t msk_or:16; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_bpid_msk_s cn68xx; - struct cvmx_gmxx_bpid_msk_s cn68xxp1; -}; - -union cvmx_gmxx_clk_en { - uint64_t u64; - struct cvmx_gmxx_clk_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t clk_en:1; -#else - uint64_t clk_en:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_clk_en_s cn52xx; - struct cvmx_gmxx_clk_en_s cn52xxp1; - struct cvmx_gmxx_clk_en_s cn56xx; - struct cvmx_gmxx_clk_en_s cn56xxp1; - struct cvmx_gmxx_clk_en_s cn61xx; - struct cvmx_gmxx_clk_en_s cn63xx; - struct cvmx_gmxx_clk_en_s cn63xxp1; - struct cvmx_gmxx_clk_en_s cn66xx; - struct cvmx_gmxx_clk_en_s cn68xx; - struct cvmx_gmxx_clk_en_s cn68xxp1; - struct cvmx_gmxx_clk_en_s cnf71xx; -}; - -union cvmx_gmxx_ebp_dis { - uint64_t u64; - struct cvmx_gmxx_ebp_dis_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t dis:16; -#else - uint64_t dis:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_ebp_dis_s cn68xx; - struct cvmx_gmxx_ebp_dis_s cn68xxp1; -}; - -union cvmx_gmxx_ebp_msk { - uint64_t u64; - struct cvmx_gmxx_ebp_msk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t msk:16; -#else - uint64_t msk:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_ebp_msk_s cn68xx; - struct cvmx_gmxx_ebp_msk_s cn68xxp1; -}; - union cvmx_gmxx_hg2_control { uint64_t u64; struct cvmx_gmxx_hg2_control_s { @@ -2329,16 +357,6 @@ union cvmx_gmxx_hg2_control { uint64_t reserved_19_63:45; #endif } s; - struct cvmx_gmxx_hg2_control_s cn52xx; - struct cvmx_gmxx_hg2_control_s cn52xxp1; - struct cvmx_gmxx_hg2_control_s cn56xx; - struct cvmx_gmxx_hg2_control_s cn61xx; - struct cvmx_gmxx_hg2_control_s cn63xx; - struct cvmx_gmxx_hg2_control_s cn63xxp1; - struct cvmx_gmxx_hg2_control_s cn66xx; - struct cvmx_gmxx_hg2_control_s cn68xx; - struct cvmx_gmxx_hg2_control_s cn68xxp1; - struct cvmx_gmxx_hg2_control_s cnf71xx; }; union cvmx_gmxx_inf_mode { @@ -2392,9 +410,6 @@ union cvmx_gmxx_inf_mode { uint64_t reserved_2_63:62; #endif } cn31xx; - struct cvmx_gmxx_inf_mode_cn31xx cn38xx; - struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2; - struct cvmx_gmxx_inf_mode_cn30xx cn50xx; struct cvmx_gmxx_inf_mode_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -2414,11 +429,6 @@ union cvmx_gmxx_inf_mode { uint64_t reserved_10_63:54; #endif } cn52xx; - struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1; - struct cvmx_gmxx_inf_mode_cn52xx cn56xx; - struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1; - struct cvmx_gmxx_inf_mode_cn31xx cn58xx; - struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1; struct cvmx_gmxx_inf_mode_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -2438,8 +448,6 @@ union cvmx_gmxx_inf_mode { uint64_t reserved_12_63:52; #endif } cn61xx; - struct cvmx_gmxx_inf_mode_cn61xx cn63xx; - struct cvmx_gmxx_inf_mode_cn61xx cn63xxp1; struct cvmx_gmxx_inf_mode_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -2482,108 +490,6 @@ union cvmx_gmxx_inf_mode { uint64_t reserved_12_63:52; #endif } cn68xx; - struct cvmx_gmxx_inf_mode_cn68xx cn68xxp1; - struct cvmx_gmxx_inf_mode_cn61xx cnf71xx; -}; - -union cvmx_gmxx_nxa_adr { - 
uint64_t u64; - struct cvmx_gmxx_nxa_adr_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_23_63:41; - uint64_t pipe:7; - uint64_t reserved_6_15:10; - uint64_t prt:6; -#else - uint64_t prt:6; - uint64_t reserved_6_15:10; - uint64_t pipe:7; - uint64_t reserved_23_63:41; -#endif - } s; - struct cvmx_gmxx_nxa_adr_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_6_63:58; - uint64_t prt:6; -#else - uint64_t prt:6; - uint64_t reserved_6_63:58; -#endif - } cn30xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn31xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn38xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn38xxp2; - struct cvmx_gmxx_nxa_adr_cn30xx cn50xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn52xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn52xxp1; - struct cvmx_gmxx_nxa_adr_cn30xx cn56xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn56xxp1; - struct cvmx_gmxx_nxa_adr_cn30xx cn58xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn58xxp1; - struct cvmx_gmxx_nxa_adr_cn30xx cn61xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn63xx; - struct cvmx_gmxx_nxa_adr_cn30xx cn63xxp1; - struct cvmx_gmxx_nxa_adr_cn30xx cn66xx; - struct cvmx_gmxx_nxa_adr_s cn68xx; - struct cvmx_gmxx_nxa_adr_s cn68xxp1; - struct cvmx_gmxx_nxa_adr_cn30xx cnf71xx; -}; - -union cvmx_gmxx_pipe_status { - uint64_t u64; - struct cvmx_gmxx_pipe_status_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_20_63:44; - uint64_t ovr:4; - uint64_t reserved_12_15:4; - uint64_t bp:4; - uint64_t reserved_4_7:4; - uint64_t stop:4; -#else - uint64_t stop:4; - uint64_t reserved_4_7:4; - uint64_t bp:4; - uint64_t reserved_12_15:4; - uint64_t ovr:4; - uint64_t reserved_20_63:44; -#endif - } s; - struct cvmx_gmxx_pipe_status_s cn68xx; - struct cvmx_gmxx_pipe_status_s cn68xxp1; -}; - -union cvmx_gmxx_prtx_cbfc_ctl { - uint64_t u64; - struct cvmx_gmxx_prtx_cbfc_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t phys_en:16; - uint64_t logl_en:16; - uint64_t phys_bp:16; - uint64_t reserved_4_15:12; - uint64_t bck_en:1; - uint64_t drp_en:1; - uint64_t tx_en:1; - uint64_t rx_en:1; -#else - uint64_t rx_en:1; - uint64_t tx_en:1; - uint64_t drp_en:1; - uint64_t bck_en:1; - uint64_t reserved_4_15:12; - uint64_t phys_bp:16; - uint64_t logl_en:16; - uint64_t phys_en:16; -#endif - } s; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn61xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xxp1; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn66xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xx; - struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xxp1; - struct cvmx_gmxx_prtx_cbfc_ctl_s cnf71xx; }; union cvmx_gmxx_prtx_cfg { @@ -2632,10 +538,6 @@ union cvmx_gmxx_prtx_cfg { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx; - struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx; - struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2; - struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx; struct cvmx_gmxx_prtx_cfg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; @@ -2661,240 +563,6 @@ union cvmx_gmxx_prtx_cfg { uint64_t reserved_14_63:50; #endif } cn52xx; - struct cvmx_gmxx_prtx_cfg_cn52xx cn52xxp1; - struct cvmx_gmxx_prtx_cfg_cn52xx cn56xx; - struct cvmx_gmxx_prtx_cfg_cn52xx cn56xxp1; - struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx; - struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1; - struct cvmx_gmxx_prtx_cfg_cn52xx cn61xx; - struct cvmx_gmxx_prtx_cfg_cn52xx cn63xx; - struct cvmx_gmxx_prtx_cfg_cn52xx cn63xxp1; - struct cvmx_gmxx_prtx_cfg_cn52xx cn66xx; - struct cvmx_gmxx_prtx_cfg_s cn68xx; - struct cvmx_gmxx_prtx_cfg_s cn68xxp1; - 
struct cvmx_gmxx_prtx_cfg_cn52xx cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam0 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam0_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam0_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam0_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam0_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam0_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam0_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam0_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam0_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam0_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam1 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam1_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam1_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam1_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam1_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam1_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam1_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam1_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam1_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam1_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam2 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam2_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam2_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam2_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam2_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam2_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam2_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam2_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam2_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam3 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam3_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam3_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam3_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam3_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam3_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1; - struct 
cvmx_gmxx_rxx_adr_cam3_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam3_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam3_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam3_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam4 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam4_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam4_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam4_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam4_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam4_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam4_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam4_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam4_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam4_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam5 { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam5_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t adr:64; -#else - uint64_t adr:64; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam5_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam5_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam5_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam5_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam5_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam5_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam5_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam5_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam_all_en { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam_all_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t en:32; -#else - uint64_t en:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam_all_en_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam_all_en_s cn66xx; - struct cvmx_gmxx_rxx_adr_cam_all_en_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam_all_en_s cnf71xx; -}; - -union cvmx_gmxx_rxx_adr_cam_en { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_cam_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_8_63:56; - uint64_t en:8; -#else - uint64_t en:8; - uint64_t reserved_8_63:56; -#endif - } s; - struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_cam_en_s cn61xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn63xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_cam_en_s cn66xx; - struct 
cvmx_gmxx_rxx_adr_cam_en_s cn68xx; - struct cvmx_gmxx_rxx_adr_cam_en_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_cam_en_s cnf71xx; }; union cvmx_gmxx_rxx_adr_ctl { @@ -2912,174 +580,6 @@ union cvmx_gmxx_rxx_adr_ctl { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_gmxx_rxx_adr_ctl_s cn30xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn31xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn38xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2; - struct cvmx_gmxx_rxx_adr_ctl_s cn50xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn52xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1; - struct cvmx_gmxx_rxx_adr_ctl_s cn56xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1; - struct cvmx_gmxx_rxx_adr_ctl_s cn58xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1; - struct cvmx_gmxx_rxx_adr_ctl_s cn61xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn63xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn63xxp1; - struct cvmx_gmxx_rxx_adr_ctl_s cn66xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn68xx; - struct cvmx_gmxx_rxx_adr_ctl_s cn68xxp1; - struct cvmx_gmxx_rxx_adr_ctl_s cnf71xx; -}; - -union cvmx_gmxx_rxx_decision { - uint64_t u64; - struct cvmx_gmxx_rxx_decision_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_5_63:59; - uint64_t cnt:5; -#else - uint64_t cnt:5; - uint64_t reserved_5_63:59; -#endif - } s; - struct cvmx_gmxx_rxx_decision_s cn30xx; - struct cvmx_gmxx_rxx_decision_s cn31xx; - struct cvmx_gmxx_rxx_decision_s cn38xx; - struct cvmx_gmxx_rxx_decision_s cn38xxp2; - struct cvmx_gmxx_rxx_decision_s cn50xx; - struct cvmx_gmxx_rxx_decision_s cn52xx; - struct cvmx_gmxx_rxx_decision_s cn52xxp1; - struct cvmx_gmxx_rxx_decision_s cn56xx; - struct cvmx_gmxx_rxx_decision_s cn56xxp1; - struct cvmx_gmxx_rxx_decision_s cn58xx; - struct cvmx_gmxx_rxx_decision_s cn58xxp1; - struct cvmx_gmxx_rxx_decision_s cn61xx; - struct cvmx_gmxx_rxx_decision_s cn63xx; - struct cvmx_gmxx_rxx_decision_s cn63xxp1; - struct cvmx_gmxx_rxx_decision_s cn66xx; - struct cvmx_gmxx_rxx_decision_s cn68xx; - struct cvmx_gmxx_rxx_decision_s cn68xxp1; - struct cvmx_gmxx_rxx_decision_s cnf71xx; -}; - -union cvmx_gmxx_rxx_frm_chk { - uint64_t u64; - struct cvmx_gmxx_rxx_frm_chk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_10_63:54; - uint64_t niberr:1; - uint64_t skperr:1; - uint64_t rcverr:1; - uint64_t lenerr:1; - uint64_t alnerr:1; - uint64_t fcserr:1; - uint64_t jabber:1; - uint64_t maxerr:1; - uint64_t carext:1; - uint64_t minerr:1; -#else - uint64_t minerr:1; - uint64_t carext:1; - uint64_t maxerr:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t lenerr:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t reserved_10_63:54; -#endif - } s; - struct cvmx_gmxx_rxx_frm_chk_s cn30xx; - struct cvmx_gmxx_rxx_frm_chk_s cn31xx; - struct cvmx_gmxx_rxx_frm_chk_s cn38xx; - struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2; - struct cvmx_gmxx_rxx_frm_chk_cn50xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_10_63:54; - uint64_t niberr:1; - uint64_t skperr:1; - uint64_t rcverr:1; - uint64_t reserved_6_6:1; - uint64_t alnerr:1; - uint64_t fcserr:1; - uint64_t jabber:1; - uint64_t reserved_2_2:1; - uint64_t carext:1; - uint64_t reserved_0_0:1; -#else - uint64_t reserved_0_0:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t reserved_6_6:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t reserved_10_63:54; -#endif - } cn50xx; - struct cvmx_gmxx_rxx_frm_chk_cn52xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_9_63:55; - uint64_t skperr:1; - uint64_t rcverr:1; - 
uint64_t reserved_5_6:2; - uint64_t fcserr:1; - uint64_t jabber:1; - uint64_t reserved_2_2:1; - uint64_t carext:1; - uint64_t reserved_0_0:1; -#else - uint64_t reserved_0_0:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t reserved_5_6:2; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t reserved_9_63:55; -#endif - } cn52xx; - struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1; - struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx; - struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1; - struct cvmx_gmxx_rxx_frm_chk_s cn58xx; - struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1; - struct cvmx_gmxx_rxx_frm_chk_cn61xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_9_63:55; - uint64_t skperr:1; - uint64_t rcverr:1; - uint64_t reserved_5_6:2; - uint64_t fcserr:1; - uint64_t jabber:1; - uint64_t reserved_2_2:1; - uint64_t carext:1; - uint64_t minerr:1; -#else - uint64_t minerr:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t reserved_5_6:2; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t reserved_9_63:55; -#endif - } cn61xx; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xx; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xxp1; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cn66xx; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xx; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xxp1; - struct cvmx_gmxx_rxx_frm_chk_cn61xx cnf71xx; }; union cvmx_gmxx_rxx_frm_ctl { @@ -3165,8 +665,6 @@ union cvmx_gmxx_rxx_frm_ctl { uint64_t reserved_8_63:56; #endif } cn31xx; - struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx; - struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2; struct cvmx_gmxx_rxx_frm_ctl_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -3194,9 +692,6 @@ union cvmx_gmxx_rxx_frm_ctl { uint64_t reserved_11_63:53; #endif } cn50xx; - struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx; - struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1; - struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx; struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -3251,7 +746,6 @@ union cvmx_gmxx_rxx_frm_ctl { uint64_t reserved_11_63:53; #endif } cn58xx; - struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1; struct cvmx_gmxx_rxx_frm_ctl_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_13_63:51; @@ -3283,12 +777,6 @@ union cvmx_gmxx_rxx_frm_ctl { uint64_t reserved_13_63:51; #endif } cn61xx; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xx; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xxp1; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn66xx; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xx; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xxp1; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx cnf71xx; }; union cvmx_gmxx_rxx_frm_max { @@ -3302,12 +790,6 @@ union cvmx_gmxx_rxx_frm_max { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_gmxx_rxx_frm_max_s cn30xx; - struct cvmx_gmxx_rxx_frm_max_s cn31xx; - struct cvmx_gmxx_rxx_frm_max_s cn38xx; - struct cvmx_gmxx_rxx_frm_max_s cn38xxp2; - struct cvmx_gmxx_rxx_frm_max_s cn58xx; - struct cvmx_gmxx_rxx_frm_max_s cn58xxp1; }; union cvmx_gmxx_rxx_frm_min { @@ -3321,43 +803,6 @@ union cvmx_gmxx_rxx_frm_min { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_gmxx_rxx_frm_min_s cn30xx; - struct cvmx_gmxx_rxx_frm_min_s cn31xx; - struct cvmx_gmxx_rxx_frm_min_s cn38xx; - struct cvmx_gmxx_rxx_frm_min_s cn38xxp2; - struct cvmx_gmxx_rxx_frm_min_s cn58xx; - struct cvmx_gmxx_rxx_frm_min_s cn58xxp1; -}; - -union cvmx_gmxx_rxx_ifg { - uint64_t u64; - struct cvmx_gmxx_rxx_ifg_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t 
reserved_4_63:60; - uint64_t ifg:4; -#else - uint64_t ifg:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_gmxx_rxx_ifg_s cn30xx; - struct cvmx_gmxx_rxx_ifg_s cn31xx; - struct cvmx_gmxx_rxx_ifg_s cn38xx; - struct cvmx_gmxx_rxx_ifg_s cn38xxp2; - struct cvmx_gmxx_rxx_ifg_s cn50xx; - struct cvmx_gmxx_rxx_ifg_s cn52xx; - struct cvmx_gmxx_rxx_ifg_s cn52xxp1; - struct cvmx_gmxx_rxx_ifg_s cn56xx; - struct cvmx_gmxx_rxx_ifg_s cn56xxp1; - struct cvmx_gmxx_rxx_ifg_s cn58xx; - struct cvmx_gmxx_rxx_ifg_s cn58xxp1; - struct cvmx_gmxx_rxx_ifg_s cn61xx; - struct cvmx_gmxx_rxx_ifg_s cn63xx; - struct cvmx_gmxx_rxx_ifg_s cn63xxp1; - struct cvmx_gmxx_rxx_ifg_s cn66xx; - struct cvmx_gmxx_rxx_ifg_s cn68xx; - struct cvmx_gmxx_rxx_ifg_s cn68xxp1; - struct cvmx_gmxx_rxx_ifg_s cnf71xx; }; union cvmx_gmxx_rxx_int_en { @@ -3472,9 +917,6 @@ union cvmx_gmxx_rxx_int_en { uint64_t reserved_19_63:45; #endif } cn30xx; - struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx; - struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx; - struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2; struct cvmx_gmxx_rxx_int_en_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -3581,8 +1023,6 @@ union cvmx_gmxx_rxx_int_en { uint64_t reserved_29_63:35; #endif } cn52xx; - struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1; - struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx; struct cvmx_gmxx_rxx_int_en_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_27_63:37; @@ -3685,7 +1125,6 @@ union cvmx_gmxx_rxx_int_en { uint64_t reserved_20_63:44; #endif } cn58xx; - struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1; struct cvmx_gmxx_rxx_int_en_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -3745,12 +1184,6 @@ union cvmx_gmxx_rxx_int_en { uint64_t reserved_29_63:35; #endif } cn61xx; - struct cvmx_gmxx_rxx_int_en_cn61xx cn63xx; - struct cvmx_gmxx_rxx_int_en_cn61xx cn63xxp1; - struct cvmx_gmxx_rxx_int_en_cn61xx cn66xx; - struct cvmx_gmxx_rxx_int_en_cn61xx cn68xx; - struct cvmx_gmxx_rxx_int_en_cn61xx cn68xxp1; - struct cvmx_gmxx_rxx_int_en_cn61xx cnf71xx; }; union cvmx_gmxx_rxx_int_reg { @@ -3865,9 +1298,6 @@ union cvmx_gmxx_rxx_int_reg { uint64_t reserved_19_63:45; #endif } cn30xx; - struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx; - struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx; - struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2; struct cvmx_gmxx_rxx_int_reg_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -3974,8 +1404,6 @@ union cvmx_gmxx_rxx_int_reg { uint64_t reserved_29_63:35; #endif } cn52xx; - struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1; - struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx; struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_27_63:37; @@ -4078,7 +1506,6 @@ union cvmx_gmxx_rxx_int_reg { uint64_t reserved_20_63:44; #endif } cn58xx; - struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1; struct cvmx_gmxx_rxx_int_reg_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -4138,12 +1565,6 @@ union cvmx_gmxx_rxx_int_reg { uint64_t reserved_29_63:35; #endif } cn61xx; - struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xx; - struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xxp1; - struct cvmx_gmxx_rxx_int_reg_cn61xx cn66xx; - struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xx; - struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xxp1; - struct cvmx_gmxx_rxx_int_reg_cn61xx cnf71xx; }; union cvmx_gmxx_rxx_jabber { @@ -4157,51 +1578,6 @@ union cvmx_gmxx_rxx_jabber { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_gmxx_rxx_jabber_s cn30xx; - struct cvmx_gmxx_rxx_jabber_s cn31xx; - struct cvmx_gmxx_rxx_jabber_s cn38xx; - 
struct cvmx_gmxx_rxx_jabber_s cn38xxp2; - struct cvmx_gmxx_rxx_jabber_s cn50xx; - struct cvmx_gmxx_rxx_jabber_s cn52xx; - struct cvmx_gmxx_rxx_jabber_s cn52xxp1; - struct cvmx_gmxx_rxx_jabber_s cn56xx; - struct cvmx_gmxx_rxx_jabber_s cn56xxp1; - struct cvmx_gmxx_rxx_jabber_s cn58xx; - struct cvmx_gmxx_rxx_jabber_s cn58xxp1; - struct cvmx_gmxx_rxx_jabber_s cn61xx; - struct cvmx_gmxx_rxx_jabber_s cn63xx; - struct cvmx_gmxx_rxx_jabber_s cn63xxp1; - struct cvmx_gmxx_rxx_jabber_s cn66xx; - struct cvmx_gmxx_rxx_jabber_s cn68xx; - struct cvmx_gmxx_rxx_jabber_s cn68xxp1; - struct cvmx_gmxx_rxx_jabber_s cnf71xx; -}; - -union cvmx_gmxx_rxx_pause_drop_time { - uint64_t u64; - struct cvmx_gmxx_rxx_pause_drop_time_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t status:16; -#else - uint64_t status:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1; - struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1; - struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1; - struct cvmx_gmxx_rxx_pause_drop_time_s cn61xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1; - struct cvmx_gmxx_rxx_pause_drop_time_s cn66xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn68xx; - struct cvmx_gmxx_rxx_pause_drop_time_s cn68xxp1; - struct cvmx_gmxx_rxx_pause_drop_time_s cnf71xx; }; union cvmx_gmxx_rxx_rx_inbnd { @@ -4219,588 +1595,6 @@ union cvmx_gmxx_rxx_rx_inbnd { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx; - struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx; - struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx; - struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2; - struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx; - struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx; - struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1; -}; - -union cvmx_gmxx_rxx_stats_ctl { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t rd_clr:1; -#else - uint64_t rd_clr:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_rxx_stats_ctl_s cn30xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn31xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn38xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_ctl_s cn50xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn52xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_ctl_s cn56xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_ctl_s cn58xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_ctl_s cn61xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn63xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_ctl_s cn66xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn68xx; - struct cvmx_gmxx_rxx_stats_ctl_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_ctl_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_octs { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_octs_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t cnt:48; -#else - uint64_t cnt:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_rxx_stats_octs_s cn30xx; - struct cvmx_gmxx_rxx_stats_octs_s cn31xx; - struct cvmx_gmxx_rxx_stats_octs_s cn38xx; - struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_octs_s cn50xx; - struct cvmx_gmxx_rxx_stats_octs_s cn52xx; - struct 
cvmx_gmxx_rxx_stats_octs_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_octs_s cn56xx; - struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_octs_s cn58xx; - struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_octs_s cn61xx; - struct cvmx_gmxx_rxx_stats_octs_s cn63xx; - struct cvmx_gmxx_rxx_stats_octs_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_octs_s cn66xx; - struct cvmx_gmxx_rxx_stats_octs_s cn68xx; - struct cvmx_gmxx_rxx_stats_octs_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_octs_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_octs_ctl { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_octs_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t cnt:48; -#else - uint64_t cnt:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn61xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn66xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xx; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_octs_ctl_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_octs_dmac { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_octs_dmac_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t cnt:48; -#else - uint64_t cnt:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn61xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn66xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xx; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_octs_dmac_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_octs_drp { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_octs_drp_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t cnt:48; -#else - uint64_t cnt:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx; - struct 
cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn61xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn66xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xx; - struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_octs_drp_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_pkts { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_pkts_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t cnt:32; -#else - uint64_t cnt:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_stats_pkts_s cn30xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn31xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn38xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_pkts_s cn50xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn52xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_pkts_s cn56xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_pkts_s cn58xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_pkts_s cn61xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn63xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_pkts_s cn66xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn68xx; - struct cvmx_gmxx_rxx_stats_pkts_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_pkts_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_pkts_bad { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_pkts_bad_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t cnt:32; -#else - uint64_t cnt:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn61xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn66xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xx; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_pkts_bad_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_pkts_ctl { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t cnt:32; -#else - uint64_t cnt:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn61xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn66xx; - struct 
cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xx; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_pkts_ctl_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_pkts_dmac { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t cnt:32; -#else - uint64_t cnt:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn61xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn66xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xx; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_pkts_dmac_s cnf71xx; -}; - -union cvmx_gmxx_rxx_stats_pkts_drp { - uint64_t u64; - struct cvmx_gmxx_rxx_stats_pkts_drp_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t cnt:32; -#else - uint64_t cnt:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn61xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn66xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xx; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xxp1; - struct cvmx_gmxx_rxx_stats_pkts_drp_s cnf71xx; -}; - -union cvmx_gmxx_rxx_udd_skp { - uint64_t u64; - struct cvmx_gmxx_rxx_udd_skp_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_9_63:55; - uint64_t fcssel:1; - uint64_t reserved_7_7:1; - uint64_t len:7; -#else - uint64_t len:7; - uint64_t reserved_7_7:1; - uint64_t fcssel:1; - uint64_t reserved_9_63:55; -#endif - } s; - struct cvmx_gmxx_rxx_udd_skp_s cn30xx; - struct cvmx_gmxx_rxx_udd_skp_s cn31xx; - struct cvmx_gmxx_rxx_udd_skp_s cn38xx; - struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2; - struct cvmx_gmxx_rxx_udd_skp_s cn50xx; - struct cvmx_gmxx_rxx_udd_skp_s cn52xx; - struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1; - struct cvmx_gmxx_rxx_udd_skp_s cn56xx; - struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1; - struct cvmx_gmxx_rxx_udd_skp_s cn58xx; - struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1; - struct cvmx_gmxx_rxx_udd_skp_s cn61xx; - struct cvmx_gmxx_rxx_udd_skp_s cn63xx; - struct cvmx_gmxx_rxx_udd_skp_s cn63xxp1; - struct cvmx_gmxx_rxx_udd_skp_s cn66xx; - struct cvmx_gmxx_rxx_udd_skp_s cn68xx; - struct cvmx_gmxx_rxx_udd_skp_s cn68xxp1; - struct cvmx_gmxx_rxx_udd_skp_s cnf71xx; -}; - -union cvmx_gmxx_rx_bp_dropx { - uint64_t u64; - 
struct cvmx_gmxx_rx_bp_dropx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_6_63:58; - uint64_t mark:6; -#else - uint64_t mark:6; - uint64_t reserved_6_63:58; -#endif - } s; - struct cvmx_gmxx_rx_bp_dropx_s cn30xx; - struct cvmx_gmxx_rx_bp_dropx_s cn31xx; - struct cvmx_gmxx_rx_bp_dropx_s cn38xx; - struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2; - struct cvmx_gmxx_rx_bp_dropx_s cn50xx; - struct cvmx_gmxx_rx_bp_dropx_s cn52xx; - struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1; - struct cvmx_gmxx_rx_bp_dropx_s cn56xx; - struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1; - struct cvmx_gmxx_rx_bp_dropx_s cn58xx; - struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1; - struct cvmx_gmxx_rx_bp_dropx_s cn61xx; - struct cvmx_gmxx_rx_bp_dropx_s cn63xx; - struct cvmx_gmxx_rx_bp_dropx_s cn63xxp1; - struct cvmx_gmxx_rx_bp_dropx_s cn66xx; - struct cvmx_gmxx_rx_bp_dropx_s cn68xx; - struct cvmx_gmxx_rx_bp_dropx_s cn68xxp1; - struct cvmx_gmxx_rx_bp_dropx_s cnf71xx; -}; - -union cvmx_gmxx_rx_bp_offx { - uint64_t u64; - struct cvmx_gmxx_rx_bp_offx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_6_63:58; - uint64_t mark:6; -#else - uint64_t mark:6; - uint64_t reserved_6_63:58; -#endif - } s; - struct cvmx_gmxx_rx_bp_offx_s cn30xx; - struct cvmx_gmxx_rx_bp_offx_s cn31xx; - struct cvmx_gmxx_rx_bp_offx_s cn38xx; - struct cvmx_gmxx_rx_bp_offx_s cn38xxp2; - struct cvmx_gmxx_rx_bp_offx_s cn50xx; - struct cvmx_gmxx_rx_bp_offx_s cn52xx; - struct cvmx_gmxx_rx_bp_offx_s cn52xxp1; - struct cvmx_gmxx_rx_bp_offx_s cn56xx; - struct cvmx_gmxx_rx_bp_offx_s cn56xxp1; - struct cvmx_gmxx_rx_bp_offx_s cn58xx; - struct cvmx_gmxx_rx_bp_offx_s cn58xxp1; - struct cvmx_gmxx_rx_bp_offx_s cn61xx; - struct cvmx_gmxx_rx_bp_offx_s cn63xx; - struct cvmx_gmxx_rx_bp_offx_s cn63xxp1; - struct cvmx_gmxx_rx_bp_offx_s cn66xx; - struct cvmx_gmxx_rx_bp_offx_s cn68xx; - struct cvmx_gmxx_rx_bp_offx_s cn68xxp1; - struct cvmx_gmxx_rx_bp_offx_s cnf71xx; -}; - -union cvmx_gmxx_rx_bp_onx { - uint64_t u64; - struct cvmx_gmxx_rx_bp_onx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_11_63:53; - uint64_t mark:11; -#else - uint64_t mark:11; - uint64_t reserved_11_63:53; -#endif - } s; - struct cvmx_gmxx_rx_bp_onx_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_9_63:55; - uint64_t mark:9; -#else - uint64_t mark:9; - uint64_t reserved_9_63:55; -#endif - } cn30xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn31xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xxp2; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn50xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xxp1; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xxp1; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xxp1; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn61xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xx; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xxp1; - struct cvmx_gmxx_rx_bp_onx_cn30xx cn66xx; - struct cvmx_gmxx_rx_bp_onx_s cn68xx; - struct cvmx_gmxx_rx_bp_onx_s cn68xxp1; - struct cvmx_gmxx_rx_bp_onx_cn30xx cnf71xx; -}; - -union cvmx_gmxx_rx_hg2_status { - uint64_t u64; - struct cvmx_gmxx_rx_hg2_status_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t phtim2go:16; - uint64_t xof:16; - uint64_t lgtim2go:16; -#else - uint64_t lgtim2go:16; - uint64_t xof:16; - uint64_t phtim2go:16; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_rx_hg2_status_s cn52xx; - struct cvmx_gmxx_rx_hg2_status_s cn52xxp1; - struct cvmx_gmxx_rx_hg2_status_s cn56xx; - struct 
cvmx_gmxx_rx_hg2_status_s cn61xx; - struct cvmx_gmxx_rx_hg2_status_s cn63xx; - struct cvmx_gmxx_rx_hg2_status_s cn63xxp1; - struct cvmx_gmxx_rx_hg2_status_s cn66xx; - struct cvmx_gmxx_rx_hg2_status_s cn68xx; - struct cvmx_gmxx_rx_hg2_status_s cn68xxp1; - struct cvmx_gmxx_rx_hg2_status_s cnf71xx; -}; - -union cvmx_gmxx_rx_pass_en { - uint64_t u64; - struct cvmx_gmxx_rx_pass_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t en:16; -#else - uint64_t en:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_rx_pass_en_s cn38xx; - struct cvmx_gmxx_rx_pass_en_s cn38xxp2; - struct cvmx_gmxx_rx_pass_en_s cn58xx; - struct cvmx_gmxx_rx_pass_en_s cn58xxp1; -}; - -union cvmx_gmxx_rx_pass_mapx { - uint64_t u64; - struct cvmx_gmxx_rx_pass_mapx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t dprt:4; -#else - uint64_t dprt:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_gmxx_rx_pass_mapx_s cn38xx; - struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2; - struct cvmx_gmxx_rx_pass_mapx_s cn58xx; - struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1; -}; - -union cvmx_gmxx_rx_prt_info { - uint64_t u64; - struct cvmx_gmxx_rx_prt_info_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t drop:16; - uint64_t commit:16; -#else - uint64_t commit:16; - uint64_t drop:16; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_rx_prt_info_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_19_63:45; - uint64_t drop:3; - uint64_t reserved_3_15:13; - uint64_t commit:3; -#else - uint64_t commit:3; - uint64_t reserved_3_15:13; - uint64_t drop:3; - uint64_t reserved_19_63:45; -#endif - } cn30xx; - struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx; - struct cvmx_gmxx_rx_prt_info_s cn38xx; - struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx; - struct cvmx_gmxx_rx_prt_info_cn52xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_20_63:44; - uint64_t drop:4; - uint64_t reserved_4_15:12; - uint64_t commit:4; -#else - uint64_t commit:4; - uint64_t reserved_4_15:12; - uint64_t drop:4; - uint64_t reserved_20_63:44; -#endif - } cn52xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1; - struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1; - struct cvmx_gmxx_rx_prt_info_s cn58xx; - struct cvmx_gmxx_rx_prt_info_s cn58xxp1; - struct cvmx_gmxx_rx_prt_info_cn52xx cn61xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn63xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn63xxp1; - struct cvmx_gmxx_rx_prt_info_cn52xx cn66xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn68xx; - struct cvmx_gmxx_rx_prt_info_cn52xx cn68xxp1; - struct cvmx_gmxx_rx_prt_info_cnf71xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t drop:2; - uint64_t reserved_2_15:14; - uint64_t commit:2; -#else - uint64_t commit:2; - uint64_t reserved_2_15:14; - uint64_t drop:2; - uint64_t reserved_18_63:46; -#endif - } cnf71xx; }; union cvmx_gmxx_rx_prts { @@ -4814,74 +1608,6 @@ union cvmx_gmxx_rx_prts { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_gmxx_rx_prts_s cn30xx; - struct cvmx_gmxx_rx_prts_s cn31xx; - struct cvmx_gmxx_rx_prts_s cn38xx; - struct cvmx_gmxx_rx_prts_s cn38xxp2; - struct cvmx_gmxx_rx_prts_s cn50xx; - struct cvmx_gmxx_rx_prts_s cn52xx; - struct cvmx_gmxx_rx_prts_s cn52xxp1; - struct cvmx_gmxx_rx_prts_s cn56xx; - struct cvmx_gmxx_rx_prts_s cn56xxp1; - struct cvmx_gmxx_rx_prts_s cn58xx; - struct cvmx_gmxx_rx_prts_s cn58xxp1; - struct cvmx_gmxx_rx_prts_s cn61xx; - struct cvmx_gmxx_rx_prts_s cn63xx; - struct cvmx_gmxx_rx_prts_s 
cn63xxp1; - struct cvmx_gmxx_rx_prts_s cn66xx; - struct cvmx_gmxx_rx_prts_s cn68xx; - struct cvmx_gmxx_rx_prts_s cn68xxp1; - struct cvmx_gmxx_rx_prts_s cnf71xx; -}; - -union cvmx_gmxx_rx_tx_status { - uint64_t u64; - struct cvmx_gmxx_rx_tx_status_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_7_63:57; - uint64_t tx:3; - uint64_t reserved_3_3:1; - uint64_t rx:3; -#else - uint64_t rx:3; - uint64_t reserved_3_3:1; - uint64_t tx:3; - uint64_t reserved_7_63:57; -#endif - } s; - struct cvmx_gmxx_rx_tx_status_s cn30xx; - struct cvmx_gmxx_rx_tx_status_s cn31xx; - struct cvmx_gmxx_rx_tx_status_s cn50xx; -}; - -union cvmx_gmxx_rx_xaui_bad_col { - uint64_t u64; - struct cvmx_gmxx_rx_xaui_bad_col_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_40_63:24; - uint64_t val:1; - uint64_t state:3; - uint64_t lane_rxc:4; - uint64_t lane_rxd:32; -#else - uint64_t lane_rxd:32; - uint64_t lane_rxc:4; - uint64_t state:3; - uint64_t val:1; - uint64_t reserved_40_63:24; -#endif - } s; - struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1; - struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1; - struct cvmx_gmxx_rx_xaui_bad_col_s cn61xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn63xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn63xxp1; - struct cvmx_gmxx_rx_xaui_bad_col_s cn66xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn68xx; - struct cvmx_gmxx_rx_xaui_bad_col_s cn68xxp1; - struct cvmx_gmxx_rx_xaui_bad_col_s cnf71xx; }; union cvmx_gmxx_rx_xaui_ctl { @@ -4895,913 +1621,6 @@ union cvmx_gmxx_rx_xaui_ctl { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_gmxx_rx_xaui_ctl_s cn52xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1; - struct cvmx_gmxx_rx_xaui_ctl_s cn56xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1; - struct cvmx_gmxx_rx_xaui_ctl_s cn61xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn63xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn63xxp1; - struct cvmx_gmxx_rx_xaui_ctl_s cn66xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn68xx; - struct cvmx_gmxx_rx_xaui_ctl_s cn68xxp1; - struct cvmx_gmxx_rx_xaui_ctl_s cnf71xx; -}; - -union cvmx_gmxx_rxaui_ctl { - uint64_t u64; - struct cvmx_gmxx_rxaui_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t disparity:1; -#else - uint64_t disparity:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_rxaui_ctl_s cn68xx; - struct cvmx_gmxx_rxaui_ctl_s cn68xxp1; -}; - -union cvmx_gmxx_smacx { - uint64_t u64; - struct cvmx_gmxx_smacx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t smac:48; -#else - uint64_t smac:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_smacx_s cn30xx; - struct cvmx_gmxx_smacx_s cn31xx; - struct cvmx_gmxx_smacx_s cn38xx; - struct cvmx_gmxx_smacx_s cn38xxp2; - struct cvmx_gmxx_smacx_s cn50xx; - struct cvmx_gmxx_smacx_s cn52xx; - struct cvmx_gmxx_smacx_s cn52xxp1; - struct cvmx_gmxx_smacx_s cn56xx; - struct cvmx_gmxx_smacx_s cn56xxp1; - struct cvmx_gmxx_smacx_s cn58xx; - struct cvmx_gmxx_smacx_s cn58xxp1; - struct cvmx_gmxx_smacx_s cn61xx; - struct cvmx_gmxx_smacx_s cn63xx; - struct cvmx_gmxx_smacx_s cn63xxp1; - struct cvmx_gmxx_smacx_s cn66xx; - struct cvmx_gmxx_smacx_s cn68xx; - struct cvmx_gmxx_smacx_s cn68xxp1; - struct cvmx_gmxx_smacx_s cnf71xx; -}; - -union cvmx_gmxx_soft_bist { - uint64_t u64; - struct cvmx_gmxx_soft_bist_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_2_63:62; - uint64_t start_bist:1; - uint64_t clear_bist:1; -#else - uint64_t clear_bist:1; - uint64_t start_bist:1; - uint64_t reserved_2_63:62; 
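-#endif - } s; - struct cvmx_gmxx_soft_bist_s cn63xx; - struct cvmx_gmxx_soft_bist_s cn63xxp1; - struct cvmx_gmxx_soft_bist_s cn66xx; - struct cvmx_gmxx_soft_bist_s cn68xx; - struct cvmx_gmxx_soft_bist_s cn68xxp1; -};

Every union this patch trims follows the same access idiom: software reads the whole CSR into the u64 member, then picks fields out of the bitfield view, and the mirrored #ifdef __BIG_ENDIAN_BITFIELD halves keep the field offsets correct on either kernel endianness. A minimal sketch of that read path, using the cvmx_gmxx_stat_bp layout deleted just below purely as an illustration; the CVMX_GMXX_STAT_BP() address macro and the cvmx_read_csr() accessor are assumed from the Octeon executive headers, which this patch does not show:

static void gmx_report_backpressure(int interface)
{
	union cvmx_gmxx_stat_bp stat;

	/* Pull the raw 64-bit CSR into the union through its u64 view. */
	stat.u64 = cvmx_read_csr(CVMX_GMXX_STAT_BP(interface));

	/*
	 * Then read fields through the bitfield view; the endian-mirrored
	 * layouts make .s.bp and .s.cnt land on the right bits whether or
	 * not __BIG_ENDIAN_BITFIELD is defined.
	 */
	pr_info("GMX%d: bp=%u cnt=%u\n", interface,
		(unsigned int)stat.s.bp, (unsigned int)stat.s.cnt);
}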
-union cvmx_gmxx_stat_bp { - uint64_t u64; - struct cvmx_gmxx_stat_bp_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_17_63:47; - uint64_t bp:1; - uint64_t cnt:16; -#else - uint64_t cnt:16; - uint64_t bp:1; - uint64_t reserved_17_63:47; -#endif - } s; - struct cvmx_gmxx_stat_bp_s cn30xx; - struct cvmx_gmxx_stat_bp_s cn31xx; - struct cvmx_gmxx_stat_bp_s cn38xx; - struct cvmx_gmxx_stat_bp_s cn38xxp2; - struct cvmx_gmxx_stat_bp_s cn50xx; - struct cvmx_gmxx_stat_bp_s cn52xx; - struct cvmx_gmxx_stat_bp_s cn52xxp1; - struct cvmx_gmxx_stat_bp_s cn56xx; - struct cvmx_gmxx_stat_bp_s cn56xxp1; - struct cvmx_gmxx_stat_bp_s cn58xx; - struct cvmx_gmxx_stat_bp_s cn58xxp1; - struct cvmx_gmxx_stat_bp_s cn61xx; - struct cvmx_gmxx_stat_bp_s cn63xx; - struct cvmx_gmxx_stat_bp_s cn63xxp1; - struct cvmx_gmxx_stat_bp_s cn66xx; - struct cvmx_gmxx_stat_bp_s cn68xx; - struct cvmx_gmxx_stat_bp_s cn68xxp1; - struct cvmx_gmxx_stat_bp_s cnf71xx; -}; -union cvmx_gmxx_tb_reg { - uint64_t u64; - struct cvmx_gmxx_tb_reg_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t wr_magic:1; -#else - uint64_t wr_magic:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_tb_reg_s cn61xx; - struct cvmx_gmxx_tb_reg_s cn66xx; - struct cvmx_gmxx_tb_reg_s cn68xx; - struct cvmx_gmxx_tb_reg_s cnf71xx; -}; -union cvmx_gmxx_txx_append { - uint64_t u64; - struct cvmx_gmxx_txx_append_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t force_fcs:1; - uint64_t fcs:1; - uint64_t pad:1; - uint64_t preamble:1; -#else - uint64_t preamble:1; - uint64_t pad:1; - uint64_t fcs:1; - uint64_t force_fcs:1; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_gmxx_txx_append_s cn30xx; - struct cvmx_gmxx_txx_append_s cn31xx; - struct cvmx_gmxx_txx_append_s cn38xx; - struct cvmx_gmxx_txx_append_s cn38xxp2; - struct cvmx_gmxx_txx_append_s cn50xx; - struct cvmx_gmxx_txx_append_s cn52xx; - struct cvmx_gmxx_txx_append_s cn52xxp1; - struct cvmx_gmxx_txx_append_s cn56xx; - struct cvmx_gmxx_txx_append_s cn56xxp1; - struct cvmx_gmxx_txx_append_s cn58xx; - struct cvmx_gmxx_txx_append_s cn58xxp1; - struct cvmx_gmxx_txx_append_s cn61xx; - struct cvmx_gmxx_txx_append_s cn63xx; - struct cvmx_gmxx_txx_append_s cn63xxp1; - struct cvmx_gmxx_txx_append_s cn66xx; - struct cvmx_gmxx_txx_append_s cn68xx; - struct cvmx_gmxx_txx_append_s cn68xxp1; - struct cvmx_gmxx_txx_append_s cnf71xx; -}; -union cvmx_gmxx_txx_burst { - uint64_t u64; - struct cvmx_gmxx_txx_burst_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t burst:16; -#else - uint64_t burst:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_burst_s cn30xx; - struct cvmx_gmxx_txx_burst_s cn31xx; - struct cvmx_gmxx_txx_burst_s cn38xx; - struct cvmx_gmxx_txx_burst_s cn38xxp2; - struct cvmx_gmxx_txx_burst_s cn50xx; - struct cvmx_gmxx_txx_burst_s cn52xx; - struct cvmx_gmxx_txx_burst_s cn52xxp1; - struct cvmx_gmxx_txx_burst_s cn56xx; - struct cvmx_gmxx_txx_burst_s cn56xxp1; - struct cvmx_gmxx_txx_burst_s cn58xx; - struct cvmx_gmxx_txx_burst_s cn58xxp1; - struct cvmx_gmxx_txx_burst_s cn61xx; - struct cvmx_gmxx_txx_burst_s cn63xx; - struct cvmx_gmxx_txx_burst_s cn63xxp1; - struct cvmx_gmxx_txx_burst_s cn66xx; - struct cvmx_gmxx_txx_burst_s cn68xx; - struct cvmx_gmxx_txx_burst_s cn68xxp1; - struct
cvmx_gmxx_txx_burst_s cnf71xx; -}; - -union cvmx_gmxx_txx_cbfc_xoff { - uint64_t u64; - struct cvmx_gmxx_txx_cbfc_xoff_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t xoff:16; -#else - uint64_t xoff:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn61xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn63xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn63xxp1; - struct cvmx_gmxx_txx_cbfc_xoff_s cn66xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn68xx; - struct cvmx_gmxx_txx_cbfc_xoff_s cn68xxp1; - struct cvmx_gmxx_txx_cbfc_xoff_s cnf71xx; -}; - -union cvmx_gmxx_txx_cbfc_xon { - uint64_t u64; - struct cvmx_gmxx_txx_cbfc_xon_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t xon:16; -#else - uint64_t xon:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_cbfc_xon_s cn52xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn56xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn61xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn63xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn63xxp1; - struct cvmx_gmxx_txx_cbfc_xon_s cn66xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn68xx; - struct cvmx_gmxx_txx_cbfc_xon_s cn68xxp1; - struct cvmx_gmxx_txx_cbfc_xon_s cnf71xx; -}; - -union cvmx_gmxx_txx_clk { - uint64_t u64; - struct cvmx_gmxx_txx_clk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_6_63:58; - uint64_t clk_cnt:6; -#else - uint64_t clk_cnt:6; - uint64_t reserved_6_63:58; -#endif - } s; - struct cvmx_gmxx_txx_clk_s cn30xx; - struct cvmx_gmxx_txx_clk_s cn31xx; - struct cvmx_gmxx_txx_clk_s cn38xx; - struct cvmx_gmxx_txx_clk_s cn38xxp2; - struct cvmx_gmxx_txx_clk_s cn50xx; - struct cvmx_gmxx_txx_clk_s cn58xx; - struct cvmx_gmxx_txx_clk_s cn58xxp1; -}; - -union cvmx_gmxx_txx_ctl { - uint64_t u64; - struct cvmx_gmxx_txx_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_2_63:62; - uint64_t xsdef_en:1; - uint64_t xscol_en:1; -#else - uint64_t xscol_en:1; - uint64_t xsdef_en:1; - uint64_t reserved_2_63:62; -#endif - } s; - struct cvmx_gmxx_txx_ctl_s cn30xx; - struct cvmx_gmxx_txx_ctl_s cn31xx; - struct cvmx_gmxx_txx_ctl_s cn38xx; - struct cvmx_gmxx_txx_ctl_s cn38xxp2; - struct cvmx_gmxx_txx_ctl_s cn50xx; - struct cvmx_gmxx_txx_ctl_s cn52xx; - struct cvmx_gmxx_txx_ctl_s cn52xxp1; - struct cvmx_gmxx_txx_ctl_s cn56xx; - struct cvmx_gmxx_txx_ctl_s cn56xxp1; - struct cvmx_gmxx_txx_ctl_s cn58xx; - struct cvmx_gmxx_txx_ctl_s cn58xxp1; - struct cvmx_gmxx_txx_ctl_s cn61xx; - struct cvmx_gmxx_txx_ctl_s cn63xx; - struct cvmx_gmxx_txx_ctl_s cn63xxp1; - struct cvmx_gmxx_txx_ctl_s cn66xx; - struct cvmx_gmxx_txx_ctl_s cn68xx; - struct cvmx_gmxx_txx_ctl_s cn68xxp1; - struct cvmx_gmxx_txx_ctl_s cnf71xx; -}; - -union cvmx_gmxx_txx_min_pkt { - uint64_t u64; - struct cvmx_gmxx_txx_min_pkt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_8_63:56; - uint64_t min_size:8; -#else - uint64_t min_size:8; - uint64_t reserved_8_63:56; -#endif - } s; - struct cvmx_gmxx_txx_min_pkt_s cn30xx; - struct cvmx_gmxx_txx_min_pkt_s cn31xx; - struct cvmx_gmxx_txx_min_pkt_s cn38xx; - struct cvmx_gmxx_txx_min_pkt_s cn38xxp2; - struct cvmx_gmxx_txx_min_pkt_s cn50xx; - struct cvmx_gmxx_txx_min_pkt_s cn52xx; - struct cvmx_gmxx_txx_min_pkt_s cn52xxp1; - struct cvmx_gmxx_txx_min_pkt_s cn56xx; - struct cvmx_gmxx_txx_min_pkt_s cn56xxp1; - struct cvmx_gmxx_txx_min_pkt_s cn58xx; - struct cvmx_gmxx_txx_min_pkt_s cn58xxp1; - struct cvmx_gmxx_txx_min_pkt_s cn61xx; - struct cvmx_gmxx_txx_min_pkt_s cn63xx; - struct 
cvmx_gmxx_txx_min_pkt_s cn63xxp1; - struct cvmx_gmxx_txx_min_pkt_s cn66xx; - struct cvmx_gmxx_txx_min_pkt_s cn68xx; - struct cvmx_gmxx_txx_min_pkt_s cn68xxp1; - struct cvmx_gmxx_txx_min_pkt_s cnf71xx; -}; - -union cvmx_gmxx_txx_pause_pkt_interval { - uint64_t u64; - struct cvmx_gmxx_txx_pause_pkt_interval_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t interval:16; -#else - uint64_t interval:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn61xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn66xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xx; - struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xxp1; - struct cvmx_gmxx_txx_pause_pkt_interval_s cnf71xx; -}; - -union cvmx_gmxx_txx_pause_pkt_time { - uint64_t u64; - struct cvmx_gmxx_txx_pause_pkt_time_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t time:16; -#else - uint64_t time:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2; - struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1; - struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1; - struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1; - struct cvmx_gmxx_txx_pause_pkt_time_s cn61xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1; - struct cvmx_gmxx_txx_pause_pkt_time_s cn66xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn68xx; - struct cvmx_gmxx_txx_pause_pkt_time_s cn68xxp1; - struct cvmx_gmxx_txx_pause_pkt_time_s cnf71xx; -}; - -union cvmx_gmxx_txx_pause_togo { - uint64_t u64; - struct cvmx_gmxx_txx_pause_togo_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t msg_time:16; - uint64_t time:16; -#else - uint64_t time:16; - uint64_t msg_time:16; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_txx_pause_togo_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t time:16; -#else - uint64_t time:16; - uint64_t reserved_16_63:48; -#endif - } cn30xx; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx; - struct cvmx_gmxx_txx_pause_togo_s cn52xx; - struct cvmx_gmxx_txx_pause_togo_s cn52xxp1; - struct cvmx_gmxx_txx_pause_togo_s cn56xx; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx; - struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1; - struct 
cvmx_gmxx_txx_pause_togo_s cn61xx; - struct cvmx_gmxx_txx_pause_togo_s cn63xx; - struct cvmx_gmxx_txx_pause_togo_s cn63xxp1; - struct cvmx_gmxx_txx_pause_togo_s cn66xx; - struct cvmx_gmxx_txx_pause_togo_s cn68xx; - struct cvmx_gmxx_txx_pause_togo_s cn68xxp1; - struct cvmx_gmxx_txx_pause_togo_s cnf71xx; -}; - -union cvmx_gmxx_txx_pause_zero { - uint64_t u64; - struct cvmx_gmxx_txx_pause_zero_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t send:1; -#else - uint64_t send:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_txx_pause_zero_s cn30xx; - struct cvmx_gmxx_txx_pause_zero_s cn31xx; - struct cvmx_gmxx_txx_pause_zero_s cn38xx; - struct cvmx_gmxx_txx_pause_zero_s cn38xxp2; - struct cvmx_gmxx_txx_pause_zero_s cn50xx; - struct cvmx_gmxx_txx_pause_zero_s cn52xx; - struct cvmx_gmxx_txx_pause_zero_s cn52xxp1; - struct cvmx_gmxx_txx_pause_zero_s cn56xx; - struct cvmx_gmxx_txx_pause_zero_s cn56xxp1; - struct cvmx_gmxx_txx_pause_zero_s cn58xx; - struct cvmx_gmxx_txx_pause_zero_s cn58xxp1; - struct cvmx_gmxx_txx_pause_zero_s cn61xx; - struct cvmx_gmxx_txx_pause_zero_s cn63xx; - struct cvmx_gmxx_txx_pause_zero_s cn63xxp1; - struct cvmx_gmxx_txx_pause_zero_s cn66xx; - struct cvmx_gmxx_txx_pause_zero_s cn68xx; - struct cvmx_gmxx_txx_pause_zero_s cn68xxp1; - struct cvmx_gmxx_txx_pause_zero_s cnf71xx; -}; - -union cvmx_gmxx_txx_pipe { - uint64_t u64; - struct cvmx_gmxx_txx_pipe_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_33_63:31; - uint64_t ign_bp:1; - uint64_t reserved_21_31:11; - uint64_t nump:5; - uint64_t reserved_7_15:9; - uint64_t base:7; -#else - uint64_t base:7; - uint64_t reserved_7_15:9; - uint64_t nump:5; - uint64_t reserved_21_31:11; - uint64_t ign_bp:1; - uint64_t reserved_33_63:31; -#endif - } s; - struct cvmx_gmxx_txx_pipe_s cn68xx; - struct cvmx_gmxx_txx_pipe_s cn68xxp1; -}; - -union cvmx_gmxx_txx_sgmii_ctl { - uint64_t u64; - struct cvmx_gmxx_txx_sgmii_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t align:1; -#else - uint64_t align:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1; - struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1; - struct cvmx_gmxx_txx_sgmii_ctl_s cn61xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn63xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn63xxp1; - struct cvmx_gmxx_txx_sgmii_ctl_s cn66xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn68xx; - struct cvmx_gmxx_txx_sgmii_ctl_s cn68xxp1; - struct cvmx_gmxx_txx_sgmii_ctl_s cnf71xx; -}; - -union cvmx_gmxx_txx_slot { - uint64_t u64; - struct cvmx_gmxx_txx_slot_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_10_63:54; - uint64_t slot:10; -#else - uint64_t slot:10; - uint64_t reserved_10_63:54; -#endif - } s; - struct cvmx_gmxx_txx_slot_s cn30xx; - struct cvmx_gmxx_txx_slot_s cn31xx; - struct cvmx_gmxx_txx_slot_s cn38xx; - struct cvmx_gmxx_txx_slot_s cn38xxp2; - struct cvmx_gmxx_txx_slot_s cn50xx; - struct cvmx_gmxx_txx_slot_s cn52xx; - struct cvmx_gmxx_txx_slot_s cn52xxp1; - struct cvmx_gmxx_txx_slot_s cn56xx; - struct cvmx_gmxx_txx_slot_s cn56xxp1; - struct cvmx_gmxx_txx_slot_s cn58xx; - struct cvmx_gmxx_txx_slot_s cn58xxp1; - struct cvmx_gmxx_txx_slot_s cn61xx; - struct cvmx_gmxx_txx_slot_s cn63xx; - struct cvmx_gmxx_txx_slot_s cn63xxp1; - struct cvmx_gmxx_txx_slot_s cn66xx; - struct cvmx_gmxx_txx_slot_s cn68xx; - struct cvmx_gmxx_txx_slot_s cn68xxp1; - struct cvmx_gmxx_txx_slot_s cnf71xx; -}; - -union cvmx_gmxx_txx_soft_pause { - 
uint64_t u64; - struct cvmx_gmxx_txx_soft_pause_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t time:16; -#else - uint64_t time:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_txx_soft_pause_s cn30xx; - struct cvmx_gmxx_txx_soft_pause_s cn31xx; - struct cvmx_gmxx_txx_soft_pause_s cn38xx; - struct cvmx_gmxx_txx_soft_pause_s cn38xxp2; - struct cvmx_gmxx_txx_soft_pause_s cn50xx; - struct cvmx_gmxx_txx_soft_pause_s cn52xx; - struct cvmx_gmxx_txx_soft_pause_s cn52xxp1; - struct cvmx_gmxx_txx_soft_pause_s cn56xx; - struct cvmx_gmxx_txx_soft_pause_s cn56xxp1; - struct cvmx_gmxx_txx_soft_pause_s cn58xx; - struct cvmx_gmxx_txx_soft_pause_s cn58xxp1; - struct cvmx_gmxx_txx_soft_pause_s cn61xx; - struct cvmx_gmxx_txx_soft_pause_s cn63xx; - struct cvmx_gmxx_txx_soft_pause_s cn63xxp1; - struct cvmx_gmxx_txx_soft_pause_s cn66xx; - struct cvmx_gmxx_txx_soft_pause_s cn68xx; - struct cvmx_gmxx_txx_soft_pause_s cn68xxp1; - struct cvmx_gmxx_txx_soft_pause_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat0 { - uint64_t u64; - struct cvmx_gmxx_txx_stat0_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t xsdef:32; - uint64_t xscol:32; -#else - uint64_t xscol:32; - uint64_t xsdef:32; -#endif - } s; - struct cvmx_gmxx_txx_stat0_s cn30xx; - struct cvmx_gmxx_txx_stat0_s cn31xx; - struct cvmx_gmxx_txx_stat0_s cn38xx; - struct cvmx_gmxx_txx_stat0_s cn38xxp2; - struct cvmx_gmxx_txx_stat0_s cn50xx; - struct cvmx_gmxx_txx_stat0_s cn52xx; - struct cvmx_gmxx_txx_stat0_s cn52xxp1; - struct cvmx_gmxx_txx_stat0_s cn56xx; - struct cvmx_gmxx_txx_stat0_s cn56xxp1; - struct cvmx_gmxx_txx_stat0_s cn58xx; - struct cvmx_gmxx_txx_stat0_s cn58xxp1; - struct cvmx_gmxx_txx_stat0_s cn61xx; - struct cvmx_gmxx_txx_stat0_s cn63xx; - struct cvmx_gmxx_txx_stat0_s cn63xxp1; - struct cvmx_gmxx_txx_stat0_s cn66xx; - struct cvmx_gmxx_txx_stat0_s cn68xx; - struct cvmx_gmxx_txx_stat0_s cn68xxp1; - struct cvmx_gmxx_txx_stat0_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat1 { - uint64_t u64; - struct cvmx_gmxx_txx_stat1_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t scol:32; - uint64_t mcol:32; -#else - uint64_t mcol:32; - uint64_t scol:32; -#endif - } s; - struct cvmx_gmxx_txx_stat1_s cn30xx; - struct cvmx_gmxx_txx_stat1_s cn31xx; - struct cvmx_gmxx_txx_stat1_s cn38xx; - struct cvmx_gmxx_txx_stat1_s cn38xxp2; - struct cvmx_gmxx_txx_stat1_s cn50xx; - struct cvmx_gmxx_txx_stat1_s cn52xx; - struct cvmx_gmxx_txx_stat1_s cn52xxp1; - struct cvmx_gmxx_txx_stat1_s cn56xx; - struct cvmx_gmxx_txx_stat1_s cn56xxp1; - struct cvmx_gmxx_txx_stat1_s cn58xx; - struct cvmx_gmxx_txx_stat1_s cn58xxp1; - struct cvmx_gmxx_txx_stat1_s cn61xx; - struct cvmx_gmxx_txx_stat1_s cn63xx; - struct cvmx_gmxx_txx_stat1_s cn63xxp1; - struct cvmx_gmxx_txx_stat1_s cn66xx; - struct cvmx_gmxx_txx_stat1_s cn68xx; - struct cvmx_gmxx_txx_stat1_s cn68xxp1; - struct cvmx_gmxx_txx_stat1_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat2 { - uint64_t u64; - struct cvmx_gmxx_txx_stat2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t octs:48; -#else - uint64_t octs:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_txx_stat2_s cn30xx; - struct cvmx_gmxx_txx_stat2_s cn31xx; - struct cvmx_gmxx_txx_stat2_s cn38xx; - struct cvmx_gmxx_txx_stat2_s cn38xxp2; - struct cvmx_gmxx_txx_stat2_s cn50xx; - struct cvmx_gmxx_txx_stat2_s cn52xx; - struct cvmx_gmxx_txx_stat2_s cn52xxp1; - struct cvmx_gmxx_txx_stat2_s cn56xx; - struct cvmx_gmxx_txx_stat2_s cn56xxp1; - struct cvmx_gmxx_txx_stat2_s cn58xx; - struct cvmx_gmxx_txx_stat2_s cn58xxp1; - 
struct cvmx_gmxx_txx_stat2_s cn61xx; - struct cvmx_gmxx_txx_stat2_s cn63xx; - struct cvmx_gmxx_txx_stat2_s cn63xxp1; - struct cvmx_gmxx_txx_stat2_s cn66xx; - struct cvmx_gmxx_txx_stat2_s cn68xx; - struct cvmx_gmxx_txx_stat2_s cn68xxp1; - struct cvmx_gmxx_txx_stat2_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat3 { - uint64_t u64; - struct cvmx_gmxx_txx_stat3_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_32_63:32; - uint64_t pkts:32; -#else - uint64_t pkts:32; - uint64_t reserved_32_63:32; -#endif - } s; - struct cvmx_gmxx_txx_stat3_s cn30xx; - struct cvmx_gmxx_txx_stat3_s cn31xx; - struct cvmx_gmxx_txx_stat3_s cn38xx; - struct cvmx_gmxx_txx_stat3_s cn38xxp2; - struct cvmx_gmxx_txx_stat3_s cn50xx; - struct cvmx_gmxx_txx_stat3_s cn52xx; - struct cvmx_gmxx_txx_stat3_s cn52xxp1; - struct cvmx_gmxx_txx_stat3_s cn56xx; - struct cvmx_gmxx_txx_stat3_s cn56xxp1; - struct cvmx_gmxx_txx_stat3_s cn58xx; - struct cvmx_gmxx_txx_stat3_s cn58xxp1; - struct cvmx_gmxx_txx_stat3_s cn61xx; - struct cvmx_gmxx_txx_stat3_s cn63xx; - struct cvmx_gmxx_txx_stat3_s cn63xxp1; - struct cvmx_gmxx_txx_stat3_s cn66xx; - struct cvmx_gmxx_txx_stat3_s cn68xx; - struct cvmx_gmxx_txx_stat3_s cn68xxp1; - struct cvmx_gmxx_txx_stat3_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat4 { - uint64_t u64; - struct cvmx_gmxx_txx_stat4_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t hist1:32; - uint64_t hist0:32; -#else - uint64_t hist0:32; - uint64_t hist1:32; -#endif - } s; - struct cvmx_gmxx_txx_stat4_s cn30xx; - struct cvmx_gmxx_txx_stat4_s cn31xx; - struct cvmx_gmxx_txx_stat4_s cn38xx; - struct cvmx_gmxx_txx_stat4_s cn38xxp2; - struct cvmx_gmxx_txx_stat4_s cn50xx; - struct cvmx_gmxx_txx_stat4_s cn52xx; - struct cvmx_gmxx_txx_stat4_s cn52xxp1; - struct cvmx_gmxx_txx_stat4_s cn56xx; - struct cvmx_gmxx_txx_stat4_s cn56xxp1; - struct cvmx_gmxx_txx_stat4_s cn58xx; - struct cvmx_gmxx_txx_stat4_s cn58xxp1; - struct cvmx_gmxx_txx_stat4_s cn61xx; - struct cvmx_gmxx_txx_stat4_s cn63xx; - struct cvmx_gmxx_txx_stat4_s cn63xxp1; - struct cvmx_gmxx_txx_stat4_s cn66xx; - struct cvmx_gmxx_txx_stat4_s cn68xx; - struct cvmx_gmxx_txx_stat4_s cn68xxp1; - struct cvmx_gmxx_txx_stat4_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat5 { - uint64_t u64; - struct cvmx_gmxx_txx_stat5_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t hist3:32; - uint64_t hist2:32; -#else - uint64_t hist2:32; - uint64_t hist3:32; -#endif - } s; - struct cvmx_gmxx_txx_stat5_s cn30xx; - struct cvmx_gmxx_txx_stat5_s cn31xx; - struct cvmx_gmxx_txx_stat5_s cn38xx; - struct cvmx_gmxx_txx_stat5_s cn38xxp2; - struct cvmx_gmxx_txx_stat5_s cn50xx; - struct cvmx_gmxx_txx_stat5_s cn52xx; - struct cvmx_gmxx_txx_stat5_s cn52xxp1; - struct cvmx_gmxx_txx_stat5_s cn56xx; - struct cvmx_gmxx_txx_stat5_s cn56xxp1; - struct cvmx_gmxx_txx_stat5_s cn58xx; - struct cvmx_gmxx_txx_stat5_s cn58xxp1; - struct cvmx_gmxx_txx_stat5_s cn61xx; - struct cvmx_gmxx_txx_stat5_s cn63xx; - struct cvmx_gmxx_txx_stat5_s cn63xxp1; - struct cvmx_gmxx_txx_stat5_s cn66xx; - struct cvmx_gmxx_txx_stat5_s cn68xx; - struct cvmx_gmxx_txx_stat5_s cn68xxp1; - struct cvmx_gmxx_txx_stat5_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat6 { - uint64_t u64; - struct cvmx_gmxx_txx_stat6_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t hist5:32; - uint64_t hist4:32; -#else - uint64_t hist4:32; - uint64_t hist5:32; -#endif - } s; - struct cvmx_gmxx_txx_stat6_s cn30xx; - struct cvmx_gmxx_txx_stat6_s cn31xx; - struct cvmx_gmxx_txx_stat6_s cn38xx; - struct cvmx_gmxx_txx_stat6_s cn38xxp2; - struct cvmx_gmxx_txx_stat6_s cn50xx; - struct cvmx_gmxx_txx_stat6_s cn52xx; - 
struct cvmx_gmxx_txx_stat6_s cn52xxp1; - struct cvmx_gmxx_txx_stat6_s cn56xx; - struct cvmx_gmxx_txx_stat6_s cn56xxp1; - struct cvmx_gmxx_txx_stat6_s cn58xx; - struct cvmx_gmxx_txx_stat6_s cn58xxp1; - struct cvmx_gmxx_txx_stat6_s cn61xx; - struct cvmx_gmxx_txx_stat6_s cn63xx; - struct cvmx_gmxx_txx_stat6_s cn63xxp1; - struct cvmx_gmxx_txx_stat6_s cn66xx; - struct cvmx_gmxx_txx_stat6_s cn68xx; - struct cvmx_gmxx_txx_stat6_s cn68xxp1; - struct cvmx_gmxx_txx_stat6_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat7 { - uint64_t u64; - struct cvmx_gmxx_txx_stat7_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t hist7:32; - uint64_t hist6:32; -#else - uint64_t hist6:32; - uint64_t hist7:32; -#endif - } s; - struct cvmx_gmxx_txx_stat7_s cn30xx; - struct cvmx_gmxx_txx_stat7_s cn31xx; - struct cvmx_gmxx_txx_stat7_s cn38xx; - struct cvmx_gmxx_txx_stat7_s cn38xxp2; - struct cvmx_gmxx_txx_stat7_s cn50xx; - struct cvmx_gmxx_txx_stat7_s cn52xx; - struct cvmx_gmxx_txx_stat7_s cn52xxp1; - struct cvmx_gmxx_txx_stat7_s cn56xx; - struct cvmx_gmxx_txx_stat7_s cn56xxp1; - struct cvmx_gmxx_txx_stat7_s cn58xx; - struct cvmx_gmxx_txx_stat7_s cn58xxp1; - struct cvmx_gmxx_txx_stat7_s cn61xx; - struct cvmx_gmxx_txx_stat7_s cn63xx; - struct cvmx_gmxx_txx_stat7_s cn63xxp1; - struct cvmx_gmxx_txx_stat7_s cn66xx; - struct cvmx_gmxx_txx_stat7_s cn68xx; - struct cvmx_gmxx_txx_stat7_s cn68xxp1; - struct cvmx_gmxx_txx_stat7_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat8 { - uint64_t u64; - struct cvmx_gmxx_txx_stat8_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t mcst:32; - uint64_t bcst:32; -#else - uint64_t bcst:32; - uint64_t mcst:32; -#endif - } s; - struct cvmx_gmxx_txx_stat8_s cn30xx; - struct cvmx_gmxx_txx_stat8_s cn31xx; - struct cvmx_gmxx_txx_stat8_s cn38xx; - struct cvmx_gmxx_txx_stat8_s cn38xxp2; - struct cvmx_gmxx_txx_stat8_s cn50xx; - struct cvmx_gmxx_txx_stat8_s cn52xx; - struct cvmx_gmxx_txx_stat8_s cn52xxp1; - struct cvmx_gmxx_txx_stat8_s cn56xx; - struct cvmx_gmxx_txx_stat8_s cn56xxp1; - struct cvmx_gmxx_txx_stat8_s cn58xx; - struct cvmx_gmxx_txx_stat8_s cn58xxp1; - struct cvmx_gmxx_txx_stat8_s cn61xx; - struct cvmx_gmxx_txx_stat8_s cn63xx; - struct cvmx_gmxx_txx_stat8_s cn63xxp1; - struct cvmx_gmxx_txx_stat8_s cn66xx; - struct cvmx_gmxx_txx_stat8_s cn68xx; - struct cvmx_gmxx_txx_stat8_s cn68xxp1; - struct cvmx_gmxx_txx_stat8_s cnf71xx; -}; - -union cvmx_gmxx_txx_stat9 { - uint64_t u64; - struct cvmx_gmxx_txx_stat9_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t undflw:32; - uint64_t ctl:32; -#else - uint64_t ctl:32; - uint64_t undflw:32; -#endif - } s; - struct cvmx_gmxx_txx_stat9_s cn30xx; - struct cvmx_gmxx_txx_stat9_s cn31xx; - struct cvmx_gmxx_txx_stat9_s cn38xx; - struct cvmx_gmxx_txx_stat9_s cn38xxp2; - struct cvmx_gmxx_txx_stat9_s cn50xx; - struct cvmx_gmxx_txx_stat9_s cn52xx; - struct cvmx_gmxx_txx_stat9_s cn52xxp1; - struct cvmx_gmxx_txx_stat9_s cn56xx; - struct cvmx_gmxx_txx_stat9_s cn56xxp1; - struct cvmx_gmxx_txx_stat9_s cn58xx; - struct cvmx_gmxx_txx_stat9_s cn58xxp1; - struct cvmx_gmxx_txx_stat9_s cn61xx; - struct cvmx_gmxx_txx_stat9_s cn63xx; - struct cvmx_gmxx_txx_stat9_s cn63xxp1; - struct cvmx_gmxx_txx_stat9_s cn66xx; - struct cvmx_gmxx_txx_stat9_s cn68xx; - struct cvmx_gmxx_txx_stat9_s cn68xxp1; - struct cvmx_gmxx_txx_stat9_s cnf71xx; -}; - -union cvmx_gmxx_txx_stats_ctl { - uint64_t u64; - struct cvmx_gmxx_txx_stats_ctl_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t rd_clr:1; -#else - uint64_t rd_clr:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_txx_stats_ctl_s 
cn30xx; - struct cvmx_gmxx_txx_stats_ctl_s cn31xx; - struct cvmx_gmxx_txx_stats_ctl_s cn38xx; - struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2; - struct cvmx_gmxx_txx_stats_ctl_s cn50xx; - struct cvmx_gmxx_txx_stats_ctl_s cn52xx; - struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1; - struct cvmx_gmxx_txx_stats_ctl_s cn56xx; - struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1; - struct cvmx_gmxx_txx_stats_ctl_s cn58xx; - struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1; - struct cvmx_gmxx_txx_stats_ctl_s cn61xx; - struct cvmx_gmxx_txx_stats_ctl_s cn63xx; - struct cvmx_gmxx_txx_stats_ctl_s cn63xxp1; - struct cvmx_gmxx_txx_stats_ctl_s cn66xx; - struct cvmx_gmxx_txx_stats_ctl_s cn68xx; - struct cvmx_gmxx_txx_stats_ctl_s cn68xxp1; - struct cvmx_gmxx_txx_stats_ctl_s cnf71xx; }; union cvmx_gmxx_txx_thresh { @@ -5824,7 +1643,6 @@ union cvmx_gmxx_txx_thresh { uint64_t reserved_7_63:57; #endif } cn30xx; - struct cvmx_gmxx_txx_thresh_cn30xx cn31xx; struct cvmx_gmxx_txx_thresh_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -5834,240 +1652,6 @@ union cvmx_gmxx_txx_thresh { uint64_t reserved_9_63:55; #endif } cn38xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn38xxp2; - struct cvmx_gmxx_txx_thresh_cn30xx cn50xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn52xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn52xxp1; - struct cvmx_gmxx_txx_thresh_cn38xx cn56xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn56xxp1; - struct cvmx_gmxx_txx_thresh_cn38xx cn58xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn58xxp1; - struct cvmx_gmxx_txx_thresh_cn38xx cn61xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn63xx; - struct cvmx_gmxx_txx_thresh_cn38xx cn63xxp1; - struct cvmx_gmxx_txx_thresh_cn38xx cn66xx; - struct cvmx_gmxx_txx_thresh_s cn68xx; - struct cvmx_gmxx_txx_thresh_s cn68xxp1; - struct cvmx_gmxx_txx_thresh_cn38xx cnf71xx; -}; - -union cvmx_gmxx_tx_bp { - uint64_t u64; - struct cvmx_gmxx_tx_bp_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t bp:4; -#else - uint64_t bp:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_gmxx_tx_bp_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_3_63:61; - uint64_t bp:3; -#else - uint64_t bp:3; - uint64_t reserved_3_63:61; -#endif - } cn30xx; - struct cvmx_gmxx_tx_bp_cn30xx cn31xx; - struct cvmx_gmxx_tx_bp_s cn38xx; - struct cvmx_gmxx_tx_bp_s cn38xxp2; - struct cvmx_gmxx_tx_bp_cn30xx cn50xx; - struct cvmx_gmxx_tx_bp_s cn52xx; - struct cvmx_gmxx_tx_bp_s cn52xxp1; - struct cvmx_gmxx_tx_bp_s cn56xx; - struct cvmx_gmxx_tx_bp_s cn56xxp1; - struct cvmx_gmxx_tx_bp_s cn58xx; - struct cvmx_gmxx_tx_bp_s cn58xxp1; - struct cvmx_gmxx_tx_bp_s cn61xx; - struct cvmx_gmxx_tx_bp_s cn63xx; - struct cvmx_gmxx_tx_bp_s cn63xxp1; - struct cvmx_gmxx_tx_bp_s cn66xx; - struct cvmx_gmxx_tx_bp_s cn68xx; - struct cvmx_gmxx_tx_bp_s cn68xxp1; - struct cvmx_gmxx_tx_bp_cnf71xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_2_63:62; - uint64_t bp:2; -#else - uint64_t bp:2; - uint64_t reserved_2_63:62; -#endif - } cnf71xx; -}; - -union cvmx_gmxx_tx_clk_mskx { - uint64_t u64; - struct cvmx_gmxx_tx_clk_mskx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t msk:1; -#else - uint64_t msk:1; - uint64_t reserved_1_63:63; -#endif - } s; - struct cvmx_gmxx_tx_clk_mskx_s cn30xx; - struct cvmx_gmxx_tx_clk_mskx_s cn50xx; -}; - -union cvmx_gmxx_tx_col_attempt { - uint64_t u64; - struct cvmx_gmxx_tx_col_attempt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_5_63:59; - uint64_t limit:5; -#else - uint64_t limit:5; - uint64_t reserved_5_63:59; -#endif - } s; - struct 
cvmx_gmxx_tx_col_attempt_s cn30xx; - struct cvmx_gmxx_tx_col_attempt_s cn31xx; - struct cvmx_gmxx_tx_col_attempt_s cn38xx; - struct cvmx_gmxx_tx_col_attempt_s cn38xxp2; - struct cvmx_gmxx_tx_col_attempt_s cn50xx; - struct cvmx_gmxx_tx_col_attempt_s cn52xx; - struct cvmx_gmxx_tx_col_attempt_s cn52xxp1; - struct cvmx_gmxx_tx_col_attempt_s cn56xx; - struct cvmx_gmxx_tx_col_attempt_s cn56xxp1; - struct cvmx_gmxx_tx_col_attempt_s cn58xx; - struct cvmx_gmxx_tx_col_attempt_s cn58xxp1; - struct cvmx_gmxx_tx_col_attempt_s cn61xx; - struct cvmx_gmxx_tx_col_attempt_s cn63xx; - struct cvmx_gmxx_tx_col_attempt_s cn63xxp1; - struct cvmx_gmxx_tx_col_attempt_s cn66xx; - struct cvmx_gmxx_tx_col_attempt_s cn68xx; - struct cvmx_gmxx_tx_col_attempt_s cn68xxp1; - struct cvmx_gmxx_tx_col_attempt_s cnf71xx; -}; - -union cvmx_gmxx_tx_corrupt { - uint64_t u64; - struct cvmx_gmxx_tx_corrupt_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_4_63:60; - uint64_t corrupt:4; -#else - uint64_t corrupt:4; - uint64_t reserved_4_63:60; -#endif - } s; - struct cvmx_gmxx_tx_corrupt_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_3_63:61; - uint64_t corrupt:3; -#else - uint64_t corrupt:3; - uint64_t reserved_3_63:61; -#endif - } cn30xx; - struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx; - struct cvmx_gmxx_tx_corrupt_s cn38xx; - struct cvmx_gmxx_tx_corrupt_s cn38xxp2; - struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx; - struct cvmx_gmxx_tx_corrupt_s cn52xx; - struct cvmx_gmxx_tx_corrupt_s cn52xxp1; - struct cvmx_gmxx_tx_corrupt_s cn56xx; - struct cvmx_gmxx_tx_corrupt_s cn56xxp1; - struct cvmx_gmxx_tx_corrupt_s cn58xx; - struct cvmx_gmxx_tx_corrupt_s cn58xxp1; - struct cvmx_gmxx_tx_corrupt_s cn61xx; - struct cvmx_gmxx_tx_corrupt_s cn63xx; - struct cvmx_gmxx_tx_corrupt_s cn63xxp1; - struct cvmx_gmxx_tx_corrupt_s cn66xx; - struct cvmx_gmxx_tx_corrupt_s cn68xx; - struct cvmx_gmxx_tx_corrupt_s cn68xxp1; - struct cvmx_gmxx_tx_corrupt_cnf71xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_2_63:62; - uint64_t corrupt:2; -#else - uint64_t corrupt:2; - uint64_t reserved_2_63:62; -#endif - } cnf71xx; -}; - -union cvmx_gmxx_tx_hg2_reg1 { - uint64_t u64; - struct cvmx_gmxx_tx_hg2_reg1_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t tx_xof:16; -#else - uint64_t tx_xof:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_hg2_reg1_s cn52xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1; - struct cvmx_gmxx_tx_hg2_reg1_s cn56xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn61xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn63xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn63xxp1; - struct cvmx_gmxx_tx_hg2_reg1_s cn66xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn68xx; - struct cvmx_gmxx_tx_hg2_reg1_s cn68xxp1; - struct cvmx_gmxx_tx_hg2_reg1_s cnf71xx; -}; - -union cvmx_gmxx_tx_hg2_reg2 { - uint64_t u64; - struct cvmx_gmxx_tx_hg2_reg2_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t tx_xon:16; -#else - uint64_t tx_xon:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_hg2_reg2_s cn52xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1; - struct cvmx_gmxx_tx_hg2_reg2_s cn56xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn61xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn63xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn63xxp1; - struct cvmx_gmxx_tx_hg2_reg2_s cn66xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn68xx; - struct cvmx_gmxx_tx_hg2_reg2_s cn68xxp1; - struct cvmx_gmxx_tx_hg2_reg2_s cnf71xx; -}; - -union cvmx_gmxx_tx_ifg { - uint64_t u64; - struct cvmx_gmxx_tx_ifg_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_8_63:56; 
- uint64_t ifg2:4; - uint64_t ifg1:4; -#else - uint64_t ifg1:4; - uint64_t ifg2:4; - uint64_t reserved_8_63:56; -#endif - } s; - struct cvmx_gmxx_tx_ifg_s cn30xx; - struct cvmx_gmxx_tx_ifg_s cn31xx; - struct cvmx_gmxx_tx_ifg_s cn38xx; - struct cvmx_gmxx_tx_ifg_s cn38xxp2; - struct cvmx_gmxx_tx_ifg_s cn50xx; - struct cvmx_gmxx_tx_ifg_s cn52xx; - struct cvmx_gmxx_tx_ifg_s cn52xxp1; - struct cvmx_gmxx_tx_ifg_s cn56xx; - struct cvmx_gmxx_tx_ifg_s cn56xxp1; - struct cvmx_gmxx_tx_ifg_s cn58xx; - struct cvmx_gmxx_tx_ifg_s cn58xxp1; - struct cvmx_gmxx_tx_ifg_s cn61xx; - struct cvmx_gmxx_tx_ifg_s cn63xx; - struct cvmx_gmxx_tx_ifg_s cn63xxp1; - struct cvmx_gmxx_tx_ifg_s cn66xx; - struct cvmx_gmxx_tx_ifg_s cn68xx; - struct cvmx_gmxx_tx_ifg_s cn68xxp1; - struct cvmx_gmxx_tx_ifg_s cnf71xx; }; union cvmx_gmxx_tx_int_en { @@ -6183,7 +1767,6 @@ union cvmx_gmxx_tx_int_en { uint64_t reserved_16_63:48; #endif } cn38xxp2; - struct cvmx_gmxx_tx_int_en_cn30xx cn50xx; struct cvmx_gmxx_tx_int_en_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -6205,12 +1788,6 @@ union cvmx_gmxx_tx_int_en { uint64_t reserved_20_63:44; #endif } cn52xx; - struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1; - struct cvmx_gmxx_tx_int_en_cn52xx cn56xx; - struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1; - struct cvmx_gmxx_tx_int_en_cn38xx cn58xx; - struct cvmx_gmxx_tx_int_en_cn38xx cn58xxp1; - struct cvmx_gmxx_tx_int_en_s cn61xx; struct cvmx_gmxx_tx_int_en_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_24_63:40; @@ -6234,8 +1811,6 @@ union cvmx_gmxx_tx_int_en { uint64_t reserved_24_63:40; #endif } cn63xx; - struct cvmx_gmxx_tx_int_en_cn63xx cn63xxp1; - struct cvmx_gmxx_tx_int_en_s cn66xx; struct cvmx_gmxx_tx_int_en_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; @@ -6261,7 +1836,6 @@ union cvmx_gmxx_tx_int_en { uint64_t reserved_25_63:39; #endif } cn68xx; - struct cvmx_gmxx_tx_int_en_cn68xx cn68xxp1; struct cvmx_gmxx_tx_int_en_cnf71xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; @@ -6410,7 +1984,6 @@ union cvmx_gmxx_tx_int_reg { uint64_t reserved_16_63:48; #endif } cn38xxp2; - struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx; struct cvmx_gmxx_tx_int_reg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -6432,12 +2005,6 @@ union cvmx_gmxx_tx_int_reg { uint64_t reserved_20_63:44; #endif } cn52xx; - struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1; - struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx; - struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1; - struct cvmx_gmxx_tx_int_reg_cn38xx cn58xx; - struct cvmx_gmxx_tx_int_reg_cn38xx cn58xxp1; - struct cvmx_gmxx_tx_int_reg_s cn61xx; struct cvmx_gmxx_tx_int_reg_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_24_63:40; @@ -6461,8 +2028,6 @@ union cvmx_gmxx_tx_int_reg { uint64_t reserved_24_63:40; #endif } cn63xx; - struct cvmx_gmxx_tx_int_reg_cn63xx cn63xxp1; - struct cvmx_gmxx_tx_int_reg_s cn66xx; struct cvmx_gmxx_tx_int_reg_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; @@ -6488,7 +2053,6 @@ union cvmx_gmxx_tx_int_reg { uint64_t reserved_25_63:39; #endif } cn68xx; - struct cvmx_gmxx_tx_int_reg_cn68xx cn68xxp1; struct cvmx_gmxx_tx_int_reg_cnf71xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; @@ -6524,68 +2088,6 @@ union cvmx_gmxx_tx_int_reg { } cnf71xx; }; -union cvmx_gmxx_tx_jam { - uint64_t u64; - struct cvmx_gmxx_tx_jam_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_8_63:56; - uint64_t jam:8; -#else - uint64_t jam:8; - uint64_t reserved_8_63:56; -#endif - } s; - struct cvmx_gmxx_tx_jam_s cn30xx; - 
struct cvmx_gmxx_tx_jam_s cn31xx; - struct cvmx_gmxx_tx_jam_s cn38xx; - struct cvmx_gmxx_tx_jam_s cn38xxp2; - struct cvmx_gmxx_tx_jam_s cn50xx; - struct cvmx_gmxx_tx_jam_s cn52xx; - struct cvmx_gmxx_tx_jam_s cn52xxp1; - struct cvmx_gmxx_tx_jam_s cn56xx; - struct cvmx_gmxx_tx_jam_s cn56xxp1; - struct cvmx_gmxx_tx_jam_s cn58xx; - struct cvmx_gmxx_tx_jam_s cn58xxp1; - struct cvmx_gmxx_tx_jam_s cn61xx; - struct cvmx_gmxx_tx_jam_s cn63xx; - struct cvmx_gmxx_tx_jam_s cn63xxp1; - struct cvmx_gmxx_tx_jam_s cn66xx; - struct cvmx_gmxx_tx_jam_s cn68xx; - struct cvmx_gmxx_tx_jam_s cn68xxp1; - struct cvmx_gmxx_tx_jam_s cnf71xx; -}; - -union cvmx_gmxx_tx_lfsr { - uint64_t u64; - struct cvmx_gmxx_tx_lfsr_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t lfsr:16; -#else - uint64_t lfsr:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_lfsr_s cn30xx; - struct cvmx_gmxx_tx_lfsr_s cn31xx; - struct cvmx_gmxx_tx_lfsr_s cn38xx; - struct cvmx_gmxx_tx_lfsr_s cn38xxp2; - struct cvmx_gmxx_tx_lfsr_s cn50xx; - struct cvmx_gmxx_tx_lfsr_s cn52xx; - struct cvmx_gmxx_tx_lfsr_s cn52xxp1; - struct cvmx_gmxx_tx_lfsr_s cn56xx; - struct cvmx_gmxx_tx_lfsr_s cn56xxp1; - struct cvmx_gmxx_tx_lfsr_s cn58xx; - struct cvmx_gmxx_tx_lfsr_s cn58xxp1; - struct cvmx_gmxx_tx_lfsr_s cn61xx; - struct cvmx_gmxx_tx_lfsr_s cn63xx; - struct cvmx_gmxx_tx_lfsr_s cn63xxp1; - struct cvmx_gmxx_tx_lfsr_s cn66xx; - struct cvmx_gmxx_tx_lfsr_s cn68xx; - struct cvmx_gmxx_tx_lfsr_s cn68xxp1; - struct cvmx_gmxx_tx_lfsr_s cnf71xx; -}; - union cvmx_gmxx_tx_ovr_bp { uint64_t u64; struct cvmx_gmxx_tx_ovr_bp_s { @@ -6622,7 +2124,6 @@ union cvmx_gmxx_tx_ovr_bp { uint64_t reserved_11_63:53; #endif } cn30xx; - struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx; struct cvmx_gmxx_tx_ovr_bp_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -6636,20 +2137,6 @@ union cvmx_gmxx_tx_ovr_bp { uint64_t reserved_12_63:52; #endif } cn38xx; - struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2; - struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx; - struct cvmx_gmxx_tx_ovr_bp_s cn52xx; - struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1; - struct cvmx_gmxx_tx_ovr_bp_s cn56xx; - struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1; - struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx; - struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1; - struct cvmx_gmxx_tx_ovr_bp_s cn61xx; - struct cvmx_gmxx_tx_ovr_bp_s cn63xx; - struct cvmx_gmxx_tx_ovr_bp_s cn63xxp1; - struct cvmx_gmxx_tx_ovr_bp_s cn66xx; - struct cvmx_gmxx_tx_ovr_bp_s cn68xx; - struct cvmx_gmxx_tx_ovr_bp_s cn68xxp1; struct cvmx_gmxx_tx_ovr_bp_cnf71xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; @@ -6673,68 +2160,6 @@ union cvmx_gmxx_tx_ovr_bp { } cnf71xx; }; -union cvmx_gmxx_tx_pause_pkt_dmac { - uint64_t u64; - struct cvmx_gmxx_tx_pause_pkt_dmac_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_48_63:16; - uint64_t dmac:48; -#else - uint64_t dmac:48; - uint64_t reserved_48_63:16; -#endif - } s; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn61xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xx; - struct 
cvmx_gmxx_tx_pause_pkt_dmac_s cn63xxp1; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn66xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xx; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xxp1; - struct cvmx_gmxx_tx_pause_pkt_dmac_s cnf71xx; -}; - -union cvmx_gmxx_tx_pause_pkt_type { - uint64_t u64; - struct cvmx_gmxx_tx_pause_pkt_type_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t type:16; -#else - uint64_t type:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2; - struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1; - struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1; - struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1; - struct cvmx_gmxx_tx_pause_pkt_type_s cn61xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn63xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn63xxp1; - struct cvmx_gmxx_tx_pause_pkt_type_s cn66xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn68xx; - struct cvmx_gmxx_tx_pause_pkt_type_s cn68xxp1; - struct cvmx_gmxx_tx_pause_pkt_type_s cnf71xx; -}; - union cvmx_gmxx_tx_prts { uint64_t u64; struct cvmx_gmxx_tx_prts_s { @@ -6746,24 +2171,6 @@ union cvmx_gmxx_tx_prts { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_gmxx_tx_prts_s cn30xx; - struct cvmx_gmxx_tx_prts_s cn31xx; - struct cvmx_gmxx_tx_prts_s cn38xx; - struct cvmx_gmxx_tx_prts_s cn38xxp2; - struct cvmx_gmxx_tx_prts_s cn50xx; - struct cvmx_gmxx_tx_prts_s cn52xx; - struct cvmx_gmxx_tx_prts_s cn52xxp1; - struct cvmx_gmxx_tx_prts_s cn56xx; - struct cvmx_gmxx_tx_prts_s cn56xxp1; - struct cvmx_gmxx_tx_prts_s cn58xx; - struct cvmx_gmxx_tx_prts_s cn58xxp1; - struct cvmx_gmxx_tx_prts_s cn61xx; - struct cvmx_gmxx_tx_prts_s cn63xx; - struct cvmx_gmxx_tx_prts_s cn63xxp1; - struct cvmx_gmxx_tx_prts_s cn66xx; - struct cvmx_gmxx_tx_prts_s cn68xx; - struct cvmx_gmxx_tx_prts_s cn68xxp1; - struct cvmx_gmxx_tx_prts_s cnf71xx; }; union cvmx_gmxx_tx_spi_ctl { @@ -6779,26 +2186,6 @@ union cvmx_gmxx_tx_spi_ctl { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_gmxx_tx_spi_ctl_s cn38xx; - struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2; - struct cvmx_gmxx_tx_spi_ctl_s cn58xx; - struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1; -}; - -union cvmx_gmxx_tx_spi_drain { - uint64_t u64; - struct cvmx_gmxx_tx_spi_drain_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t drain:16; -#else - uint64_t drain:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_spi_drain_s cn38xx; - struct cvmx_gmxx_tx_spi_drain_s cn58xx; - struct cvmx_gmxx_tx_spi_drain_s cn58xxp1; }; union cvmx_gmxx_tx_spi_max { @@ -6827,24 +2214,6 @@ union cvmx_gmxx_tx_spi_max { uint64_t reserved_16_63:48; #endif } cn38xx; - struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2; - struct cvmx_gmxx_tx_spi_max_s cn58xx; - struct cvmx_gmxx_tx_spi_max_s cn58xxp1; -}; - -union cvmx_gmxx_tx_spi_roundx { - uint64_t u64; - struct cvmx_gmxx_tx_spi_roundx_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_16_63:48; - uint64_t round:16; -#else - uint64_t round:16; - uint64_t reserved_16_63:48; -#endif - } s; - struct cvmx_gmxx_tx_spi_roundx_s cn58xx; - struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1; }; union cvmx_gmxx_tx_spi_thresh { @@ -6858,10 +2227,6 @@ union cvmx_gmxx_tx_spi_thresh { uint64_t reserved_6_63:58; 
#endif } s; - struct cvmx_gmxx_tx_spi_thresh_s cn38xx; - struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2; - struct cvmx_gmxx_tx_spi_thresh_s cn58xx; - struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1; }; union cvmx_gmxx_tx_xaui_ctl { @@ -6889,43 +2254,6 @@ union cvmx_gmxx_tx_xaui_ctl { uint64_t reserved_11_63:53; #endif } s; - struct cvmx_gmxx_tx_xaui_ctl_s cn52xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1; - struct cvmx_gmxx_tx_xaui_ctl_s cn56xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1; - struct cvmx_gmxx_tx_xaui_ctl_s cn61xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn63xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn63xxp1; - struct cvmx_gmxx_tx_xaui_ctl_s cn66xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn68xx; - struct cvmx_gmxx_tx_xaui_ctl_s cn68xxp1; - struct cvmx_gmxx_tx_xaui_ctl_s cnf71xx; -}; - -union cvmx_gmxx_xaui_ext_loopback { - uint64_t u64; - struct cvmx_gmxx_xaui_ext_loopback_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_5_63:59; - uint64_t en:1; - uint64_t thresh:4; -#else - uint64_t thresh:4; - uint64_t en:1; - uint64_t reserved_5_63:59; -#endif - } s; - struct cvmx_gmxx_xaui_ext_loopback_s cn52xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1; - struct cvmx_gmxx_xaui_ext_loopback_s cn56xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1; - struct cvmx_gmxx_xaui_ext_loopback_s cn61xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn63xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn63xxp1; - struct cvmx_gmxx_xaui_ext_loopback_s cn66xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn68xx; - struct cvmx_gmxx_xaui_ext_loopback_s cn68xxp1; - struct cvmx_gmxx_xaui_ext_loopback_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h index 8123b8209369..5420fa667a9c 100644 --- a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h @@ -90,10 +90,6 @@ union cvmx_gpio_bit_cfgx { uint64_t reserved_12_63:52; #endif } cn30xx; - struct cvmx_gpio_bit_cfgx_cn30xx cn31xx; - struct cvmx_gpio_bit_cfgx_cn30xx cn38xx; - struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2; - struct cvmx_gpio_bit_cfgx_cn30xx cn50xx; struct cvmx_gpio_bit_cfgx_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_15_63:49; @@ -117,20 +113,6 @@ union cvmx_gpio_bit_cfgx { uint64_t reserved_15_63:49; #endif } cn52xx; - struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1; - struct cvmx_gpio_bit_cfgx_cn52xx cn56xx; - struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1; - struct cvmx_gpio_bit_cfgx_cn30xx cn58xx; - struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1; - struct cvmx_gpio_bit_cfgx_s cn61xx; - struct cvmx_gpio_bit_cfgx_s cn63xx; - struct cvmx_gpio_bit_cfgx_s cn63xxp1; - struct cvmx_gpio_bit_cfgx_s cn66xx; - struct cvmx_gpio_bit_cfgx_s cn68xx; - struct cvmx_gpio_bit_cfgx_s cn68xxp1; - struct cvmx_gpio_bit_cfgx_s cn70xx; - struct cvmx_gpio_bit_cfgx_s cn73xx; - struct cvmx_gpio_bit_cfgx_s cnf71xx; }; union cvmx_gpio_boot_ena { @@ -146,9 +128,6 @@ union cvmx_gpio_boot_ena { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_gpio_boot_ena_s cn30xx; - struct cvmx_gpio_boot_ena_s cn31xx; - struct cvmx_gpio_boot_ena_s cn50xx; }; union cvmx_gpio_clk_genx { @@ -162,17 +141,6 @@ union cvmx_gpio_clk_genx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_gpio_clk_genx_s cn52xx; - struct cvmx_gpio_clk_genx_s cn52xxp1; - struct cvmx_gpio_clk_genx_s cn56xx; - struct cvmx_gpio_clk_genx_s cn56xxp1; - struct cvmx_gpio_clk_genx_s cn61xx; - struct cvmx_gpio_clk_genx_s cn63xx; - struct cvmx_gpio_clk_genx_s cn63xxp1; - struct cvmx_gpio_clk_genx_s cn66xx; - struct cvmx_gpio_clk_genx_s 
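cn68xx; - struct cvmx_gpio_clk_genx_s cn68xxp1; - struct cvmx_gpio_clk_genx_s cnf71xx; };

The GPIO block keeps the same convention: cvmx_gpio_rx_dat below snapshots the input pins, while cvmx_gpio_tx_set and cvmx_gpio_tx_clr are write-one-to-set and write-one-to-clear views of the output latch, so two writers can flip different pins without a read-modify-write race. A sketch of how a driver typically drives them; the dat/set/clr field names and the CVMX_GPIO_* address macros are assumed from the full header, of which these hunks are only an excerpt:

static int octeon_gpio_get(unsigned int pin)
{
	union cvmx_gpio_rx_dat rx;

	/* Snapshot all input pins and test the one we care about. */
	rx.u64 = cvmx_read_csr(CVMX_GPIO_RX_DAT);
	return (rx.s.dat >> pin) & 1;
}

static void octeon_gpio_set_value(unsigned int pin, int value)
{
	/*
	 * Separate set/clear registers: write a 1 bit to the register for
	 * the transition you want. Assumes pin is within the field width,
	 * which varies by chip model as the variants above show.
	 */
	if (value) {
		union cvmx_gpio_tx_set tx;

		tx.u64 = 0;
		tx.s.set = 1ull << pin;
		cvmx_write_csr(CVMX_GPIO_TX_SET, tx.u64);
	} else {
		union cvmx_gpio_tx_clr tx;

		tx.u64 = 0;
		tx.s.clr = 1ull << pin;
		cvmx_write_csr(CVMX_GPIO_TX_CLR, tx.u64);
	}
}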
union cvmx_gpio_clk_qlmx { @@ -218,11 +186,6 @@ union cvmx_gpio_clk_qlmx { uint64_t reserved_3_63:61; #endif } cn63xx; - struct cvmx_gpio_clk_qlmx_cn63xx cn63xxp1; - struct cvmx_gpio_clk_qlmx_cn61xx cn66xx; - struct cvmx_gpio_clk_qlmx_s cn68xx; - struct cvmx_gpio_clk_qlmx_s cn68xxp1; - struct cvmx_gpio_clk_qlmx_cn61xx cnf71xx; }; union cvmx_gpio_dbg_ena { @@ -236,9 +199,6 @@ union cvmx_gpio_dbg_ena { uint64_t reserved_21_63:43; #endif } s; - struct cvmx_gpio_dbg_ena_s cn30xx; - struct cvmx_gpio_dbg_ena_s cn31xx; - struct cvmx_gpio_dbg_ena_s cn50xx; }; union cvmx_gpio_int_clr { @@ -252,24 +212,6 @@ union cvmx_gpio_int_clr { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_gpio_int_clr_s cn30xx; - struct cvmx_gpio_int_clr_s cn31xx; - struct cvmx_gpio_int_clr_s cn38xx; - struct cvmx_gpio_int_clr_s cn38xxp2; - struct cvmx_gpio_int_clr_s cn50xx; - struct cvmx_gpio_int_clr_s cn52xx; - struct cvmx_gpio_int_clr_s cn52xxp1; - struct cvmx_gpio_int_clr_s cn56xx; - struct cvmx_gpio_int_clr_s cn56xxp1; - struct cvmx_gpio_int_clr_s cn58xx; - struct cvmx_gpio_int_clr_s cn58xxp1; - struct cvmx_gpio_int_clr_s cn61xx; - struct cvmx_gpio_int_clr_s cn63xx; - struct cvmx_gpio_int_clr_s cn63xxp1; - struct cvmx_gpio_int_clr_s cn66xx; - struct cvmx_gpio_int_clr_s cn68xx; - struct cvmx_gpio_int_clr_s cn68xxp1; - struct cvmx_gpio_int_clr_s cnf71xx; }; union cvmx_gpio_multi_cast { @@ -283,8 +225,6 @@ union cvmx_gpio_multi_cast { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_gpio_multi_cast_s cn61xx; - struct cvmx_gpio_multi_cast_s cnf71xx; }; union cvmx_gpio_pin_ena { @@ -302,7 +242,6 @@ union cvmx_gpio_pin_ena { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_gpio_pin_ena_s cn66xx; }; union cvmx_gpio_rx_dat { @@ -316,8 +255,6 @@ union cvmx_gpio_rx_dat { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_gpio_rx_dat_s cn30xx; - struct cvmx_gpio_rx_dat_s cn31xx; struct cvmx_gpio_rx_dat_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; @@ -327,14 +264,6 @@ union cvmx_gpio_rx_dat { uint64_t reserved_16_63:48; #endif } cn38xx; - struct cvmx_gpio_rx_dat_cn38xx cn38xxp2; - struct cvmx_gpio_rx_dat_s cn50xx; - struct cvmx_gpio_rx_dat_cn38xx cn52xx; - struct cvmx_gpio_rx_dat_cn38xx cn52xxp1; - struct cvmx_gpio_rx_dat_cn38xx cn56xx; - struct cvmx_gpio_rx_dat_cn38xx cn56xxp1; - struct cvmx_gpio_rx_dat_cn38xx cn58xx; - struct cvmx_gpio_rx_dat_cn38xx cn58xxp1; struct cvmx_gpio_rx_dat_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -344,12 +273,6 @@ union cvmx_gpio_rx_dat { uint64_t reserved_20_63:44; #endif } cn61xx; - struct cvmx_gpio_rx_dat_cn38xx cn63xx; - struct cvmx_gpio_rx_dat_cn38xx cn63xxp1; - struct cvmx_gpio_rx_dat_cn61xx cn66xx; - struct cvmx_gpio_rx_dat_cn38xx cn68xx; - struct cvmx_gpio_rx_dat_cn38xx cn68xxp1; - struct cvmx_gpio_rx_dat_cn61xx cnf71xx; }; union cvmx_gpio_tim_ctl { @@ -363,8 +286,6 @@ union cvmx_gpio_tim_ctl { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_gpio_tim_ctl_s cn68xx; - struct cvmx_gpio_tim_ctl_s cn68xxp1; }; union cvmx_gpio_tx_clr { @@ -378,8 +299,6 @@ union cvmx_gpio_tx_clr { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_gpio_tx_clr_s cn30xx; - struct cvmx_gpio_tx_clr_s cn31xx; struct cvmx_gpio_tx_clr_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; @@ -389,14 +308,6 @@ union cvmx_gpio_tx_clr { uint64_t reserved_16_63:48; #endif } cn38xx; - struct cvmx_gpio_tx_clr_cn38xx cn38xxp2; - struct cvmx_gpio_tx_clr_s
cn50xx; - struct cvmx_gpio_tx_clr_cn38xx cn52xx; - struct cvmx_gpio_tx_clr_cn38xx cn52xxp1; - struct cvmx_gpio_tx_clr_cn38xx cn56xx; - struct cvmx_gpio_tx_clr_cn38xx cn56xxp1; - struct cvmx_gpio_tx_clr_cn38xx cn58xx; - struct cvmx_gpio_tx_clr_cn38xx cn58xxp1; struct cvmx_gpio_tx_clr_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -406,12 +317,6 @@ union cvmx_gpio_tx_clr { uint64_t reserved_20_63:44; #endif } cn61xx; - struct cvmx_gpio_tx_clr_cn38xx cn63xx; - struct cvmx_gpio_tx_clr_cn38xx cn63xxp1; - struct cvmx_gpio_tx_clr_cn61xx cn66xx; - struct cvmx_gpio_tx_clr_cn38xx cn68xx; - struct cvmx_gpio_tx_clr_cn38xx cn68xxp1; - struct cvmx_gpio_tx_clr_cn61xx cnf71xx; }; union cvmx_gpio_tx_set { @@ -425,8 +330,6 @@ union cvmx_gpio_tx_set { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_gpio_tx_set_s cn30xx; - struct cvmx_gpio_tx_set_s cn31xx; struct cvmx_gpio_tx_set_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; @@ -436,14 +339,6 @@ union cvmx_gpio_tx_set { uint64_t reserved_16_63:48; #endif } cn38xx; - struct cvmx_gpio_tx_set_cn38xx cn38xxp2; - struct cvmx_gpio_tx_set_s cn50xx; - struct cvmx_gpio_tx_set_cn38xx cn52xx; - struct cvmx_gpio_tx_set_cn38xx cn52xxp1; - struct cvmx_gpio_tx_set_cn38xx cn56xx; - struct cvmx_gpio_tx_set_cn38xx cn56xxp1; - struct cvmx_gpio_tx_set_cn38xx cn58xx; - struct cvmx_gpio_tx_set_cn38xx cn58xxp1; struct cvmx_gpio_tx_set_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -453,12 +348,6 @@ union cvmx_gpio_tx_set { uint64_t reserved_20_63:44; #endif } cn61xx; - struct cvmx_gpio_tx_set_cn38xx cn63xx; - struct cvmx_gpio_tx_set_cn38xx cn63xxp1; - struct cvmx_gpio_tx_set_cn61xx cn66xx; - struct cvmx_gpio_tx_set_cn38xx cn68xx; - struct cvmx_gpio_tx_set_cn38xx cn68xxp1; - struct cvmx_gpio_tx_set_cn61xx cnf71xx; }; union cvmx_gpio_xbit_cfgx { @@ -505,11 +394,6 @@ union cvmx_gpio_xbit_cfgx { uint64_t reserved_12_63:52; #endif } cn30xx; - struct cvmx_gpio_xbit_cfgx_cn30xx cn31xx; - struct cvmx_gpio_xbit_cfgx_cn30xx cn50xx; - struct cvmx_gpio_xbit_cfgx_s cn61xx; - struct cvmx_gpio_xbit_cfgx_s cn66xx; - struct cvmx_gpio_xbit_cfgx_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h index f7a95d7de140..ac42b5066bd9 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h @@ -90,21 +90,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port); extern int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info); -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. 
- */ -extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port, - int enable_internal, - int enable_external); - #endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h index 63fd21335e4b..3a54dea58c0a 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h @@ -84,21 +84,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port); extern int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info); -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. - */ -extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port, - int enable_internal, - int enable_external); - #endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h index f446f212bbd4..e9a97e7ee604 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-util.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h @@ -45,29 +45,6 @@ extern const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode); /** - * Debug routine to dump the packet structure to the console - * - * @work: Work queue entry containing the packet to dump - * Returns - */ -extern int cvmx_helper_dump_packet(cvmx_wqe_t *work); - -/** - * Setup Random Early Drop on a specific input queue - * - * @queue: Input queue to setup RED on (0-7) - * @pass_thresh: - * Packets will begin slowly dropping when there are less than - * this many packet buffers free in FPA 0. - * @drop_thresh: - * All incoming packets will be dropped when there are less - * than this many free packet buffers in FPA 0. - * Returns Zero on success. Negative on failure - */ -extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh, - int drop_thresh); - -/** * Setup Random Early Drop to automatically begin dropping packets. * * @pass_thresh: diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h index f8ce53f6f28f..51f45b495680 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h @@ -84,20 +84,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port); extern int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info); -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. 
- */ -extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, - int enable_internal, - int enable_external); #endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h index 0ed87cb67e7f..ba0e76f578e0 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper.h +++ b/arch/mips/include/asm/octeon/cvmx-helper.h @@ -71,26 +71,6 @@ typedef union { #include <asm/octeon/cvmx-helper-xaui.h> /** - * cvmx_override_pko_queue_priority(int ipd_port, uint64_t - * priorities[16]) is a function pointer. It is meant to allow - * customization of the PKO queue priorities based on the port - * number. Users should set this pointer to a function before - * calling any cvmx-helper operations. - */ -extern void (*cvmx_override_pko_queue_priority) (int pko_port, - uint64_t priorities[16]); - -/** - * cvmx_override_ipd_port_setup(int ipd_port) is a function - * pointer. It is meant to allow customization of the IPD port - * setup before packet input/output comes online. It is called - * after cvmx-helper does the default IPD configuration, but - * before IPD is enabled. Users should set this pointer to a - * function before calling any cvmx-helper operations. - */ -extern void (*cvmx_override_ipd_port_setup) (int ipd_port); - -/** * This function enables the IPD and also enables the packet interfaces. * The packet interfaces (RGMII and SPI) must be enabled after the * IPD. This should be called by the user program after any additional @@ -195,20 +175,4 @@ extern int cvmx_helper_link_set(int ipd_port, extern int cvmx_helper_interface_probe(int interface); extern int cvmx_helper_interface_enumerate(int interface); -/** - * Configure a port for internal and/or external loopback. Internal loopback - * causes packets sent by the port to be received by Octeon. External loopback - * causes packets received from the wire to sent out again. - * - * @ipd_port: IPD/PKO port to loopback. - * @enable_internal: - * Non zero if you want internal loopback - * @enable_external: - * Non zero if you want external loopback - * - * Returns Zero on success, negative on failure. 
- */ -extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, - int enable_external); - #endif /* __CVMX_HELPER_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h index 7936f816e93e..989b67bbac5b 100644 --- a/arch/mips/include/asm/octeon/cvmx-iob-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h @@ -119,16 +119,6 @@ union cvmx_iob_bist_status { uint64_t reserved_18_63:46; #endif } cn30xx; - struct cvmx_iob_bist_status_cn30xx cn31xx; - struct cvmx_iob_bist_status_cn30xx cn38xx; - struct cvmx_iob_bist_status_cn30xx cn38xxp2; - struct cvmx_iob_bist_status_cn30xx cn50xx; - struct cvmx_iob_bist_status_cn30xx cn52xx; - struct cvmx_iob_bist_status_cn30xx cn52xxp1; - struct cvmx_iob_bist_status_cn30xx cn56xx; - struct cvmx_iob_bist_status_cn30xx cn56xxp1; - struct cvmx_iob_bist_status_cn30xx cn58xx; - struct cvmx_iob_bist_status_cn30xx cn58xxp1; struct cvmx_iob_bist_status_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_23_63:41; @@ -182,9 +172,6 @@ union cvmx_iob_bist_status { uint64_t reserved_23_63:41; #endif } cn61xx; - struct cvmx_iob_bist_status_cn61xx cn63xx; - struct cvmx_iob_bist_status_cn61xx cn63xxp1; - struct cvmx_iob_bist_status_cn61xx cn66xx; struct cvmx_iob_bist_status_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_18_63:46; @@ -228,8 +215,6 @@ union cvmx_iob_bist_status { uint64_t reserved_18_63:46; #endif } cn68xx; - struct cvmx_iob_bist_status_cn68xx cn68xxp1; - struct cvmx_iob_bist_status_cn61xx cnf71xx; }; union cvmx_iob_ctl_status { @@ -274,10 +259,6 @@ union cvmx_iob_ctl_status { uint64_t reserved_5_63:59; #endif } cn30xx; - struct cvmx_iob_ctl_status_cn30xx cn31xx; - struct cvmx_iob_ctl_status_cn30xx cn38xx; - struct cvmx_iob_ctl_status_cn30xx cn38xxp2; - struct cvmx_iob_ctl_status_cn30xx cn50xx; struct cvmx_iob_ctl_status_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; @@ -297,11 +278,6 @@ union cvmx_iob_ctl_status { uint64_t reserved_6_63:58; #endif } cn52xx; - struct cvmx_iob_ctl_status_cn30xx cn52xxp1; - struct cvmx_iob_ctl_status_cn30xx cn56xx; - struct cvmx_iob_ctl_status_cn30xx cn56xxp1; - struct cvmx_iob_ctl_status_cn30xx cn58xx; - struct cvmx_iob_ctl_status_cn30xx cn58xxp1; struct cvmx_iob_ctl_status_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -346,8 +322,6 @@ union cvmx_iob_ctl_status { uint64_t reserved_10_63:54; #endif } cn63xx; - struct cvmx_iob_ctl_status_cn63xx cn63xxp1; - struct cvmx_iob_ctl_status_cn61xx cn66xx; struct cvmx_iob_ctl_status_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -371,8 +345,6 @@ union cvmx_iob_ctl_status { uint64_t reserved_11_63:53; #endif } cn68xx; - struct cvmx_iob_ctl_status_cn68xx cn68xxp1; - struct cvmx_iob_ctl_status_cn61xx cnf71xx; }; union cvmx_iob_dwb_pri_cnt { @@ -388,19 +360,6 @@ union cvmx_iob_dwb_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_dwb_pri_cnt_s cn38xx; - struct cvmx_iob_dwb_pri_cnt_s cn38xxp2; - struct cvmx_iob_dwb_pri_cnt_s cn52xx; - struct cvmx_iob_dwb_pri_cnt_s cn52xxp1; - struct cvmx_iob_dwb_pri_cnt_s cn56xx; - struct cvmx_iob_dwb_pri_cnt_s cn56xxp1; - struct cvmx_iob_dwb_pri_cnt_s cn58xx; - struct cvmx_iob_dwb_pri_cnt_s cn58xxp1; - struct cvmx_iob_dwb_pri_cnt_s cn61xx; - struct cvmx_iob_dwb_pri_cnt_s cn63xx; - struct cvmx_iob_dwb_pri_cnt_s cn63xxp1; - struct cvmx_iob_dwb_pri_cnt_s cn66xx; - struct cvmx_iob_dwb_pri_cnt_s cnf71xx; }; union cvmx_iob_fau_timeout { @@ -416,24 +375,6 @@ union cvmx_iob_fau_timeout { 
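The cvmx-helper hunks above delete the per-interface __cvmx_helper_*_configure_loopback prototypes, the public cvmx_helper_configure_loopback wrapper, and the cvmx_override_pko_queue_priority / cvmx_override_ipd_port_setup hook pointers, presumably because nothing in-tree still installs or calls them. For context, a minimal sketch of how the deleted PKO-priority hook was meant to be used; my_board_pko_priorities and its policy are hypothetical, only the hook's signature comes from the deleted declaration:

	#include <stdint.h>

	/* Hypothetical board callback matching the deleted hook:
	 *	void (*cvmx_override_pko_queue_priority)(int pko_port,
	 *						 uint64_t priorities[16]);
	 */
	static void my_board_pko_priorities(int pko_port, uint64_t priorities[16])
	{
		int i;

		/* Example policy only: equal weight for all 16 queues. */
		for (i = 0; i < 16; i++)
			priorities[i] = 1;
	}

	/* Board code would have installed the hook before any cvmx-helper
	 * call, e.g.:
	 *	cvmx_override_pko_queue_priority = my_board_pko_priorities;
	 */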
uint64_t reserved_13_63:51; #endif } s; - struct cvmx_iob_fau_timeout_s cn30xx; - struct cvmx_iob_fau_timeout_s cn31xx; - struct cvmx_iob_fau_timeout_s cn38xx; - struct cvmx_iob_fau_timeout_s cn38xxp2; - struct cvmx_iob_fau_timeout_s cn50xx; - struct cvmx_iob_fau_timeout_s cn52xx; - struct cvmx_iob_fau_timeout_s cn52xxp1; - struct cvmx_iob_fau_timeout_s cn56xx; - struct cvmx_iob_fau_timeout_s cn56xxp1; - struct cvmx_iob_fau_timeout_s cn58xx; - struct cvmx_iob_fau_timeout_s cn58xxp1; - struct cvmx_iob_fau_timeout_s cn61xx; - struct cvmx_iob_fau_timeout_s cn63xx; - struct cvmx_iob_fau_timeout_s cn63xxp1; - struct cvmx_iob_fau_timeout_s cn66xx; - struct cvmx_iob_fau_timeout_s cn68xx; - struct cvmx_iob_fau_timeout_s cn68xxp1; - struct cvmx_iob_fau_timeout_s cnf71xx; }; union cvmx_iob_i2c_pri_cnt { @@ -449,19 +390,6 @@ union cvmx_iob_i2c_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_i2c_pri_cnt_s cn38xx; - struct cvmx_iob_i2c_pri_cnt_s cn38xxp2; - struct cvmx_iob_i2c_pri_cnt_s cn52xx; - struct cvmx_iob_i2c_pri_cnt_s cn52xxp1; - struct cvmx_iob_i2c_pri_cnt_s cn56xx; - struct cvmx_iob_i2c_pri_cnt_s cn56xxp1; - struct cvmx_iob_i2c_pri_cnt_s cn58xx; - struct cvmx_iob_i2c_pri_cnt_s cn58xxp1; - struct cvmx_iob_i2c_pri_cnt_s cn61xx; - struct cvmx_iob_i2c_pri_cnt_s cn63xx; - struct cvmx_iob_i2c_pri_cnt_s cn63xxp1; - struct cvmx_iob_i2c_pri_cnt_s cn66xx; - struct cvmx_iob_i2c_pri_cnt_s cnf71xx; }; union cvmx_iob_inb_control_match { @@ -481,24 +409,6 @@ union cvmx_iob_inb_control_match { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_iob_inb_control_match_s cn30xx; - struct cvmx_iob_inb_control_match_s cn31xx; - struct cvmx_iob_inb_control_match_s cn38xx; - struct cvmx_iob_inb_control_match_s cn38xxp2; - struct cvmx_iob_inb_control_match_s cn50xx; - struct cvmx_iob_inb_control_match_s cn52xx; - struct cvmx_iob_inb_control_match_s cn52xxp1; - struct cvmx_iob_inb_control_match_s cn56xx; - struct cvmx_iob_inb_control_match_s cn56xxp1; - struct cvmx_iob_inb_control_match_s cn58xx; - struct cvmx_iob_inb_control_match_s cn58xxp1; - struct cvmx_iob_inb_control_match_s cn61xx; - struct cvmx_iob_inb_control_match_s cn63xx; - struct cvmx_iob_inb_control_match_s cn63xxp1; - struct cvmx_iob_inb_control_match_s cn66xx; - struct cvmx_iob_inb_control_match_s cn68xx; - struct cvmx_iob_inb_control_match_s cn68xxp1; - struct cvmx_iob_inb_control_match_s cnf71xx; }; union cvmx_iob_inb_control_match_enb { @@ -518,24 +428,6 @@ union cvmx_iob_inb_control_match_enb { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_iob_inb_control_match_enb_s cn30xx; - struct cvmx_iob_inb_control_match_enb_s cn31xx; - struct cvmx_iob_inb_control_match_enb_s cn38xx; - struct cvmx_iob_inb_control_match_enb_s cn38xxp2; - struct cvmx_iob_inb_control_match_enb_s cn50xx; - struct cvmx_iob_inb_control_match_enb_s cn52xx; - struct cvmx_iob_inb_control_match_enb_s cn52xxp1; - struct cvmx_iob_inb_control_match_enb_s cn56xx; - struct cvmx_iob_inb_control_match_enb_s cn56xxp1; - struct cvmx_iob_inb_control_match_enb_s cn58xx; - struct cvmx_iob_inb_control_match_enb_s cn58xxp1; - struct cvmx_iob_inb_control_match_enb_s cn61xx; - struct cvmx_iob_inb_control_match_enb_s cn63xx; - struct cvmx_iob_inb_control_match_enb_s cn63xxp1; - struct cvmx_iob_inb_control_match_enb_s cn66xx; - struct cvmx_iob_inb_control_match_enb_s cn68xx; - struct cvmx_iob_inb_control_match_enb_s cn68xxp1; - struct cvmx_iob_inb_control_match_enb_s cnf71xx; }; union cvmx_iob_inb_data_match { @@ -547,24 +439,6 @@ union cvmx_iob_inb_data_match { uint64_t 
data:64; #endif } s; - struct cvmx_iob_inb_data_match_s cn30xx; - struct cvmx_iob_inb_data_match_s cn31xx; - struct cvmx_iob_inb_data_match_s cn38xx; - struct cvmx_iob_inb_data_match_s cn38xxp2; - struct cvmx_iob_inb_data_match_s cn50xx; - struct cvmx_iob_inb_data_match_s cn52xx; - struct cvmx_iob_inb_data_match_s cn52xxp1; - struct cvmx_iob_inb_data_match_s cn56xx; - struct cvmx_iob_inb_data_match_s cn56xxp1; - struct cvmx_iob_inb_data_match_s cn58xx; - struct cvmx_iob_inb_data_match_s cn58xxp1; - struct cvmx_iob_inb_data_match_s cn61xx; - struct cvmx_iob_inb_data_match_s cn63xx; - struct cvmx_iob_inb_data_match_s cn63xxp1; - struct cvmx_iob_inb_data_match_s cn66xx; - struct cvmx_iob_inb_data_match_s cn68xx; - struct cvmx_iob_inb_data_match_s cn68xxp1; - struct cvmx_iob_inb_data_match_s cnf71xx; }; union cvmx_iob_inb_data_match_enb { @@ -576,24 +450,6 @@ union cvmx_iob_inb_data_match_enb { uint64_t data:64; #endif } s; - struct cvmx_iob_inb_data_match_enb_s cn30xx; - struct cvmx_iob_inb_data_match_enb_s cn31xx; - struct cvmx_iob_inb_data_match_enb_s cn38xx; - struct cvmx_iob_inb_data_match_enb_s cn38xxp2; - struct cvmx_iob_inb_data_match_enb_s cn50xx; - struct cvmx_iob_inb_data_match_enb_s cn52xx; - struct cvmx_iob_inb_data_match_enb_s cn52xxp1; - struct cvmx_iob_inb_data_match_enb_s cn56xx; - struct cvmx_iob_inb_data_match_enb_s cn56xxp1; - struct cvmx_iob_inb_data_match_enb_s cn58xx; - struct cvmx_iob_inb_data_match_enb_s cn58xxp1; - struct cvmx_iob_inb_data_match_enb_s cn61xx; - struct cvmx_iob_inb_data_match_enb_s cn63xx; - struct cvmx_iob_inb_data_match_enb_s cn63xxp1; - struct cvmx_iob_inb_data_match_enb_s cn66xx; - struct cvmx_iob_inb_data_match_enb_s cn68xx; - struct cvmx_iob_inb_data_match_enb_s cn68xxp1; - struct cvmx_iob_inb_data_match_enb_s cnf71xx; }; union cvmx_iob_int_enb { @@ -632,20 +488,6 @@ union cvmx_iob_int_enb { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_iob_int_enb_cn30xx cn31xx; - struct cvmx_iob_int_enb_cn30xx cn38xx; - struct cvmx_iob_int_enb_cn30xx cn38xxp2; - struct cvmx_iob_int_enb_s cn50xx; - struct cvmx_iob_int_enb_s cn52xx; - struct cvmx_iob_int_enb_s cn52xxp1; - struct cvmx_iob_int_enb_s cn56xx; - struct cvmx_iob_int_enb_s cn56xxp1; - struct cvmx_iob_int_enb_s cn58xx; - struct cvmx_iob_int_enb_s cn58xxp1; - struct cvmx_iob_int_enb_s cn61xx; - struct cvmx_iob_int_enb_s cn63xx; - struct cvmx_iob_int_enb_s cn63xxp1; - struct cvmx_iob_int_enb_s cn66xx; struct cvmx_iob_int_enb_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_0_63:64; @@ -653,8 +495,6 @@ union cvmx_iob_int_enb { uint64_t reserved_0_63:64; #endif } cn68xx; - struct cvmx_iob_int_enb_cn68xx cn68xxp1; - struct cvmx_iob_int_enb_s cnf71xx; }; union cvmx_iob_int_sum { @@ -693,20 +533,6 @@ union cvmx_iob_int_sum { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_iob_int_sum_cn30xx cn31xx; - struct cvmx_iob_int_sum_cn30xx cn38xx; - struct cvmx_iob_int_sum_cn30xx cn38xxp2; - struct cvmx_iob_int_sum_s cn50xx; - struct cvmx_iob_int_sum_s cn52xx; - struct cvmx_iob_int_sum_s cn52xxp1; - struct cvmx_iob_int_sum_s cn56xx; - struct cvmx_iob_int_sum_s cn56xxp1; - struct cvmx_iob_int_sum_s cn58xx; - struct cvmx_iob_int_sum_s cn58xxp1; - struct cvmx_iob_int_sum_s cn61xx; - struct cvmx_iob_int_sum_s cn63xx; - struct cvmx_iob_int_sum_s cn63xxp1; - struct cvmx_iob_int_sum_s cn66xx; struct cvmx_iob_int_sum_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_0_63:64; @@ -714,8 +540,6 @@ union cvmx_iob_int_sum { uint64_t reserved_0_63:64; #endif } cn68xx; - struct 
cvmx_iob_int_sum_cn68xx cn68xxp1; - struct cvmx_iob_int_sum_s cnf71xx; }; union cvmx_iob_n2c_l2c_pri_cnt { @@ -731,19 +555,6 @@ union cvmx_iob_n2c_l2c_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn61xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1; - struct cvmx_iob_n2c_l2c_pri_cnt_s cn66xx; - struct cvmx_iob_n2c_l2c_pri_cnt_s cnf71xx; }; union cvmx_iob_n2c_rsp_pri_cnt { @@ -759,19 +570,6 @@ union cvmx_iob_n2c_rsp_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn61xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1; - struct cvmx_iob_n2c_rsp_pri_cnt_s cn66xx; - struct cvmx_iob_n2c_rsp_pri_cnt_s cnf71xx; }; union cvmx_iob_outb_com_pri_cnt { @@ -787,21 +585,6 @@ union cvmx_iob_outb_com_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_outb_com_pri_cnt_s cn38xx; - struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2; - struct cvmx_iob_outb_com_pri_cnt_s cn52xx; - struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1; - struct cvmx_iob_outb_com_pri_cnt_s cn56xx; - struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1; - struct cvmx_iob_outb_com_pri_cnt_s cn58xx; - struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1; - struct cvmx_iob_outb_com_pri_cnt_s cn61xx; - struct cvmx_iob_outb_com_pri_cnt_s cn63xx; - struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1; - struct cvmx_iob_outb_com_pri_cnt_s cn66xx; - struct cvmx_iob_outb_com_pri_cnt_s cn68xx; - struct cvmx_iob_outb_com_pri_cnt_s cn68xxp1; - struct cvmx_iob_outb_com_pri_cnt_s cnf71xx; }; union cvmx_iob_outb_control_match { @@ -821,24 +604,6 @@ union cvmx_iob_outb_control_match { uint64_t reserved_26_63:38; #endif } s; - struct cvmx_iob_outb_control_match_s cn30xx; - struct cvmx_iob_outb_control_match_s cn31xx; - struct cvmx_iob_outb_control_match_s cn38xx; - struct cvmx_iob_outb_control_match_s cn38xxp2; - struct cvmx_iob_outb_control_match_s cn50xx; - struct cvmx_iob_outb_control_match_s cn52xx; - struct cvmx_iob_outb_control_match_s cn52xxp1; - struct cvmx_iob_outb_control_match_s cn56xx; - struct cvmx_iob_outb_control_match_s cn56xxp1; - struct cvmx_iob_outb_control_match_s cn58xx; - struct cvmx_iob_outb_control_match_s cn58xxp1; - struct cvmx_iob_outb_control_match_s cn61xx; - struct cvmx_iob_outb_control_match_s cn63xx; - struct cvmx_iob_outb_control_match_s cn63xxp1; - struct cvmx_iob_outb_control_match_s cn66xx; - struct cvmx_iob_outb_control_match_s cn68xx; - struct cvmx_iob_outb_control_match_s cn68xxp1; - struct cvmx_iob_outb_control_match_s cnf71xx; }; union cvmx_iob_outb_control_match_enb { @@ -858,24 +623,6 @@ union cvmx_iob_outb_control_match_enb { uint64_t reserved_26_63:38; #endif } s; - struct cvmx_iob_outb_control_match_enb_s cn30xx; - struct cvmx_iob_outb_control_match_enb_s 
cn31xx; - struct cvmx_iob_outb_control_match_enb_s cn38xx; - struct cvmx_iob_outb_control_match_enb_s cn38xxp2; - struct cvmx_iob_outb_control_match_enb_s cn50xx; - struct cvmx_iob_outb_control_match_enb_s cn52xx; - struct cvmx_iob_outb_control_match_enb_s cn52xxp1; - struct cvmx_iob_outb_control_match_enb_s cn56xx; - struct cvmx_iob_outb_control_match_enb_s cn56xxp1; - struct cvmx_iob_outb_control_match_enb_s cn58xx; - struct cvmx_iob_outb_control_match_enb_s cn58xxp1; - struct cvmx_iob_outb_control_match_enb_s cn61xx; - struct cvmx_iob_outb_control_match_enb_s cn63xx; - struct cvmx_iob_outb_control_match_enb_s cn63xxp1; - struct cvmx_iob_outb_control_match_enb_s cn66xx; - struct cvmx_iob_outb_control_match_enb_s cn68xx; - struct cvmx_iob_outb_control_match_enb_s cn68xxp1; - struct cvmx_iob_outb_control_match_enb_s cnf71xx; }; union cvmx_iob_outb_data_match { @@ -887,24 +634,6 @@ union cvmx_iob_outb_data_match { uint64_t data:64; #endif } s; - struct cvmx_iob_outb_data_match_s cn30xx; - struct cvmx_iob_outb_data_match_s cn31xx; - struct cvmx_iob_outb_data_match_s cn38xx; - struct cvmx_iob_outb_data_match_s cn38xxp2; - struct cvmx_iob_outb_data_match_s cn50xx; - struct cvmx_iob_outb_data_match_s cn52xx; - struct cvmx_iob_outb_data_match_s cn52xxp1; - struct cvmx_iob_outb_data_match_s cn56xx; - struct cvmx_iob_outb_data_match_s cn56xxp1; - struct cvmx_iob_outb_data_match_s cn58xx; - struct cvmx_iob_outb_data_match_s cn58xxp1; - struct cvmx_iob_outb_data_match_s cn61xx; - struct cvmx_iob_outb_data_match_s cn63xx; - struct cvmx_iob_outb_data_match_s cn63xxp1; - struct cvmx_iob_outb_data_match_s cn66xx; - struct cvmx_iob_outb_data_match_s cn68xx; - struct cvmx_iob_outb_data_match_s cn68xxp1; - struct cvmx_iob_outb_data_match_s cnf71xx; }; union cvmx_iob_outb_data_match_enb { @@ -916,24 +645,6 @@ union cvmx_iob_outb_data_match_enb { uint64_t data:64; #endif } s; - struct cvmx_iob_outb_data_match_enb_s cn30xx; - struct cvmx_iob_outb_data_match_enb_s cn31xx; - struct cvmx_iob_outb_data_match_enb_s cn38xx; - struct cvmx_iob_outb_data_match_enb_s cn38xxp2; - struct cvmx_iob_outb_data_match_enb_s cn50xx; - struct cvmx_iob_outb_data_match_enb_s cn52xx; - struct cvmx_iob_outb_data_match_enb_s cn52xxp1; - struct cvmx_iob_outb_data_match_enb_s cn56xx; - struct cvmx_iob_outb_data_match_enb_s cn56xxp1; - struct cvmx_iob_outb_data_match_enb_s cn58xx; - struct cvmx_iob_outb_data_match_enb_s cn58xxp1; - struct cvmx_iob_outb_data_match_enb_s cn61xx; - struct cvmx_iob_outb_data_match_enb_s cn63xx; - struct cvmx_iob_outb_data_match_enb_s cn63xxp1; - struct cvmx_iob_outb_data_match_enb_s cn66xx; - struct cvmx_iob_outb_data_match_enb_s cn68xx; - struct cvmx_iob_outb_data_match_enb_s cn68xxp1; - struct cvmx_iob_outb_data_match_enb_s cnf71xx; }; union cvmx_iob_outb_fpa_pri_cnt { @@ -949,21 +660,6 @@ union cvmx_iob_outb_fpa_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2; - struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1; - struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1; - struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1; - struct cvmx_iob_outb_fpa_pri_cnt_s cn61xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1; - struct cvmx_iob_outb_fpa_pri_cnt_s cn66xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn68xx; - struct cvmx_iob_outb_fpa_pri_cnt_s cn68xxp1; - struct 
cvmx_iob_outb_fpa_pri_cnt_s cnf71xx; }; union cvmx_iob_outb_req_pri_cnt { @@ -979,21 +675,6 @@ union cvmx_iob_outb_req_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_outb_req_pri_cnt_s cn38xx; - struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2; - struct cvmx_iob_outb_req_pri_cnt_s cn52xx; - struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1; - struct cvmx_iob_outb_req_pri_cnt_s cn56xx; - struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1; - struct cvmx_iob_outb_req_pri_cnt_s cn58xx; - struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1; - struct cvmx_iob_outb_req_pri_cnt_s cn61xx; - struct cvmx_iob_outb_req_pri_cnt_s cn63xx; - struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1; - struct cvmx_iob_outb_req_pri_cnt_s cn66xx; - struct cvmx_iob_outb_req_pri_cnt_s cn68xx; - struct cvmx_iob_outb_req_pri_cnt_s cn68xxp1; - struct cvmx_iob_outb_req_pri_cnt_s cnf71xx; }; union cvmx_iob_p2c_req_pri_cnt { @@ -1009,19 +690,6 @@ union cvmx_iob_p2c_req_pri_cnt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_iob_p2c_req_pri_cnt_s cn38xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2; - struct cvmx_iob_p2c_req_pri_cnt_s cn52xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1; - struct cvmx_iob_p2c_req_pri_cnt_s cn56xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1; - struct cvmx_iob_p2c_req_pri_cnt_s cn58xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1; - struct cvmx_iob_p2c_req_pri_cnt_s cn61xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn63xx; - struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1; - struct cvmx_iob_p2c_req_pri_cnt_s cn66xx; - struct cvmx_iob_p2c_req_pri_cnt_s cnf71xx; }; union cvmx_iob_pkt_err { @@ -1046,21 +714,6 @@ union cvmx_iob_pkt_err { uint64_t reserved_6_63:58; #endif } cn30xx; - struct cvmx_iob_pkt_err_cn30xx cn31xx; - struct cvmx_iob_pkt_err_cn30xx cn38xx; - struct cvmx_iob_pkt_err_cn30xx cn38xxp2; - struct cvmx_iob_pkt_err_cn30xx cn50xx; - struct cvmx_iob_pkt_err_cn30xx cn52xx; - struct cvmx_iob_pkt_err_cn30xx cn52xxp1; - struct cvmx_iob_pkt_err_cn30xx cn56xx; - struct cvmx_iob_pkt_err_cn30xx cn56xxp1; - struct cvmx_iob_pkt_err_cn30xx cn58xx; - struct cvmx_iob_pkt_err_cn30xx cn58xxp1; - struct cvmx_iob_pkt_err_s cn61xx; - struct cvmx_iob_pkt_err_s cn63xx; - struct cvmx_iob_pkt_err_s cn63xxp1; - struct cvmx_iob_pkt_err_s cn66xx; - struct cvmx_iob_pkt_err_s cnf71xx; }; union cvmx_iob_to_cmb_credits { @@ -1089,10 +742,6 @@ union cvmx_iob_to_cmb_credits { uint64_t reserved_9_63:55; #endif } cn52xx; - struct cvmx_iob_to_cmb_credits_cn52xx cn61xx; - struct cvmx_iob_to_cmb_credits_cn52xx cn63xx; - struct cvmx_iob_to_cmb_credits_cn52xx cn63xxp1; - struct cvmx_iob_to_cmb_credits_cn52xx cn66xx; struct cvmx_iob_to_cmb_credits_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -1106,8 +755,6 @@ union cvmx_iob_to_cmb_credits { uint64_t reserved_9_63:55; #endif } cn68xx; - struct cvmx_iob_to_cmb_credits_cn68xx cn68xxp1; - struct cvmx_iob_to_cmb_credits_cn52xx cnf71xx; }; union cvmx_iob_to_ncb_did_00_credits { @@ -1121,8 +768,6 @@ union cvmx_iob_to_ncb_did_00_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_00_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_00_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_111_credits { @@ -1136,8 +781,6 @@ union cvmx_iob_to_ncb_did_111_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_111_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_111_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_223_credits { @@ -1151,8 +794,6 @@ union cvmx_iob_to_ncb_did_223_credits { uint64_t reserved_7_63:57; #endif } s; - struct 
cvmx_iob_to_ncb_did_223_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_223_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_24_credits { @@ -1166,8 +807,6 @@ union cvmx_iob_to_ncb_did_24_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_24_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_24_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_32_credits { @@ -1181,8 +820,6 @@ union cvmx_iob_to_ncb_did_32_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_32_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_32_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_40_credits { @@ -1196,8 +833,6 @@ union cvmx_iob_to_ncb_did_40_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_40_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_40_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_55_credits { @@ -1211,8 +846,6 @@ union cvmx_iob_to_ncb_did_55_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_55_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_55_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_64_credits { @@ -1226,8 +859,6 @@ union cvmx_iob_to_ncb_did_64_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_64_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_64_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_79_credits { @@ -1241,8 +872,6 @@ union cvmx_iob_to_ncb_did_79_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_79_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_79_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_96_credits { @@ -1256,8 +885,6 @@ union cvmx_iob_to_ncb_did_96_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_96_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_96_credits_s cn68xxp1; }; union cvmx_iob_to_ncb_did_98_credits { @@ -1271,8 +898,6 @@ union cvmx_iob_to_ncb_did_98_credits { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_iob_to_ncb_did_98_credits_s cn68xx; - struct cvmx_iob_to_ncb_did_98_credits_s cn68xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h index 1193f73bb74a..c0a4ac7b41fb 100644 --- a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h @@ -108,24 +108,6 @@ union cvmx_ipd_1st_mbuff_skip { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_ipd_1st_mbuff_skip_s cn30xx; - struct cvmx_ipd_1st_mbuff_skip_s cn31xx; - struct cvmx_ipd_1st_mbuff_skip_s cn38xx; - struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2; - struct cvmx_ipd_1st_mbuff_skip_s cn50xx; - struct cvmx_ipd_1st_mbuff_skip_s cn52xx; - struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1; - struct cvmx_ipd_1st_mbuff_skip_s cn56xx; - struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1; - struct cvmx_ipd_1st_mbuff_skip_s cn58xx; - struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1; - struct cvmx_ipd_1st_mbuff_skip_s cn61xx; - struct cvmx_ipd_1st_mbuff_skip_s cn63xx; - struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1; - struct cvmx_ipd_1st_mbuff_skip_s cn66xx; - struct cvmx_ipd_1st_mbuff_skip_s cn68xx; - struct cvmx_ipd_1st_mbuff_skip_s cn68xxp1; - struct cvmx_ipd_1st_mbuff_skip_s cnf71xx; }; union cvmx_ipd_1st_next_ptr_back { @@ -139,24 +121,6 @@ union cvmx_ipd_1st_next_ptr_back { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_ipd_1st_next_ptr_back_s cn30xx; - struct cvmx_ipd_1st_next_ptr_back_s cn31xx; - struct cvmx_ipd_1st_next_ptr_back_s cn38xx; - struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2; - struct cvmx_ipd_1st_next_ptr_back_s cn50xx; - struct 
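In the cvmx-iob-defs.h and cvmx-ipd-defs.h hunks above, every deleted member is a per-chip-model alias (cn30xx ... cnf71xx) whose type merely repeats the generic `s` layout or another model's struct, so dropping them loses no layout information. A minimal sketch of the register-union pattern these headers follow; the raw `u64` member and the accessor are assumptions based on the usual cvmx convention, since the first union member falls outside the hunk context shown:

	#include <stdint.h>

	union example_csr {
		uint64_t u64;		/* raw 64-bit CSR value (assumed name) */
		struct example_csr_s {
	#ifdef __BIG_ENDIAN_BITFIELD
			uint64_t reserved_16_63:48;
			uint64_t field:16;
	#else
			uint64_t field:16;
			uint64_t reserved_16_63:48;
	#endif
		} s;			/* generic layout shared by all models */
	};

	/* Decode a raw CSR value via the generic view. */
	static inline uint16_t example_csr_field(uint64_t raw)
	{
		union example_csr r;

		r.u64 = raw;
		return r.s.field;
	}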
cvmx_ipd_1st_next_ptr_back_s cn52xx; - struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1; - struct cvmx_ipd_1st_next_ptr_back_s cn56xx; - struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1; - struct cvmx_ipd_1st_next_ptr_back_s cn58xx; - struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1; - struct cvmx_ipd_1st_next_ptr_back_s cn61xx; - struct cvmx_ipd_1st_next_ptr_back_s cn63xx; - struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1; - struct cvmx_ipd_1st_next_ptr_back_s cn66xx; - struct cvmx_ipd_1st_next_ptr_back_s cn68xx; - struct cvmx_ipd_1st_next_ptr_back_s cn68xxp1; - struct cvmx_ipd_1st_next_ptr_back_s cnf71xx; }; union cvmx_ipd_2nd_next_ptr_back { @@ -170,24 +134,6 @@ union cvmx_ipd_2nd_next_ptr_back { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_ipd_2nd_next_ptr_back_s cn30xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn31xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn38xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2; - struct cvmx_ipd_2nd_next_ptr_back_s cn50xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn52xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1; - struct cvmx_ipd_2nd_next_ptr_back_s cn56xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1; - struct cvmx_ipd_2nd_next_ptr_back_s cn58xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1; - struct cvmx_ipd_2nd_next_ptr_back_s cn61xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn63xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1; - struct cvmx_ipd_2nd_next_ptr_back_s cn66xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn68xx; - struct cvmx_ipd_2nd_next_ptr_back_s cn68xxp1; - struct cvmx_ipd_2nd_next_ptr_back_s cnf71xx; }; union cvmx_ipd_bist_status { @@ -284,10 +230,6 @@ union cvmx_ipd_bist_status { uint64_t reserved_16_63:48; #endif } cn30xx; - struct cvmx_ipd_bist_status_cn30xx cn31xx; - struct cvmx_ipd_bist_status_cn30xx cn38xx; - struct cvmx_ipd_bist_status_cn30xx cn38xxp2; - struct cvmx_ipd_bist_status_cn30xx cn50xx; struct cvmx_ipd_bist_status_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_18_63:46; @@ -331,18 +273,6 @@ union cvmx_ipd_bist_status { uint64_t reserved_18_63:46; #endif } cn52xx; - struct cvmx_ipd_bist_status_cn52xx cn52xxp1; - struct cvmx_ipd_bist_status_cn52xx cn56xx; - struct cvmx_ipd_bist_status_cn52xx cn56xxp1; - struct cvmx_ipd_bist_status_cn30xx cn58xx; - struct cvmx_ipd_bist_status_cn30xx cn58xxp1; - struct cvmx_ipd_bist_status_cn52xx cn61xx; - struct cvmx_ipd_bist_status_cn52xx cn63xx; - struct cvmx_ipd_bist_status_cn52xx cn63xxp1; - struct cvmx_ipd_bist_status_cn52xx cn66xx; - struct cvmx_ipd_bist_status_s cn68xx; - struct cvmx_ipd_bist_status_s cn68xxp1; - struct cvmx_ipd_bist_status_cn52xx cnf71xx; }; union cvmx_ipd_bp_prt_red_end { @@ -365,10 +295,6 @@ union cvmx_ipd_bp_prt_red_end { uint64_t reserved_36_63:28; #endif } cn30xx; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx; struct cvmx_ipd_bp_prt_red_end_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_40_63:24; @@ -378,12 +304,6 @@ union cvmx_ipd_bp_prt_red_end { uint64_t reserved_40_63:24; #endif } cn52xx; - struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1; - struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx; - struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx; - struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1; - struct cvmx_ipd_bp_prt_red_end_s cn61xx; struct cvmx_ipd_bp_prt_red_end_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_44_63:20; @@ -393,9 +313,6 @@ union 
cvmx_ipd_bp_prt_red_end { uint64_t reserved_44_63:20; #endif } cn63xx; - struct cvmx_ipd_bp_prt_red_end_cn63xx cn63xxp1; - struct cvmx_ipd_bp_prt_red_end_s cn66xx; - struct cvmx_ipd_bp_prt_red_end_s cnf71xx; }; union cvmx_ipd_bpidx_mbuf_th { @@ -411,8 +328,6 @@ union cvmx_ipd_bpidx_mbuf_th { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_ipd_bpidx_mbuf_th_s cn68xx; - struct cvmx_ipd_bpidx_mbuf_th_s cn68xxp1; }; union cvmx_ipd_bpid_bp_counterx { @@ -426,8 +341,6 @@ union cvmx_ipd_bpid_bp_counterx { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_ipd_bpid_bp_counterx_s cn68xx; - struct cvmx_ipd_bpid_bp_counterx_s cn68xxp1; }; union cvmx_ipd_clk_count { @@ -439,24 +352,6 @@ union cvmx_ipd_clk_count { uint64_t clk_cnt:64; #endif } s; - struct cvmx_ipd_clk_count_s cn30xx; - struct cvmx_ipd_clk_count_s cn31xx; - struct cvmx_ipd_clk_count_s cn38xx; - struct cvmx_ipd_clk_count_s cn38xxp2; - struct cvmx_ipd_clk_count_s cn50xx; - struct cvmx_ipd_clk_count_s cn52xx; - struct cvmx_ipd_clk_count_s cn52xxp1; - struct cvmx_ipd_clk_count_s cn56xx; - struct cvmx_ipd_clk_count_s cn56xxp1; - struct cvmx_ipd_clk_count_s cn58xx; - struct cvmx_ipd_clk_count_s cn58xxp1; - struct cvmx_ipd_clk_count_s cn61xx; - struct cvmx_ipd_clk_count_s cn63xx; - struct cvmx_ipd_clk_count_s cn63xxp1; - struct cvmx_ipd_clk_count_s cn66xx; - struct cvmx_ipd_clk_count_s cn68xx; - struct cvmx_ipd_clk_count_s cn68xxp1; - struct cvmx_ipd_clk_count_s cnf71xx; }; union cvmx_ipd_credits { @@ -472,8 +367,6 @@ union cvmx_ipd_credits { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_ipd_credits_s cn68xx; - struct cvmx_ipd_credits_s cn68xxp1; }; union cvmx_ipd_ctl_status { @@ -544,8 +437,6 @@ union cvmx_ipd_ctl_status { uint64_t reserved_10_63:54; #endif } cn30xx; - struct cvmx_ipd_ctl_status_cn30xx cn31xx; - struct cvmx_ipd_ctl_status_cn30xx cn38xx; struct cvmx_ipd_ctl_status_cn38xxp2 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -604,10 +495,6 @@ union cvmx_ipd_ctl_status { uint64_t reserved_15_63:49; #endif } cn50xx; - struct cvmx_ipd_ctl_status_cn50xx cn52xx; - struct cvmx_ipd_ctl_status_cn50xx cn52xxp1; - struct cvmx_ipd_ctl_status_cn50xx cn56xx; - struct cvmx_ipd_ctl_status_cn50xx cn56xxp1; struct cvmx_ipd_ctl_status_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -637,9 +524,6 @@ union cvmx_ipd_ctl_status { uint64_t reserved_12_63:52; #endif } cn58xx; - struct cvmx_ipd_ctl_status_cn58xx cn58xxp1; - struct cvmx_ipd_ctl_status_s cn61xx; - struct cvmx_ipd_ctl_status_s cn63xx; struct cvmx_ipd_ctl_status_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; @@ -677,10 +561,6 @@ union cvmx_ipd_ctl_status { uint64_t reserved_16_63:48; #endif } cn63xxp1; - struct cvmx_ipd_ctl_status_s cn66xx; - struct cvmx_ipd_ctl_status_s cn68xx; - struct cvmx_ipd_ctl_status_s cn68xxp1; - struct cvmx_ipd_ctl_status_s cnf71xx; }; union cvmx_ipd_ecc_ctl { @@ -700,8 +580,6 @@ union cvmx_ipd_ecc_ctl { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_ipd_ecc_ctl_s cn68xx; - struct cvmx_ipd_ecc_ctl_s cn68xxp1; }; union cvmx_ipd_free_ptr_fifo_ctl { @@ -723,8 +601,6 @@ union cvmx_ipd_free_ptr_fifo_ctl { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xx; - struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xxp1; }; union cvmx_ipd_free_ptr_value { @@ -738,8 +614,6 @@ union cvmx_ipd_free_ptr_value { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_ipd_free_ptr_value_s cn68xx; - struct cvmx_ipd_free_ptr_value_s cn68xxp1; }; union cvmx_ipd_hold_ptr_fifo_ctl { @@ -761,8 
+635,6 @@ union cvmx_ipd_hold_ptr_fifo_ctl { uint64_t reserved_43_63:21; #endif } s; - struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xx; - struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xxp1; }; union cvmx_ipd_int_enb { @@ -837,7 +709,6 @@ union cvmx_ipd_int_enb { uint64_t reserved_5_63:59; #endif } cn30xx; - struct cvmx_ipd_int_enb_cn30xx cn31xx; struct cvmx_ipd_int_enb_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -865,8 +736,6 @@ union cvmx_ipd_int_enb { uint64_t reserved_10_63:54; #endif } cn38xx; - struct cvmx_ipd_int_enb_cn30xx cn38xxp2; - struct cvmx_ipd_int_enb_cn38xx cn50xx; struct cvmx_ipd_int_enb_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -898,18 +767,6 @@ union cvmx_ipd_int_enb { uint64_t reserved_12_63:52; #endif } cn52xx; - struct cvmx_ipd_int_enb_cn52xx cn52xxp1; - struct cvmx_ipd_int_enb_cn52xx cn56xx; - struct cvmx_ipd_int_enb_cn52xx cn56xxp1; - struct cvmx_ipd_int_enb_cn38xx cn58xx; - struct cvmx_ipd_int_enb_cn38xx cn58xxp1; - struct cvmx_ipd_int_enb_cn52xx cn61xx; - struct cvmx_ipd_int_enb_cn52xx cn63xx; - struct cvmx_ipd_int_enb_cn52xx cn63xxp1; - struct cvmx_ipd_int_enb_cn52xx cn66xx; - struct cvmx_ipd_int_enb_s cn68xx; - struct cvmx_ipd_int_enb_s cn68xxp1; - struct cvmx_ipd_int_enb_cn52xx cnf71xx; }; union cvmx_ipd_int_sum { @@ -984,7 +841,6 @@ union cvmx_ipd_int_sum { uint64_t reserved_5_63:59; #endif } cn30xx; - struct cvmx_ipd_int_sum_cn30xx cn31xx; struct cvmx_ipd_int_sum_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -1012,8 +868,6 @@ union cvmx_ipd_int_sum { uint64_t reserved_10_63:54; #endif } cn38xx; - struct cvmx_ipd_int_sum_cn30xx cn38xxp2; - struct cvmx_ipd_int_sum_cn38xx cn50xx; struct cvmx_ipd_int_sum_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -1045,18 +899,6 @@ union cvmx_ipd_int_sum { uint64_t reserved_12_63:52; #endif } cn52xx; - struct cvmx_ipd_int_sum_cn52xx cn52xxp1; - struct cvmx_ipd_int_sum_cn52xx cn56xx; - struct cvmx_ipd_int_sum_cn52xx cn56xxp1; - struct cvmx_ipd_int_sum_cn38xx cn58xx; - struct cvmx_ipd_int_sum_cn38xx cn58xxp1; - struct cvmx_ipd_int_sum_cn52xx cn61xx; - struct cvmx_ipd_int_sum_cn52xx cn63xx; - struct cvmx_ipd_int_sum_cn52xx cn63xxp1; - struct cvmx_ipd_int_sum_cn52xx cn66xx; - struct cvmx_ipd_int_sum_s cn68xx; - struct cvmx_ipd_int_sum_s cn68xxp1; - struct cvmx_ipd_int_sum_cn52xx cnf71xx; }; union cvmx_ipd_next_pkt_ptr { @@ -1070,8 +912,6 @@ union cvmx_ipd_next_pkt_ptr { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_ipd_next_pkt_ptr_s cn68xx; - struct cvmx_ipd_next_pkt_ptr_s cn68xxp1; }; union cvmx_ipd_next_wqe_ptr { @@ -1085,8 +925,6 @@ union cvmx_ipd_next_wqe_ptr { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_ipd_next_wqe_ptr_s cn68xx; - struct cvmx_ipd_next_wqe_ptr_s cn68xxp1; }; union cvmx_ipd_not_1st_mbuff_skip { @@ -1100,24 +938,6 @@ union cvmx_ipd_not_1st_mbuff_skip { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2; - struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1; - struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1; - struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1; - struct cvmx_ipd_not_1st_mbuff_skip_s cn61xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx; - struct 
cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1; - struct cvmx_ipd_not_1st_mbuff_skip_s cn66xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn68xx; - struct cvmx_ipd_not_1st_mbuff_skip_s cn68xxp1; - struct cvmx_ipd_not_1st_mbuff_skip_s cnf71xx; }; union cvmx_ipd_on_bp_drop_pktx { @@ -1129,8 +949,6 @@ union cvmx_ipd_on_bp_drop_pktx { uint64_t prt_enb:64; #endif } s; - struct cvmx_ipd_on_bp_drop_pktx_s cn68xx; - struct cvmx_ipd_on_bp_drop_pktx_s cn68xxp1; }; union cvmx_ipd_packet_mbuff_size { @@ -1144,24 +962,6 @@ union cvmx_ipd_packet_mbuff_size { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_ipd_packet_mbuff_size_s cn30xx; - struct cvmx_ipd_packet_mbuff_size_s cn31xx; - struct cvmx_ipd_packet_mbuff_size_s cn38xx; - struct cvmx_ipd_packet_mbuff_size_s cn38xxp2; - struct cvmx_ipd_packet_mbuff_size_s cn50xx; - struct cvmx_ipd_packet_mbuff_size_s cn52xx; - struct cvmx_ipd_packet_mbuff_size_s cn52xxp1; - struct cvmx_ipd_packet_mbuff_size_s cn56xx; - struct cvmx_ipd_packet_mbuff_size_s cn56xxp1; - struct cvmx_ipd_packet_mbuff_size_s cn58xx; - struct cvmx_ipd_packet_mbuff_size_s cn58xxp1; - struct cvmx_ipd_packet_mbuff_size_s cn61xx; - struct cvmx_ipd_packet_mbuff_size_s cn63xx; - struct cvmx_ipd_packet_mbuff_size_s cn63xxp1; - struct cvmx_ipd_packet_mbuff_size_s cn66xx; - struct cvmx_ipd_packet_mbuff_size_s cn68xx; - struct cvmx_ipd_packet_mbuff_size_s cn68xxp1; - struct cvmx_ipd_packet_mbuff_size_s cnf71xx; }; union cvmx_ipd_pkt_err { @@ -1175,8 +975,6 @@ union cvmx_ipd_pkt_err { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_ipd_pkt_err_s cn68xx; - struct cvmx_ipd_pkt_err_s cn68xxp1; }; union cvmx_ipd_pkt_ptr_valid { @@ -1190,21 +988,6 @@ union cvmx_ipd_pkt_ptr_valid { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_ipd_pkt_ptr_valid_s cn30xx; - struct cvmx_ipd_pkt_ptr_valid_s cn31xx; - struct cvmx_ipd_pkt_ptr_valid_s cn38xx; - struct cvmx_ipd_pkt_ptr_valid_s cn50xx; - struct cvmx_ipd_pkt_ptr_valid_s cn52xx; - struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1; - struct cvmx_ipd_pkt_ptr_valid_s cn56xx; - struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1; - struct cvmx_ipd_pkt_ptr_valid_s cn58xx; - struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1; - struct cvmx_ipd_pkt_ptr_valid_s cn61xx; - struct cvmx_ipd_pkt_ptr_valid_s cn63xx; - struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1; - struct cvmx_ipd_pkt_ptr_valid_s cn66xx; - struct cvmx_ipd_pkt_ptr_valid_s cnf71xx; }; union cvmx_ipd_portx_bp_page_cnt { @@ -1220,22 +1003,6 @@ union cvmx_ipd_portx_bp_page_cnt { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_ipd_portx_bp_page_cnt_s cn30xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn31xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn38xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2; - struct cvmx_ipd_portx_bp_page_cnt_s cn50xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn52xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1; - struct cvmx_ipd_portx_bp_page_cnt_s cn56xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1; - struct cvmx_ipd_portx_bp_page_cnt_s cn58xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1; - struct cvmx_ipd_portx_bp_page_cnt_s cn61xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn63xx; - struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1; - struct cvmx_ipd_portx_bp_page_cnt_s cn66xx; - struct cvmx_ipd_portx_bp_page_cnt_s cnf71xx; }; union cvmx_ipd_portx_bp_page_cnt2 { @@ -1251,15 +1018,6 @@ union cvmx_ipd_portx_bp_page_cnt2 { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx; - struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1; - struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx; - struct 
cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1; - struct cvmx_ipd_portx_bp_page_cnt2_s cn61xx; - struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx; - struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1; - struct cvmx_ipd_portx_bp_page_cnt2_s cn66xx; - struct cvmx_ipd_portx_bp_page_cnt2_s cnf71xx; }; union cvmx_ipd_portx_bp_page_cnt3 { @@ -1275,11 +1033,6 @@ union cvmx_ipd_portx_bp_page_cnt3 { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_ipd_portx_bp_page_cnt3_s cn61xx; - struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx; - struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1; - struct cvmx_ipd_portx_bp_page_cnt3_s cn66xx; - struct cvmx_ipd_portx_bp_page_cnt3_s cnf71xx; }; union cvmx_ipd_port_bp_counters2_pairx { @@ -1293,15 +1046,6 @@ union cvmx_ipd_port_bp_counters2_pairx { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx; - struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1; - struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx; - struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1; - struct cvmx_ipd_port_bp_counters2_pairx_s cn61xx; - struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx; - struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1; - struct cvmx_ipd_port_bp_counters2_pairx_s cn66xx; - struct cvmx_ipd_port_bp_counters2_pairx_s cnf71xx; }; union cvmx_ipd_port_bp_counters3_pairx { @@ -1315,11 +1059,6 @@ union cvmx_ipd_port_bp_counters3_pairx { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_ipd_port_bp_counters3_pairx_s cn61xx; - struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx; - struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1; - struct cvmx_ipd_port_bp_counters3_pairx_s cn66xx; - struct cvmx_ipd_port_bp_counters3_pairx_s cnf71xx; }; union cvmx_ipd_port_bp_counters4_pairx { @@ -1333,9 +1072,6 @@ union cvmx_ipd_port_bp_counters4_pairx { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_ipd_port_bp_counters4_pairx_s cn61xx; - struct cvmx_ipd_port_bp_counters4_pairx_s cn66xx; - struct cvmx_ipd_port_bp_counters4_pairx_s cnf71xx; }; union cvmx_ipd_port_bp_counters_pairx { @@ -1349,22 +1085,6 @@ union cvmx_ipd_port_bp_counters_pairx { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_ipd_port_bp_counters_pairx_s cn30xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn31xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn38xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2; - struct cvmx_ipd_port_bp_counters_pairx_s cn50xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn52xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1; - struct cvmx_ipd_port_bp_counters_pairx_s cn56xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1; - struct cvmx_ipd_port_bp_counters_pairx_s cn58xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1; - struct cvmx_ipd_port_bp_counters_pairx_s cn61xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn63xx; - struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1; - struct cvmx_ipd_port_bp_counters_pairx_s cn66xx; - struct cvmx_ipd_port_bp_counters_pairx_s cnf71xx; }; union cvmx_ipd_port_ptr_fifo_ctl { @@ -1384,8 +1104,6 @@ union cvmx_ipd_port_ptr_fifo_ctl { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xx; - struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xxp1; }; union cvmx_ipd_port_qos_x_cnt { @@ -1399,17 +1117,6 @@ union cvmx_ipd_port_qos_x_cnt { uint64_t wmark:32; #endif } s; - struct cvmx_ipd_port_qos_x_cnt_s cn52xx; - struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1; - struct cvmx_ipd_port_qos_x_cnt_s cn56xx; - struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1; - struct cvmx_ipd_port_qos_x_cnt_s cn61xx; - struct 
cvmx_ipd_port_qos_x_cnt_s cn63xx; - struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1; - struct cvmx_ipd_port_qos_x_cnt_s cn66xx; - struct cvmx_ipd_port_qos_x_cnt_s cn68xx; - struct cvmx_ipd_port_qos_x_cnt_s cn68xxp1; - struct cvmx_ipd_port_qos_x_cnt_s cnf71xx; }; union cvmx_ipd_port_qos_intx { @@ -1421,17 +1128,6 @@ union cvmx_ipd_port_qos_intx { uint64_t intr:64; #endif } s; - struct cvmx_ipd_port_qos_intx_s cn52xx; - struct cvmx_ipd_port_qos_intx_s cn52xxp1; - struct cvmx_ipd_port_qos_intx_s cn56xx; - struct cvmx_ipd_port_qos_intx_s cn56xxp1; - struct cvmx_ipd_port_qos_intx_s cn61xx; - struct cvmx_ipd_port_qos_intx_s cn63xx; - struct cvmx_ipd_port_qos_intx_s cn63xxp1; - struct cvmx_ipd_port_qos_intx_s cn66xx; - struct cvmx_ipd_port_qos_intx_s cn68xx; - struct cvmx_ipd_port_qos_intx_s cn68xxp1; - struct cvmx_ipd_port_qos_intx_s cnf71xx; }; union cvmx_ipd_port_qos_int_enbx { @@ -1443,17 +1139,6 @@ union cvmx_ipd_port_qos_int_enbx { uint64_t enb:64; #endif } s; - struct cvmx_ipd_port_qos_int_enbx_s cn52xx; - struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1; - struct cvmx_ipd_port_qos_int_enbx_s cn56xx; - struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1; - struct cvmx_ipd_port_qos_int_enbx_s cn61xx; - struct cvmx_ipd_port_qos_int_enbx_s cn63xx; - struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1; - struct cvmx_ipd_port_qos_int_enbx_s cn66xx; - struct cvmx_ipd_port_qos_int_enbx_s cn68xx; - struct cvmx_ipd_port_qos_int_enbx_s cn68xxp1; - struct cvmx_ipd_port_qos_int_enbx_s cnf71xx; }; union cvmx_ipd_port_sopx { @@ -1465,8 +1150,6 @@ union cvmx_ipd_port_sopx { uint64_t sop:64; #endif } s; - struct cvmx_ipd_port_sopx_s cn68xx; - struct cvmx_ipd_port_sopx_s cn68xxp1; }; union cvmx_ipd_prc_hold_ptr_fifo_ctl { @@ -1488,21 +1171,6 @@ union cvmx_ipd_prc_hold_ptr_fifo_ctl { uint64_t reserved_39_63:25; #endif } s; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn61xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn66xx; - struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cnf71xx; }; union cvmx_ipd_prc_port_ptr_fifo_ctl { @@ -1522,21 +1190,6 @@ union cvmx_ipd_prc_port_ptr_fifo_ctl { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn61xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn66xx; - struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cnf71xx; }; union cvmx_ipd_ptr_count { @@ -1558,24 +1211,6 @@ union cvmx_ipd_ptr_count { 
uint64_t reserved_19_63:45; #endif } s; - struct cvmx_ipd_ptr_count_s cn30xx; - struct cvmx_ipd_ptr_count_s cn31xx; - struct cvmx_ipd_ptr_count_s cn38xx; - struct cvmx_ipd_ptr_count_s cn38xxp2; - struct cvmx_ipd_ptr_count_s cn50xx; - struct cvmx_ipd_ptr_count_s cn52xx; - struct cvmx_ipd_ptr_count_s cn52xxp1; - struct cvmx_ipd_ptr_count_s cn56xx; - struct cvmx_ipd_ptr_count_s cn56xxp1; - struct cvmx_ipd_ptr_count_s cn58xx; - struct cvmx_ipd_ptr_count_s cn58xxp1; - struct cvmx_ipd_ptr_count_s cn61xx; - struct cvmx_ipd_ptr_count_s cn63xx; - struct cvmx_ipd_ptr_count_s cn63xxp1; - struct cvmx_ipd_ptr_count_s cn66xx; - struct cvmx_ipd_ptr_count_s cn68xx; - struct cvmx_ipd_ptr_count_s cn68xxp1; - struct cvmx_ipd_ptr_count_s cnf71xx; }; union cvmx_ipd_pwp_ptr_fifo_ctl { @@ -1599,21 +1234,6 @@ union cvmx_ipd_pwp_ptr_fifo_ctl { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn61xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn66xx; - struct cvmx_ipd_pwp_ptr_fifo_ctl_s cnf71xx; }; union cvmx_ipd_qosx_red_marks { @@ -1627,24 +1247,6 @@ union cvmx_ipd_qosx_red_marks { uint64_t drop:32; #endif } s; - struct cvmx_ipd_qosx_red_marks_s cn30xx; - struct cvmx_ipd_qosx_red_marks_s cn31xx; - struct cvmx_ipd_qosx_red_marks_s cn38xx; - struct cvmx_ipd_qosx_red_marks_s cn38xxp2; - struct cvmx_ipd_qosx_red_marks_s cn50xx; - struct cvmx_ipd_qosx_red_marks_s cn52xx; - struct cvmx_ipd_qosx_red_marks_s cn52xxp1; - struct cvmx_ipd_qosx_red_marks_s cn56xx; - struct cvmx_ipd_qosx_red_marks_s cn56xxp1; - struct cvmx_ipd_qosx_red_marks_s cn58xx; - struct cvmx_ipd_qosx_red_marks_s cn58xxp1; - struct cvmx_ipd_qosx_red_marks_s cn61xx; - struct cvmx_ipd_qosx_red_marks_s cn63xx; - struct cvmx_ipd_qosx_red_marks_s cn63xxp1; - struct cvmx_ipd_qosx_red_marks_s cn66xx; - struct cvmx_ipd_qosx_red_marks_s cn68xx; - struct cvmx_ipd_qosx_red_marks_s cn68xxp1; - struct cvmx_ipd_qosx_red_marks_s cnf71xx; }; union cvmx_ipd_que0_free_page_cnt { @@ -1658,24 +1260,6 @@ union cvmx_ipd_que0_free_page_cnt { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_ipd_que0_free_page_cnt_s cn30xx; - struct cvmx_ipd_que0_free_page_cnt_s cn31xx; - struct cvmx_ipd_que0_free_page_cnt_s cn38xx; - struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2; - struct cvmx_ipd_que0_free_page_cnt_s cn50xx; - struct cvmx_ipd_que0_free_page_cnt_s cn52xx; - struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1; - struct cvmx_ipd_que0_free_page_cnt_s cn56xx; - struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1; - struct cvmx_ipd_que0_free_page_cnt_s cn58xx; - struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1; - struct cvmx_ipd_que0_free_page_cnt_s cn61xx; - struct cvmx_ipd_que0_free_page_cnt_s cn63xx; - struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1; - struct cvmx_ipd_que0_free_page_cnt_s cn66xx; - struct cvmx_ipd_que0_free_page_cnt_s cn68xx; - struct cvmx_ipd_que0_free_page_cnt_s cn68xxp1; - struct cvmx_ipd_que0_free_page_cnt_s cnf71xx; }; union cvmx_ipd_red_bpid_enablex { @@ -1687,8 +1271,6 @@ union cvmx_ipd_red_bpid_enablex { 
uint64_t prt_enb:64; #endif } s; - struct cvmx_ipd_red_bpid_enablex_s cn68xx; - struct cvmx_ipd_red_bpid_enablex_s cn68xxp1; }; union cvmx_ipd_red_delay { @@ -1704,8 +1286,6 @@ union cvmx_ipd_red_delay { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_ipd_red_delay_s cn68xx; - struct cvmx_ipd_red_delay_s cn68xxp1; }; union cvmx_ipd_red_port_enable { @@ -1721,22 +1301,6 @@ union cvmx_ipd_red_port_enable { uint64_t prb_dly:14; #endif } s; - struct cvmx_ipd_red_port_enable_s cn30xx; - struct cvmx_ipd_red_port_enable_s cn31xx; - struct cvmx_ipd_red_port_enable_s cn38xx; - struct cvmx_ipd_red_port_enable_s cn38xxp2; - struct cvmx_ipd_red_port_enable_s cn50xx; - struct cvmx_ipd_red_port_enable_s cn52xx; - struct cvmx_ipd_red_port_enable_s cn52xxp1; - struct cvmx_ipd_red_port_enable_s cn56xx; - struct cvmx_ipd_red_port_enable_s cn56xxp1; - struct cvmx_ipd_red_port_enable_s cn58xx; - struct cvmx_ipd_red_port_enable_s cn58xxp1; - struct cvmx_ipd_red_port_enable_s cn61xx; - struct cvmx_ipd_red_port_enable_s cn63xx; - struct cvmx_ipd_red_port_enable_s cn63xxp1; - struct cvmx_ipd_red_port_enable_s cn66xx; - struct cvmx_ipd_red_port_enable_s cnf71xx; }; union cvmx_ipd_red_port_enable2 { @@ -1759,10 +1323,6 @@ union cvmx_ipd_red_port_enable2 { uint64_t reserved_4_63:60; #endif } cn52xx; - struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1; - struct cvmx_ipd_red_port_enable2_cn52xx cn56xx; - struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1; - struct cvmx_ipd_red_port_enable2_s cn61xx; struct cvmx_ipd_red_port_enable2_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; @@ -1772,9 +1332,6 @@ union cvmx_ipd_red_port_enable2 { uint64_t reserved_8_63:56; #endif } cn63xx; - struct cvmx_ipd_red_port_enable2_cn63xx cn63xxp1; - struct cvmx_ipd_red_port_enable2_s cn66xx; - struct cvmx_ipd_red_port_enable2_s cnf71xx; }; union cvmx_ipd_red_quex_param { @@ -1794,24 +1351,6 @@ union cvmx_ipd_red_quex_param { uint64_t reserved_49_63:15; #endif } s; - struct cvmx_ipd_red_quex_param_s cn30xx; - struct cvmx_ipd_red_quex_param_s cn31xx; - struct cvmx_ipd_red_quex_param_s cn38xx; - struct cvmx_ipd_red_quex_param_s cn38xxp2; - struct cvmx_ipd_red_quex_param_s cn50xx; - struct cvmx_ipd_red_quex_param_s cn52xx; - struct cvmx_ipd_red_quex_param_s cn52xxp1; - struct cvmx_ipd_red_quex_param_s cn56xx; - struct cvmx_ipd_red_quex_param_s cn56xxp1; - struct cvmx_ipd_red_quex_param_s cn58xx; - struct cvmx_ipd_red_quex_param_s cn58xxp1; - struct cvmx_ipd_red_quex_param_s cn61xx; - struct cvmx_ipd_red_quex_param_s cn63xx; - struct cvmx_ipd_red_quex_param_s cn63xxp1; - struct cvmx_ipd_red_quex_param_s cn66xx; - struct cvmx_ipd_red_quex_param_s cn68xx; - struct cvmx_ipd_red_quex_param_s cn68xxp1; - struct cvmx_ipd_red_quex_param_s cnf71xx; }; union cvmx_ipd_req_wgt { @@ -1837,7 +1376,6 @@ union cvmx_ipd_req_wgt { uint64_t wgt7:8; #endif } s; - struct cvmx_ipd_req_wgt_s cn68xx; }; union cvmx_ipd_sub_port_bp_page_cnt { @@ -1853,24 +1391,6 @@ union cvmx_ipd_sub_port_bp_page_cnt { uint64_t reserved_31_63:33; #endif } s; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx; - struct 
cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn61xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn66xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xx; - struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xxp1; - struct cvmx_ipd_sub_port_bp_page_cnt_s cnf71xx; }; union cvmx_ipd_sub_port_fcs { @@ -1897,7 +1417,6 @@ union cvmx_ipd_sub_port_fcs { uint64_t reserved_3_63:61; #endif } cn30xx; - struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx; struct cvmx_ipd_sub_port_fcs_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -1907,19 +1426,6 @@ union cvmx_ipd_sub_port_fcs { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2; - struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx; - struct cvmx_ipd_sub_port_fcs_s cn52xx; - struct cvmx_ipd_sub_port_fcs_s cn52xxp1; - struct cvmx_ipd_sub_port_fcs_s cn56xx; - struct cvmx_ipd_sub_port_fcs_s cn56xxp1; - struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx; - struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1; - struct cvmx_ipd_sub_port_fcs_s cn61xx; - struct cvmx_ipd_sub_port_fcs_s cn63xx; - struct cvmx_ipd_sub_port_fcs_s cn63xxp1; - struct cvmx_ipd_sub_port_fcs_s cn66xx; - struct cvmx_ipd_sub_port_fcs_s cnf71xx; }; union cvmx_ipd_sub_port_qos_cnt { @@ -1935,17 +1441,6 @@ union cvmx_ipd_sub_port_qos_cnt { uint64_t reserved_41_63:23; #endif } s; - struct cvmx_ipd_sub_port_qos_cnt_s cn52xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1; - struct cvmx_ipd_sub_port_qos_cnt_s cn56xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1; - struct cvmx_ipd_sub_port_qos_cnt_s cn61xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn63xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1; - struct cvmx_ipd_sub_port_qos_cnt_s cn66xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn68xx; - struct cvmx_ipd_sub_port_qos_cnt_s cn68xxp1; - struct cvmx_ipd_sub_port_qos_cnt_s cnf71xx; }; union cvmx_ipd_wqe_fpa_queue { @@ -1959,24 +1454,6 @@ union cvmx_ipd_wqe_fpa_queue { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_ipd_wqe_fpa_queue_s cn30xx; - struct cvmx_ipd_wqe_fpa_queue_s cn31xx; - struct cvmx_ipd_wqe_fpa_queue_s cn38xx; - struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2; - struct cvmx_ipd_wqe_fpa_queue_s cn50xx; - struct cvmx_ipd_wqe_fpa_queue_s cn52xx; - struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1; - struct cvmx_ipd_wqe_fpa_queue_s cn56xx; - struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1; - struct cvmx_ipd_wqe_fpa_queue_s cn58xx; - struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1; - struct cvmx_ipd_wqe_fpa_queue_s cn61xx; - struct cvmx_ipd_wqe_fpa_queue_s cn63xx; - struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1; - struct cvmx_ipd_wqe_fpa_queue_s cn66xx; - struct cvmx_ipd_wqe_fpa_queue_s cn68xx; - struct cvmx_ipd_wqe_fpa_queue_s cn68xxp1; - struct cvmx_ipd_wqe_fpa_queue_s cnf71xx; }; union cvmx_ipd_wqe_ptr_valid { @@ -1990,21 +1467,6 @@ union cvmx_ipd_wqe_ptr_valid { uint64_t reserved_29_63:35; #endif } s; - struct cvmx_ipd_wqe_ptr_valid_s cn30xx; - struct cvmx_ipd_wqe_ptr_valid_s cn31xx; - struct cvmx_ipd_wqe_ptr_valid_s cn38xx; - struct cvmx_ipd_wqe_ptr_valid_s cn50xx; - struct cvmx_ipd_wqe_ptr_valid_s cn52xx; - struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1; - struct cvmx_ipd_wqe_ptr_valid_s cn56xx; - struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1; - struct cvmx_ipd_wqe_ptr_valid_s cn58xx; - struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1; - struct cvmx_ipd_wqe_ptr_valid_s cn61xx; - struct cvmx_ipd_wqe_ptr_valid_s cn63xx; - struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1; - struct 
cvmx_ipd_wqe_ptr_valid_s cn66xx; - struct cvmx_ipd_wqe_ptr_valid_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h index fe50671fd1bb..06ea13251448 100644 --- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h @@ -104,7 +104,6 @@ union cvmx_l2t_err { __BITFIELD_FIELD(uint64_t ecc_ena:1, ;))))))))))))) } cn38xx; - struct cvmx_l2t_err_cn38xx cn38xxp2; struct cvmx_l2t_err_cn50xx { __BITFIELD_FIELD(uint64_t reserved_28_63:36, __BITFIELD_FIELD(uint64_t lck_intena2:1, @@ -139,11 +138,6 @@ union cvmx_l2t_err { __BITFIELD_FIELD(uint64_t ecc_ena:1, ;)))))))))))))) } cn52xx; - struct cvmx_l2t_err_cn52xx cn52xxp1; - struct cvmx_l2t_err_s cn56xx; - struct cvmx_l2t_err_s cn56xxp1; - struct cvmx_l2t_err_s cn58xx; - struct cvmx_l2t_err_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h index d36d42b8307b..0237907522cb 100644 --- a/arch/mips/include/asm/octeon/cvmx-led-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h @@ -53,12 +53,6 @@ union cvmx_led_blink { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_led_blink_s cn38xx; - struct cvmx_led_blink_s cn38xxp2; - struct cvmx_led_blink_s cn56xx; - struct cvmx_led_blink_s cn56xxp1; - struct cvmx_led_blink_s cn58xx; - struct cvmx_led_blink_s cn58xxp1; }; union cvmx_led_clk_phase { @@ -72,12 +66,6 @@ union cvmx_led_clk_phase { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_led_clk_phase_s cn38xx; - struct cvmx_led_clk_phase_s cn38xxp2; - struct cvmx_led_clk_phase_s cn56xx; - struct cvmx_led_clk_phase_s cn56xxp1; - struct cvmx_led_clk_phase_s cn58xx; - struct cvmx_led_clk_phase_s cn58xxp1; }; union cvmx_led_cylon { @@ -91,12 +79,6 @@ union cvmx_led_cylon { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_led_cylon_s cn38xx; - struct cvmx_led_cylon_s cn38xxp2; - struct cvmx_led_cylon_s cn56xx; - struct cvmx_led_cylon_s cn56xxp1; - struct cvmx_led_cylon_s cn58xx; - struct cvmx_led_cylon_s cn58xxp1; }; union cvmx_led_dbg { @@ -110,12 +92,6 @@ union cvmx_led_dbg { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_led_dbg_s cn38xx; - struct cvmx_led_dbg_s cn38xxp2; - struct cvmx_led_dbg_s cn56xx; - struct cvmx_led_dbg_s cn56xxp1; - struct cvmx_led_dbg_s cn58xx; - struct cvmx_led_dbg_s cn58xxp1; }; union cvmx_led_en { @@ -129,12 +105,6 @@ union cvmx_led_en { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_led_en_s cn38xx; - struct cvmx_led_en_s cn38xxp2; - struct cvmx_led_en_s cn56xx; - struct cvmx_led_en_s cn56xxp1; - struct cvmx_led_en_s cn58xx; - struct cvmx_led_en_s cn58xxp1; }; union cvmx_led_polarity { @@ -148,12 +118,6 @@ union cvmx_led_polarity { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_led_polarity_s cn38xx; - struct cvmx_led_polarity_s cn38xxp2; - struct cvmx_led_polarity_s cn56xx; - struct cvmx_led_polarity_s cn56xxp1; - struct cvmx_led_polarity_s cn58xx; - struct cvmx_led_polarity_s cn58xxp1; }; union cvmx_led_prt { @@ -167,12 +131,6 @@ union cvmx_led_prt { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_led_prt_s cn38xx; - struct cvmx_led_prt_s cn38xxp2; - struct cvmx_led_prt_s cn56xx; - struct cvmx_led_prt_s cn56xxp1; - struct cvmx_led_prt_s cn58xx; - struct cvmx_led_prt_s cn58xxp1; }; union cvmx_led_prt_fmt { @@ -186,12 +144,6 @@ union cvmx_led_prt_fmt { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_led_prt_fmt_s cn38xx; - struct cvmx_led_prt_fmt_s cn38xxp2; - struct cvmx_led_prt_fmt_s cn56xx; - 
struct cvmx_led_prt_fmt_s cn56xxp1; - struct cvmx_led_prt_fmt_s cn58xx; - struct cvmx_led_prt_fmt_s cn58xxp1; }; union cvmx_led_prt_statusx { @@ -205,12 +157,6 @@ union cvmx_led_prt_statusx { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_led_prt_statusx_s cn38xx; - struct cvmx_led_prt_statusx_s cn38xxp2; - struct cvmx_led_prt_statusx_s cn56xx; - struct cvmx_led_prt_statusx_s cn56xxp1; - struct cvmx_led_prt_statusx_s cn58xx; - struct cvmx_led_prt_statusx_s cn58xxp1; }; union cvmx_led_udd_cntx { @@ -224,12 +170,6 @@ union cvmx_led_udd_cntx { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_led_udd_cntx_s cn38xx; - struct cvmx_led_udd_cntx_s cn38xxp2; - struct cvmx_led_udd_cntx_s cn56xx; - struct cvmx_led_udd_cntx_s cn56xxp1; - struct cvmx_led_udd_cntx_s cn58xx; - struct cvmx_led_udd_cntx_s cn58xxp1; }; union cvmx_led_udd_datx { @@ -243,12 +183,6 @@ union cvmx_led_udd_datx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_led_udd_datx_s cn38xx; - struct cvmx_led_udd_datx_s cn38xxp2; - struct cvmx_led_udd_datx_s cn56xx; - struct cvmx_led_udd_datx_s cn56xxp1; - struct cvmx_led_udd_datx_s cn58xx; - struct cvmx_led_udd_datx_s cn58xxp1; }; union cvmx_led_udd_dat_clrx { @@ -262,12 +196,6 @@ union cvmx_led_udd_dat_clrx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_led_udd_dat_clrx_s cn38xx; - struct cvmx_led_udd_dat_clrx_s cn38xxp2; - struct cvmx_led_udd_dat_clrx_s cn56xx; - struct cvmx_led_udd_dat_clrx_s cn56xxp1; - struct cvmx_led_udd_dat_clrx_s cn58xx; - struct cvmx_led_udd_dat_clrx_s cn58xxp1; }; union cvmx_led_udd_dat_setx { @@ -281,12 +209,6 @@ union cvmx_led_udd_dat_setx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_led_udd_dat_setx_s cn38xx; - struct cvmx_led_udd_dat_setx_s cn38xxp2; - struct cvmx_led_udd_dat_setx_s cn56xx; - struct cvmx_led_udd_dat_setx_s cn56xxp1; - struct cvmx_led_udd_dat_setx_s cn58xx; - struct cvmx_led_udd_dat_setx_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h index 36f510721141..4167a4c7a28d 100644 --- a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h @@ -189,11 +189,6 @@ union cvmx_lmcx_bist_ctl { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_lmcx_bist_ctl_s cn50xx; - struct cvmx_lmcx_bist_ctl_s cn52xx; - struct cvmx_lmcx_bist_ctl_s cn52xxp1; - struct cvmx_lmcx_bist_ctl_s cn56xx; - struct cvmx_lmcx_bist_ctl_s cn56xxp1; }; union cvmx_lmcx_bist_result { @@ -236,10 +231,6 @@ union cvmx_lmcx_bist_result { uint64_t reserved_9_63:55; #endif } cn50xx; - struct cvmx_lmcx_bist_result_s cn52xx; - struct cvmx_lmcx_bist_result_s cn52xxp1; - struct cvmx_lmcx_bist_result_s cn56xx; - struct cvmx_lmcx_bist_result_s cn56xxp1; }; union cvmx_lmcx_char_ctl { @@ -263,7 +254,6 @@ union cvmx_lmcx_char_ctl { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_lmcx_char_ctl_s cn61xx; struct cvmx_lmcx_char_ctl_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_42_63:22; @@ -279,11 +269,6 @@ union cvmx_lmcx_char_ctl { uint64_t reserved_42_63:22; #endif } cn63xx; - struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1; - struct cvmx_lmcx_char_ctl_s cn66xx; - struct cvmx_lmcx_char_ctl_s cn68xx; - struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1; - struct cvmx_lmcx_char_ctl_s cnf71xx; }; union cvmx_lmcx_char_mask0 { @@ -295,13 +280,6 @@ union cvmx_lmcx_char_mask0 { uint64_t mask:64; #endif } s; - struct cvmx_lmcx_char_mask0_s cn61xx; - struct cvmx_lmcx_char_mask0_s cn63xx; - struct cvmx_lmcx_char_mask0_s cn63xxp1; - struct 
cvmx_lmcx_char_mask0_s cn66xx; - struct cvmx_lmcx_char_mask0_s cn68xx; - struct cvmx_lmcx_char_mask0_s cn68xxp1; - struct cvmx_lmcx_char_mask0_s cnf71xx; }; union cvmx_lmcx_char_mask1 { @@ -315,13 +293,6 @@ union cvmx_lmcx_char_mask1 { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_lmcx_char_mask1_s cn61xx; - struct cvmx_lmcx_char_mask1_s cn63xx; - struct cvmx_lmcx_char_mask1_s cn63xxp1; - struct cvmx_lmcx_char_mask1_s cn66xx; - struct cvmx_lmcx_char_mask1_s cn68xx; - struct cvmx_lmcx_char_mask1_s cn68xxp1; - struct cvmx_lmcx_char_mask1_s cnf71xx; }; union cvmx_lmcx_char_mask2 { @@ -333,13 +304,6 @@ union cvmx_lmcx_char_mask2 { uint64_t mask:64; #endif } s; - struct cvmx_lmcx_char_mask2_s cn61xx; - struct cvmx_lmcx_char_mask2_s cn63xx; - struct cvmx_lmcx_char_mask2_s cn63xxp1; - struct cvmx_lmcx_char_mask2_s cn66xx; - struct cvmx_lmcx_char_mask2_s cn68xx; - struct cvmx_lmcx_char_mask2_s cn68xxp1; - struct cvmx_lmcx_char_mask2_s cnf71xx; }; union cvmx_lmcx_char_mask3 { @@ -353,13 +317,6 @@ union cvmx_lmcx_char_mask3 { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_lmcx_char_mask3_s cn61xx; - struct cvmx_lmcx_char_mask3_s cn63xx; - struct cvmx_lmcx_char_mask3_s cn63xxp1; - struct cvmx_lmcx_char_mask3_s cn66xx; - struct cvmx_lmcx_char_mask3_s cn68xx; - struct cvmx_lmcx_char_mask3_s cn68xxp1; - struct cvmx_lmcx_char_mask3_s cnf71xx; }; union cvmx_lmcx_char_mask4 { @@ -393,13 +350,6 @@ union cvmx_lmcx_char_mask4 { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_lmcx_char_mask4_s cn61xx; - struct cvmx_lmcx_char_mask4_s cn63xx; - struct cvmx_lmcx_char_mask4_s cn63xxp1; - struct cvmx_lmcx_char_mask4_s cn66xx; - struct cvmx_lmcx_char_mask4_s cn68xx; - struct cvmx_lmcx_char_mask4_s cn68xxp1; - struct cvmx_lmcx_char_mask4_s cnf71xx; }; union cvmx_lmcx_comp_ctl { @@ -448,9 +398,6 @@ union cvmx_lmcx_comp_ctl { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_lmcx_comp_ctl_cn30xx cn31xx; - struct cvmx_lmcx_comp_ctl_cn30xx cn38xx; - struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2; struct cvmx_lmcx_comp_ctl_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -470,11 +417,6 @@ union cvmx_lmcx_comp_ctl { uint64_t reserved_32_63:32; #endif } cn50xx; - struct cvmx_lmcx_comp_ctl_cn50xx cn52xx; - struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1; - struct cvmx_lmcx_comp_ctl_cn50xx cn56xx; - struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1; - struct cvmx_lmcx_comp_ctl_cn50xx cn58xx; struct cvmx_lmcx_comp_ctl_cn58xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -525,13 +467,6 @@ union cvmx_lmcx_comp_ctl2 { uint64_t reserved_34_63:30; #endif } s; - struct cvmx_lmcx_comp_ctl2_s cn61xx; - struct cvmx_lmcx_comp_ctl2_s cn63xx; - struct cvmx_lmcx_comp_ctl2_s cn63xxp1; - struct cvmx_lmcx_comp_ctl2_s cn66xx; - struct cvmx_lmcx_comp_ctl2_s cn68xx; - struct cvmx_lmcx_comp_ctl2_s cn68xxp1; - struct cvmx_lmcx_comp_ctl2_s cnf71xx; }; union cvmx_lmcx_config { @@ -587,7 +522,6 @@ union cvmx_lmcx_config { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_lmcx_config_s cn61xx; struct cvmx_lmcx_config_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_59_63:5; @@ -723,9 +657,6 @@ union cvmx_lmcx_config { uint64_t reserved_60_63:4; #endif } cn66xx; - struct cvmx_lmcx_config_cn63xx cn68xx; - struct cvmx_lmcx_config_cn63xx cn68xxp1; - struct cvmx_lmcx_config_s cnf71xx; }; union cvmx_lmcx_control { @@ -787,7 +718,6 @@ union cvmx_lmcx_control { uint64_t scramble_ena:1; #endif } s; - struct cvmx_lmcx_control_s cn61xx; struct cvmx_lmcx_control_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD 
uint64_t reserved_24_63:40; @@ -833,7 +763,6 @@ union cvmx_lmcx_control { uint64_t reserved_24_63:40; #endif } cn63xx; - struct cvmx_lmcx_control_cn63xx cn63xxp1; struct cvmx_lmcx_control_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t scramble_ena:1; @@ -938,8 +867,6 @@ union cvmx_lmcx_control { uint64_t reserved_63_63:1; #endif } cn68xx; - struct cvmx_lmcx_control_cn68xx cn68xxp1; - struct cvmx_lmcx_control_cn66xx cnf71xx; }; union cvmx_lmcx_ctl { @@ -1032,7 +959,6 @@ union cvmx_lmcx_ctl { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_lmcx_ctl_cn30xx cn31xx; struct cvmx_lmcx_ctl_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -1076,7 +1002,6 @@ union cvmx_lmcx_ctl { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_lmcx_ctl_cn38xx cn38xxp2; struct cvmx_lmcx_ctl_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -1165,9 +1090,6 @@ union cvmx_lmcx_ctl { uint64_t reserved_32_63:32; #endif } cn52xx; - struct cvmx_lmcx_ctl_cn52xx cn52xxp1; - struct cvmx_lmcx_ctl_cn52xx cn56xx; - struct cvmx_lmcx_ctl_cn52xx cn56xxp1; struct cvmx_lmcx_ctl_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -1211,7 +1133,6 @@ union cvmx_lmcx_ctl { uint64_t reserved_32_63:32; #endif } cn58xx; - struct cvmx_lmcx_ctl_cn58xx cn58xxp1; }; union cvmx_lmcx_ctl1 { @@ -1284,9 +1205,6 @@ union cvmx_lmcx_ctl1 { uint64_t reserved_21_63:43; #endif } cn52xx; - struct cvmx_lmcx_ctl1_cn52xx cn52xxp1; - struct cvmx_lmcx_ctl1_cn52xx cn56xx; - struct cvmx_lmcx_ctl1_cn52xx cn56xxp1; struct cvmx_lmcx_ctl1_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -1300,7 +1218,6 @@ union cvmx_lmcx_ctl1 { uint64_t reserved_10_63:54; #endif } cn58xx; - struct cvmx_lmcx_ctl1_cn58xx cn58xxp1; }; union cvmx_lmcx_dclk_cnt { @@ -1312,13 +1229,6 @@ union cvmx_lmcx_dclk_cnt { uint64_t dclkcnt:64; #endif } s; - struct cvmx_lmcx_dclk_cnt_s cn61xx; - struct cvmx_lmcx_dclk_cnt_s cn63xx; - struct cvmx_lmcx_dclk_cnt_s cn63xxp1; - struct cvmx_lmcx_dclk_cnt_s cn66xx; - struct cvmx_lmcx_dclk_cnt_s cn68xx; - struct cvmx_lmcx_dclk_cnt_s cn68xxp1; - struct cvmx_lmcx_dclk_cnt_s cnf71xx; }; union cvmx_lmcx_dclk_cnt_hi { @@ -1332,17 +1242,6 @@ union cvmx_lmcx_dclk_cnt_hi { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_dclk_cnt_hi_s cn30xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn31xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn38xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2; - struct cvmx_lmcx_dclk_cnt_hi_s cn50xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn52xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1; - struct cvmx_lmcx_dclk_cnt_hi_s cn56xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1; - struct cvmx_lmcx_dclk_cnt_hi_s cn58xx; - struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1; }; union cvmx_lmcx_dclk_cnt_lo { @@ -1356,17 +1255,6 @@ union cvmx_lmcx_dclk_cnt_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_dclk_cnt_lo_s cn30xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn31xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn38xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2; - struct cvmx_lmcx_dclk_cnt_lo_s cn50xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn52xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1; - struct cvmx_lmcx_dclk_cnt_lo_s cn56xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1; - struct cvmx_lmcx_dclk_cnt_lo_s cn58xx; - struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1; }; union cvmx_lmcx_dclk_ctl { @@ -1386,8 +1274,6 @@ union cvmx_lmcx_dclk_ctl { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_lmcx_dclk_ctl_s cn56xx; - struct cvmx_lmcx_dclk_ctl_s cn56xxp1; }; union cvmx_lmcx_ddr2_ctl { @@ -1474,16 
+1360,6 @@ union cvmx_lmcx_ddr2_ctl { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx; - struct cvmx_lmcx_ddr2_ctl_s cn38xx; - struct cvmx_lmcx_ddr2_ctl_s cn38xxp2; - struct cvmx_lmcx_ddr2_ctl_s cn50xx; - struct cvmx_lmcx_ddr2_ctl_s cn52xx; - struct cvmx_lmcx_ddr2_ctl_s cn52xxp1; - struct cvmx_lmcx_ddr2_ctl_s cn56xx; - struct cvmx_lmcx_ddr2_ctl_s cn56xxp1; - struct cvmx_lmcx_ddr2_ctl_s cn58xx; - struct cvmx_lmcx_ddr2_ctl_s cn58xxp1; }; union cvmx_lmcx_ddr_pll_ctl { @@ -1515,13 +1391,6 @@ union cvmx_lmcx_ddr_pll_ctl { uint64_t reserved_27_63:37; #endif } s; - struct cvmx_lmcx_ddr_pll_ctl_s cn61xx; - struct cvmx_lmcx_ddr_pll_ctl_s cn63xx; - struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1; - struct cvmx_lmcx_ddr_pll_ctl_s cn66xx; - struct cvmx_lmcx_ddr_pll_ctl_s cn68xx; - struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1; - struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx; }; union cvmx_lmcx_delay_cfg { @@ -1539,7 +1408,6 @@ union cvmx_lmcx_delay_cfg { uint64_t reserved_15_63:49; #endif } s; - struct cvmx_lmcx_delay_cfg_s cn30xx; struct cvmx_lmcx_delay_cfg_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; @@ -1557,13 +1425,6 @@ union cvmx_lmcx_delay_cfg { uint64_t reserved_14_63:50; #endif } cn38xx; - struct cvmx_lmcx_delay_cfg_cn38xx cn50xx; - struct cvmx_lmcx_delay_cfg_cn38xx cn52xx; - struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1; - struct cvmx_lmcx_delay_cfg_cn38xx cn56xx; - struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1; - struct cvmx_lmcx_delay_cfg_cn38xx cn58xx; - struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1; }; union cvmx_lmcx_dimmx_params { @@ -1605,13 +1466,6 @@ union cvmx_lmcx_dimmx_params { uint64_t rc15:4; #endif } s; - struct cvmx_lmcx_dimmx_params_s cn61xx; - struct cvmx_lmcx_dimmx_params_s cn63xx; - struct cvmx_lmcx_dimmx_params_s cn63xxp1; - struct cvmx_lmcx_dimmx_params_s cn66xx; - struct cvmx_lmcx_dimmx_params_s cn68xx; - struct cvmx_lmcx_dimmx_params_s cn68xxp1; - struct cvmx_lmcx_dimmx_params_s cnf71xx; }; union cvmx_lmcx_dimm_ctl { @@ -1631,13 +1485,6 @@ union cvmx_lmcx_dimm_ctl { uint64_t reserved_46_63:18; #endif } s; - struct cvmx_lmcx_dimm_ctl_s cn61xx; - struct cvmx_lmcx_dimm_ctl_s cn63xx; - struct cvmx_lmcx_dimm_ctl_s cn63xxp1; - struct cvmx_lmcx_dimm_ctl_s cn66xx; - struct cvmx_lmcx_dimm_ctl_s cn68xx; - struct cvmx_lmcx_dimm_ctl_s cn68xxp1; - struct cvmx_lmcx_dimm_ctl_s cnf71xx; }; union cvmx_lmcx_dll_ctl { @@ -1657,10 +1504,6 @@ union cvmx_lmcx_dll_ctl { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_lmcx_dll_ctl_s cn52xx; - struct cvmx_lmcx_dll_ctl_s cn52xxp1; - struct cvmx_lmcx_dll_ctl_s cn56xx; - struct cvmx_lmcx_dll_ctl_s cn56xxp1; }; union cvmx_lmcx_dll_ctl2 { @@ -1684,7 +1527,6 @@ union cvmx_lmcx_dll_ctl2 { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_lmcx_dll_ctl2_s cn61xx; struct cvmx_lmcx_dll_ctl2_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_15_63:49; @@ -1702,11 +1544,6 @@ union cvmx_lmcx_dll_ctl2 { uint64_t reserved_15_63:49; #endif } cn63xx; - struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1; - struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx; - struct cvmx_lmcx_dll_ctl2_s cn68xx; - struct cvmx_lmcx_dll_ctl2_s cn68xxp1; - struct cvmx_lmcx_dll_ctl2_s cnf71xx; }; union cvmx_lmcx_dll_ctl3 { @@ -1748,7 +1585,6 @@ union cvmx_lmcx_dll_ctl3 { uint64_t reserved_41_63:23; #endif } s; - struct cvmx_lmcx_dll_ctl3_s cn61xx; struct cvmx_lmcx_dll_ctl3_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -1776,11 +1612,6 @@ union cvmx_lmcx_dll_ctl3 { uint64_t reserved_29_63:35; #endif } cn63xx; - struct 
cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1; - struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx; - struct cvmx_lmcx_dll_ctl3_s cn68xx; - struct cvmx_lmcx_dll_ctl3_s cn68xxp1; - struct cvmx_lmcx_dll_ctl3_s cnf71xx; }; union cvmx_lmcx_dual_memcfg { @@ -1800,13 +1631,6 @@ union cvmx_lmcx_dual_memcfg { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_lmcx_dual_memcfg_s cn50xx; - struct cvmx_lmcx_dual_memcfg_s cn52xx; - struct cvmx_lmcx_dual_memcfg_s cn52xxp1; - struct cvmx_lmcx_dual_memcfg_s cn56xx; - struct cvmx_lmcx_dual_memcfg_s cn56xxp1; - struct cvmx_lmcx_dual_memcfg_s cn58xx; - struct cvmx_lmcx_dual_memcfg_s cn58xxp1; struct cvmx_lmcx_dual_memcfg_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_19_63:45; @@ -1820,12 +1644,6 @@ union cvmx_lmcx_dual_memcfg { uint64_t reserved_19_63:45; #endif } cn61xx; - struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx; - struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1; - struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx; - struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx; - struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1; - struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx; }; union cvmx_lmcx_ecc_synd { @@ -1845,24 +1663,6 @@ union cvmx_lmcx_ecc_synd { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_ecc_synd_s cn30xx; - struct cvmx_lmcx_ecc_synd_s cn31xx; - struct cvmx_lmcx_ecc_synd_s cn38xx; - struct cvmx_lmcx_ecc_synd_s cn38xxp2; - struct cvmx_lmcx_ecc_synd_s cn50xx; - struct cvmx_lmcx_ecc_synd_s cn52xx; - struct cvmx_lmcx_ecc_synd_s cn52xxp1; - struct cvmx_lmcx_ecc_synd_s cn56xx; - struct cvmx_lmcx_ecc_synd_s cn56xxp1; - struct cvmx_lmcx_ecc_synd_s cn58xx; - struct cvmx_lmcx_ecc_synd_s cn58xxp1; - struct cvmx_lmcx_ecc_synd_s cn61xx; - struct cvmx_lmcx_ecc_synd_s cn63xx; - struct cvmx_lmcx_ecc_synd_s cn63xxp1; - struct cvmx_lmcx_ecc_synd_s cn66xx; - struct cvmx_lmcx_ecc_synd_s cn68xx; - struct cvmx_lmcx_ecc_synd_s cn68xxp1; - struct cvmx_lmcx_ecc_synd_s cnf71xx; }; union cvmx_lmcx_fadr { @@ -1891,16 +1691,6 @@ union cvmx_lmcx_fadr { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_lmcx_fadr_cn30xx cn31xx; - struct cvmx_lmcx_fadr_cn30xx cn38xx; - struct cvmx_lmcx_fadr_cn30xx cn38xxp2; - struct cvmx_lmcx_fadr_cn30xx cn50xx; - struct cvmx_lmcx_fadr_cn30xx cn52xx; - struct cvmx_lmcx_fadr_cn30xx cn52xxp1; - struct cvmx_lmcx_fadr_cn30xx cn56xx; - struct cvmx_lmcx_fadr_cn30xx cn56xxp1; - struct cvmx_lmcx_fadr_cn30xx cn58xx; - struct cvmx_lmcx_fadr_cn30xx cn58xxp1; struct cvmx_lmcx_fadr_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_36_63:28; @@ -1918,12 +1708,6 @@ union cvmx_lmcx_fadr { uint64_t reserved_36_63:28; #endif } cn61xx; - struct cvmx_lmcx_fadr_cn61xx cn63xx; - struct cvmx_lmcx_fadr_cn61xx cn63xxp1; - struct cvmx_lmcx_fadr_cn61xx cn66xx; - struct cvmx_lmcx_fadr_cn61xx cn68xx; - struct cvmx_lmcx_fadr_cn61xx cn68xxp1; - struct cvmx_lmcx_fadr_cn61xx cnf71xx; }; union cvmx_lmcx_ifb_cnt { @@ -1935,13 +1719,6 @@ union cvmx_lmcx_ifb_cnt { uint64_t ifbcnt:64; #endif } s; - struct cvmx_lmcx_ifb_cnt_s cn61xx; - struct cvmx_lmcx_ifb_cnt_s cn63xx; - struct cvmx_lmcx_ifb_cnt_s cn63xxp1; - struct cvmx_lmcx_ifb_cnt_s cn66xx; - struct cvmx_lmcx_ifb_cnt_s cn68xx; - struct cvmx_lmcx_ifb_cnt_s cn68xxp1; - struct cvmx_lmcx_ifb_cnt_s cnf71xx; }; union cvmx_lmcx_ifb_cnt_hi { @@ -1955,17 +1732,6 @@ union cvmx_lmcx_ifb_cnt_hi { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_ifb_cnt_hi_s cn30xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn31xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn38xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2; - struct cvmx_lmcx_ifb_cnt_hi_s cn50xx; - struct 
cvmx_lmcx_ifb_cnt_hi_s cn52xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1; - struct cvmx_lmcx_ifb_cnt_hi_s cn56xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1; - struct cvmx_lmcx_ifb_cnt_hi_s cn58xx; - struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1; }; union cvmx_lmcx_ifb_cnt_lo { @@ -1979,17 +1745,6 @@ union cvmx_lmcx_ifb_cnt_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_ifb_cnt_lo_s cn30xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn31xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn38xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2; - struct cvmx_lmcx_ifb_cnt_lo_s cn50xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn52xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1; - struct cvmx_lmcx_ifb_cnt_lo_s cn56xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1; - struct cvmx_lmcx_ifb_cnt_lo_s cn58xx; - struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1; }; union cvmx_lmcx_int { @@ -2007,13 +1762,6 @@ union cvmx_lmcx_int { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_lmcx_int_s cn61xx; - struct cvmx_lmcx_int_s cn63xx; - struct cvmx_lmcx_int_s cn63xxp1; - struct cvmx_lmcx_int_s cn66xx; - struct cvmx_lmcx_int_s cn68xx; - struct cvmx_lmcx_int_s cn68xxp1; - struct cvmx_lmcx_int_s cnf71xx; }; union cvmx_lmcx_int_en { @@ -2031,13 +1779,6 @@ union cvmx_lmcx_int_en { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_lmcx_int_en_s cn61xx; - struct cvmx_lmcx_int_en_s cn63xx; - struct cvmx_lmcx_int_en_s cn63xxp1; - struct cvmx_lmcx_int_en_s cn66xx; - struct cvmx_lmcx_int_en_s cn68xx; - struct cvmx_lmcx_int_en_s cn68xxp1; - struct cvmx_lmcx_int_en_s cnf71xx; }; union cvmx_lmcx_mem_cfg0 { @@ -2075,17 +1816,6 @@ union cvmx_lmcx_mem_cfg0 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_mem_cfg0_s cn30xx; - struct cvmx_lmcx_mem_cfg0_s cn31xx; - struct cvmx_lmcx_mem_cfg0_s cn38xx; - struct cvmx_lmcx_mem_cfg0_s cn38xxp2; - struct cvmx_lmcx_mem_cfg0_s cn50xx; - struct cvmx_lmcx_mem_cfg0_s cn52xx; - struct cvmx_lmcx_mem_cfg0_s cn52xxp1; - struct cvmx_lmcx_mem_cfg0_s cn56xx; - struct cvmx_lmcx_mem_cfg0_s cn56xxp1; - struct cvmx_lmcx_mem_cfg0_s cn58xx; - struct cvmx_lmcx_mem_cfg0_s cn58xxp1; }; union cvmx_lmcx_mem_cfg1 { @@ -2115,8 +1845,6 @@ union cvmx_lmcx_mem_cfg1 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_mem_cfg1_s cn30xx; - struct cvmx_lmcx_mem_cfg1_s cn31xx; struct cvmx_lmcx_mem_cfg1_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_31_63:33; @@ -2140,14 +1868,6 @@ union cvmx_lmcx_mem_cfg1 { uint64_t reserved_31_63:33; #endif } cn38xx; - struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2; - struct cvmx_lmcx_mem_cfg1_s cn50xx; - struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx; - struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1; - struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx; - struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1; - struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx; - struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1; }; union cvmx_lmcx_modereg_params0 { @@ -2189,13 +1909,6 @@ union cvmx_lmcx_modereg_params0 { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_lmcx_modereg_params0_s cn61xx; - struct cvmx_lmcx_modereg_params0_s cn63xx; - struct cvmx_lmcx_modereg_params0_s cn63xxp1; - struct cvmx_lmcx_modereg_params0_s cn66xx; - struct cvmx_lmcx_modereg_params0_s cn68xx; - struct cvmx_lmcx_modereg_params0_s cn68xxp1; - struct cvmx_lmcx_modereg_params0_s cnf71xx; }; union cvmx_lmcx_modereg_params1 { @@ -2255,13 +1968,6 @@ union cvmx_lmcx_modereg_params1 { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_lmcx_modereg_params1_s cn61xx; - struct cvmx_lmcx_modereg_params1_s cn63xx; - struct cvmx_lmcx_modereg_params1_s cn63xxp1; - struct 
cvmx_lmcx_modereg_params1_s cn66xx; - struct cvmx_lmcx_modereg_params1_s cn68xx; - struct cvmx_lmcx_modereg_params1_s cn68xxp1; - struct cvmx_lmcx_modereg_params1_s cnf71xx; }; union cvmx_lmcx_nxm { @@ -2300,15 +2006,6 @@ union cvmx_lmcx_nxm { uint64_t reserved_8_63:56; #endif } cn52xx; - struct cvmx_lmcx_nxm_cn52xx cn56xx; - struct cvmx_lmcx_nxm_cn52xx cn58xx; - struct cvmx_lmcx_nxm_s cn61xx; - struct cvmx_lmcx_nxm_s cn63xx; - struct cvmx_lmcx_nxm_s cn63xxp1; - struct cvmx_lmcx_nxm_s cn66xx; - struct cvmx_lmcx_nxm_s cn68xx; - struct cvmx_lmcx_nxm_s cn68xxp1; - struct cvmx_lmcx_nxm_s cnf71xx; }; union cvmx_lmcx_ops_cnt { @@ -2320,13 +2017,6 @@ union cvmx_lmcx_ops_cnt { uint64_t opscnt:64; #endif } s; - struct cvmx_lmcx_ops_cnt_s cn61xx; - struct cvmx_lmcx_ops_cnt_s cn63xx; - struct cvmx_lmcx_ops_cnt_s cn63xxp1; - struct cvmx_lmcx_ops_cnt_s cn66xx; - struct cvmx_lmcx_ops_cnt_s cn68xx; - struct cvmx_lmcx_ops_cnt_s cn68xxp1; - struct cvmx_lmcx_ops_cnt_s cnf71xx; }; union cvmx_lmcx_ops_cnt_hi { @@ -2340,17 +2030,6 @@ union cvmx_lmcx_ops_cnt_hi { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_ops_cnt_hi_s cn30xx; - struct cvmx_lmcx_ops_cnt_hi_s cn31xx; - struct cvmx_lmcx_ops_cnt_hi_s cn38xx; - struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2; - struct cvmx_lmcx_ops_cnt_hi_s cn50xx; - struct cvmx_lmcx_ops_cnt_hi_s cn52xx; - struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1; - struct cvmx_lmcx_ops_cnt_hi_s cn56xx; - struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1; - struct cvmx_lmcx_ops_cnt_hi_s cn58xx; - struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1; }; union cvmx_lmcx_ops_cnt_lo { @@ -2364,17 +2043,6 @@ union cvmx_lmcx_ops_cnt_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_ops_cnt_lo_s cn30xx; - struct cvmx_lmcx_ops_cnt_lo_s cn31xx; - struct cvmx_lmcx_ops_cnt_lo_s cn38xx; - struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2; - struct cvmx_lmcx_ops_cnt_lo_s cn50xx; - struct cvmx_lmcx_ops_cnt_lo_s cn52xx; - struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1; - struct cvmx_lmcx_ops_cnt_lo_s cn56xx; - struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1; - struct cvmx_lmcx_ops_cnt_lo_s cn58xx; - struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1; }; union cvmx_lmcx_phy_ctl { @@ -2404,8 +2072,6 @@ union cvmx_lmcx_phy_ctl { uint64_t reserved_15_63:49; #endif } s; - struct cvmx_lmcx_phy_ctl_s cn61xx; - struct cvmx_lmcx_phy_ctl_s cn63xx; struct cvmx_lmcx_phy_ctl_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; @@ -2429,10 +2095,6 @@ union cvmx_lmcx_phy_ctl { uint64_t reserved_14_63:50; #endif } cn63xxp1; - struct cvmx_lmcx_phy_ctl_s cn66xx; - struct cvmx_lmcx_phy_ctl_s cn68xx; - struct cvmx_lmcx_phy_ctl_s cn68xxp1; - struct cvmx_lmcx_phy_ctl_s cnf71xx; }; union cvmx_lmcx_pll_bwctl { @@ -2448,10 +2110,6 @@ union cvmx_lmcx_pll_bwctl { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_lmcx_pll_bwctl_s cn30xx; - struct cvmx_lmcx_pll_bwctl_s cn31xx; - struct cvmx_lmcx_pll_bwctl_s cn38xx; - struct cvmx_lmcx_pll_bwctl_s cn38xxp2; }; union cvmx_lmcx_pll_ctl { @@ -2520,9 +2178,6 @@ union cvmx_lmcx_pll_ctl { uint64_t reserved_29_63:35; #endif } cn50xx; - struct cvmx_lmcx_pll_ctl_s cn52xx; - struct cvmx_lmcx_pll_ctl_s cn52xxp1; - struct cvmx_lmcx_pll_ctl_cn50xx cn56xx; struct cvmx_lmcx_pll_ctl_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -2552,8 +2207,6 @@ union cvmx_lmcx_pll_ctl { uint64_t reserved_28_63:36; #endif } cn56xxp1; - struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx; - struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1; }; union cvmx_lmcx_pll_status { @@ -2575,12 +2228,6 @@ union cvmx_lmcx_pll_status { uint64_t 
reserved_32_63:32; #endif } s; - struct cvmx_lmcx_pll_status_s cn50xx; - struct cvmx_lmcx_pll_status_s cn52xx; - struct cvmx_lmcx_pll_status_s cn52xxp1; - struct cvmx_lmcx_pll_status_s cn56xx; - struct cvmx_lmcx_pll_status_s cn56xxp1; - struct cvmx_lmcx_pll_status_s cn58xx; struct cvmx_lmcx_pll_status_cn58xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -2615,10 +2262,6 @@ union cvmx_lmcx_read_level_ctl { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_lmcx_read_level_ctl_s cn52xx; - struct cvmx_lmcx_read_level_ctl_s cn52xxp1; - struct cvmx_lmcx_read_level_ctl_s cn56xx; - struct cvmx_lmcx_read_level_ctl_s cn56xxp1; }; union cvmx_lmcx_read_level_dbg { @@ -2636,10 +2279,6 @@ union cvmx_lmcx_read_level_dbg { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_read_level_dbg_s cn52xx; - struct cvmx_lmcx_read_level_dbg_s cn52xxp1; - struct cvmx_lmcx_read_level_dbg_s cn56xx; - struct cvmx_lmcx_read_level_dbg_s cn56xxp1; }; union cvmx_lmcx_read_level_rankx { @@ -2671,10 +2310,6 @@ union cvmx_lmcx_read_level_rankx { uint64_t reserved_38_63:26; #endif } s; - struct cvmx_lmcx_read_level_rankx_s cn52xx; - struct cvmx_lmcx_read_level_rankx_s cn52xxp1; - struct cvmx_lmcx_read_level_rankx_s cn56xx; - struct cvmx_lmcx_read_level_rankx_s cn56xxp1; }; union cvmx_lmcx_reset_ctl { @@ -2694,13 +2329,6 @@ union cvmx_lmcx_reset_ctl { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_lmcx_reset_ctl_s cn61xx; - struct cvmx_lmcx_reset_ctl_s cn63xx; - struct cvmx_lmcx_reset_ctl_s cn63xxp1; - struct cvmx_lmcx_reset_ctl_s cn66xx; - struct cvmx_lmcx_reset_ctl_s cn68xx; - struct cvmx_lmcx_reset_ctl_s cn68xxp1; - struct cvmx_lmcx_reset_ctl_s cnf71xx; }; union cvmx_lmcx_rlevel_ctl { @@ -2730,8 +2358,6 @@ union cvmx_lmcx_rlevel_ctl { uint64_t reserved_22_63:42; #endif } s; - struct cvmx_lmcx_rlevel_ctl_s cn61xx; - struct cvmx_lmcx_rlevel_ctl_s cn63xx; struct cvmx_lmcx_rlevel_ctl_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -2745,10 +2371,6 @@ union cvmx_lmcx_rlevel_ctl { uint64_t reserved_9_63:55; #endif } cn63xxp1; - struct cvmx_lmcx_rlevel_ctl_s cn66xx; - struct cvmx_lmcx_rlevel_ctl_s cn68xx; - struct cvmx_lmcx_rlevel_ctl_s cn68xxp1; - struct cvmx_lmcx_rlevel_ctl_s cnf71xx; }; union cvmx_lmcx_rlevel_dbg { @@ -2760,13 +2382,6 @@ union cvmx_lmcx_rlevel_dbg { uint64_t bitmask:64; #endif } s; - struct cvmx_lmcx_rlevel_dbg_s cn61xx; - struct cvmx_lmcx_rlevel_dbg_s cn63xx; - struct cvmx_lmcx_rlevel_dbg_s cn63xxp1; - struct cvmx_lmcx_rlevel_dbg_s cn66xx; - struct cvmx_lmcx_rlevel_dbg_s cn68xx; - struct cvmx_lmcx_rlevel_dbg_s cn68xxp1; - struct cvmx_lmcx_rlevel_dbg_s cnf71xx; }; union cvmx_lmcx_rlevel_rankx { @@ -2798,13 +2413,6 @@ union cvmx_lmcx_rlevel_rankx { uint64_t reserved_56_63:8; #endif } s; - struct cvmx_lmcx_rlevel_rankx_s cn61xx; - struct cvmx_lmcx_rlevel_rankx_s cn63xx; - struct cvmx_lmcx_rlevel_rankx_s cn63xxp1; - struct cvmx_lmcx_rlevel_rankx_s cn66xx; - struct cvmx_lmcx_rlevel_rankx_s cn68xx; - struct cvmx_lmcx_rlevel_rankx_s cn68xxp1; - struct cvmx_lmcx_rlevel_rankx_s cnf71xx; }; union cvmx_lmcx_rodt_comp_ctl { @@ -2826,13 +2434,6 @@ union cvmx_lmcx_rodt_comp_ctl { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_lmcx_rodt_comp_ctl_s cn50xx; - struct cvmx_lmcx_rodt_comp_ctl_s cn52xx; - struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1; - struct cvmx_lmcx_rodt_comp_ctl_s cn56xx; - struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1; - struct cvmx_lmcx_rodt_comp_ctl_s cn58xx; - struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1; }; union cvmx_lmcx_rodt_ctl { @@ -2860,17 +2461,6 
@@ union cvmx_lmcx_rodt_ctl { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_rodt_ctl_s cn30xx; - struct cvmx_lmcx_rodt_ctl_s cn31xx; - struct cvmx_lmcx_rodt_ctl_s cn38xx; - struct cvmx_lmcx_rodt_ctl_s cn38xxp2; - struct cvmx_lmcx_rodt_ctl_s cn50xx; - struct cvmx_lmcx_rodt_ctl_s cn52xx; - struct cvmx_lmcx_rodt_ctl_s cn52xxp1; - struct cvmx_lmcx_rodt_ctl_s cn56xx; - struct cvmx_lmcx_rodt_ctl_s cn56xxp1; - struct cvmx_lmcx_rodt_ctl_s cn58xx; - struct cvmx_lmcx_rodt_ctl_s cn58xxp1; }; union cvmx_lmcx_rodt_mask { @@ -2896,13 +2486,6 @@ union cvmx_lmcx_rodt_mask { uint64_t rodt_d3_r1:8; #endif } s; - struct cvmx_lmcx_rodt_mask_s cn61xx; - struct cvmx_lmcx_rodt_mask_s cn63xx; - struct cvmx_lmcx_rodt_mask_s cn63xxp1; - struct cvmx_lmcx_rodt_mask_s cn66xx; - struct cvmx_lmcx_rodt_mask_s cn68xx; - struct cvmx_lmcx_rodt_mask_s cn68xxp1; - struct cvmx_lmcx_rodt_mask_s cnf71xx; }; union cvmx_lmcx_scramble_cfg0 { @@ -2914,9 +2497,6 @@ union cvmx_lmcx_scramble_cfg0 { uint64_t key:64; #endif } s; - struct cvmx_lmcx_scramble_cfg0_s cn61xx; - struct cvmx_lmcx_scramble_cfg0_s cn66xx; - struct cvmx_lmcx_scramble_cfg0_s cnf71xx; }; union cvmx_lmcx_scramble_cfg1 { @@ -2928,9 +2508,6 @@ union cvmx_lmcx_scramble_cfg1 { uint64_t key:64; #endif } s; - struct cvmx_lmcx_scramble_cfg1_s cn61xx; - struct cvmx_lmcx_scramble_cfg1_s cn66xx; - struct cvmx_lmcx_scramble_cfg1_s cnf71xx; }; union cvmx_lmcx_scrambled_fadr { @@ -2952,9 +2529,6 @@ union cvmx_lmcx_scrambled_fadr { uint64_t reserved_36_63:28; #endif } s; - struct cvmx_lmcx_scrambled_fadr_s cn61xx; - struct cvmx_lmcx_scrambled_fadr_s cn66xx; - struct cvmx_lmcx_scrambled_fadr_s cnf71xx; }; union cvmx_lmcx_slot_ctl0 { @@ -2974,13 +2548,6 @@ union cvmx_lmcx_slot_ctl0 { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_lmcx_slot_ctl0_s cn61xx; - struct cvmx_lmcx_slot_ctl0_s cn63xx; - struct cvmx_lmcx_slot_ctl0_s cn63xxp1; - struct cvmx_lmcx_slot_ctl0_s cn66xx; - struct cvmx_lmcx_slot_ctl0_s cn68xx; - struct cvmx_lmcx_slot_ctl0_s cn68xxp1; - struct cvmx_lmcx_slot_ctl0_s cnf71xx; }; union cvmx_lmcx_slot_ctl1 { @@ -3000,13 +2567,6 @@ union cvmx_lmcx_slot_ctl1 { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_lmcx_slot_ctl1_s cn61xx; - struct cvmx_lmcx_slot_ctl1_s cn63xx; - struct cvmx_lmcx_slot_ctl1_s cn63xxp1; - struct cvmx_lmcx_slot_ctl1_s cn66xx; - struct cvmx_lmcx_slot_ctl1_s cn68xx; - struct cvmx_lmcx_slot_ctl1_s cn68xxp1; - struct cvmx_lmcx_slot_ctl1_s cnf71xx; }; union cvmx_lmcx_slot_ctl2 { @@ -3026,13 +2586,6 @@ union cvmx_lmcx_slot_ctl2 { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_lmcx_slot_ctl2_s cn61xx; - struct cvmx_lmcx_slot_ctl2_s cn63xx; - struct cvmx_lmcx_slot_ctl2_s cn63xxp1; - struct cvmx_lmcx_slot_ctl2_s cn66xx; - struct cvmx_lmcx_slot_ctl2_s cn68xx; - struct cvmx_lmcx_slot_ctl2_s cn68xxp1; - struct cvmx_lmcx_slot_ctl2_s cnf71xx; }; union cvmx_lmcx_timing_params0 { @@ -3095,7 +2648,6 @@ union cvmx_lmcx_timing_params0 { uint64_t reserved_47_63:17; #endif } cn61xx; - struct cvmx_lmcx_timing_params0_cn61xx cn63xx; struct cvmx_lmcx_timing_params0_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_46_63:18; @@ -3123,10 +2675,6 @@ union cvmx_lmcx_timing_params0 { uint64_t reserved_46_63:18; #endif } cn63xxp1; - struct cvmx_lmcx_timing_params0_cn61xx cn66xx; - struct cvmx_lmcx_timing_params0_cn61xx cn68xx; - struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1; - struct cvmx_lmcx_timing_params0_cn61xx cnf71xx; }; union cvmx_lmcx_timing_params1 { @@ -3162,8 +2710,6 @@ union cvmx_lmcx_timing_params1 { uint64_t 
reserved_47_63:17; #endif } s; - struct cvmx_lmcx_timing_params1_s cn61xx; - struct cvmx_lmcx_timing_params1_s cn63xx; struct cvmx_lmcx_timing_params1_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_46_63:18; @@ -3193,10 +2739,6 @@ union cvmx_lmcx_timing_params1 { uint64_t reserved_46_63:18; #endif } cn63xxp1; - struct cvmx_lmcx_timing_params1_s cn66xx; - struct cvmx_lmcx_timing_params1_s cn68xx; - struct cvmx_lmcx_timing_params1_s cn68xxp1; - struct cvmx_lmcx_timing_params1_s cnf71xx; }; union cvmx_lmcx_tro_ctl { @@ -3212,13 +2754,6 @@ union cvmx_lmcx_tro_ctl { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_lmcx_tro_ctl_s cn61xx; - struct cvmx_lmcx_tro_ctl_s cn63xx; - struct cvmx_lmcx_tro_ctl_s cn63xxp1; - struct cvmx_lmcx_tro_ctl_s cn66xx; - struct cvmx_lmcx_tro_ctl_s cn68xx; - struct cvmx_lmcx_tro_ctl_s cn68xxp1; - struct cvmx_lmcx_tro_ctl_s cnf71xx; }; union cvmx_lmcx_tro_stat { @@ -3232,13 +2767,6 @@ union cvmx_lmcx_tro_stat { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_tro_stat_s cn61xx; - struct cvmx_lmcx_tro_stat_s cn63xx; - struct cvmx_lmcx_tro_stat_s cn63xxp1; - struct cvmx_lmcx_tro_stat_s cn66xx; - struct cvmx_lmcx_tro_stat_s cn68xx; - struct cvmx_lmcx_tro_stat_s cn68xxp1; - struct cvmx_lmcx_tro_stat_s cnf71xx; }; union cvmx_lmcx_wlevel_ctl { @@ -3260,8 +2788,6 @@ union cvmx_lmcx_wlevel_ctl { uint64_t reserved_22_63:42; #endif } s; - struct cvmx_lmcx_wlevel_ctl_s cn61xx; - struct cvmx_lmcx_wlevel_ctl_s cn63xx; struct cvmx_lmcx_wlevel_ctl_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -3273,10 +2799,6 @@ union cvmx_lmcx_wlevel_ctl { uint64_t reserved_10_63:54; #endif } cn63xxp1; - struct cvmx_lmcx_wlevel_ctl_s cn66xx; - struct cvmx_lmcx_wlevel_ctl_s cn68xx; - struct cvmx_lmcx_wlevel_ctl_s cn68xxp1; - struct cvmx_lmcx_wlevel_ctl_s cnf71xx; }; union cvmx_lmcx_wlevel_dbg { @@ -3292,13 +2814,6 @@ union cvmx_lmcx_wlevel_dbg { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_lmcx_wlevel_dbg_s cn61xx; - struct cvmx_lmcx_wlevel_dbg_s cn63xx; - struct cvmx_lmcx_wlevel_dbg_s cn63xxp1; - struct cvmx_lmcx_wlevel_dbg_s cn66xx; - struct cvmx_lmcx_wlevel_dbg_s cn68xx; - struct cvmx_lmcx_wlevel_dbg_s cn68xxp1; - struct cvmx_lmcx_wlevel_dbg_s cnf71xx; }; union cvmx_lmcx_wlevel_rankx { @@ -3330,13 +2845,6 @@ union cvmx_lmcx_wlevel_rankx { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_lmcx_wlevel_rankx_s cn61xx; - struct cvmx_lmcx_wlevel_rankx_s cn63xx; - struct cvmx_lmcx_wlevel_rankx_s cn63xxp1; - struct cvmx_lmcx_wlevel_rankx_s cn66xx; - struct cvmx_lmcx_wlevel_rankx_s cn68xx; - struct cvmx_lmcx_wlevel_rankx_s cn68xxp1; - struct cvmx_lmcx_wlevel_rankx_s cnf71xx; }; union cvmx_lmcx_wodt_ctl0 { @@ -3363,7 +2871,6 @@ union cvmx_lmcx_wodt_ctl0 { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx; struct cvmx_lmcx_wodt_ctl0_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -3387,14 +2894,6 @@ union cvmx_lmcx_wodt_ctl0 { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2; - struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx; - struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx; - struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1; - struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx; - struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1; - struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx; - struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1; }; union cvmx_lmcx_wodt_ctl1 { @@ -3414,12 +2913,6 @@ union cvmx_lmcx_wodt_ctl1 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_lmcx_wodt_ctl1_s cn30xx; - 
struct cvmx_lmcx_wodt_ctl1_s cn31xx; - struct cvmx_lmcx_wodt_ctl1_s cn52xx; - struct cvmx_lmcx_wodt_ctl1_s cn52xxp1; - struct cvmx_lmcx_wodt_ctl1_s cn56xx; - struct cvmx_lmcx_wodt_ctl1_s cn56xxp1; }; union cvmx_lmcx_wodt_mask { @@ -3445,13 +2938,6 @@ union cvmx_lmcx_wodt_mask { uint64_t wodt_d3_r1:8; #endif } s; - struct cvmx_lmcx_wodt_mask_s cn61xx; - struct cvmx_lmcx_wodt_mask_s cn63xx; - struct cvmx_lmcx_wodt_mask_s cn63xxp1; - struct cvmx_lmcx_wodt_mask_s cn66xx; - struct cvmx_lmcx_wodt_mask_s cn68xx; - struct cvmx_lmcx_wodt_mask_s cn68xxp1; - struct cvmx_lmcx_wodt_mask_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h index 5196c04eee41..4ad95d040bb1 100644 --- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h @@ -188,7 +188,6 @@ union cvmx_mio_boot_bist_stat { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_mio_boot_bist_stat_cn30xx cn31xx; struct cvmx_mio_boot_bist_stat_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_3_63:61; @@ -202,7 +201,6 @@ union cvmx_mio_boot_bist_stat { uint64_t reserved_3_63:61; #endif } cn38xx; - struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2; struct cvmx_mio_boot_bist_stat_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; @@ -254,10 +252,6 @@ union cvmx_mio_boot_bist_stat { uint64_t reserved_4_63:60; #endif } cn52xxp1; - struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx; - struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1; - struct cvmx_mio_boot_bist_stat_cn38xx cn58xx; - struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1; struct cvmx_mio_boot_bist_stat_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -276,7 +270,6 @@ union cvmx_mio_boot_bist_stat { uint64_t reserved_9_63:55; #endif } cn63xx; - struct cvmx_mio_boot_bist_stat_cn63xx cn63xxp1; struct cvmx_mio_boot_bist_stat_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -286,9 +279,6 @@ union cvmx_mio_boot_bist_stat { uint64_t reserved_10_63:54; #endif } cn66xx; - struct cvmx_mio_boot_bist_stat_cn66xx cn68xx; - struct cvmx_mio_boot_bist_stat_cn66xx cn68xxp1; - struct cvmx_mio_boot_bist_stat_cn61xx cnf71xx; }; union cvmx_mio_boot_comp { @@ -311,10 +301,6 @@ union cvmx_mio_boot_comp { uint64_t reserved_10_63:54; #endif } cn50xx; - struct cvmx_mio_boot_comp_cn50xx cn52xx; - struct cvmx_mio_boot_comp_cn50xx cn52xxp1; - struct cvmx_mio_boot_comp_cn50xx cn56xx; - struct cvmx_mio_boot_comp_cn50xx cn56xxp1; struct cvmx_mio_boot_comp_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_12_63:52; @@ -326,12 +312,6 @@ union cvmx_mio_boot_comp { uint64_t reserved_12_63:52; #endif } cn61xx; - struct cvmx_mio_boot_comp_cn61xx cn63xx; - struct cvmx_mio_boot_comp_cn61xx cn63xxp1; - struct cvmx_mio_boot_comp_cn61xx cn66xx; - struct cvmx_mio_boot_comp_cn61xx cn68xx; - struct cvmx_mio_boot_comp_cn61xx cn68xxp1; - struct cvmx_mio_boot_comp_cn61xx cnf71xx; }; union cvmx_mio_boot_dma_cfgx { @@ -361,17 +341,6 @@ union cvmx_mio_boot_dma_cfgx { uint64_t en:1; #endif } s; - struct cvmx_mio_boot_dma_cfgx_s cn52xx; - struct cvmx_mio_boot_dma_cfgx_s cn52xxp1; - struct cvmx_mio_boot_dma_cfgx_s cn56xx; - struct cvmx_mio_boot_dma_cfgx_s cn56xxp1; - struct cvmx_mio_boot_dma_cfgx_s cn61xx; - struct cvmx_mio_boot_dma_cfgx_s cn63xx; - struct cvmx_mio_boot_dma_cfgx_s cn63xxp1; - struct cvmx_mio_boot_dma_cfgx_s cn66xx; - struct cvmx_mio_boot_dma_cfgx_s cn68xx; - struct cvmx_mio_boot_dma_cfgx_s cn68xxp1; - struct cvmx_mio_boot_dma_cfgx_s cnf71xx; }; union 
cvmx_mio_boot_dma_intx { @@ -387,17 +356,6 @@ union cvmx_mio_boot_dma_intx { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_boot_dma_intx_s cn52xx; - struct cvmx_mio_boot_dma_intx_s cn52xxp1; - struct cvmx_mio_boot_dma_intx_s cn56xx; - struct cvmx_mio_boot_dma_intx_s cn56xxp1; - struct cvmx_mio_boot_dma_intx_s cn61xx; - struct cvmx_mio_boot_dma_intx_s cn63xx; - struct cvmx_mio_boot_dma_intx_s cn63xxp1; - struct cvmx_mio_boot_dma_intx_s cn66xx; - struct cvmx_mio_boot_dma_intx_s cn68xx; - struct cvmx_mio_boot_dma_intx_s cn68xxp1; - struct cvmx_mio_boot_dma_intx_s cnf71xx; }; union cvmx_mio_boot_dma_int_enx { @@ -413,17 +371,6 @@ union cvmx_mio_boot_dma_int_enx { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_boot_dma_int_enx_s cn52xx; - struct cvmx_mio_boot_dma_int_enx_s cn52xxp1; - struct cvmx_mio_boot_dma_int_enx_s cn56xx; - struct cvmx_mio_boot_dma_int_enx_s cn56xxp1; - struct cvmx_mio_boot_dma_int_enx_s cn61xx; - struct cvmx_mio_boot_dma_int_enx_s cn63xx; - struct cvmx_mio_boot_dma_int_enx_s cn63xxp1; - struct cvmx_mio_boot_dma_int_enx_s cn66xx; - struct cvmx_mio_boot_dma_int_enx_s cn68xx; - struct cvmx_mio_boot_dma_int_enx_s cn68xxp1; - struct cvmx_mio_boot_dma_int_enx_s cnf71xx; }; union cvmx_mio_boot_dma_timx { @@ -463,17 +410,6 @@ union cvmx_mio_boot_dma_timx { uint64_t dmack_pi:1; #endif } s; - struct cvmx_mio_boot_dma_timx_s cn52xx; - struct cvmx_mio_boot_dma_timx_s cn52xxp1; - struct cvmx_mio_boot_dma_timx_s cn56xx; - struct cvmx_mio_boot_dma_timx_s cn56xxp1; - struct cvmx_mio_boot_dma_timx_s cn61xx; - struct cvmx_mio_boot_dma_timx_s cn63xx; - struct cvmx_mio_boot_dma_timx_s cn63xxp1; - struct cvmx_mio_boot_dma_timx_s cn66xx; - struct cvmx_mio_boot_dma_timx_s cn68xx; - struct cvmx_mio_boot_dma_timx_s cn68xxp1; - struct cvmx_mio_boot_dma_timx_s cnf71xx; }; union cvmx_mio_boot_err { @@ -489,24 +425,6 @@ union cvmx_mio_boot_err { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_boot_err_s cn30xx; - struct cvmx_mio_boot_err_s cn31xx; - struct cvmx_mio_boot_err_s cn38xx; - struct cvmx_mio_boot_err_s cn38xxp2; - struct cvmx_mio_boot_err_s cn50xx; - struct cvmx_mio_boot_err_s cn52xx; - struct cvmx_mio_boot_err_s cn52xxp1; - struct cvmx_mio_boot_err_s cn56xx; - struct cvmx_mio_boot_err_s cn56xxp1; - struct cvmx_mio_boot_err_s cn58xx; - struct cvmx_mio_boot_err_s cn58xxp1; - struct cvmx_mio_boot_err_s cn61xx; - struct cvmx_mio_boot_err_s cn63xx; - struct cvmx_mio_boot_err_s cn63xxp1; - struct cvmx_mio_boot_err_s cn66xx; - struct cvmx_mio_boot_err_s cn68xx; - struct cvmx_mio_boot_err_s cn68xxp1; - struct cvmx_mio_boot_err_s cnf71xx; }; union cvmx_mio_boot_int { @@ -522,24 +440,6 @@ union cvmx_mio_boot_int { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_boot_int_s cn30xx; - struct cvmx_mio_boot_int_s cn31xx; - struct cvmx_mio_boot_int_s cn38xx; - struct cvmx_mio_boot_int_s cn38xxp2; - struct cvmx_mio_boot_int_s cn50xx; - struct cvmx_mio_boot_int_s cn52xx; - struct cvmx_mio_boot_int_s cn52xxp1; - struct cvmx_mio_boot_int_s cn56xx; - struct cvmx_mio_boot_int_s cn56xxp1; - struct cvmx_mio_boot_int_s cn58xx; - struct cvmx_mio_boot_int_s cn58xxp1; - struct cvmx_mio_boot_int_s cn61xx; - struct cvmx_mio_boot_int_s cn63xx; - struct cvmx_mio_boot_int_s cn63xxp1; - struct cvmx_mio_boot_int_s cn66xx; - struct cvmx_mio_boot_int_s cn68xx; - struct cvmx_mio_boot_int_s cn68xxp1; - struct cvmx_mio_boot_int_s cnf71xx; }; union cvmx_mio_boot_loc_adr { @@ -555,24 +455,6 @@ union cvmx_mio_boot_loc_adr { uint64_t reserved_8_63:56; #endif } s; - struct 
cvmx_mio_boot_loc_adr_s cn30xx; - struct cvmx_mio_boot_loc_adr_s cn31xx; - struct cvmx_mio_boot_loc_adr_s cn38xx; - struct cvmx_mio_boot_loc_adr_s cn38xxp2; - struct cvmx_mio_boot_loc_adr_s cn50xx; - struct cvmx_mio_boot_loc_adr_s cn52xx; - struct cvmx_mio_boot_loc_adr_s cn52xxp1; - struct cvmx_mio_boot_loc_adr_s cn56xx; - struct cvmx_mio_boot_loc_adr_s cn56xxp1; - struct cvmx_mio_boot_loc_adr_s cn58xx; - struct cvmx_mio_boot_loc_adr_s cn58xxp1; - struct cvmx_mio_boot_loc_adr_s cn61xx; - struct cvmx_mio_boot_loc_adr_s cn63xx; - struct cvmx_mio_boot_loc_adr_s cn63xxp1; - struct cvmx_mio_boot_loc_adr_s cn66xx; - struct cvmx_mio_boot_loc_adr_s cn68xx; - struct cvmx_mio_boot_loc_adr_s cn68xxp1; - struct cvmx_mio_boot_loc_adr_s cnf71xx; }; union cvmx_mio_boot_loc_cfgx { @@ -592,24 +474,6 @@ union cvmx_mio_boot_loc_cfgx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_boot_loc_cfgx_s cn30xx; - struct cvmx_mio_boot_loc_cfgx_s cn31xx; - struct cvmx_mio_boot_loc_cfgx_s cn38xx; - struct cvmx_mio_boot_loc_cfgx_s cn38xxp2; - struct cvmx_mio_boot_loc_cfgx_s cn50xx; - struct cvmx_mio_boot_loc_cfgx_s cn52xx; - struct cvmx_mio_boot_loc_cfgx_s cn52xxp1; - struct cvmx_mio_boot_loc_cfgx_s cn56xx; - struct cvmx_mio_boot_loc_cfgx_s cn56xxp1; - struct cvmx_mio_boot_loc_cfgx_s cn58xx; - struct cvmx_mio_boot_loc_cfgx_s cn58xxp1; - struct cvmx_mio_boot_loc_cfgx_s cn61xx; - struct cvmx_mio_boot_loc_cfgx_s cn63xx; - struct cvmx_mio_boot_loc_cfgx_s cn63xxp1; - struct cvmx_mio_boot_loc_cfgx_s cn66xx; - struct cvmx_mio_boot_loc_cfgx_s cn68xx; - struct cvmx_mio_boot_loc_cfgx_s cn68xxp1; - struct cvmx_mio_boot_loc_cfgx_s cnf71xx; }; union cvmx_mio_boot_loc_dat { @@ -621,24 +485,6 @@ union cvmx_mio_boot_loc_dat { uint64_t data:64; #endif } s; - struct cvmx_mio_boot_loc_dat_s cn30xx; - struct cvmx_mio_boot_loc_dat_s cn31xx; - struct cvmx_mio_boot_loc_dat_s cn38xx; - struct cvmx_mio_boot_loc_dat_s cn38xxp2; - struct cvmx_mio_boot_loc_dat_s cn50xx; - struct cvmx_mio_boot_loc_dat_s cn52xx; - struct cvmx_mio_boot_loc_dat_s cn52xxp1; - struct cvmx_mio_boot_loc_dat_s cn56xx; - struct cvmx_mio_boot_loc_dat_s cn56xxp1; - struct cvmx_mio_boot_loc_dat_s cn58xx; - struct cvmx_mio_boot_loc_dat_s cn58xxp1; - struct cvmx_mio_boot_loc_dat_s cn61xx; - struct cvmx_mio_boot_loc_dat_s cn63xx; - struct cvmx_mio_boot_loc_dat_s cn63xxp1; - struct cvmx_mio_boot_loc_dat_s cn66xx; - struct cvmx_mio_boot_loc_dat_s cn68xx; - struct cvmx_mio_boot_loc_dat_s cn68xxp1; - struct cvmx_mio_boot_loc_dat_s cnf71xx; }; union cvmx_mio_boot_pin_defs { @@ -737,12 +583,6 @@ union cvmx_mio_boot_pin_defs { uint64_t reserved_32_63:32; #endif } cn61xx; - struct cvmx_mio_boot_pin_defs_cn52xx cn63xx; - struct cvmx_mio_boot_pin_defs_cn52xx cn63xxp1; - struct cvmx_mio_boot_pin_defs_cn52xx cn66xx; - struct cvmx_mio_boot_pin_defs_cn52xx cn68xx; - struct cvmx_mio_boot_pin_defs_cn52xx cn68xxp1; - struct cvmx_mio_boot_pin_defs_cn61xx cnf71xx; }; union cvmx_mio_boot_reg_cfgx { @@ -803,7 +643,6 @@ union cvmx_mio_boot_reg_cfgx { uint64_t reserved_37_63:27; #endif } cn30xx; - struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx; struct cvmx_mio_boot_reg_cfgx_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -821,7 +660,6 @@ union cvmx_mio_boot_reg_cfgx { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2; struct cvmx_mio_boot_reg_cfgx_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_42_63:22; @@ -851,19 +689,6 @@ union cvmx_mio_boot_reg_cfgx { uint64_t reserved_42_63:22; #endif } cn50xx; - struct 
cvmx_mio_boot_reg_cfgx_s cn52xx; - struct cvmx_mio_boot_reg_cfgx_s cn52xxp1; - struct cvmx_mio_boot_reg_cfgx_s cn56xx; - struct cvmx_mio_boot_reg_cfgx_s cn56xxp1; - struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx; - struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1; - struct cvmx_mio_boot_reg_cfgx_s cn61xx; - struct cvmx_mio_boot_reg_cfgx_s cn63xx; - struct cvmx_mio_boot_reg_cfgx_s cn63xxp1; - struct cvmx_mio_boot_reg_cfgx_s cn66xx; - struct cvmx_mio_boot_reg_cfgx_s cn68xx; - struct cvmx_mio_boot_reg_cfgx_s cn68xxp1; - struct cvmx_mio_boot_reg_cfgx_s cnf71xx; }; union cvmx_mio_boot_reg_timx { @@ -899,8 +724,6 @@ union cvmx_mio_boot_reg_timx { uint64_t pagem:1; #endif } s; - struct cvmx_mio_boot_reg_timx_s cn30xx; - struct cvmx_mio_boot_reg_timx_s cn31xx; struct cvmx_mio_boot_reg_timx_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t pagem:1; @@ -932,21 +755,6 @@ union cvmx_mio_boot_reg_timx { uint64_t pagem:1; #endif } cn38xx; - struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2; - struct cvmx_mio_boot_reg_timx_s cn50xx; - struct cvmx_mio_boot_reg_timx_s cn52xx; - struct cvmx_mio_boot_reg_timx_s cn52xxp1; - struct cvmx_mio_boot_reg_timx_s cn56xx; - struct cvmx_mio_boot_reg_timx_s cn56xxp1; - struct cvmx_mio_boot_reg_timx_s cn58xx; - struct cvmx_mio_boot_reg_timx_s cn58xxp1; - struct cvmx_mio_boot_reg_timx_s cn61xx; - struct cvmx_mio_boot_reg_timx_s cn63xx; - struct cvmx_mio_boot_reg_timx_s cn63xxp1; - struct cvmx_mio_boot_reg_timx_s cn66xx; - struct cvmx_mio_boot_reg_timx_s cn68xx; - struct cvmx_mio_boot_reg_timx_s cn68xxp1; - struct cvmx_mio_boot_reg_timx_s cnf71xx; }; union cvmx_mio_boot_thr { @@ -981,23 +789,6 @@ union cvmx_mio_boot_thr { uint64_t reserved_14_63:50; #endif } cn30xx; - struct cvmx_mio_boot_thr_cn30xx cn31xx; - struct cvmx_mio_boot_thr_cn30xx cn38xx; - struct cvmx_mio_boot_thr_cn30xx cn38xxp2; - struct cvmx_mio_boot_thr_cn30xx cn50xx; - struct cvmx_mio_boot_thr_s cn52xx; - struct cvmx_mio_boot_thr_s cn52xxp1; - struct cvmx_mio_boot_thr_s cn56xx; - struct cvmx_mio_boot_thr_s cn56xxp1; - struct cvmx_mio_boot_thr_cn30xx cn58xx; - struct cvmx_mio_boot_thr_cn30xx cn58xxp1; - struct cvmx_mio_boot_thr_s cn61xx; - struct cvmx_mio_boot_thr_s cn63xx; - struct cvmx_mio_boot_thr_s cn63xxp1; - struct cvmx_mio_boot_thr_s cn66xx; - struct cvmx_mio_boot_thr_s cn68xx; - struct cvmx_mio_boot_thr_s cn68xxp1; - struct cvmx_mio_boot_thr_s cnf71xx; }; union cvmx_mio_emm_buf_dat { @@ -1009,8 +800,6 @@ union cvmx_mio_emm_buf_dat { uint64_t dat:64; #endif } s; - struct cvmx_mio_emm_buf_dat_s cn61xx; - struct cvmx_mio_emm_buf_dat_s cnf71xx; }; union cvmx_mio_emm_buf_idx { @@ -1030,8 +819,6 @@ union cvmx_mio_emm_buf_idx { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_mio_emm_buf_idx_s cn61xx; - struct cvmx_mio_emm_buf_idx_s cnf71xx; }; union cvmx_mio_emm_cfg { @@ -1049,8 +836,6 @@ union cvmx_mio_emm_cfg { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_mio_emm_cfg_s cn61xx; - struct cvmx_mio_emm_cfg_s cnf71xx; }; union cvmx_mio_emm_cmd { @@ -1082,8 +867,6 @@ union cvmx_mio_emm_cmd { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_mio_emm_cmd_s cn61xx; - struct cvmx_mio_emm_cmd_s cnf71xx; }; union cvmx_mio_emm_dma { @@ -1115,8 +898,6 @@ union cvmx_mio_emm_dma { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_mio_emm_dma_s cn61xx; - struct cvmx_mio_emm_dma_s cnf71xx; }; union cvmx_mio_emm_int { @@ -1142,8 +923,6 @@ union cvmx_mio_emm_int { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_emm_int_s cn61xx; - struct cvmx_mio_emm_int_s cnf71xx; }; union cvmx_mio_emm_int_en { @@ -1169,8 
+948,6 @@ union cvmx_mio_emm_int_en { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_emm_int_en_s cn61xx; - struct cvmx_mio_emm_int_en_s cnf71xx; }; union cvmx_mio_emm_modex { @@ -1196,8 +973,6 @@ union cvmx_mio_emm_modex { uint64_t reserved_49_63:15; #endif } s; - struct cvmx_mio_emm_modex_s cn61xx; - struct cvmx_mio_emm_modex_s cnf71xx; }; union cvmx_mio_emm_rca { @@ -1211,8 +986,6 @@ union cvmx_mio_emm_rca { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_mio_emm_rca_s cn61xx; - struct cvmx_mio_emm_rca_s cnf71xx; }; union cvmx_mio_emm_rsp_hi { @@ -1224,8 +997,6 @@ union cvmx_mio_emm_rsp_hi { uint64_t dat:64; #endif } s; - struct cvmx_mio_emm_rsp_hi_s cn61xx; - struct cvmx_mio_emm_rsp_hi_s cnf71xx; }; union cvmx_mio_emm_rsp_lo { @@ -1237,8 +1008,6 @@ union cvmx_mio_emm_rsp_lo { uint64_t dat:64; #endif } s; - struct cvmx_mio_emm_rsp_lo_s cn61xx; - struct cvmx_mio_emm_rsp_lo_s cnf71xx; }; union cvmx_mio_emm_rsp_sts { @@ -1298,8 +1067,6 @@ union cvmx_mio_emm_rsp_sts { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_mio_emm_rsp_sts_s cn61xx; - struct cvmx_mio_emm_rsp_sts_s cnf71xx; }; union cvmx_mio_emm_sample { @@ -1317,8 +1084,6 @@ union cvmx_mio_emm_sample { uint64_t reserved_26_63:38; #endif } s; - struct cvmx_mio_emm_sample_s cn61xx; - struct cvmx_mio_emm_sample_s cnf71xx; }; union cvmx_mio_emm_sts_mask { @@ -1332,8 +1097,6 @@ union cvmx_mio_emm_sts_mask { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_emm_sts_mask_s cn61xx; - struct cvmx_mio_emm_sts_mask_s cnf71xx; }; union cvmx_mio_emm_switch { @@ -1371,8 +1134,6 @@ union cvmx_mio_emm_switch { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_mio_emm_switch_s cn61xx; - struct cvmx_mio_emm_switch_s cnf71xx; }; union cvmx_mio_emm_wdog { @@ -1386,8 +1147,6 @@ union cvmx_mio_emm_wdog { uint64_t reserved_26_63:38; #endif } s; - struct cvmx_mio_emm_wdog_s cn61xx; - struct cvmx_mio_emm_wdog_s cnf71xx; }; union cvmx_mio_fus_bnk_datx { @@ -1399,20 +1158,6 @@ union cvmx_mio_fus_bnk_datx { uint64_t dat:64; #endif } s; - struct cvmx_mio_fus_bnk_datx_s cn50xx; - struct cvmx_mio_fus_bnk_datx_s cn52xx; - struct cvmx_mio_fus_bnk_datx_s cn52xxp1; - struct cvmx_mio_fus_bnk_datx_s cn56xx; - struct cvmx_mio_fus_bnk_datx_s cn56xxp1; - struct cvmx_mio_fus_bnk_datx_s cn58xx; - struct cvmx_mio_fus_bnk_datx_s cn58xxp1; - struct cvmx_mio_fus_bnk_datx_s cn61xx; - struct cvmx_mio_fus_bnk_datx_s cn63xx; - struct cvmx_mio_fus_bnk_datx_s cn63xxp1; - struct cvmx_mio_fus_bnk_datx_s cn66xx; - struct cvmx_mio_fus_bnk_datx_s cn68xx; - struct cvmx_mio_fus_bnk_datx_s cn68xxp1; - struct cvmx_mio_fus_bnk_datx_s cnf71xx; }; union cvmx_mio_fus_dat0 { @@ -1426,24 +1171,6 @@ union cvmx_mio_fus_dat0 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_fus_dat0_s cn30xx; - struct cvmx_mio_fus_dat0_s cn31xx; - struct cvmx_mio_fus_dat0_s cn38xx; - struct cvmx_mio_fus_dat0_s cn38xxp2; - struct cvmx_mio_fus_dat0_s cn50xx; - struct cvmx_mio_fus_dat0_s cn52xx; - struct cvmx_mio_fus_dat0_s cn52xxp1; - struct cvmx_mio_fus_dat0_s cn56xx; - struct cvmx_mio_fus_dat0_s cn56xxp1; - struct cvmx_mio_fus_dat0_s cn58xx; - struct cvmx_mio_fus_dat0_s cn58xxp1; - struct cvmx_mio_fus_dat0_s cn61xx; - struct cvmx_mio_fus_dat0_s cn63xx; - struct cvmx_mio_fus_dat0_s cn63xxp1; - struct cvmx_mio_fus_dat0_s cn66xx; - struct cvmx_mio_fus_dat0_s cn68xx; - struct cvmx_mio_fus_dat0_s cn68xxp1; - struct cvmx_mio_fus_dat0_s cnf71xx; }; union cvmx_mio_fus_dat1 { @@ -1457,24 +1184,6 @@ union cvmx_mio_fus_dat1 { uint64_t reserved_32_63:32; #endif } s; - struct 
cvmx_mio_fus_dat1_s cn30xx; - struct cvmx_mio_fus_dat1_s cn31xx; - struct cvmx_mio_fus_dat1_s cn38xx; - struct cvmx_mio_fus_dat1_s cn38xxp2; - struct cvmx_mio_fus_dat1_s cn50xx; - struct cvmx_mio_fus_dat1_s cn52xx; - struct cvmx_mio_fus_dat1_s cn52xxp1; - struct cvmx_mio_fus_dat1_s cn56xx; - struct cvmx_mio_fus_dat1_s cn56xxp1; - struct cvmx_mio_fus_dat1_s cn58xx; - struct cvmx_mio_fus_dat1_s cn58xxp1; - struct cvmx_mio_fus_dat1_s cn61xx; - struct cvmx_mio_fus_dat1_s cn63xx; - struct cvmx_mio_fus_dat1_s cn63xxp1; - struct cvmx_mio_fus_dat1_s cn66xx; - struct cvmx_mio_fus_dat1_s cn68xx; - struct cvmx_mio_fus_dat1_s cn68xxp1; - struct cvmx_mio_fus_dat1_s cnf71xx; }; union cvmx_mio_fus_dat2 { @@ -1591,7 +1300,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_29_63:35; #endif } cn38xx; - struct cvmx_mio_fus_dat2_cn38xx cn38xxp2; struct cvmx_mio_fus_dat2_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_34_63:30; @@ -1654,7 +1362,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_34_63:30; #endif } cn52xx; - struct cvmx_mio_fus_dat2_cn52xx cn52xxp1; struct cvmx_mio_fus_dat2_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_34_63:30; @@ -1686,7 +1393,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_34_63:30; #endif } cn56xx; - struct cvmx_mio_fus_dat2_cn56xx cn56xxp1; struct cvmx_mio_fus_dat2_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_30_63:34; @@ -1710,7 +1416,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_30_63:34; #endif } cn58xx; - struct cvmx_mio_fus_dat2_cn58xx cn58xxp1; struct cvmx_mio_fus_dat2_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; @@ -1775,7 +1480,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_35_63:29; #endif } cn63xx; - struct cvmx_mio_fus_dat2_cn63xx cn63xxp1; struct cvmx_mio_fus_dat2_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; @@ -1840,7 +1544,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_37_63:27; #endif } cn68xx; - struct cvmx_mio_fus_dat2_cn68xx cn68xxp1; struct cvmx_mio_fus_dat2_cn70xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; @@ -1874,7 +1577,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_48_63:16; #endif } cn70xx; - struct cvmx_mio_fus_dat2_cn70xx cn70xxp1; struct cvmx_mio_fus_dat2_cn73xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_59_63:5; @@ -1986,8 +1688,6 @@ union cvmx_mio_fus_dat2 { uint64_t reserved_59_63:5; #endif } cn78xxp2; - struct cvmx_mio_fus_dat2_cn61xx cnf71xx; - struct cvmx_mio_fus_dat2_cn73xx cnf75xx; }; union cvmx_mio_fus_dat3 { @@ -2115,13 +1815,6 @@ union cvmx_mio_fus_dat3 { uint64_t reserved_29_63:35; #endif } cn38xxp2; - struct cvmx_mio_fus_dat3_cn38xx cn50xx; - struct cvmx_mio_fus_dat3_cn38xx cn52xx; - struct cvmx_mio_fus_dat3_cn38xx cn52xxp1; - struct cvmx_mio_fus_dat3_cn38xx cn56xx; - struct cvmx_mio_fus_dat3_cn38xx cn56xxp1; - struct cvmx_mio_fus_dat3_cn38xx cn58xx; - struct cvmx_mio_fus_dat3_cn38xx cn58xxp1; struct cvmx_mio_fus_dat3_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_58_63:6; @@ -2163,11 +1856,6 @@ union cvmx_mio_fus_dat3 { uint64_t reserved_58_63:6; #endif } cn61xx; - struct cvmx_mio_fus_dat3_cn61xx cn63xx; - struct cvmx_mio_fus_dat3_cn61xx cn63xxp1; - struct cvmx_mio_fus_dat3_cn61xx cn66xx; - struct cvmx_mio_fus_dat3_cn61xx cn68xx; - struct cvmx_mio_fus_dat3_cn61xx cn68xxp1; struct cvmx_mio_fus_dat3_cn70xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t ema0:6; @@ -2352,8 +2040,6 @@ union cvmx_mio_fus_dat3 { uint64_t ema0:6; #endif } cn78xx; - struct cvmx_mio_fus_dat3_cn73xx cn78xxp2; - struct cvmx_mio_fus_dat3_cn61xx cnf71xx; struct 
cvmx_mio_fus_dat3_cnf75xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t ema0:6; @@ -2418,11 +2104,6 @@ union cvmx_mio_fus_ema { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_fus_ema_s cn50xx; - struct cvmx_mio_fus_ema_s cn52xx; - struct cvmx_mio_fus_ema_s cn52xxp1; - struct cvmx_mio_fus_ema_s cn56xx; - struct cvmx_mio_fus_ema_s cn56xxp1; struct cvmx_mio_fus_ema_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -2432,14 +2113,6 @@ union cvmx_mio_fus_ema { uint64_t reserved_2_63:62; #endif } cn58xx; - struct cvmx_mio_fus_ema_cn58xx cn58xxp1; - struct cvmx_mio_fus_ema_s cn61xx; - struct cvmx_mio_fus_ema_s cn63xx; - struct cvmx_mio_fus_ema_s cn63xxp1; - struct cvmx_mio_fus_ema_s cn66xx; - struct cvmx_mio_fus_ema_s cn68xx; - struct cvmx_mio_fus_ema_s cn68xxp1; - struct cvmx_mio_fus_ema_s cnf71xx; }; union cvmx_mio_fus_pdf { @@ -2451,19 +2124,6 @@ union cvmx_mio_fus_pdf { uint64_t pdf:64; #endif } s; - struct cvmx_mio_fus_pdf_s cn50xx; - struct cvmx_mio_fus_pdf_s cn52xx; - struct cvmx_mio_fus_pdf_s cn52xxp1; - struct cvmx_mio_fus_pdf_s cn56xx; - struct cvmx_mio_fus_pdf_s cn56xxp1; - struct cvmx_mio_fus_pdf_s cn58xx; - struct cvmx_mio_fus_pdf_s cn61xx; - struct cvmx_mio_fus_pdf_s cn63xx; - struct cvmx_mio_fus_pdf_s cn63xxp1; - struct cvmx_mio_fus_pdf_s cn66xx; - struct cvmx_mio_fus_pdf_s cn68xx; - struct cvmx_mio_fus_pdf_s cn68xxp1; - struct cvmx_mio_fus_pdf_s cnf71xx; }; union cvmx_mio_fus_pll { @@ -2504,12 +2164,6 @@ union cvmx_mio_fus_pll { uint64_t reserved_2_63:62; #endif } cn50xx; - struct cvmx_mio_fus_pll_cn50xx cn52xx; - struct cvmx_mio_fus_pll_cn50xx cn52xxp1; - struct cvmx_mio_fus_pll_cn50xx cn56xx; - struct cvmx_mio_fus_pll_cn50xx cn56xxp1; - struct cvmx_mio_fus_pll_cn50xx cn58xx; - struct cvmx_mio_fus_pll_cn50xx cn58xxp1; struct cvmx_mio_fus_pll_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; @@ -2529,12 +2183,6 @@ union cvmx_mio_fus_pll { uint64_t reserved_8_63:56; #endif } cn61xx; - struct cvmx_mio_fus_pll_cn61xx cn63xx; - struct cvmx_mio_fus_pll_cn61xx cn63xxp1; - struct cvmx_mio_fus_pll_cn61xx cn66xx; - struct cvmx_mio_fus_pll_s cn68xx; - struct cvmx_mio_fus_pll_s cn68xxp1; - struct cvmx_mio_fus_pll_cn61xx cnf71xx; }; union cvmx_mio_fus_prog { @@ -2559,23 +2207,6 @@ union cvmx_mio_fus_prog { uint64_t reserved_1_63:63; #endif } cn30xx; - struct cvmx_mio_fus_prog_cn30xx cn31xx; - struct cvmx_mio_fus_prog_cn30xx cn38xx; - struct cvmx_mio_fus_prog_cn30xx cn38xxp2; - struct cvmx_mio_fus_prog_cn30xx cn50xx; - struct cvmx_mio_fus_prog_cn30xx cn52xx; - struct cvmx_mio_fus_prog_cn30xx cn52xxp1; - struct cvmx_mio_fus_prog_cn30xx cn56xx; - struct cvmx_mio_fus_prog_cn30xx cn56xxp1; - struct cvmx_mio_fus_prog_cn30xx cn58xx; - struct cvmx_mio_fus_prog_cn30xx cn58xxp1; - struct cvmx_mio_fus_prog_s cn61xx; - struct cvmx_mio_fus_prog_s cn63xx; - struct cvmx_mio_fus_prog_s cn63xxp1; - struct cvmx_mio_fus_prog_s cn66xx; - struct cvmx_mio_fus_prog_s cn68xx; - struct cvmx_mio_fus_prog_s cn68xxp1; - struct cvmx_mio_fus_prog_s cnf71xx; }; union cvmx_mio_fus_prog_times { @@ -2614,12 +2245,6 @@ union cvmx_mio_fus_prog_times { uint64_t reserved_33_63:31; #endif } cn50xx; - struct cvmx_mio_fus_prog_times_cn50xx cn52xx; - struct cvmx_mio_fus_prog_times_cn50xx cn52xxp1; - struct cvmx_mio_fus_prog_times_cn50xx cn56xx; - struct cvmx_mio_fus_prog_times_cn50xx cn56xxp1; - struct cvmx_mio_fus_prog_times_cn50xx cn58xx; - struct cvmx_mio_fus_prog_times_cn50xx cn58xxp1; struct cvmx_mio_fus_prog_times_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_35_63:29; @@ -2641,12 
+2266,6 @@ union cvmx_mio_fus_prog_times { uint64_t reserved_35_63:29; #endif } cn61xx; - struct cvmx_mio_fus_prog_times_cn61xx cn63xx; - struct cvmx_mio_fus_prog_times_cn61xx cn63xxp1; - struct cvmx_mio_fus_prog_times_cn61xx cn66xx; - struct cvmx_mio_fus_prog_times_cn61xx cn68xx; - struct cvmx_mio_fus_prog_times_cn61xx cn68xxp1; - struct cvmx_mio_fus_prog_times_cn61xx cnf71xx; }; union cvmx_mio_fus_rcmd { @@ -2691,23 +2310,6 @@ union cvmx_mio_fus_rcmd { uint64_t reserved_24_63:40; #endif } cn30xx; - struct cvmx_mio_fus_rcmd_cn30xx cn31xx; - struct cvmx_mio_fus_rcmd_cn30xx cn38xx; - struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2; - struct cvmx_mio_fus_rcmd_cn30xx cn50xx; - struct cvmx_mio_fus_rcmd_s cn52xx; - struct cvmx_mio_fus_rcmd_s cn52xxp1; - struct cvmx_mio_fus_rcmd_s cn56xx; - struct cvmx_mio_fus_rcmd_s cn56xxp1; - struct cvmx_mio_fus_rcmd_cn30xx cn58xx; - struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1; - struct cvmx_mio_fus_rcmd_s cn61xx; - struct cvmx_mio_fus_rcmd_s cn63xx; - struct cvmx_mio_fus_rcmd_s cn63xxp1; - struct cvmx_mio_fus_rcmd_s cn66xx; - struct cvmx_mio_fus_rcmd_s cn68xx; - struct cvmx_mio_fus_rcmd_s cn68xxp1; - struct cvmx_mio_fus_rcmd_s cnf71xx; }; union cvmx_mio_fus_read_times { @@ -2729,13 +2331,6 @@ union cvmx_mio_fus_read_times { uint64_t reserved_26_63:38; #endif } s; - struct cvmx_mio_fus_read_times_s cn61xx; - struct cvmx_mio_fus_read_times_s cn63xx; - struct cvmx_mio_fus_read_times_s cn63xxp1; - struct cvmx_mio_fus_read_times_s cn66xx; - struct cvmx_mio_fus_read_times_s cn68xx; - struct cvmx_mio_fus_read_times_s cn68xxp1; - struct cvmx_mio_fus_read_times_s cnf71xx; }; union cvmx_mio_fus_repair_res0 { @@ -2755,13 +2350,6 @@ union cvmx_mio_fus_repair_res0 { uint64_t reserved_55_63:9; #endif } s; - struct cvmx_mio_fus_repair_res0_s cn61xx; - struct cvmx_mio_fus_repair_res0_s cn63xx; - struct cvmx_mio_fus_repair_res0_s cn63xxp1; - struct cvmx_mio_fus_repair_res0_s cn66xx; - struct cvmx_mio_fus_repair_res0_s cn68xx; - struct cvmx_mio_fus_repair_res0_s cn68xxp1; - struct cvmx_mio_fus_repair_res0_s cnf71xx; }; union cvmx_mio_fus_repair_res1 { @@ -2779,13 +2367,6 @@ union cvmx_mio_fus_repair_res1 { uint64_t reserved_54_63:10; #endif } s; - struct cvmx_mio_fus_repair_res1_s cn61xx; - struct cvmx_mio_fus_repair_res1_s cn63xx; - struct cvmx_mio_fus_repair_res1_s cn63xxp1; - struct cvmx_mio_fus_repair_res1_s cn66xx; - struct cvmx_mio_fus_repair_res1_s cn68xx; - struct cvmx_mio_fus_repair_res1_s cn68xxp1; - struct cvmx_mio_fus_repair_res1_s cnf71xx; }; union cvmx_mio_fus_repair_res2 { @@ -2799,13 +2380,6 @@ union cvmx_mio_fus_repair_res2 { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_mio_fus_repair_res2_s cn61xx; - struct cvmx_mio_fus_repair_res2_s cn63xx; - struct cvmx_mio_fus_repair_res2_s cn63xxp1; - struct cvmx_mio_fus_repair_res2_s cn66xx; - struct cvmx_mio_fus_repair_res2_s cn68xx; - struct cvmx_mio_fus_repair_res2_s cn68xxp1; - struct cvmx_mio_fus_repair_res2_s cnf71xx; }; union cvmx_mio_fus_spr_repair_res { @@ -2823,23 +2397,6 @@ union cvmx_mio_fus_spr_repair_res { uint64_t reserved_42_63:22; #endif } s; - struct cvmx_mio_fus_spr_repair_res_s cn30xx; - struct cvmx_mio_fus_spr_repair_res_s cn31xx; - struct cvmx_mio_fus_spr_repair_res_s cn38xx; - struct cvmx_mio_fus_spr_repair_res_s cn50xx; - struct cvmx_mio_fus_spr_repair_res_s cn52xx; - struct cvmx_mio_fus_spr_repair_res_s cn52xxp1; - struct cvmx_mio_fus_spr_repair_res_s cn56xx; - struct cvmx_mio_fus_spr_repair_res_s cn56xxp1; - struct cvmx_mio_fus_spr_repair_res_s cn58xx; - struct cvmx_mio_fus_spr_repair_res_s 
cn58xxp1; - struct cvmx_mio_fus_spr_repair_res_s cn61xx; - struct cvmx_mio_fus_spr_repair_res_s cn63xx; - struct cvmx_mio_fus_spr_repair_res_s cn63xxp1; - struct cvmx_mio_fus_spr_repair_res_s cn66xx; - struct cvmx_mio_fus_spr_repair_res_s cn68xx; - struct cvmx_mio_fus_spr_repair_res_s cn68xxp1; - struct cvmx_mio_fus_spr_repair_res_s cnf71xx; }; union cvmx_mio_fus_spr_repair_sum { @@ -2853,23 +2410,6 @@ union cvmx_mio_fus_spr_repair_sum { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_fus_spr_repair_sum_s cn30xx; - struct cvmx_mio_fus_spr_repair_sum_s cn31xx; - struct cvmx_mio_fus_spr_repair_sum_s cn38xx; - struct cvmx_mio_fus_spr_repair_sum_s cn50xx; - struct cvmx_mio_fus_spr_repair_sum_s cn52xx; - struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1; - struct cvmx_mio_fus_spr_repair_sum_s cn56xx; - struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1; - struct cvmx_mio_fus_spr_repair_sum_s cn58xx; - struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1; - struct cvmx_mio_fus_spr_repair_sum_s cn61xx; - struct cvmx_mio_fus_spr_repair_sum_s cn63xx; - struct cvmx_mio_fus_spr_repair_sum_s cn63xxp1; - struct cvmx_mio_fus_spr_repair_sum_s cn66xx; - struct cvmx_mio_fus_spr_repair_sum_s cn68xx; - struct cvmx_mio_fus_spr_repair_sum_s cn68xxp1; - struct cvmx_mio_fus_spr_repair_sum_s cnf71xx; }; union cvmx_mio_fus_tgg { @@ -2883,9 +2423,6 @@ union cvmx_mio_fus_tgg { uint64_t val:1; #endif } s; - struct cvmx_mio_fus_tgg_s cn61xx; - struct cvmx_mio_fus_tgg_s cn66xx; - struct cvmx_mio_fus_tgg_s cnf71xx; }; union cvmx_mio_fus_unlock { @@ -2899,8 +2436,6 @@ union cvmx_mio_fus_unlock { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_mio_fus_unlock_s cn30xx; - struct cvmx_mio_fus_unlock_s cn31xx; }; union cvmx_mio_fus_wadr { @@ -2914,10 +2449,6 @@ union cvmx_mio_fus_wadr { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_mio_fus_wadr_s cn30xx; - struct cvmx_mio_fus_wadr_s cn31xx; - struct cvmx_mio_fus_wadr_s cn38xx; - struct cvmx_mio_fus_wadr_s cn38xxp2; struct cvmx_mio_fus_wadr_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -2936,11 +2467,6 @@ union cvmx_mio_fus_wadr { uint64_t reserved_3_63:61; #endif } cn52xx; - struct cvmx_mio_fus_wadr_cn52xx cn52xxp1; - struct cvmx_mio_fus_wadr_cn52xx cn56xx; - struct cvmx_mio_fus_wadr_cn52xx cn56xxp1; - struct cvmx_mio_fus_wadr_cn50xx cn58xx; - struct cvmx_mio_fus_wadr_cn50xx cn58xxp1; struct cvmx_mio_fus_wadr_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_4_63:60; @@ -2950,12 +2476,6 @@ union cvmx_mio_fus_wadr { uint64_t reserved_4_63:60; #endif } cn61xx; - struct cvmx_mio_fus_wadr_cn61xx cn63xx; - struct cvmx_mio_fus_wadr_cn61xx cn63xxp1; - struct cvmx_mio_fus_wadr_cn61xx cn66xx; - struct cvmx_mio_fus_wadr_cn61xx cn68xx; - struct cvmx_mio_fus_wadr_cn61xx cn68xxp1; - struct cvmx_mio_fus_wadr_cn61xx cnf71xx; }; union cvmx_mio_gpio_comp { @@ -2971,13 +2491,6 @@ union cvmx_mio_gpio_comp { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_mio_gpio_comp_s cn61xx; - struct cvmx_mio_gpio_comp_s cn63xx; - struct cvmx_mio_gpio_comp_s cn63xxp1; - struct cvmx_mio_gpio_comp_s cn66xx; - struct cvmx_mio_gpio_comp_s cn68xx; - struct cvmx_mio_gpio_comp_s cn68xxp1; - struct cvmx_mio_gpio_comp_s cnf71xx; }; union cvmx_mio_ndf_dma_cfg { @@ -3007,14 +2520,6 @@ union cvmx_mio_ndf_dma_cfg { uint64_t en:1; #endif } s; - struct cvmx_mio_ndf_dma_cfg_s cn52xx; - struct cvmx_mio_ndf_dma_cfg_s cn61xx; - struct cvmx_mio_ndf_dma_cfg_s cn63xx; - struct cvmx_mio_ndf_dma_cfg_s cn63xxp1; - struct cvmx_mio_ndf_dma_cfg_s cn66xx; - struct cvmx_mio_ndf_dma_cfg_s cn68xx; - 
struct cvmx_mio_ndf_dma_cfg_s cn68xxp1; - struct cvmx_mio_ndf_dma_cfg_s cnf71xx; }; union cvmx_mio_ndf_dma_int { @@ -3028,14 +2533,6 @@ union cvmx_mio_ndf_dma_int { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_ndf_dma_int_s cn52xx; - struct cvmx_mio_ndf_dma_int_s cn61xx; - struct cvmx_mio_ndf_dma_int_s cn63xx; - struct cvmx_mio_ndf_dma_int_s cn63xxp1; - struct cvmx_mio_ndf_dma_int_s cn66xx; - struct cvmx_mio_ndf_dma_int_s cn68xx; - struct cvmx_mio_ndf_dma_int_s cn68xxp1; - struct cvmx_mio_ndf_dma_int_s cnf71xx; }; union cvmx_mio_ndf_dma_int_en { @@ -3049,14 +2546,6 @@ union cvmx_mio_ndf_dma_int_en { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_ndf_dma_int_en_s cn52xx; - struct cvmx_mio_ndf_dma_int_en_s cn61xx; - struct cvmx_mio_ndf_dma_int_en_s cn63xx; - struct cvmx_mio_ndf_dma_int_en_s cn63xxp1; - struct cvmx_mio_ndf_dma_int_en_s cn66xx; - struct cvmx_mio_ndf_dma_int_en_s cn68xx; - struct cvmx_mio_ndf_dma_int_en_s cn68xxp1; - struct cvmx_mio_ndf_dma_int_en_s cnf71xx; }; union cvmx_mio_pll_ctl { @@ -3070,8 +2559,6 @@ union cvmx_mio_pll_ctl { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_mio_pll_ctl_s cn30xx; - struct cvmx_mio_pll_ctl_s cn31xx; }; union cvmx_mio_pll_setting { @@ -3085,8 +2572,6 @@ union cvmx_mio_pll_setting { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_mio_pll_setting_s cn30xx; - struct cvmx_mio_pll_setting_s cn31xx; }; union cvmx_mio_ptp_ckout_hi_incr { @@ -3100,10 +2585,6 @@ union cvmx_mio_ptp_ckout_hi_incr { uint64_t nanosec:32; #endif } s; - struct cvmx_mio_ptp_ckout_hi_incr_s cn61xx; - struct cvmx_mio_ptp_ckout_hi_incr_s cn66xx; - struct cvmx_mio_ptp_ckout_hi_incr_s cn68xx; - struct cvmx_mio_ptp_ckout_hi_incr_s cnf71xx; }; union cvmx_mio_ptp_ckout_lo_incr { @@ -3117,10 +2598,6 @@ union cvmx_mio_ptp_ckout_lo_incr { uint64_t nanosec:32; #endif } s; - struct cvmx_mio_ptp_ckout_lo_incr_s cn61xx; - struct cvmx_mio_ptp_ckout_lo_incr_s cn66xx; - struct cvmx_mio_ptp_ckout_lo_incr_s cn68xx; - struct cvmx_mio_ptp_ckout_lo_incr_s cnf71xx; }; union cvmx_mio_ptp_ckout_thresh_hi { @@ -3132,10 +2609,6 @@ union cvmx_mio_ptp_ckout_thresh_hi { uint64_t nanosec:64; #endif } s; - struct cvmx_mio_ptp_ckout_thresh_hi_s cn61xx; - struct cvmx_mio_ptp_ckout_thresh_hi_s cn66xx; - struct cvmx_mio_ptp_ckout_thresh_hi_s cn68xx; - struct cvmx_mio_ptp_ckout_thresh_hi_s cnf71xx; }; union cvmx_mio_ptp_ckout_thresh_lo { @@ -3149,10 +2622,6 @@ union cvmx_mio_ptp_ckout_thresh_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_ptp_ckout_thresh_lo_s cn61xx; - struct cvmx_mio_ptp_ckout_thresh_lo_s cn66xx; - struct cvmx_mio_ptp_ckout_thresh_lo_s cn68xx; - struct cvmx_mio_ptp_ckout_thresh_lo_s cnf71xx; }; union cvmx_mio_ptp_clock_cfg { @@ -3202,7 +2671,6 @@ union cvmx_mio_ptp_clock_cfg { uint64_t reserved_42_63:22; #endif } s; - struct cvmx_mio_ptp_clock_cfg_s cn61xx; struct cvmx_mio_ptp_clock_cfg_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_24_63:40; @@ -3228,7 +2696,6 @@ union cvmx_mio_ptp_clock_cfg { uint64_t reserved_24_63:40; #endif } cn63xx; - struct cvmx_mio_ptp_clock_cfg_cn63xx cn63xxp1; struct cvmx_mio_ptp_clock_cfg_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_40_63:24; @@ -3270,9 +2737,6 @@ union cvmx_mio_ptp_clock_cfg { uint64_t reserved_40_63:24; #endif } cn66xx; - struct cvmx_mio_ptp_clock_cfg_s cn68xx; - struct cvmx_mio_ptp_clock_cfg_cn63xx cn68xxp1; - struct cvmx_mio_ptp_clock_cfg_s cnf71xx; }; union cvmx_mio_ptp_clock_comp { @@ -3286,13 +2750,6 @@ union cvmx_mio_ptp_clock_comp { uint64_t nanosec:32; #endif } s; - struct 
cvmx_mio_ptp_clock_comp_s cn61xx; - struct cvmx_mio_ptp_clock_comp_s cn63xx; - struct cvmx_mio_ptp_clock_comp_s cn63xxp1; - struct cvmx_mio_ptp_clock_comp_s cn66xx; - struct cvmx_mio_ptp_clock_comp_s cn68xx; - struct cvmx_mio_ptp_clock_comp_s cn68xxp1; - struct cvmx_mio_ptp_clock_comp_s cnf71xx; }; union cvmx_mio_ptp_clock_hi { @@ -3304,13 +2761,6 @@ union cvmx_mio_ptp_clock_hi { uint64_t nanosec:64; #endif } s; - struct cvmx_mio_ptp_clock_hi_s cn61xx; - struct cvmx_mio_ptp_clock_hi_s cn63xx; - struct cvmx_mio_ptp_clock_hi_s cn63xxp1; - struct cvmx_mio_ptp_clock_hi_s cn66xx; - struct cvmx_mio_ptp_clock_hi_s cn68xx; - struct cvmx_mio_ptp_clock_hi_s cn68xxp1; - struct cvmx_mio_ptp_clock_hi_s cnf71xx; }; union cvmx_mio_ptp_clock_lo { @@ -3324,13 +2774,6 @@ union cvmx_mio_ptp_clock_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_ptp_clock_lo_s cn61xx; - struct cvmx_mio_ptp_clock_lo_s cn63xx; - struct cvmx_mio_ptp_clock_lo_s cn63xxp1; - struct cvmx_mio_ptp_clock_lo_s cn66xx; - struct cvmx_mio_ptp_clock_lo_s cn68xx; - struct cvmx_mio_ptp_clock_lo_s cn68xxp1; - struct cvmx_mio_ptp_clock_lo_s cnf71xx; }; union cvmx_mio_ptp_evt_cnt { @@ -3342,13 +2785,6 @@ union cvmx_mio_ptp_evt_cnt { uint64_t cntr:64; #endif } s; - struct cvmx_mio_ptp_evt_cnt_s cn61xx; - struct cvmx_mio_ptp_evt_cnt_s cn63xx; - struct cvmx_mio_ptp_evt_cnt_s cn63xxp1; - struct cvmx_mio_ptp_evt_cnt_s cn66xx; - struct cvmx_mio_ptp_evt_cnt_s cn68xx; - struct cvmx_mio_ptp_evt_cnt_s cn68xxp1; - struct cvmx_mio_ptp_evt_cnt_s cnf71xx; }; union cvmx_mio_ptp_phy_1pps_in { @@ -3362,7 +2798,6 @@ union cvmx_mio_ptp_phy_1pps_in { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_mio_ptp_phy_1pps_in_s cnf71xx; }; union cvmx_mio_ptp_pps_hi_incr { @@ -3376,10 +2811,6 @@ union cvmx_mio_ptp_pps_hi_incr { uint64_t nanosec:32; #endif } s; - struct cvmx_mio_ptp_pps_hi_incr_s cn61xx; - struct cvmx_mio_ptp_pps_hi_incr_s cn66xx; - struct cvmx_mio_ptp_pps_hi_incr_s cn68xx; - struct cvmx_mio_ptp_pps_hi_incr_s cnf71xx; }; union cvmx_mio_ptp_pps_lo_incr { @@ -3393,10 +2824,6 @@ union cvmx_mio_ptp_pps_lo_incr { uint64_t nanosec:32; #endif } s; - struct cvmx_mio_ptp_pps_lo_incr_s cn61xx; - struct cvmx_mio_ptp_pps_lo_incr_s cn66xx; - struct cvmx_mio_ptp_pps_lo_incr_s cn68xx; - struct cvmx_mio_ptp_pps_lo_incr_s cnf71xx; }; union cvmx_mio_ptp_pps_thresh_hi { @@ -3408,10 +2835,6 @@ union cvmx_mio_ptp_pps_thresh_hi { uint64_t nanosec:64; #endif } s; - struct cvmx_mio_ptp_pps_thresh_hi_s cn61xx; - struct cvmx_mio_ptp_pps_thresh_hi_s cn66xx; - struct cvmx_mio_ptp_pps_thresh_hi_s cn68xx; - struct cvmx_mio_ptp_pps_thresh_hi_s cnf71xx; }; union cvmx_mio_ptp_pps_thresh_lo { @@ -3425,10 +2848,6 @@ union cvmx_mio_ptp_pps_thresh_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_ptp_pps_thresh_lo_s cn61xx; - struct cvmx_mio_ptp_pps_thresh_lo_s cn66xx; - struct cvmx_mio_ptp_pps_thresh_lo_s cn68xx; - struct cvmx_mio_ptp_pps_thresh_lo_s cnf71xx; }; union cvmx_mio_ptp_timestamp { @@ -3440,13 +2859,6 @@ union cvmx_mio_ptp_timestamp { uint64_t nanosec:64; #endif } s; - struct cvmx_mio_ptp_timestamp_s cn61xx; - struct cvmx_mio_ptp_timestamp_s cn63xx; - struct cvmx_mio_ptp_timestamp_s cn63xxp1; - struct cvmx_mio_ptp_timestamp_s cn66xx; - struct cvmx_mio_ptp_timestamp_s cn68xx; - struct cvmx_mio_ptp_timestamp_s cn68xxp1; - struct cvmx_mio_ptp_timestamp_s cnf71xx; }; union cvmx_mio_qlmx_cfg { @@ -3511,8 +2923,6 @@ union cvmx_mio_qlmx_cfg { uint64_t reserved_12_63:52; #endif } cn68xx; - struct cvmx_mio_qlmx_cfg_cn68xx cn68xxp1; - struct 
cvmx_mio_qlmx_cfg_cn61xx cnf71xx; }; union cvmx_mio_rst_boot { @@ -3622,7 +3032,6 @@ union cvmx_mio_rst_boot { uint64_t reserved_36_63:28; #endif } cn63xx; - struct cvmx_mio_rst_boot_cn63xx cn63xxp1; struct cvmx_mio_rst_boot_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t chipkill:1; @@ -3718,7 +3127,6 @@ union cvmx_mio_rst_boot { uint64_t reserved_44_63:20; #endif } cn68xxp1; - struct cvmx_mio_rst_boot_cn61xx cnf71xx; }; union cvmx_mio_rst_cfg { @@ -3751,7 +3159,6 @@ union cvmx_mio_rst_cfg { uint64_t bist_delay:58; #endif } cn61xx; - struct cvmx_mio_rst_cfg_cn61xx cn63xx; struct cvmx_mio_rst_cfg_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t bist_delay:58; @@ -3765,7 +3172,6 @@ union cvmx_mio_rst_cfg { uint64_t bist_delay:58; #endif } cn63xxp1; - struct cvmx_mio_rst_cfg_cn61xx cn66xx; struct cvmx_mio_rst_cfg_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t bist_delay:56; @@ -3781,8 +3187,6 @@ union cvmx_mio_rst_cfg { uint64_t bist_delay:56; #endif } cn68xx; - struct cvmx_mio_rst_cfg_cn68xx cn68xxp1; - struct cvmx_mio_rst_cfg_cn61xx cnf71xx; }; union cvmx_mio_rst_ckill { @@ -3796,9 +3200,6 @@ union cvmx_mio_rst_ckill { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_mio_rst_ckill_s cn61xx; - struct cvmx_mio_rst_ckill_s cn66xx; - struct cvmx_mio_rst_ckill_s cnf71xx; }; union cvmx_mio_rst_cntlx { @@ -3834,7 +3235,6 @@ union cvmx_mio_rst_cntlx { uint64_t reserved_13_63:51; #endif } s; - struct cvmx_mio_rst_cntlx_s cn61xx; struct cvmx_mio_rst_cntlx_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -3860,8 +3260,6 @@ union cvmx_mio_rst_cntlx { uint64_t reserved_10_63:54; #endif } cn66xx; - struct cvmx_mio_rst_cntlx_cn66xx cn68xx; - struct cvmx_mio_rst_cntlx_s cnf71xx; }; union cvmx_mio_rst_ctlx { @@ -3897,7 +3295,6 @@ union cvmx_mio_rst_ctlx { uint64_t reserved_13_63:51; #endif } s; - struct cvmx_mio_rst_ctlx_s cn61xx; struct cvmx_mio_rst_ctlx_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -3946,10 +3343,6 @@ union cvmx_mio_rst_ctlx { uint64_t reserved_9_63:55; #endif } cn63xxp1; - struct cvmx_mio_rst_ctlx_cn63xx cn66xx; - struct cvmx_mio_rst_ctlx_cn63xx cn68xx; - struct cvmx_mio_rst_ctlx_cn63xx cn68xxp1; - struct cvmx_mio_rst_ctlx_s cnf71xx; }; union cvmx_mio_rst_delay { @@ -3965,13 +3358,6 @@ union cvmx_mio_rst_delay { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_mio_rst_delay_s cn61xx; - struct cvmx_mio_rst_delay_s cn63xx; - struct cvmx_mio_rst_delay_s cn63xxp1; - struct cvmx_mio_rst_delay_s cn66xx; - struct cvmx_mio_rst_delay_s cn68xx; - struct cvmx_mio_rst_delay_s cn68xxp1; - struct cvmx_mio_rst_delay_s cnf71xx; }; union cvmx_mio_rst_int { @@ -4014,12 +3400,6 @@ union cvmx_mio_rst_int { uint64_t reserved_10_63:54; #endif } cn61xx; - struct cvmx_mio_rst_int_cn61xx cn63xx; - struct cvmx_mio_rst_int_cn61xx cn63xxp1; - struct cvmx_mio_rst_int_s cn66xx; - struct cvmx_mio_rst_int_cn61xx cn68xx; - struct cvmx_mio_rst_int_cn61xx cn68xxp1; - struct cvmx_mio_rst_int_cn61xx cnf71xx; }; union cvmx_mio_rst_int_en { @@ -4062,12 +3442,6 @@ union cvmx_mio_rst_int_en { uint64_t reserved_10_63:54; #endif } cn61xx; - struct cvmx_mio_rst_int_en_cn61xx cn63xx; - struct cvmx_mio_rst_int_en_cn61xx cn63xxp1; - struct cvmx_mio_rst_int_en_s cn66xx; - struct cvmx_mio_rst_int_en_cn61xx cn68xx; - struct cvmx_mio_rst_int_en_cn61xx cn68xxp1; - struct cvmx_mio_rst_int_en_cn61xx cnf71xx; }; union cvmx_mio_twsx_int { @@ -4103,9 +3477,6 @@ union cvmx_mio_twsx_int { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_mio_twsx_int_s cn30xx; - struct cvmx_mio_twsx_int_s 
cn31xx; - struct cvmx_mio_twsx_int_s cn38xx; struct cvmx_mio_twsx_int_cn38xxp2 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_7_63:57; @@ -4127,20 +3498,6 @@ union cvmx_mio_twsx_int { uint64_t reserved_7_63:57; #endif } cn38xxp2; - struct cvmx_mio_twsx_int_s cn50xx; - struct cvmx_mio_twsx_int_s cn52xx; - struct cvmx_mio_twsx_int_s cn52xxp1; - struct cvmx_mio_twsx_int_s cn56xx; - struct cvmx_mio_twsx_int_s cn56xxp1; - struct cvmx_mio_twsx_int_s cn58xx; - struct cvmx_mio_twsx_int_s cn58xxp1; - struct cvmx_mio_twsx_int_s cn61xx; - struct cvmx_mio_twsx_int_s cn63xx; - struct cvmx_mio_twsx_int_s cn63xxp1; - struct cvmx_mio_twsx_int_s cn66xx; - struct cvmx_mio_twsx_int_s cn68xx; - struct cvmx_mio_twsx_int_s cn68xxp1; - struct cvmx_mio_twsx_int_s cnf71xx; }; union cvmx_mio_twsx_sw_twsi { @@ -4174,24 +3531,6 @@ union cvmx_mio_twsx_sw_twsi { uint64_t v:1; #endif } s; - struct cvmx_mio_twsx_sw_twsi_s cn30xx; - struct cvmx_mio_twsx_sw_twsi_s cn31xx; - struct cvmx_mio_twsx_sw_twsi_s cn38xx; - struct cvmx_mio_twsx_sw_twsi_s cn38xxp2; - struct cvmx_mio_twsx_sw_twsi_s cn50xx; - struct cvmx_mio_twsx_sw_twsi_s cn52xx; - struct cvmx_mio_twsx_sw_twsi_s cn52xxp1; - struct cvmx_mio_twsx_sw_twsi_s cn56xx; - struct cvmx_mio_twsx_sw_twsi_s cn56xxp1; - struct cvmx_mio_twsx_sw_twsi_s cn58xx; - struct cvmx_mio_twsx_sw_twsi_s cn58xxp1; - struct cvmx_mio_twsx_sw_twsi_s cn61xx; - struct cvmx_mio_twsx_sw_twsi_s cn63xx; - struct cvmx_mio_twsx_sw_twsi_s cn63xxp1; - struct cvmx_mio_twsx_sw_twsi_s cn66xx; - struct cvmx_mio_twsx_sw_twsi_s cn68xx; - struct cvmx_mio_twsx_sw_twsi_s cn68xxp1; - struct cvmx_mio_twsx_sw_twsi_s cnf71xx; }; union cvmx_mio_twsx_sw_twsi_ext { @@ -4207,24 +3546,6 @@ union cvmx_mio_twsx_sw_twsi_ext { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2; - struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1; - struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1; - struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1; - struct cvmx_mio_twsx_sw_twsi_ext_s cn61xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn63xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn63xxp1; - struct cvmx_mio_twsx_sw_twsi_ext_s cn66xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn68xx; - struct cvmx_mio_twsx_sw_twsi_ext_s cn68xxp1; - struct cvmx_mio_twsx_sw_twsi_ext_s cnf71xx; }; union cvmx_mio_twsx_twsi_sw { @@ -4240,24 +3561,6 @@ union cvmx_mio_twsx_twsi_sw { uint64_t v:2; #endif } s; - struct cvmx_mio_twsx_twsi_sw_s cn30xx; - struct cvmx_mio_twsx_twsi_sw_s cn31xx; - struct cvmx_mio_twsx_twsi_sw_s cn38xx; - struct cvmx_mio_twsx_twsi_sw_s cn38xxp2; - struct cvmx_mio_twsx_twsi_sw_s cn50xx; - struct cvmx_mio_twsx_twsi_sw_s cn52xx; - struct cvmx_mio_twsx_twsi_sw_s cn52xxp1; - struct cvmx_mio_twsx_twsi_sw_s cn56xx; - struct cvmx_mio_twsx_twsi_sw_s cn56xxp1; - struct cvmx_mio_twsx_twsi_sw_s cn58xx; - struct cvmx_mio_twsx_twsi_sw_s cn58xxp1; - struct cvmx_mio_twsx_twsi_sw_s cn61xx; - struct cvmx_mio_twsx_twsi_sw_s cn63xx; - struct cvmx_mio_twsx_twsi_sw_s cn63xxp1; - struct cvmx_mio_twsx_twsi_sw_s cn66xx; - struct cvmx_mio_twsx_twsi_sw_s cn68xx; - struct cvmx_mio_twsx_twsi_sw_s cn68xxp1; - struct cvmx_mio_twsx_twsi_sw_s cnf71xx; }; union cvmx_mio_uartx_dlh { @@ -4271,24 +3574,6 @@ union cvmx_mio_uartx_dlh { uint64_t reserved_8_63:56; 
#endif } s; - struct cvmx_mio_uartx_dlh_s cn30xx; - struct cvmx_mio_uartx_dlh_s cn31xx; - struct cvmx_mio_uartx_dlh_s cn38xx; - struct cvmx_mio_uartx_dlh_s cn38xxp2; - struct cvmx_mio_uartx_dlh_s cn50xx; - struct cvmx_mio_uartx_dlh_s cn52xx; - struct cvmx_mio_uartx_dlh_s cn52xxp1; - struct cvmx_mio_uartx_dlh_s cn56xx; - struct cvmx_mio_uartx_dlh_s cn56xxp1; - struct cvmx_mio_uartx_dlh_s cn58xx; - struct cvmx_mio_uartx_dlh_s cn58xxp1; - struct cvmx_mio_uartx_dlh_s cn61xx; - struct cvmx_mio_uartx_dlh_s cn63xx; - struct cvmx_mio_uartx_dlh_s cn63xxp1; - struct cvmx_mio_uartx_dlh_s cn66xx; - struct cvmx_mio_uartx_dlh_s cn68xx; - struct cvmx_mio_uartx_dlh_s cn68xxp1; - struct cvmx_mio_uartx_dlh_s cnf71xx; }; union cvmx_mio_uartx_dll { @@ -4302,24 +3587,6 @@ union cvmx_mio_uartx_dll { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_dll_s cn30xx; - struct cvmx_mio_uartx_dll_s cn31xx; - struct cvmx_mio_uartx_dll_s cn38xx; - struct cvmx_mio_uartx_dll_s cn38xxp2; - struct cvmx_mio_uartx_dll_s cn50xx; - struct cvmx_mio_uartx_dll_s cn52xx; - struct cvmx_mio_uartx_dll_s cn52xxp1; - struct cvmx_mio_uartx_dll_s cn56xx; - struct cvmx_mio_uartx_dll_s cn56xxp1; - struct cvmx_mio_uartx_dll_s cn58xx; - struct cvmx_mio_uartx_dll_s cn58xxp1; - struct cvmx_mio_uartx_dll_s cn61xx; - struct cvmx_mio_uartx_dll_s cn63xx; - struct cvmx_mio_uartx_dll_s cn63xxp1; - struct cvmx_mio_uartx_dll_s cn66xx; - struct cvmx_mio_uartx_dll_s cn68xx; - struct cvmx_mio_uartx_dll_s cn68xxp1; - struct cvmx_mio_uartx_dll_s cnf71xx; }; union cvmx_mio_uartx_far { @@ -4333,24 +3600,6 @@ union cvmx_mio_uartx_far { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uartx_far_s cn30xx; - struct cvmx_mio_uartx_far_s cn31xx; - struct cvmx_mio_uartx_far_s cn38xx; - struct cvmx_mio_uartx_far_s cn38xxp2; - struct cvmx_mio_uartx_far_s cn50xx; - struct cvmx_mio_uartx_far_s cn52xx; - struct cvmx_mio_uartx_far_s cn52xxp1; - struct cvmx_mio_uartx_far_s cn56xx; - struct cvmx_mio_uartx_far_s cn56xxp1; - struct cvmx_mio_uartx_far_s cn58xx; - struct cvmx_mio_uartx_far_s cn58xxp1; - struct cvmx_mio_uartx_far_s cn61xx; - struct cvmx_mio_uartx_far_s cn63xx; - struct cvmx_mio_uartx_far_s cn63xxp1; - struct cvmx_mio_uartx_far_s cn66xx; - struct cvmx_mio_uartx_far_s cn68xx; - struct cvmx_mio_uartx_far_s cn68xxp1; - struct cvmx_mio_uartx_far_s cnf71xx; }; union cvmx_mio_uartx_fcr { @@ -4374,24 +3623,6 @@ union cvmx_mio_uartx_fcr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_fcr_s cn30xx; - struct cvmx_mio_uartx_fcr_s cn31xx; - struct cvmx_mio_uartx_fcr_s cn38xx; - struct cvmx_mio_uartx_fcr_s cn38xxp2; - struct cvmx_mio_uartx_fcr_s cn50xx; - struct cvmx_mio_uartx_fcr_s cn52xx; - struct cvmx_mio_uartx_fcr_s cn52xxp1; - struct cvmx_mio_uartx_fcr_s cn56xx; - struct cvmx_mio_uartx_fcr_s cn56xxp1; - struct cvmx_mio_uartx_fcr_s cn58xx; - struct cvmx_mio_uartx_fcr_s cn58xxp1; - struct cvmx_mio_uartx_fcr_s cn61xx; - struct cvmx_mio_uartx_fcr_s cn63xx; - struct cvmx_mio_uartx_fcr_s cn63xxp1; - struct cvmx_mio_uartx_fcr_s cn66xx; - struct cvmx_mio_uartx_fcr_s cn68xx; - struct cvmx_mio_uartx_fcr_s cn68xxp1; - struct cvmx_mio_uartx_fcr_s cnf71xx; }; union cvmx_mio_uartx_htx { @@ -4405,24 +3636,6 @@ union cvmx_mio_uartx_htx { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uartx_htx_s cn30xx; - struct cvmx_mio_uartx_htx_s cn31xx; - struct cvmx_mio_uartx_htx_s cn38xx; - struct cvmx_mio_uartx_htx_s cn38xxp2; - struct cvmx_mio_uartx_htx_s cn50xx; - struct cvmx_mio_uartx_htx_s cn52xx; - struct cvmx_mio_uartx_htx_s cn52xxp1; - struct 
cvmx_mio_uartx_htx_s cn56xx; - struct cvmx_mio_uartx_htx_s cn56xxp1; - struct cvmx_mio_uartx_htx_s cn58xx; - struct cvmx_mio_uartx_htx_s cn58xxp1; - struct cvmx_mio_uartx_htx_s cn61xx; - struct cvmx_mio_uartx_htx_s cn63xx; - struct cvmx_mio_uartx_htx_s cn63xxp1; - struct cvmx_mio_uartx_htx_s cn66xx; - struct cvmx_mio_uartx_htx_s cn68xx; - struct cvmx_mio_uartx_htx_s cn68xxp1; - struct cvmx_mio_uartx_htx_s cnf71xx; }; union cvmx_mio_uartx_ier { @@ -4446,24 +3659,6 @@ union cvmx_mio_uartx_ier { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_ier_s cn30xx; - struct cvmx_mio_uartx_ier_s cn31xx; - struct cvmx_mio_uartx_ier_s cn38xx; - struct cvmx_mio_uartx_ier_s cn38xxp2; - struct cvmx_mio_uartx_ier_s cn50xx; - struct cvmx_mio_uartx_ier_s cn52xx; - struct cvmx_mio_uartx_ier_s cn52xxp1; - struct cvmx_mio_uartx_ier_s cn56xx; - struct cvmx_mio_uartx_ier_s cn56xxp1; - struct cvmx_mio_uartx_ier_s cn58xx; - struct cvmx_mio_uartx_ier_s cn58xxp1; - struct cvmx_mio_uartx_ier_s cn61xx; - struct cvmx_mio_uartx_ier_s cn63xx; - struct cvmx_mio_uartx_ier_s cn63xxp1; - struct cvmx_mio_uartx_ier_s cn66xx; - struct cvmx_mio_uartx_ier_s cn68xx; - struct cvmx_mio_uartx_ier_s cn68xxp1; - struct cvmx_mio_uartx_ier_s cnf71xx; }; union cvmx_mio_uartx_iir { @@ -4481,24 +3676,6 @@ union cvmx_mio_uartx_iir { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_iir_s cn30xx; - struct cvmx_mio_uartx_iir_s cn31xx; - struct cvmx_mio_uartx_iir_s cn38xx; - struct cvmx_mio_uartx_iir_s cn38xxp2; - struct cvmx_mio_uartx_iir_s cn50xx; - struct cvmx_mio_uartx_iir_s cn52xx; - struct cvmx_mio_uartx_iir_s cn52xxp1; - struct cvmx_mio_uartx_iir_s cn56xx; - struct cvmx_mio_uartx_iir_s cn56xxp1; - struct cvmx_mio_uartx_iir_s cn58xx; - struct cvmx_mio_uartx_iir_s cn58xxp1; - struct cvmx_mio_uartx_iir_s cn61xx; - struct cvmx_mio_uartx_iir_s cn63xx; - struct cvmx_mio_uartx_iir_s cn63xxp1; - struct cvmx_mio_uartx_iir_s cn66xx; - struct cvmx_mio_uartx_iir_s cn68xx; - struct cvmx_mio_uartx_iir_s cn68xxp1; - struct cvmx_mio_uartx_iir_s cnf71xx; }; union cvmx_mio_uartx_lcr { @@ -4524,24 +3701,6 @@ union cvmx_mio_uartx_lcr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_lcr_s cn30xx; - struct cvmx_mio_uartx_lcr_s cn31xx; - struct cvmx_mio_uartx_lcr_s cn38xx; - struct cvmx_mio_uartx_lcr_s cn38xxp2; - struct cvmx_mio_uartx_lcr_s cn50xx; - struct cvmx_mio_uartx_lcr_s cn52xx; - struct cvmx_mio_uartx_lcr_s cn52xxp1; - struct cvmx_mio_uartx_lcr_s cn56xx; - struct cvmx_mio_uartx_lcr_s cn56xxp1; - struct cvmx_mio_uartx_lcr_s cn58xx; - struct cvmx_mio_uartx_lcr_s cn58xxp1; - struct cvmx_mio_uartx_lcr_s cn61xx; - struct cvmx_mio_uartx_lcr_s cn63xx; - struct cvmx_mio_uartx_lcr_s cn63xxp1; - struct cvmx_mio_uartx_lcr_s cn66xx; - struct cvmx_mio_uartx_lcr_s cn68xx; - struct cvmx_mio_uartx_lcr_s cn68xxp1; - struct cvmx_mio_uartx_lcr_s cnf71xx; }; union cvmx_mio_uartx_lsr { @@ -4569,24 +3728,6 @@ union cvmx_mio_uartx_lsr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_lsr_s cn30xx; - struct cvmx_mio_uartx_lsr_s cn31xx; - struct cvmx_mio_uartx_lsr_s cn38xx; - struct cvmx_mio_uartx_lsr_s cn38xxp2; - struct cvmx_mio_uartx_lsr_s cn50xx; - struct cvmx_mio_uartx_lsr_s cn52xx; - struct cvmx_mio_uartx_lsr_s cn52xxp1; - struct cvmx_mio_uartx_lsr_s cn56xx; - struct cvmx_mio_uartx_lsr_s cn56xxp1; - struct cvmx_mio_uartx_lsr_s cn58xx; - struct cvmx_mio_uartx_lsr_s cn58xxp1; - struct cvmx_mio_uartx_lsr_s cn61xx; - struct cvmx_mio_uartx_lsr_s cn63xx; - struct cvmx_mio_uartx_lsr_s cn63xxp1; - struct 
cvmx_mio_uartx_lsr_s cn66xx; - struct cvmx_mio_uartx_lsr_s cn68xx; - struct cvmx_mio_uartx_lsr_s cn68xxp1; - struct cvmx_mio_uartx_lsr_s cnf71xx; }; union cvmx_mio_uartx_mcr { @@ -4610,24 +3751,6 @@ union cvmx_mio_uartx_mcr { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_mio_uartx_mcr_s cn30xx; - struct cvmx_mio_uartx_mcr_s cn31xx; - struct cvmx_mio_uartx_mcr_s cn38xx; - struct cvmx_mio_uartx_mcr_s cn38xxp2; - struct cvmx_mio_uartx_mcr_s cn50xx; - struct cvmx_mio_uartx_mcr_s cn52xx; - struct cvmx_mio_uartx_mcr_s cn52xxp1; - struct cvmx_mio_uartx_mcr_s cn56xx; - struct cvmx_mio_uartx_mcr_s cn56xxp1; - struct cvmx_mio_uartx_mcr_s cn58xx; - struct cvmx_mio_uartx_mcr_s cn58xxp1; - struct cvmx_mio_uartx_mcr_s cn61xx; - struct cvmx_mio_uartx_mcr_s cn63xx; - struct cvmx_mio_uartx_mcr_s cn63xxp1; - struct cvmx_mio_uartx_mcr_s cn66xx; - struct cvmx_mio_uartx_mcr_s cn68xx; - struct cvmx_mio_uartx_mcr_s cn68xxp1; - struct cvmx_mio_uartx_mcr_s cnf71xx; }; union cvmx_mio_uartx_msr { @@ -4655,24 +3778,6 @@ union cvmx_mio_uartx_msr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_msr_s cn30xx; - struct cvmx_mio_uartx_msr_s cn31xx; - struct cvmx_mio_uartx_msr_s cn38xx; - struct cvmx_mio_uartx_msr_s cn38xxp2; - struct cvmx_mio_uartx_msr_s cn50xx; - struct cvmx_mio_uartx_msr_s cn52xx; - struct cvmx_mio_uartx_msr_s cn52xxp1; - struct cvmx_mio_uartx_msr_s cn56xx; - struct cvmx_mio_uartx_msr_s cn56xxp1; - struct cvmx_mio_uartx_msr_s cn58xx; - struct cvmx_mio_uartx_msr_s cn58xxp1; - struct cvmx_mio_uartx_msr_s cn61xx; - struct cvmx_mio_uartx_msr_s cn63xx; - struct cvmx_mio_uartx_msr_s cn63xxp1; - struct cvmx_mio_uartx_msr_s cn66xx; - struct cvmx_mio_uartx_msr_s cn68xx; - struct cvmx_mio_uartx_msr_s cn68xxp1; - struct cvmx_mio_uartx_msr_s cnf71xx; }; union cvmx_mio_uartx_rbr { @@ -4686,24 +3791,6 @@ union cvmx_mio_uartx_rbr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_rbr_s cn30xx; - struct cvmx_mio_uartx_rbr_s cn31xx; - struct cvmx_mio_uartx_rbr_s cn38xx; - struct cvmx_mio_uartx_rbr_s cn38xxp2; - struct cvmx_mio_uartx_rbr_s cn50xx; - struct cvmx_mio_uartx_rbr_s cn52xx; - struct cvmx_mio_uartx_rbr_s cn52xxp1; - struct cvmx_mio_uartx_rbr_s cn56xx; - struct cvmx_mio_uartx_rbr_s cn56xxp1; - struct cvmx_mio_uartx_rbr_s cn58xx; - struct cvmx_mio_uartx_rbr_s cn58xxp1; - struct cvmx_mio_uartx_rbr_s cn61xx; - struct cvmx_mio_uartx_rbr_s cn63xx; - struct cvmx_mio_uartx_rbr_s cn63xxp1; - struct cvmx_mio_uartx_rbr_s cn66xx; - struct cvmx_mio_uartx_rbr_s cn68xx; - struct cvmx_mio_uartx_rbr_s cn68xxp1; - struct cvmx_mio_uartx_rbr_s cnf71xx; }; union cvmx_mio_uartx_rfl { @@ -4717,24 +3804,6 @@ union cvmx_mio_uartx_rfl { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_uartx_rfl_s cn30xx; - struct cvmx_mio_uartx_rfl_s cn31xx; - struct cvmx_mio_uartx_rfl_s cn38xx; - struct cvmx_mio_uartx_rfl_s cn38xxp2; - struct cvmx_mio_uartx_rfl_s cn50xx; - struct cvmx_mio_uartx_rfl_s cn52xx; - struct cvmx_mio_uartx_rfl_s cn52xxp1; - struct cvmx_mio_uartx_rfl_s cn56xx; - struct cvmx_mio_uartx_rfl_s cn56xxp1; - struct cvmx_mio_uartx_rfl_s cn58xx; - struct cvmx_mio_uartx_rfl_s cn58xxp1; - struct cvmx_mio_uartx_rfl_s cn61xx; - struct cvmx_mio_uartx_rfl_s cn63xx; - struct cvmx_mio_uartx_rfl_s cn63xxp1; - struct cvmx_mio_uartx_rfl_s cn66xx; - struct cvmx_mio_uartx_rfl_s cn68xx; - struct cvmx_mio_uartx_rfl_s cn68xxp1; - struct cvmx_mio_uartx_rfl_s cnf71xx; }; union cvmx_mio_uartx_rfw { @@ -4752,24 +3821,6 @@ union cvmx_mio_uartx_rfw { uint64_t reserved_10_63:54; #endif } s; - struct 
cvmx_mio_uartx_rfw_s cn30xx; - struct cvmx_mio_uartx_rfw_s cn31xx; - struct cvmx_mio_uartx_rfw_s cn38xx; - struct cvmx_mio_uartx_rfw_s cn38xxp2; - struct cvmx_mio_uartx_rfw_s cn50xx; - struct cvmx_mio_uartx_rfw_s cn52xx; - struct cvmx_mio_uartx_rfw_s cn52xxp1; - struct cvmx_mio_uartx_rfw_s cn56xx; - struct cvmx_mio_uartx_rfw_s cn56xxp1; - struct cvmx_mio_uartx_rfw_s cn58xx; - struct cvmx_mio_uartx_rfw_s cn58xxp1; - struct cvmx_mio_uartx_rfw_s cn61xx; - struct cvmx_mio_uartx_rfw_s cn63xx; - struct cvmx_mio_uartx_rfw_s cn63xxp1; - struct cvmx_mio_uartx_rfw_s cn66xx; - struct cvmx_mio_uartx_rfw_s cn68xx; - struct cvmx_mio_uartx_rfw_s cn68xxp1; - struct cvmx_mio_uartx_rfw_s cnf71xx; }; union cvmx_mio_uartx_sbcr { @@ -4783,24 +3834,6 @@ union cvmx_mio_uartx_sbcr { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uartx_sbcr_s cn30xx; - struct cvmx_mio_uartx_sbcr_s cn31xx; - struct cvmx_mio_uartx_sbcr_s cn38xx; - struct cvmx_mio_uartx_sbcr_s cn38xxp2; - struct cvmx_mio_uartx_sbcr_s cn50xx; - struct cvmx_mio_uartx_sbcr_s cn52xx; - struct cvmx_mio_uartx_sbcr_s cn52xxp1; - struct cvmx_mio_uartx_sbcr_s cn56xx; - struct cvmx_mio_uartx_sbcr_s cn56xxp1; - struct cvmx_mio_uartx_sbcr_s cn58xx; - struct cvmx_mio_uartx_sbcr_s cn58xxp1; - struct cvmx_mio_uartx_sbcr_s cn61xx; - struct cvmx_mio_uartx_sbcr_s cn63xx; - struct cvmx_mio_uartx_sbcr_s cn63xxp1; - struct cvmx_mio_uartx_sbcr_s cn66xx; - struct cvmx_mio_uartx_sbcr_s cn68xx; - struct cvmx_mio_uartx_sbcr_s cn68xxp1; - struct cvmx_mio_uartx_sbcr_s cnf71xx; }; union cvmx_mio_uartx_scr { @@ -4814,24 +3847,6 @@ union cvmx_mio_uartx_scr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_scr_s cn30xx; - struct cvmx_mio_uartx_scr_s cn31xx; - struct cvmx_mio_uartx_scr_s cn38xx; - struct cvmx_mio_uartx_scr_s cn38xxp2; - struct cvmx_mio_uartx_scr_s cn50xx; - struct cvmx_mio_uartx_scr_s cn52xx; - struct cvmx_mio_uartx_scr_s cn52xxp1; - struct cvmx_mio_uartx_scr_s cn56xx; - struct cvmx_mio_uartx_scr_s cn56xxp1; - struct cvmx_mio_uartx_scr_s cn58xx; - struct cvmx_mio_uartx_scr_s cn58xxp1; - struct cvmx_mio_uartx_scr_s cn61xx; - struct cvmx_mio_uartx_scr_s cn63xx; - struct cvmx_mio_uartx_scr_s cn63xxp1; - struct cvmx_mio_uartx_scr_s cn66xx; - struct cvmx_mio_uartx_scr_s cn68xx; - struct cvmx_mio_uartx_scr_s cn68xxp1; - struct cvmx_mio_uartx_scr_s cnf71xx; }; union cvmx_mio_uartx_sfe { @@ -4845,24 +3860,6 @@ union cvmx_mio_uartx_sfe { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uartx_sfe_s cn30xx; - struct cvmx_mio_uartx_sfe_s cn31xx; - struct cvmx_mio_uartx_sfe_s cn38xx; - struct cvmx_mio_uartx_sfe_s cn38xxp2; - struct cvmx_mio_uartx_sfe_s cn50xx; - struct cvmx_mio_uartx_sfe_s cn52xx; - struct cvmx_mio_uartx_sfe_s cn52xxp1; - struct cvmx_mio_uartx_sfe_s cn56xx; - struct cvmx_mio_uartx_sfe_s cn56xxp1; - struct cvmx_mio_uartx_sfe_s cn58xx; - struct cvmx_mio_uartx_sfe_s cn58xxp1; - struct cvmx_mio_uartx_sfe_s cn61xx; - struct cvmx_mio_uartx_sfe_s cn63xx; - struct cvmx_mio_uartx_sfe_s cn63xxp1; - struct cvmx_mio_uartx_sfe_s cn66xx; - struct cvmx_mio_uartx_sfe_s cn68xx; - struct cvmx_mio_uartx_sfe_s cn68xxp1; - struct cvmx_mio_uartx_sfe_s cnf71xx; }; union cvmx_mio_uartx_srr { @@ -4880,24 +3877,6 @@ union cvmx_mio_uartx_srr { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_mio_uartx_srr_s cn30xx; - struct cvmx_mio_uartx_srr_s cn31xx; - struct cvmx_mio_uartx_srr_s cn38xx; - struct cvmx_mio_uartx_srr_s cn38xxp2; - struct cvmx_mio_uartx_srr_s cn50xx; - struct cvmx_mio_uartx_srr_s cn52xx; - struct cvmx_mio_uartx_srr_s cn52xxp1; - struct 
cvmx_mio_uartx_srr_s cn56xx; - struct cvmx_mio_uartx_srr_s cn56xxp1; - struct cvmx_mio_uartx_srr_s cn58xx; - struct cvmx_mio_uartx_srr_s cn58xxp1; - struct cvmx_mio_uartx_srr_s cn61xx; - struct cvmx_mio_uartx_srr_s cn63xx; - struct cvmx_mio_uartx_srr_s cn63xxp1; - struct cvmx_mio_uartx_srr_s cn66xx; - struct cvmx_mio_uartx_srr_s cn68xx; - struct cvmx_mio_uartx_srr_s cn68xxp1; - struct cvmx_mio_uartx_srr_s cnf71xx; }; union cvmx_mio_uartx_srt { @@ -4911,24 +3890,6 @@ union cvmx_mio_uartx_srt { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_uartx_srt_s cn30xx; - struct cvmx_mio_uartx_srt_s cn31xx; - struct cvmx_mio_uartx_srt_s cn38xx; - struct cvmx_mio_uartx_srt_s cn38xxp2; - struct cvmx_mio_uartx_srt_s cn50xx; - struct cvmx_mio_uartx_srt_s cn52xx; - struct cvmx_mio_uartx_srt_s cn52xxp1; - struct cvmx_mio_uartx_srt_s cn56xx; - struct cvmx_mio_uartx_srt_s cn56xxp1; - struct cvmx_mio_uartx_srt_s cn58xx; - struct cvmx_mio_uartx_srt_s cn58xxp1; - struct cvmx_mio_uartx_srt_s cn61xx; - struct cvmx_mio_uartx_srt_s cn63xx; - struct cvmx_mio_uartx_srt_s cn63xxp1; - struct cvmx_mio_uartx_srt_s cn66xx; - struct cvmx_mio_uartx_srt_s cn68xx; - struct cvmx_mio_uartx_srt_s cn68xxp1; - struct cvmx_mio_uartx_srt_s cnf71xx; }; union cvmx_mio_uartx_srts { @@ -4942,24 +3903,6 @@ union cvmx_mio_uartx_srts { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uartx_srts_s cn30xx; - struct cvmx_mio_uartx_srts_s cn31xx; - struct cvmx_mio_uartx_srts_s cn38xx; - struct cvmx_mio_uartx_srts_s cn38xxp2; - struct cvmx_mio_uartx_srts_s cn50xx; - struct cvmx_mio_uartx_srts_s cn52xx; - struct cvmx_mio_uartx_srts_s cn52xxp1; - struct cvmx_mio_uartx_srts_s cn56xx; - struct cvmx_mio_uartx_srts_s cn56xxp1; - struct cvmx_mio_uartx_srts_s cn58xx; - struct cvmx_mio_uartx_srts_s cn58xxp1; - struct cvmx_mio_uartx_srts_s cn61xx; - struct cvmx_mio_uartx_srts_s cn63xx; - struct cvmx_mio_uartx_srts_s cn63xxp1; - struct cvmx_mio_uartx_srts_s cn66xx; - struct cvmx_mio_uartx_srts_s cn68xx; - struct cvmx_mio_uartx_srts_s cn68xxp1; - struct cvmx_mio_uartx_srts_s cnf71xx; }; union cvmx_mio_uartx_stt { @@ -4973,24 +3916,6 @@ union cvmx_mio_uartx_stt { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_uartx_stt_s cn30xx; - struct cvmx_mio_uartx_stt_s cn31xx; - struct cvmx_mio_uartx_stt_s cn38xx; - struct cvmx_mio_uartx_stt_s cn38xxp2; - struct cvmx_mio_uartx_stt_s cn50xx; - struct cvmx_mio_uartx_stt_s cn52xx; - struct cvmx_mio_uartx_stt_s cn52xxp1; - struct cvmx_mio_uartx_stt_s cn56xx; - struct cvmx_mio_uartx_stt_s cn56xxp1; - struct cvmx_mio_uartx_stt_s cn58xx; - struct cvmx_mio_uartx_stt_s cn58xxp1; - struct cvmx_mio_uartx_stt_s cn61xx; - struct cvmx_mio_uartx_stt_s cn63xx; - struct cvmx_mio_uartx_stt_s cn63xxp1; - struct cvmx_mio_uartx_stt_s cn66xx; - struct cvmx_mio_uartx_stt_s cn68xx; - struct cvmx_mio_uartx_stt_s cn68xxp1; - struct cvmx_mio_uartx_stt_s cnf71xx; }; union cvmx_mio_uartx_tfl { @@ -5004,24 +3929,6 @@ union cvmx_mio_uartx_tfl { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_uartx_tfl_s cn30xx; - struct cvmx_mio_uartx_tfl_s cn31xx; - struct cvmx_mio_uartx_tfl_s cn38xx; - struct cvmx_mio_uartx_tfl_s cn38xxp2; - struct cvmx_mio_uartx_tfl_s cn50xx; - struct cvmx_mio_uartx_tfl_s cn52xx; - struct cvmx_mio_uartx_tfl_s cn52xxp1; - struct cvmx_mio_uartx_tfl_s cn56xx; - struct cvmx_mio_uartx_tfl_s cn56xxp1; - struct cvmx_mio_uartx_tfl_s cn58xx; - struct cvmx_mio_uartx_tfl_s cn58xxp1; - struct cvmx_mio_uartx_tfl_s cn61xx; - struct cvmx_mio_uartx_tfl_s cn63xx; - struct cvmx_mio_uartx_tfl_s cn63xxp1; - 
struct cvmx_mio_uartx_tfl_s cn66xx; - struct cvmx_mio_uartx_tfl_s cn68xx; - struct cvmx_mio_uartx_tfl_s cn68xxp1; - struct cvmx_mio_uartx_tfl_s cnf71xx; }; union cvmx_mio_uartx_tfr { @@ -5035,24 +3942,6 @@ union cvmx_mio_uartx_tfr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_tfr_s cn30xx; - struct cvmx_mio_uartx_tfr_s cn31xx; - struct cvmx_mio_uartx_tfr_s cn38xx; - struct cvmx_mio_uartx_tfr_s cn38xxp2; - struct cvmx_mio_uartx_tfr_s cn50xx; - struct cvmx_mio_uartx_tfr_s cn52xx; - struct cvmx_mio_uartx_tfr_s cn52xxp1; - struct cvmx_mio_uartx_tfr_s cn56xx; - struct cvmx_mio_uartx_tfr_s cn56xxp1; - struct cvmx_mio_uartx_tfr_s cn58xx; - struct cvmx_mio_uartx_tfr_s cn58xxp1; - struct cvmx_mio_uartx_tfr_s cn61xx; - struct cvmx_mio_uartx_tfr_s cn63xx; - struct cvmx_mio_uartx_tfr_s cn63xxp1; - struct cvmx_mio_uartx_tfr_s cn66xx; - struct cvmx_mio_uartx_tfr_s cn68xx; - struct cvmx_mio_uartx_tfr_s cn68xxp1; - struct cvmx_mio_uartx_tfr_s cnf71xx; }; union cvmx_mio_uartx_thr { @@ -5066,24 +3955,6 @@ union cvmx_mio_uartx_thr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uartx_thr_s cn30xx; - struct cvmx_mio_uartx_thr_s cn31xx; - struct cvmx_mio_uartx_thr_s cn38xx; - struct cvmx_mio_uartx_thr_s cn38xxp2; - struct cvmx_mio_uartx_thr_s cn50xx; - struct cvmx_mio_uartx_thr_s cn52xx; - struct cvmx_mio_uartx_thr_s cn52xxp1; - struct cvmx_mio_uartx_thr_s cn56xx; - struct cvmx_mio_uartx_thr_s cn56xxp1; - struct cvmx_mio_uartx_thr_s cn58xx; - struct cvmx_mio_uartx_thr_s cn58xxp1; - struct cvmx_mio_uartx_thr_s cn61xx; - struct cvmx_mio_uartx_thr_s cn63xx; - struct cvmx_mio_uartx_thr_s cn63xxp1; - struct cvmx_mio_uartx_thr_s cn66xx; - struct cvmx_mio_uartx_thr_s cn68xx; - struct cvmx_mio_uartx_thr_s cn68xxp1; - struct cvmx_mio_uartx_thr_s cnf71xx; }; union cvmx_mio_uartx_usr { @@ -5105,24 +3976,6 @@ union cvmx_mio_uartx_usr { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_mio_uartx_usr_s cn30xx; - struct cvmx_mio_uartx_usr_s cn31xx; - struct cvmx_mio_uartx_usr_s cn38xx; - struct cvmx_mio_uartx_usr_s cn38xxp2; - struct cvmx_mio_uartx_usr_s cn50xx; - struct cvmx_mio_uartx_usr_s cn52xx; - struct cvmx_mio_uartx_usr_s cn52xxp1; - struct cvmx_mio_uartx_usr_s cn56xx; - struct cvmx_mio_uartx_usr_s cn56xxp1; - struct cvmx_mio_uartx_usr_s cn58xx; - struct cvmx_mio_uartx_usr_s cn58xxp1; - struct cvmx_mio_uartx_usr_s cn61xx; - struct cvmx_mio_uartx_usr_s cn63xx; - struct cvmx_mio_uartx_usr_s cn63xxp1; - struct cvmx_mio_uartx_usr_s cn66xx; - struct cvmx_mio_uartx_usr_s cn68xx; - struct cvmx_mio_uartx_usr_s cn68xxp1; - struct cvmx_mio_uartx_usr_s cnf71xx; }; union cvmx_mio_uart2_dlh { @@ -5136,8 +3989,6 @@ union cvmx_mio_uart2_dlh { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_dlh_s cn52xx; - struct cvmx_mio_uart2_dlh_s cn52xxp1; }; union cvmx_mio_uart2_dll { @@ -5151,8 +4002,6 @@ union cvmx_mio_uart2_dll { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_dll_s cn52xx; - struct cvmx_mio_uart2_dll_s cn52xxp1; }; union cvmx_mio_uart2_far { @@ -5166,8 +4015,6 @@ union cvmx_mio_uart2_far { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uart2_far_s cn52xx; - struct cvmx_mio_uart2_far_s cn52xxp1; }; union cvmx_mio_uart2_fcr { @@ -5191,8 +4038,6 @@ union cvmx_mio_uart2_fcr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_fcr_s cn52xx; - struct cvmx_mio_uart2_fcr_s cn52xxp1; }; union cvmx_mio_uart2_htx { @@ -5206,8 +4051,6 @@ union cvmx_mio_uart2_htx { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uart2_htx_s cn52xx; - struct 
cvmx_mio_uart2_htx_s cn52xxp1; }; union cvmx_mio_uart2_ier { @@ -5231,8 +4074,6 @@ union cvmx_mio_uart2_ier { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_ier_s cn52xx; - struct cvmx_mio_uart2_ier_s cn52xxp1; }; union cvmx_mio_uart2_iir { @@ -5250,8 +4091,6 @@ union cvmx_mio_uart2_iir { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_iir_s cn52xx; - struct cvmx_mio_uart2_iir_s cn52xxp1; }; union cvmx_mio_uart2_lcr { @@ -5277,8 +4116,6 @@ union cvmx_mio_uart2_lcr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_lcr_s cn52xx; - struct cvmx_mio_uart2_lcr_s cn52xxp1; }; union cvmx_mio_uart2_lsr { @@ -5306,8 +4143,6 @@ union cvmx_mio_uart2_lsr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_lsr_s cn52xx; - struct cvmx_mio_uart2_lsr_s cn52xxp1; }; union cvmx_mio_uart2_mcr { @@ -5331,8 +4166,6 @@ union cvmx_mio_uart2_mcr { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_mio_uart2_mcr_s cn52xx; - struct cvmx_mio_uart2_mcr_s cn52xxp1; }; union cvmx_mio_uart2_msr { @@ -5360,8 +4193,6 @@ union cvmx_mio_uart2_msr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_msr_s cn52xx; - struct cvmx_mio_uart2_msr_s cn52xxp1; }; union cvmx_mio_uart2_rbr { @@ -5375,8 +4206,6 @@ union cvmx_mio_uart2_rbr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_rbr_s cn52xx; - struct cvmx_mio_uart2_rbr_s cn52xxp1; }; union cvmx_mio_uart2_rfl { @@ -5390,8 +4219,6 @@ union cvmx_mio_uart2_rfl { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_uart2_rfl_s cn52xx; - struct cvmx_mio_uart2_rfl_s cn52xxp1; }; union cvmx_mio_uart2_rfw { @@ -5409,8 +4236,6 @@ union cvmx_mio_uart2_rfw { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_mio_uart2_rfw_s cn52xx; - struct cvmx_mio_uart2_rfw_s cn52xxp1; }; union cvmx_mio_uart2_sbcr { @@ -5424,8 +4249,6 @@ union cvmx_mio_uart2_sbcr { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uart2_sbcr_s cn52xx; - struct cvmx_mio_uart2_sbcr_s cn52xxp1; }; union cvmx_mio_uart2_scr { @@ -5439,8 +4262,6 @@ union cvmx_mio_uart2_scr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_scr_s cn52xx; - struct cvmx_mio_uart2_scr_s cn52xxp1; }; union cvmx_mio_uart2_sfe { @@ -5454,8 +4275,6 @@ union cvmx_mio_uart2_sfe { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uart2_sfe_s cn52xx; - struct cvmx_mio_uart2_sfe_s cn52xxp1; }; union cvmx_mio_uart2_srr { @@ -5473,8 +4292,6 @@ union cvmx_mio_uart2_srr { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_mio_uart2_srr_s cn52xx; - struct cvmx_mio_uart2_srr_s cn52xxp1; }; union cvmx_mio_uart2_srt { @@ -5488,8 +4305,6 @@ union cvmx_mio_uart2_srt { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_uart2_srt_s cn52xx; - struct cvmx_mio_uart2_srt_s cn52xxp1; }; union cvmx_mio_uart2_srts { @@ -5503,8 +4318,6 @@ union cvmx_mio_uart2_srts { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_mio_uart2_srts_s cn52xx; - struct cvmx_mio_uart2_srts_s cn52xxp1; }; union cvmx_mio_uart2_stt { @@ -5518,8 +4331,6 @@ union cvmx_mio_uart2_stt { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_mio_uart2_stt_s cn52xx; - struct cvmx_mio_uart2_stt_s cn52xxp1; }; union cvmx_mio_uart2_tfl { @@ -5533,8 +4344,6 @@ union cvmx_mio_uart2_tfl { uint64_t reserved_7_63:57; #endif } s; - struct cvmx_mio_uart2_tfl_s cn52xx; - struct cvmx_mio_uart2_tfl_s cn52xxp1; }; union cvmx_mio_uart2_tfr { @@ -5548,8 +4357,6 @@ union cvmx_mio_uart2_tfr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_tfr_s cn52xx; - struct 
cvmx_mio_uart2_tfr_s cn52xxp1; }; union cvmx_mio_uart2_thr { @@ -5563,8 +4370,6 @@ union cvmx_mio_uart2_thr { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_mio_uart2_thr_s cn52xx; - struct cvmx_mio_uart2_thr_s cn52xxp1; }; union cvmx_mio_uart2_usr { @@ -5586,8 +4391,6 @@ union cvmx_mio_uart2_usr { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_mio_uart2_usr_s cn52xx; - struct cvmx_mio_uart2_usr_s cn52xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h index 3155e6019dc8..cd60d43e809a 100644 --- a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h @@ -80,15 +80,6 @@ union cvmx_mixx_bist { uint64_t reserved_4_63:60; #endif } cn52xx; - struct cvmx_mixx_bist_cn52xx cn52xxp1; - struct cvmx_mixx_bist_cn52xx cn56xx; - struct cvmx_mixx_bist_cn52xx cn56xxp1; - struct cvmx_mixx_bist_s cn61xx; - struct cvmx_mixx_bist_s cn63xx; - struct cvmx_mixx_bist_s cn63xxp1; - struct cvmx_mixx_bist_s cn66xx; - struct cvmx_mixx_bist_s cn68xx; - struct cvmx_mixx_bist_s cn68xxp1; }; union cvmx_mixx_ctl { @@ -137,15 +128,6 @@ union cvmx_mixx_ctl { uint64_t reserved_8_63:56; #endif } cn52xx; - struct cvmx_mixx_ctl_cn52xx cn52xxp1; - struct cvmx_mixx_ctl_cn52xx cn56xx; - struct cvmx_mixx_ctl_cn52xx cn56xxp1; - struct cvmx_mixx_ctl_s cn61xx; - struct cvmx_mixx_ctl_s cn63xx; - struct cvmx_mixx_ctl_s cn63xxp1; - struct cvmx_mixx_ctl_s cn66xx; - struct cvmx_mixx_ctl_s cn68xx; - struct cvmx_mixx_ctl_s cn68xxp1; }; union cvmx_mixx_intena { @@ -194,15 +176,6 @@ union cvmx_mixx_intena { uint64_t reserved_7_63:57; #endif } cn52xx; - struct cvmx_mixx_intena_cn52xx cn52xxp1; - struct cvmx_mixx_intena_cn52xx cn56xx; - struct cvmx_mixx_intena_cn52xx cn56xxp1; - struct cvmx_mixx_intena_s cn61xx; - struct cvmx_mixx_intena_s cn63xx; - struct cvmx_mixx_intena_s cn63xxp1; - struct cvmx_mixx_intena_s cn66xx; - struct cvmx_mixx_intena_s cn68xx; - struct cvmx_mixx_intena_s cn68xxp1; }; union cvmx_mixx_ircnt { @@ -216,16 +189,6 @@ union cvmx_mixx_ircnt { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_mixx_ircnt_s cn52xx; - struct cvmx_mixx_ircnt_s cn52xxp1; - struct cvmx_mixx_ircnt_s cn56xx; - struct cvmx_mixx_ircnt_s cn56xxp1; - struct cvmx_mixx_ircnt_s cn61xx; - struct cvmx_mixx_ircnt_s cn63xx; - struct cvmx_mixx_ircnt_s cn63xxp1; - struct cvmx_mixx_ircnt_s cn66xx; - struct cvmx_mixx_ircnt_s cn68xx; - struct cvmx_mixx_ircnt_s cn68xxp1; }; union cvmx_mixx_irhwm { @@ -241,16 +204,6 @@ union cvmx_mixx_irhwm { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_mixx_irhwm_s cn52xx; - struct cvmx_mixx_irhwm_s cn52xxp1; - struct cvmx_mixx_irhwm_s cn56xx; - struct cvmx_mixx_irhwm_s cn56xxp1; - struct cvmx_mixx_irhwm_s cn61xx; - struct cvmx_mixx_irhwm_s cn63xx; - struct cvmx_mixx_irhwm_s cn63xxp1; - struct cvmx_mixx_irhwm_s cn66xx; - struct cvmx_mixx_irhwm_s cn68xx; - struct cvmx_mixx_irhwm_s cn68xxp1; }; union cvmx_mixx_iring1 { @@ -283,15 +236,6 @@ union cvmx_mixx_iring1 { uint64_t reserved_60_63:4; #endif } cn52xx; - struct cvmx_mixx_iring1_cn52xx cn52xxp1; - struct cvmx_mixx_iring1_cn52xx cn56xx; - struct cvmx_mixx_iring1_cn52xx cn56xxp1; - struct cvmx_mixx_iring1_s cn61xx; - struct cvmx_mixx_iring1_s cn63xx; - struct cvmx_mixx_iring1_s cn63xxp1; - struct cvmx_mixx_iring1_s cn66xx; - struct cvmx_mixx_iring1_s cn68xx; - struct cvmx_mixx_iring1_s cn68xxp1; }; union cvmx_mixx_iring2 { @@ -309,16 +253,6 @@ union cvmx_mixx_iring2 { uint64_t reserved_52_63:12; #endif } s; - struct cvmx_mixx_iring2_s cn52xx; - 
struct cvmx_mixx_iring2_s cn52xxp1; - struct cvmx_mixx_iring2_s cn56xx; - struct cvmx_mixx_iring2_s cn56xxp1; - struct cvmx_mixx_iring2_s cn61xx; - struct cvmx_mixx_iring2_s cn63xx; - struct cvmx_mixx_iring2_s cn63xxp1; - struct cvmx_mixx_iring2_s cn66xx; - struct cvmx_mixx_iring2_s cn68xx; - struct cvmx_mixx_iring2_s cn68xxp1; }; union cvmx_mixx_isr { @@ -367,15 +301,6 @@ union cvmx_mixx_isr { uint64_t reserved_7_63:57; #endif } cn52xx; - struct cvmx_mixx_isr_cn52xx cn52xxp1; - struct cvmx_mixx_isr_cn52xx cn56xx; - struct cvmx_mixx_isr_cn52xx cn56xxp1; - struct cvmx_mixx_isr_s cn61xx; - struct cvmx_mixx_isr_s cn63xx; - struct cvmx_mixx_isr_s cn63xxp1; - struct cvmx_mixx_isr_s cn66xx; - struct cvmx_mixx_isr_s cn68xx; - struct cvmx_mixx_isr_s cn68xxp1; }; union cvmx_mixx_orcnt { @@ -389,16 +314,6 @@ union cvmx_mixx_orcnt { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_mixx_orcnt_s cn52xx; - struct cvmx_mixx_orcnt_s cn52xxp1; - struct cvmx_mixx_orcnt_s cn56xx; - struct cvmx_mixx_orcnt_s cn56xxp1; - struct cvmx_mixx_orcnt_s cn61xx; - struct cvmx_mixx_orcnt_s cn63xx; - struct cvmx_mixx_orcnt_s cn63xxp1; - struct cvmx_mixx_orcnt_s cn66xx; - struct cvmx_mixx_orcnt_s cn68xx; - struct cvmx_mixx_orcnt_s cn68xxp1; }; union cvmx_mixx_orhwm { @@ -412,16 +327,6 @@ union cvmx_mixx_orhwm { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_mixx_orhwm_s cn52xx; - struct cvmx_mixx_orhwm_s cn52xxp1; - struct cvmx_mixx_orhwm_s cn56xx; - struct cvmx_mixx_orhwm_s cn56xxp1; - struct cvmx_mixx_orhwm_s cn61xx; - struct cvmx_mixx_orhwm_s cn63xx; - struct cvmx_mixx_orhwm_s cn63xxp1; - struct cvmx_mixx_orhwm_s cn66xx; - struct cvmx_mixx_orhwm_s cn68xx; - struct cvmx_mixx_orhwm_s cn68xxp1; }; union cvmx_mixx_oring1 { @@ -454,15 +359,6 @@ union cvmx_mixx_oring1 { uint64_t reserved_60_63:4; #endif } cn52xx; - struct cvmx_mixx_oring1_cn52xx cn52xxp1; - struct cvmx_mixx_oring1_cn52xx cn56xx; - struct cvmx_mixx_oring1_cn52xx cn56xxp1; - struct cvmx_mixx_oring1_s cn61xx; - struct cvmx_mixx_oring1_s cn63xx; - struct cvmx_mixx_oring1_s cn63xxp1; - struct cvmx_mixx_oring1_s cn66xx; - struct cvmx_mixx_oring1_s cn68xx; - struct cvmx_mixx_oring1_s cn68xxp1; }; union cvmx_mixx_oring2 { @@ -480,16 +376,6 @@ union cvmx_mixx_oring2 { uint64_t reserved_52_63:12; #endif } s; - struct cvmx_mixx_oring2_s cn52xx; - struct cvmx_mixx_oring2_s cn52xxp1; - struct cvmx_mixx_oring2_s cn56xx; - struct cvmx_mixx_oring2_s cn56xxp1; - struct cvmx_mixx_oring2_s cn61xx; - struct cvmx_mixx_oring2_s cn63xx; - struct cvmx_mixx_oring2_s cn63xxp1; - struct cvmx_mixx_oring2_s cn66xx; - struct cvmx_mixx_oring2_s cn68xx; - struct cvmx_mixx_oring2_s cn68xxp1; }; union cvmx_mixx_remcnt { @@ -507,16 +393,6 @@ union cvmx_mixx_remcnt { uint64_t reserved_52_63:12; #endif } s; - struct cvmx_mixx_remcnt_s cn52xx; - struct cvmx_mixx_remcnt_s cn52xxp1; - struct cvmx_mixx_remcnt_s cn56xx; - struct cvmx_mixx_remcnt_s cn56xxp1; - struct cvmx_mixx_remcnt_s cn61xx; - struct cvmx_mixx_remcnt_s cn63xx; - struct cvmx_mixx_remcnt_s cn63xxp1; - struct cvmx_mixx_remcnt_s cn66xx; - struct cvmx_mixx_remcnt_s cn68xx; - struct cvmx_mixx_remcnt_s cn68xxp1; }; union cvmx_mixx_tsctl { @@ -538,12 +414,6 @@ union cvmx_mixx_tsctl { uint64_t reserved_21_63:43; #endif } s; - struct cvmx_mixx_tsctl_s cn61xx; - struct cvmx_mixx_tsctl_s cn63xx; - struct cvmx_mixx_tsctl_s cn63xxp1; - struct cvmx_mixx_tsctl_s cn66xx; - struct cvmx_mixx_tsctl_s cn68xx; - struct cvmx_mixx_tsctl_s cn68xxp1; }; union cvmx_mixx_tstamp { @@ -555,12 +425,6 @@ union cvmx_mixx_tstamp { uint64_t tstamp:64; 
#endif } s; - struct cvmx_mixx_tstamp_s cn61xx; - struct cvmx_mixx_tstamp_s cn63xx; - struct cvmx_mixx_tstamp_s cn63xxp1; - struct cvmx_mixx_tstamp_s cn66xx; - struct cvmx_mixx_tstamp_s cn68xx; - struct cvmx_mixx_tstamp_s cn68xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h index 58114d414356..6a51b1ef8c9b 100644 --- a/arch/mips/include/asm/octeon/cvmx-npei-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h @@ -154,10 +154,6 @@ union cvmx_npei_bar1_indexx { uint32_t reserved_18_31:14; #endif } s; - struct cvmx_npei_bar1_indexx_s cn52xx; - struct cvmx_npei_bar1_indexx_s cn52xxp1; - struct cvmx_npei_bar1_indexx_s cn56xx; - struct cvmx_npei_bar1_indexx_s cn56xxp1; }; union cvmx_npei_bist_status { @@ -485,7 +481,6 @@ union cvmx_npei_bist_status { uint64_t reserved_46_63:18; #endif } cn52xxp1; - struct cvmx_npei_bist_status_cn52xx cn56xx; struct cvmx_npei_bist_status_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_58_63:6; @@ -648,8 +643,6 @@ union cvmx_npei_bist_status2 { uint64_t reserved_14_63:50; #endif } s; - struct cvmx_npei_bist_status2_s cn52xx; - struct cvmx_npei_bist_status2_s cn56xx; }; union cvmx_npei_ctl_port0 { @@ -693,10 +686,6 @@ union cvmx_npei_ctl_port0 { uint64_t reserved_21_63:43; #endif } s; - struct cvmx_npei_ctl_port0_s cn52xx; - struct cvmx_npei_ctl_port0_s cn52xxp1; - struct cvmx_npei_ctl_port0_s cn56xx; - struct cvmx_npei_ctl_port0_s cn56xxp1; }; union cvmx_npei_ctl_port1 { @@ -740,10 +729,6 @@ union cvmx_npei_ctl_port1 { uint64_t reserved_21_63:43; #endif } s; - struct cvmx_npei_ctl_port1_s cn52xx; - struct cvmx_npei_ctl_port1_s cn52xxp1; - struct cvmx_npei_ctl_port1_s cn56xx; - struct cvmx_npei_ctl_port1_s cn56xxp1; }; union cvmx_npei_ctl_status { @@ -773,7 +758,6 @@ union cvmx_npei_ctl_status { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npei_ctl_status_s cn52xx; struct cvmx_npei_ctl_status_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_44_63:20; @@ -799,7 +783,6 @@ union cvmx_npei_ctl_status { uint64_t reserved_44_63:20; #endif } cn52xxp1; - struct cvmx_npei_ctl_status_s cn56xx; struct cvmx_npei_ctl_status_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_15_63:49; @@ -848,10 +831,6 @@ union cvmx_npei_ctl_status2 { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npei_ctl_status2_s cn52xx; - struct cvmx_npei_ctl_status2_s cn52xxp1; - struct cvmx_npei_ctl_status2_s cn56xx; - struct cvmx_npei_ctl_status2_s cn56xxp1; }; union cvmx_npei_data_out_cnt { @@ -871,10 +850,6 @@ union cvmx_npei_data_out_cnt { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npei_data_out_cnt_s cn52xx; - struct cvmx_npei_data_out_cnt_s cn52xxp1; - struct cvmx_npei_data_out_cnt_s cn56xx; - struct cvmx_npei_data_out_cnt_s cn56xxp1; }; union cvmx_npei_dbg_data { @@ -919,7 +894,6 @@ union cvmx_npei_dbg_data { uint64_t reserved_29_63:35; #endif } cn52xx; - struct cvmx_npei_dbg_data_cn52xx cn52xxp1; struct cvmx_npei_dbg_data_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -941,7 +915,6 @@ union cvmx_npei_dbg_data { uint64_t reserved_29_63:35; #endif } cn56xx; - struct cvmx_npei_dbg_data_cn56xx cn56xxp1; }; union cvmx_npei_dbg_select { @@ -955,10 +928,6 @@ union cvmx_npei_dbg_select { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npei_dbg_select_s cn52xx; - struct cvmx_npei_dbg_select_s cn52xxp1; - struct cvmx_npei_dbg_select_s cn56xx; - struct cvmx_npei_dbg_select_s cn56xxp1; }; union cvmx_npei_dmax_counts { @@ -974,10 +943,6 @@ union 
cvmx_npei_dmax_counts { uint64_t reserved_39_63:25; #endif } s; - struct cvmx_npei_dmax_counts_s cn52xx; - struct cvmx_npei_dmax_counts_s cn52xxp1; - struct cvmx_npei_dmax_counts_s cn56xx; - struct cvmx_npei_dmax_counts_s cn56xxp1; }; union cvmx_npei_dmax_dbell { @@ -991,10 +956,6 @@ union cvmx_npei_dmax_dbell { uint32_t reserved_16_31:16; #endif } s; - struct cvmx_npei_dmax_dbell_s cn52xx; - struct cvmx_npei_dmax_dbell_s cn52xxp1; - struct cvmx_npei_dmax_dbell_s cn56xx; - struct cvmx_npei_dmax_dbell_s cn56xxp1; }; union cvmx_npei_dmax_ibuff_saddr { @@ -1012,7 +973,6 @@ union cvmx_npei_dmax_ibuff_saddr { uint64_t reserved_37_63:27; #endif } s; - struct cvmx_npei_dmax_ibuff_saddr_s cn52xx; struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_36_63:28; @@ -1024,8 +984,6 @@ union cvmx_npei_dmax_ibuff_saddr { uint64_t reserved_36_63:28; #endif } cn52xxp1; - struct cvmx_npei_dmax_ibuff_saddr_s cn56xx; - struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1; }; union cvmx_npei_dmax_naddr { @@ -1039,10 +997,6 @@ union cvmx_npei_dmax_naddr { uint64_t reserved_36_63:28; #endif } s; - struct cvmx_npei_dmax_naddr_s cn52xx; - struct cvmx_npei_dmax_naddr_s cn52xxp1; - struct cvmx_npei_dmax_naddr_s cn56xx; - struct cvmx_npei_dmax_naddr_s cn56xxp1; }; union cvmx_npei_dma0_int_level { @@ -1056,10 +1010,6 @@ union cvmx_npei_dma0_int_level { uint64_t time:32; #endif } s; - struct cvmx_npei_dma0_int_level_s cn52xx; - struct cvmx_npei_dma0_int_level_s cn52xxp1; - struct cvmx_npei_dma0_int_level_s cn56xx; - struct cvmx_npei_dma0_int_level_s cn56xxp1; }; union cvmx_npei_dma1_int_level { @@ -1073,10 +1023,6 @@ union cvmx_npei_dma1_int_level { uint64_t time:32; #endif } s; - struct cvmx_npei_dma1_int_level_s cn52xx; - struct cvmx_npei_dma1_int_level_s cn52xxp1; - struct cvmx_npei_dma1_int_level_s cn56xx; - struct cvmx_npei_dma1_int_level_s cn56xxp1; }; union cvmx_npei_dma_cnts { @@ -1090,10 +1036,6 @@ union cvmx_npei_dma_cnts { uint64_t dma1:32; #endif } s; - struct cvmx_npei_dma_cnts_s cn52xx; - struct cvmx_npei_dma_cnts_s cn52xxp1; - struct cvmx_npei_dma_cnts_s cn56xx; - struct cvmx_npei_dma_cnts_s cn56xxp1; }; union cvmx_npei_dma_control { @@ -1137,7 +1079,6 @@ union cvmx_npei_dma_control { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_npei_dma_control_s cn52xx; struct cvmx_npei_dma_control_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_38_63:26; @@ -1173,7 +1114,6 @@ union cvmx_npei_dma_control { uint64_t reserved_38_63:26; #endif } cn52xxp1; - struct cvmx_npei_dma_control_s cn56xx; struct cvmx_npei_dma_control_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_39_63:25; @@ -1250,8 +1190,6 @@ union cvmx_npei_dma_pcie_req_num { uint64_t dma_arb:1; #endif } s; - struct cvmx_npei_dma_pcie_req_num_s cn52xx; - struct cvmx_npei_dma_pcie_req_num_s cn56xx; }; union cvmx_npei_dma_state1 { @@ -1273,7 +1211,6 @@ union cvmx_npei_dma_state1 { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_npei_dma_state1_s cn52xx; }; union cvmx_npei_dma_state1_p1 { @@ -1332,7 +1269,6 @@ union cvmx_npei_dma_state1_p1 { uint64_t reserved_60_63:4; #endif } cn52xxp1; - struct cvmx_npei_dma_state1_p1_s cn56xxp1; }; union cvmx_npei_dma_state2 { @@ -1354,7 +1290,6 @@ union cvmx_npei_dma_state2 { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_npei_dma_state2_s cn52xx; }; union cvmx_npei_dma_state2_p1 { @@ -1393,7 +1328,6 @@ union cvmx_npei_dma_state2_p1 { uint64_t reserved_45_63:19; #endif } cn52xxp1; - struct cvmx_npei_dma_state2_p1_s cn56xxp1; }; union 
cvmx_npei_dma_state3_p1 { @@ -1413,8 +1347,6 @@ union cvmx_npei_dma_state3_p1 { uint64_t reserved_60_63:4; #endif } s; - struct cvmx_npei_dma_state3_p1_s cn52xxp1; - struct cvmx_npei_dma_state3_p1_s cn56xxp1; }; union cvmx_npei_dma_state4_p1 { @@ -1434,8 +1366,6 @@ union cvmx_npei_dma_state4_p1 { uint64_t reserved_52_63:12; #endif } s; - struct cvmx_npei_dma_state4_p1_s cn52xxp1; - struct cvmx_npei_dma_state4_p1_s cn56xxp1; }; union cvmx_npei_dma_state5_p1 { @@ -1451,7 +1381,6 @@ union cvmx_npei_dma_state5_p1 { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_npei_dma_state5_p1_s cn56xxp1; }; union cvmx_npei_int_a_enb { @@ -1483,7 +1412,6 @@ union cvmx_npei_int_a_enb { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_npei_int_a_enb_s cn52xx; struct cvmx_npei_int_a_enb_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -1495,7 +1423,6 @@ union cvmx_npei_int_a_enb { uint64_t reserved_2_63:62; #endif } cn52xxp1; - struct cvmx_npei_int_a_enb_s cn56xx; }; union cvmx_npei_int_a_enb2 { @@ -1527,7 +1454,6 @@ union cvmx_npei_int_a_enb2 { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_npei_int_a_enb2_s cn52xx; struct cvmx_npei_int_a_enb2_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -1539,7 +1465,6 @@ union cvmx_npei_int_a_enb2 { uint64_t reserved_2_63:62; #endif } cn52xxp1; - struct cvmx_npei_int_a_enb2_s cn56xx; }; union cvmx_npei_int_a_sum { @@ -1571,7 +1496,6 @@ union cvmx_npei_int_a_sum { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_npei_int_a_sum_s cn52xx; struct cvmx_npei_int_a_sum_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; @@ -1583,7 +1507,6 @@ union cvmx_npei_int_a_sum { uint64_t reserved_2_63:62; #endif } cn52xxp1; - struct cvmx_npei_int_a_sum_s cn56xx; }; union cvmx_npei_int_enb { @@ -1721,7 +1644,6 @@ union cvmx_npei_int_enb { uint64_t mio_inta:1; #endif } s; - struct cvmx_npei_int_enb_s cn52xx; struct cvmx_npei_int_enb_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t mio_inta:1; @@ -1855,7 +1777,6 @@ union cvmx_npei_int_enb { uint64_t mio_inta:1; #endif } cn52xxp1; - struct cvmx_npei_int_enb_s cn56xx; struct cvmx_npei_int_enb_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t mio_inta:1; @@ -2122,7 +2043,6 @@ union cvmx_npei_int_enb2 { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_npei_int_enb2_s cn52xx; struct cvmx_npei_int_enb2_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_62_63:2; @@ -2254,7 +2174,6 @@ union cvmx_npei_int_enb2 { uint64_t reserved_62_63:2; #endif } cn52xxp1; - struct cvmx_npei_int_enb2_s cn56xx; struct cvmx_npei_int_enb2_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_61_63:3; @@ -2399,9 +2318,6 @@ union cvmx_npei_int_info { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_npei_int_info_s cn52xx; - struct cvmx_npei_int_info_s cn56xx; - struct cvmx_npei_int_info_s cn56xxp1; }; union cvmx_npei_int_sum { @@ -2539,7 +2455,6 @@ union cvmx_npei_int_sum { uint64_t mio_inta:1; #endif } s; - struct cvmx_npei_int_sum_s cn52xx; struct cvmx_npei_int_sum_cn52xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t mio_inta:1; @@ -2667,7 +2582,6 @@ union cvmx_npei_int_sum { uint64_t mio_inta:1; #endif } cn52xxp1; - struct cvmx_npei_int_sum_s cn56xx; struct cvmx_npei_int_sum_cn56xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t mio_inta:1; @@ -2924,9 +2838,6 @@ union cvmx_npei_int_sum2 { uint64_t mio_inta:1; #endif } s; - struct cvmx_npei_int_sum2_s cn52xx; - struct cvmx_npei_int_sum2_s cn52xxp1; - struct cvmx_npei_int_sum2_s cn56xx; }; union cvmx_npei_last_win_rdata0 { @@ 
-2938,10 +2849,6 @@ union cvmx_npei_last_win_rdata0 { uint64_t data:64; #endif } s; - struct cvmx_npei_last_win_rdata0_s cn52xx; - struct cvmx_npei_last_win_rdata0_s cn52xxp1; - struct cvmx_npei_last_win_rdata0_s cn56xx; - struct cvmx_npei_last_win_rdata0_s cn56xxp1; }; union cvmx_npei_last_win_rdata1 { @@ -2953,10 +2860,6 @@ union cvmx_npei_last_win_rdata1 { uint64_t data:64; #endif } s; - struct cvmx_npei_last_win_rdata1_s cn52xx; - struct cvmx_npei_last_win_rdata1_s cn52xxp1; - struct cvmx_npei_last_win_rdata1_s cn56xx; - struct cvmx_npei_last_win_rdata1_s cn56xxp1; }; union cvmx_npei_mem_access_ctl { @@ -2972,10 +2875,6 @@ union cvmx_npei_mem_access_ctl { uint64_t reserved_14_63:50; #endif } s; - struct cvmx_npei_mem_access_ctl_s cn52xx; - struct cvmx_npei_mem_access_ctl_s cn52xxp1; - struct cvmx_npei_mem_access_ctl_s cn56xx; - struct cvmx_npei_mem_access_ctl_s cn56xxp1; }; union cvmx_npei_mem_access_subidx { @@ -3007,10 +2906,6 @@ union cvmx_npei_mem_access_subidx { uint64_t reserved_42_63:22; #endif } s; - struct cvmx_npei_mem_access_subidx_s cn52xx; - struct cvmx_npei_mem_access_subidx_s cn52xxp1; - struct cvmx_npei_mem_access_subidx_s cn56xx; - struct cvmx_npei_mem_access_subidx_s cn56xxp1; }; union cvmx_npei_msi_enb0 { @@ -3022,10 +2917,6 @@ union cvmx_npei_msi_enb0 { uint64_t enb:64; #endif } s; - struct cvmx_npei_msi_enb0_s cn52xx; - struct cvmx_npei_msi_enb0_s cn52xxp1; - struct cvmx_npei_msi_enb0_s cn56xx; - struct cvmx_npei_msi_enb0_s cn56xxp1; }; union cvmx_npei_msi_enb1 { @@ -3037,10 +2928,6 @@ union cvmx_npei_msi_enb1 { uint64_t enb:64; #endif } s; - struct cvmx_npei_msi_enb1_s cn52xx; - struct cvmx_npei_msi_enb1_s cn52xxp1; - struct cvmx_npei_msi_enb1_s cn56xx; - struct cvmx_npei_msi_enb1_s cn56xxp1; }; union cvmx_npei_msi_enb2 { @@ -3052,10 +2939,6 @@ union cvmx_npei_msi_enb2 { uint64_t enb:64; #endif } s; - struct cvmx_npei_msi_enb2_s cn52xx; - struct cvmx_npei_msi_enb2_s cn52xxp1; - struct cvmx_npei_msi_enb2_s cn56xx; - struct cvmx_npei_msi_enb2_s cn56xxp1; }; union cvmx_npei_msi_enb3 { @@ -3067,10 +2950,6 @@ union cvmx_npei_msi_enb3 { uint64_t enb:64; #endif } s; - struct cvmx_npei_msi_enb3_s cn52xx; - struct cvmx_npei_msi_enb3_s cn52xxp1; - struct cvmx_npei_msi_enb3_s cn56xx; - struct cvmx_npei_msi_enb3_s cn56xxp1; }; union cvmx_npei_msi_rcv0 { @@ -3082,10 +2961,6 @@ union cvmx_npei_msi_rcv0 { uint64_t intr:64; #endif } s; - struct cvmx_npei_msi_rcv0_s cn52xx; - struct cvmx_npei_msi_rcv0_s cn52xxp1; - struct cvmx_npei_msi_rcv0_s cn56xx; - struct cvmx_npei_msi_rcv0_s cn56xxp1; }; union cvmx_npei_msi_rcv1 { @@ -3097,10 +2972,6 @@ union cvmx_npei_msi_rcv1 { uint64_t intr:64; #endif } s; - struct cvmx_npei_msi_rcv1_s cn52xx; - struct cvmx_npei_msi_rcv1_s cn52xxp1; - struct cvmx_npei_msi_rcv1_s cn56xx; - struct cvmx_npei_msi_rcv1_s cn56xxp1; }; union cvmx_npei_msi_rcv2 { @@ -3112,10 +2983,6 @@ union cvmx_npei_msi_rcv2 { uint64_t intr:64; #endif } s; - struct cvmx_npei_msi_rcv2_s cn52xx; - struct cvmx_npei_msi_rcv2_s cn52xxp1; - struct cvmx_npei_msi_rcv2_s cn56xx; - struct cvmx_npei_msi_rcv2_s cn56xxp1; }; union cvmx_npei_msi_rcv3 { @@ -3127,10 +2994,6 @@ union cvmx_npei_msi_rcv3 { uint64_t intr:64; #endif } s; - struct cvmx_npei_msi_rcv3_s cn52xx; - struct cvmx_npei_msi_rcv3_s cn52xxp1; - struct cvmx_npei_msi_rcv3_s cn56xx; - struct cvmx_npei_msi_rcv3_s cn56xxp1; }; union cvmx_npei_msi_rd_map { @@ -3146,10 +3009,6 @@ union cvmx_npei_msi_rd_map { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npei_msi_rd_map_s cn52xx; - struct cvmx_npei_msi_rd_map_s cn52xxp1; - 
struct cvmx_npei_msi_rd_map_s cn56xx; - struct cvmx_npei_msi_rd_map_s cn56xxp1; }; union cvmx_npei_msi_w1c_enb0 { @@ -3161,8 +3020,6 @@ union cvmx_npei_msi_w1c_enb0 { uint64_t clr:64; #endif } s; - struct cvmx_npei_msi_w1c_enb0_s cn52xx; - struct cvmx_npei_msi_w1c_enb0_s cn56xx; }; union cvmx_npei_msi_w1c_enb1 { @@ -3174,8 +3031,6 @@ union cvmx_npei_msi_w1c_enb1 { uint64_t clr:64; #endif } s; - struct cvmx_npei_msi_w1c_enb1_s cn52xx; - struct cvmx_npei_msi_w1c_enb1_s cn56xx; }; union cvmx_npei_msi_w1c_enb2 { @@ -3187,8 +3042,6 @@ union cvmx_npei_msi_w1c_enb2 { uint64_t clr:64; #endif } s; - struct cvmx_npei_msi_w1c_enb2_s cn52xx; - struct cvmx_npei_msi_w1c_enb2_s cn56xx; }; union cvmx_npei_msi_w1c_enb3 { @@ -3200,8 +3053,6 @@ union cvmx_npei_msi_w1c_enb3 { uint64_t clr:64; #endif } s; - struct cvmx_npei_msi_w1c_enb3_s cn52xx; - struct cvmx_npei_msi_w1c_enb3_s cn56xx; }; union cvmx_npei_msi_w1s_enb0 { @@ -3213,8 +3064,6 @@ union cvmx_npei_msi_w1s_enb0 { uint64_t set:64; #endif } s; - struct cvmx_npei_msi_w1s_enb0_s cn52xx; - struct cvmx_npei_msi_w1s_enb0_s cn56xx; }; union cvmx_npei_msi_w1s_enb1 { @@ -3226,8 +3075,6 @@ union cvmx_npei_msi_w1s_enb1 { uint64_t set:64; #endif } s; - struct cvmx_npei_msi_w1s_enb1_s cn52xx; - struct cvmx_npei_msi_w1s_enb1_s cn56xx; }; union cvmx_npei_msi_w1s_enb2 { @@ -3239,8 +3086,6 @@ union cvmx_npei_msi_w1s_enb2 { uint64_t set:64; #endif } s; - struct cvmx_npei_msi_w1s_enb2_s cn52xx; - struct cvmx_npei_msi_w1s_enb2_s cn56xx; }; union cvmx_npei_msi_w1s_enb3 { @@ -3252,8 +3097,6 @@ union cvmx_npei_msi_w1s_enb3 { uint64_t set:64; #endif } s; - struct cvmx_npei_msi_w1s_enb3_s cn52xx; - struct cvmx_npei_msi_w1s_enb3_s cn56xx; }; union cvmx_npei_msi_wr_map { @@ -3269,10 +3112,6 @@ union cvmx_npei_msi_wr_map { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npei_msi_wr_map_s cn52xx; - struct cvmx_npei_msi_wr_map_s cn52xxp1; - struct cvmx_npei_msi_wr_map_s cn56xx; - struct cvmx_npei_msi_wr_map_s cn56xxp1; }; union cvmx_npei_pcie_credit_cnt { @@ -3296,8 +3135,6 @@ union cvmx_npei_pcie_credit_cnt { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_npei_pcie_credit_cnt_s cn52xx; - struct cvmx_npei_pcie_credit_cnt_s cn56xx; }; union cvmx_npei_pcie_msi_rcv { @@ -3311,10 +3148,6 @@ union cvmx_npei_pcie_msi_rcv { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_npei_pcie_msi_rcv_s cn52xx; - struct cvmx_npei_pcie_msi_rcv_s cn52xxp1; - struct cvmx_npei_pcie_msi_rcv_s cn56xx; - struct cvmx_npei_pcie_msi_rcv_s cn56xxp1; }; union cvmx_npei_pcie_msi_rcv_b1 { @@ -3330,10 +3163,6 @@ union cvmx_npei_pcie_msi_rcv_b1 { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx; - struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1; - struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx; - struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1; }; union cvmx_npei_pcie_msi_rcv_b2 { @@ -3349,10 +3178,6 @@ union cvmx_npei_pcie_msi_rcv_b2 { uint64_t reserved_24_63:40; #endif } s; - struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx; - struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1; - struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx; - struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1; }; union cvmx_npei_pcie_msi_rcv_b3 { @@ -3368,10 +3193,6 @@ union cvmx_npei_pcie_msi_rcv_b3 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx; - struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1; - struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx; - struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1; }; union cvmx_npei_pktx_cnts { @@ -3387,8 +3208,6 @@ union cvmx_npei_pktx_cnts { uint64_t reserved_54_63:10; 
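/*
 * Why these deletions are behavior-neutral, as a usage sketch: callers
 * read the raw u64 and decode it through the shared "s" view, never
 * through the per-model aliases. Assumes the cvmx_read_csr() accessor
 * from cvmx.h and the CVMX_MIXX_ORCNT() address macro from the mix-defs
 * header earlier in this patch; the helper itself is hypothetical.
 */
static inline uint64_t example_tx_outstanding(int port)
{
	union cvmx_mixx_orcnt orcnt;

	orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	return orcnt.s.orcnt;	/* bits 19:0 of the MIX output-ring count */
}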
#endif } s; - struct cvmx_npei_pktx_cnts_s cn52xx; - struct cvmx_npei_pktx_cnts_s cn56xx; }; union cvmx_npei_pktx_in_bp { @@ -3402,8 +3221,6 @@ union cvmx_npei_pktx_in_bp { uint64_t wmark:32; #endif } s; - struct cvmx_npei_pktx_in_bp_s cn52xx; - struct cvmx_npei_pktx_in_bp_s cn56xx; }; union cvmx_npei_pktx_instr_baddr { @@ -3417,8 +3234,6 @@ union cvmx_npei_pktx_instr_baddr { uint64_t addr:61; #endif } s; - struct cvmx_npei_pktx_instr_baddr_s cn52xx; - struct cvmx_npei_pktx_instr_baddr_s cn56xx; }; union cvmx_npei_pktx_instr_baoff_dbell { @@ -3432,8 +3247,6 @@ union cvmx_npei_pktx_instr_baoff_dbell { uint64_t aoff:32; #endif } s; - struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx; - struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx; }; union cvmx_npei_pktx_instr_fifo_rsize { @@ -3453,8 +3266,6 @@ union cvmx_npei_pktx_instr_fifo_rsize { uint64_t max:9; #endif } s; - struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx; - struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx; }; union cvmx_npei_pktx_instr_header { @@ -3490,8 +3301,6 @@ union cvmx_npei_pktx_instr_header { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npei_pktx_instr_header_s cn52xx; - struct cvmx_npei_pktx_instr_header_s cn56xx; }; union cvmx_npei_pktx_slist_baddr { @@ -3505,8 +3314,6 @@ union cvmx_npei_pktx_slist_baddr { uint64_t addr:60; #endif } s; - struct cvmx_npei_pktx_slist_baddr_s cn52xx; - struct cvmx_npei_pktx_slist_baddr_s cn56xx; }; union cvmx_npei_pktx_slist_baoff_dbell { @@ -3520,8 +3327,6 @@ union cvmx_npei_pktx_slist_baoff_dbell { uint64_t aoff:32; #endif } s; - struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx; - struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx; }; union cvmx_npei_pktx_slist_fifo_rsize { @@ -3535,8 +3340,6 @@ union cvmx_npei_pktx_slist_fifo_rsize { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx; - struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx; }; union cvmx_npei_pkt_cnt_int { @@ -3550,8 +3353,6 @@ union cvmx_npei_pkt_cnt_int { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_cnt_int_s cn52xx; - struct cvmx_npei_pkt_cnt_int_s cn56xx; }; union cvmx_npei_pkt_cnt_int_enb { @@ -3565,8 +3366,6 @@ union cvmx_npei_pkt_cnt_int_enb { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_cnt_int_enb_s cn52xx; - struct cvmx_npei_pkt_cnt_int_enb_s cn56xx; }; union cvmx_npei_pkt_data_out_es { @@ -3578,8 +3377,6 @@ union cvmx_npei_pkt_data_out_es { uint64_t es:64; #endif } s; - struct cvmx_npei_pkt_data_out_es_s cn52xx; - struct cvmx_npei_pkt_data_out_es_s cn56xx; }; union cvmx_npei_pkt_data_out_ns { @@ -3593,8 +3390,6 @@ union cvmx_npei_pkt_data_out_ns { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_data_out_ns_s cn52xx; - struct cvmx_npei_pkt_data_out_ns_s cn56xx; }; union cvmx_npei_pkt_data_out_ror { @@ -3608,8 +3403,6 @@ union cvmx_npei_pkt_data_out_ror { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_data_out_ror_s cn52xx; - struct cvmx_npei_pkt_data_out_ror_s cn56xx; }; union cvmx_npei_pkt_dpaddr { @@ -3623,8 +3416,6 @@ union cvmx_npei_pkt_dpaddr { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_dpaddr_s cn52xx; - struct cvmx_npei_pkt_dpaddr_s cn56xx; }; union cvmx_npei_pkt_in_bp { @@ -3638,8 +3429,6 @@ union cvmx_npei_pkt_in_bp { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_in_bp_s cn52xx; - struct cvmx_npei_pkt_in_bp_s cn56xx; }; union cvmx_npei_pkt_in_donex_cnts { @@ -3653,8 +3442,6 @@ union cvmx_npei_pkt_in_donex_cnts { uint64_t reserved_32_63:32; #endif } s; - struct 
cvmx_npei_pkt_in_donex_cnts_s cn52xx; - struct cvmx_npei_pkt_in_donex_cnts_s cn56xx; }; union cvmx_npei_pkt_in_instr_counts { @@ -3668,8 +3455,6 @@ union cvmx_npei_pkt_in_instr_counts { uint64_t wr_cnt:32; #endif } s; - struct cvmx_npei_pkt_in_instr_counts_s cn52xx; - struct cvmx_npei_pkt_in_instr_counts_s cn56xx; }; union cvmx_npei_pkt_in_pcie_port { @@ -3681,8 +3466,6 @@ union cvmx_npei_pkt_in_pcie_port { uint64_t pp:64; #endif } s; - struct cvmx_npei_pkt_in_pcie_port_s cn52xx; - struct cvmx_npei_pkt_in_pcie_port_s cn56xx; }; union cvmx_npei_pkt_input_control { @@ -3712,8 +3495,6 @@ union cvmx_npei_pkt_input_control { uint64_t reserved_23_63:41; #endif } s; - struct cvmx_npei_pkt_input_control_s cn52xx; - struct cvmx_npei_pkt_input_control_s cn56xx; }; union cvmx_npei_pkt_instr_enb { @@ -3727,8 +3508,6 @@ union cvmx_npei_pkt_instr_enb { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_instr_enb_s cn52xx; - struct cvmx_npei_pkt_instr_enb_s cn56xx; }; union cvmx_npei_pkt_instr_rd_size { @@ -3740,8 +3519,6 @@ union cvmx_npei_pkt_instr_rd_size { uint64_t rdsize:64; #endif } s; - struct cvmx_npei_pkt_instr_rd_size_s cn52xx; - struct cvmx_npei_pkt_instr_rd_size_s cn56xx; }; union cvmx_npei_pkt_instr_size { @@ -3755,8 +3532,6 @@ union cvmx_npei_pkt_instr_size { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_instr_size_s cn52xx; - struct cvmx_npei_pkt_instr_size_s cn56xx; }; union cvmx_npei_pkt_int_levels { @@ -3772,8 +3547,6 @@ union cvmx_npei_pkt_int_levels { uint64_t reserved_54_63:10; #endif } s; - struct cvmx_npei_pkt_int_levels_s cn52xx; - struct cvmx_npei_pkt_int_levels_s cn56xx; }; union cvmx_npei_pkt_iptr { @@ -3787,8 +3560,6 @@ union cvmx_npei_pkt_iptr { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_iptr_s cn52xx; - struct cvmx_npei_pkt_iptr_s cn56xx; }; union cvmx_npei_pkt_out_bmode { @@ -3802,8 +3573,6 @@ union cvmx_npei_pkt_out_bmode { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_out_bmode_s cn52xx; - struct cvmx_npei_pkt_out_bmode_s cn56xx; }; union cvmx_npei_pkt_out_enb { @@ -3817,8 +3586,6 @@ union cvmx_npei_pkt_out_enb { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_out_enb_s cn52xx; - struct cvmx_npei_pkt_out_enb_s cn56xx; }; union cvmx_npei_pkt_output_wmark { @@ -3832,8 +3599,6 @@ union cvmx_npei_pkt_output_wmark { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_output_wmark_s cn52xx; - struct cvmx_npei_pkt_output_wmark_s cn56xx; }; union cvmx_npei_pkt_pcie_port { @@ -3845,8 +3610,6 @@ union cvmx_npei_pkt_pcie_port { uint64_t pp:64; #endif } s; - struct cvmx_npei_pkt_pcie_port_s cn52xx; - struct cvmx_npei_pkt_pcie_port_s cn56xx; }; union cvmx_npei_pkt_port_in_rst { @@ -3860,8 +3623,6 @@ union cvmx_npei_pkt_port_in_rst { uint64_t in_rst:32; #endif } s; - struct cvmx_npei_pkt_port_in_rst_s cn52xx; - struct cvmx_npei_pkt_port_in_rst_s cn56xx; }; union cvmx_npei_pkt_slist_es { @@ -3873,8 +3634,6 @@ union cvmx_npei_pkt_slist_es { uint64_t es:64; #endif } s; - struct cvmx_npei_pkt_slist_es_s cn52xx; - struct cvmx_npei_pkt_slist_es_s cn56xx; }; union cvmx_npei_pkt_slist_id_size { @@ -3890,8 +3649,6 @@ union cvmx_npei_pkt_slist_id_size { uint64_t reserved_23_63:41; #endif } s; - struct cvmx_npei_pkt_slist_id_size_s cn52xx; - struct cvmx_npei_pkt_slist_id_size_s cn56xx; }; union cvmx_npei_pkt_slist_ns { @@ -3905,8 +3662,6 @@ union cvmx_npei_pkt_slist_ns { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_slist_ns_s cn52xx; - struct cvmx_npei_pkt_slist_ns_s cn56xx; }; union 
cvmx_npei_pkt_slist_ror { @@ -3920,8 +3675,6 @@ union cvmx_npei_pkt_slist_ror { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_slist_ror_s cn52xx; - struct cvmx_npei_pkt_slist_ror_s cn56xx; }; union cvmx_npei_pkt_time_int { @@ -3935,8 +3688,6 @@ union cvmx_npei_pkt_time_int { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_time_int_s cn52xx; - struct cvmx_npei_pkt_time_int_s cn56xx; }; union cvmx_npei_pkt_time_int_enb { @@ -3950,8 +3701,6 @@ union cvmx_npei_pkt_time_int_enb { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_pkt_time_int_enb_s cn52xx; - struct cvmx_npei_pkt_time_int_enb_s cn56xx; }; union cvmx_npei_rsl_int_blocks { @@ -4019,10 +3768,6 @@ union cvmx_npei_rsl_int_blocks { uint64_t reserved_31_63:33; #endif } s; - struct cvmx_npei_rsl_int_blocks_s cn52xx; - struct cvmx_npei_rsl_int_blocks_s cn52xxp1; - struct cvmx_npei_rsl_int_blocks_s cn56xx; - struct cvmx_npei_rsl_int_blocks_s cn56xxp1; }; union cvmx_npei_scratch_1 { @@ -4034,10 +3779,6 @@ union cvmx_npei_scratch_1 { uint64_t data:64; #endif } s; - struct cvmx_npei_scratch_1_s cn52xx; - struct cvmx_npei_scratch_1_s cn52xxp1; - struct cvmx_npei_scratch_1_s cn56xx; - struct cvmx_npei_scratch_1_s cn56xxp1; }; union cvmx_npei_state1 { @@ -4055,10 +3796,6 @@ union cvmx_npei_state1 { uint64_t cpl1:12; #endif } s; - struct cvmx_npei_state1_s cn52xx; - struct cvmx_npei_state1_s cn52xxp1; - struct cvmx_npei_state1_s cn56xx; - struct cvmx_npei_state1_s cn56xxp1; }; union cvmx_npei_state2 { @@ -4082,10 +3819,6 @@ union cvmx_npei_state2 { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_npei_state2_s cn52xx; - struct cvmx_npei_state2_s cn52xxp1; - struct cvmx_npei_state2_s cn56xx; - struct cvmx_npei_state2_s cn56xxp1; }; union cvmx_npei_state3 { @@ -4105,10 +3838,6 @@ union cvmx_npei_state3 { uint64_t reserved_56_63:8; #endif } s; - struct cvmx_npei_state3_s cn52xx; - struct cvmx_npei_state3_s cn52xxp1; - struct cvmx_npei_state3_s cn56xx; - struct cvmx_npei_state3_s cn56xxp1; }; union cvmx_npei_win_rd_addr { @@ -4126,10 +3855,6 @@ union cvmx_npei_win_rd_addr { uint64_t reserved_51_63:13; #endif } s; - struct cvmx_npei_win_rd_addr_s cn52xx; - struct cvmx_npei_win_rd_addr_s cn52xxp1; - struct cvmx_npei_win_rd_addr_s cn56xx; - struct cvmx_npei_win_rd_addr_s cn56xxp1; }; union cvmx_npei_win_rd_data { @@ -4141,10 +3866,6 @@ union cvmx_npei_win_rd_data { uint64_t rd_data:64; #endif } s; - struct cvmx_npei_win_rd_data_s cn52xx; - struct cvmx_npei_win_rd_data_s cn52xxp1; - struct cvmx_npei_win_rd_data_s cn56xx; - struct cvmx_npei_win_rd_data_s cn56xxp1; }; union cvmx_npei_win_wr_addr { @@ -4162,10 +3883,6 @@ union cvmx_npei_win_wr_addr { uint64_t reserved_49_63:15; #endif } s; - struct cvmx_npei_win_wr_addr_s cn52xx; - struct cvmx_npei_win_wr_addr_s cn52xxp1; - struct cvmx_npei_win_wr_addr_s cn56xx; - struct cvmx_npei_win_wr_addr_s cn56xxp1; }; union cvmx_npei_win_wr_data { @@ -4177,10 +3894,6 @@ union cvmx_npei_win_wr_data { uint64_t wr_data:64; #endif } s; - struct cvmx_npei_win_wr_data_s cn52xx; - struct cvmx_npei_win_wr_data_s cn52xxp1; - struct cvmx_npei_win_wr_data_s cn56xx; - struct cvmx_npei_win_wr_data_s cn56xxp1; }; union cvmx_npei_win_wr_mask { @@ -4194,10 +3907,6 @@ union cvmx_npei_win_wr_mask { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_npei_win_wr_mask_s cn52xx; - struct cvmx_npei_win_wr_mask_s cn52xxp1; - struct cvmx_npei_win_wr_mask_s cn56xx; - struct cvmx_npei_win_wr_mask_s cn56xxp1; }; union cvmx_npei_window_ctl { @@ -4211,10 +3920,6 @@ union cvmx_npei_window_ctl { 
uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npei_window_ctl_s cn52xx; - struct cvmx_npei_window_ctl_s cn52xxp1; - struct cvmx_npei_window_ctl_s cn56xx; - struct cvmx_npei_window_ctl_s cn56xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h index 129bb250e534..ba4967fda480 100644 --- a/arch/mips/include/asm/octeon/cvmx-npi-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h @@ -160,13 +160,6 @@ union cvmx_npi_base_addr_inputx { uint64_t baddr:61; #endif } s; - struct cvmx_npi_base_addr_inputx_s cn30xx; - struct cvmx_npi_base_addr_inputx_s cn31xx; - struct cvmx_npi_base_addr_inputx_s cn38xx; - struct cvmx_npi_base_addr_inputx_s cn38xxp2; - struct cvmx_npi_base_addr_inputx_s cn50xx; - struct cvmx_npi_base_addr_inputx_s cn58xx; - struct cvmx_npi_base_addr_inputx_s cn58xxp1; }; union cvmx_npi_base_addr_outputx { @@ -180,13 +173,6 @@ union cvmx_npi_base_addr_outputx { uint64_t baddr:61; #endif } s; - struct cvmx_npi_base_addr_outputx_s cn30xx; - struct cvmx_npi_base_addr_outputx_s cn31xx; - struct cvmx_npi_base_addr_outputx_s cn38xx; - struct cvmx_npi_base_addr_outputx_s cn38xxp2; - struct cvmx_npi_base_addr_outputx_s cn50xx; - struct cvmx_npi_base_addr_outputx_s cn58xx; - struct cvmx_npi_base_addr_outputx_s cn58xxp1; }; union cvmx_npi_bist_status { @@ -281,9 +267,6 @@ union cvmx_npi_bist_status { uint64_t reserved_20_63:44; #endif } cn30xx; - struct cvmx_npi_bist_status_s cn31xx; - struct cvmx_npi_bist_status_s cn38xx; - struct cvmx_npi_bist_status_s cn38xxp2; struct cvmx_npi_bist_status_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -329,8 +312,6 @@ union cvmx_npi_bist_status { uint64_t reserved_20_63:44; #endif } cn50xx; - struct cvmx_npi_bist_status_s cn58xx; - struct cvmx_npi_bist_status_s cn58xxp1; }; union cvmx_npi_buff_size_outputx { @@ -346,13 +327,6 @@ union cvmx_npi_buff_size_outputx { uint64_t reserved_23_63:41; #endif } s; - struct cvmx_npi_buff_size_outputx_s cn30xx; - struct cvmx_npi_buff_size_outputx_s cn31xx; - struct cvmx_npi_buff_size_outputx_s cn38xx; - struct cvmx_npi_buff_size_outputx_s cn38xxp2; - struct cvmx_npi_buff_size_outputx_s cn50xx; - struct cvmx_npi_buff_size_outputx_s cn58xx; - struct cvmx_npi_buff_size_outputx_s cn58xxp1; }; union cvmx_npi_comp_ctl { @@ -368,9 +342,6 @@ union cvmx_npi_comp_ctl { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_npi_comp_ctl_s cn50xx; - struct cvmx_npi_comp_ctl_s cn58xx; - struct cvmx_npi_comp_ctl_s cn58xxp1; }; union cvmx_npi_ctl_status { @@ -498,11 +469,6 @@ union cvmx_npi_ctl_status { uint64_t reserved_63_63:1; #endif } cn31xx; - struct cvmx_npi_ctl_status_s cn38xx; - struct cvmx_npi_ctl_status_s cn38xxp2; - struct cvmx_npi_ctl_status_cn31xx cn50xx; - struct cvmx_npi_ctl_status_s cn58xx; - struct cvmx_npi_ctl_status_s cn58xxp1; }; union cvmx_npi_dbg_select { @@ -516,13 +482,6 @@ union cvmx_npi_dbg_select { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npi_dbg_select_s cn30xx; - struct cvmx_npi_dbg_select_s cn31xx; - struct cvmx_npi_dbg_select_s cn38xx; - struct cvmx_npi_dbg_select_s cn38xxp2; - struct cvmx_npi_dbg_select_s cn50xx; - struct cvmx_npi_dbg_select_s cn58xx; - struct cvmx_npi_dbg_select_s cn58xxp1; }; union cvmx_npi_dma_control { @@ -558,13 +517,6 @@ union cvmx_npi_dma_control { uint64_t reserved_36_63:28; #endif } s; - struct cvmx_npi_dma_control_s cn30xx; - struct cvmx_npi_dma_control_s cn31xx; - struct cvmx_npi_dma_control_s cn38xx; - struct cvmx_npi_dma_control_s cn38xxp2; - struct 
cvmx_npi_dma_control_s cn50xx; - struct cvmx_npi_dma_control_s cn58xx; - struct cvmx_npi_dma_control_s cn58xxp1; }; union cvmx_npi_dma_highp_counts { @@ -580,13 +532,6 @@ union cvmx_npi_dma_highp_counts { uint64_t reserved_39_63:25; #endif } s; - struct cvmx_npi_dma_highp_counts_s cn30xx; - struct cvmx_npi_dma_highp_counts_s cn31xx; - struct cvmx_npi_dma_highp_counts_s cn38xx; - struct cvmx_npi_dma_highp_counts_s cn38xxp2; - struct cvmx_npi_dma_highp_counts_s cn50xx; - struct cvmx_npi_dma_highp_counts_s cn58xx; - struct cvmx_npi_dma_highp_counts_s cn58xxp1; }; union cvmx_npi_dma_highp_naddr { @@ -602,13 +547,6 @@ union cvmx_npi_dma_highp_naddr { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_npi_dma_highp_naddr_s cn30xx; - struct cvmx_npi_dma_highp_naddr_s cn31xx; - struct cvmx_npi_dma_highp_naddr_s cn38xx; - struct cvmx_npi_dma_highp_naddr_s cn38xxp2; - struct cvmx_npi_dma_highp_naddr_s cn50xx; - struct cvmx_npi_dma_highp_naddr_s cn58xx; - struct cvmx_npi_dma_highp_naddr_s cn58xxp1; }; union cvmx_npi_dma_lowp_counts { @@ -624,13 +562,6 @@ union cvmx_npi_dma_lowp_counts { uint64_t reserved_39_63:25; #endif } s; - struct cvmx_npi_dma_lowp_counts_s cn30xx; - struct cvmx_npi_dma_lowp_counts_s cn31xx; - struct cvmx_npi_dma_lowp_counts_s cn38xx; - struct cvmx_npi_dma_lowp_counts_s cn38xxp2; - struct cvmx_npi_dma_lowp_counts_s cn50xx; - struct cvmx_npi_dma_lowp_counts_s cn58xx; - struct cvmx_npi_dma_lowp_counts_s cn58xxp1; }; union cvmx_npi_dma_lowp_naddr { @@ -646,13 +577,6 @@ union cvmx_npi_dma_lowp_naddr { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_npi_dma_lowp_naddr_s cn30xx; - struct cvmx_npi_dma_lowp_naddr_s cn31xx; - struct cvmx_npi_dma_lowp_naddr_s cn38xx; - struct cvmx_npi_dma_lowp_naddr_s cn38xxp2; - struct cvmx_npi_dma_lowp_naddr_s cn50xx; - struct cvmx_npi_dma_lowp_naddr_s cn58xx; - struct cvmx_npi_dma_lowp_naddr_s cn58xxp1; }; union cvmx_npi_highp_dbell { @@ -666,13 +590,6 @@ union cvmx_npi_highp_dbell { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npi_highp_dbell_s cn30xx; - struct cvmx_npi_highp_dbell_s cn31xx; - struct cvmx_npi_highp_dbell_s cn38xx; - struct cvmx_npi_highp_dbell_s cn38xxp2; - struct cvmx_npi_highp_dbell_s cn50xx; - struct cvmx_npi_highp_dbell_s cn58xx; - struct cvmx_npi_highp_dbell_s cn58xxp1; }; union cvmx_npi_highp_ibuff_saddr { @@ -686,13 +603,6 @@ union cvmx_npi_highp_ibuff_saddr { uint64_t reserved_36_63:28; #endif } s; - struct cvmx_npi_highp_ibuff_saddr_s cn30xx; - struct cvmx_npi_highp_ibuff_saddr_s cn31xx; - struct cvmx_npi_highp_ibuff_saddr_s cn38xx; - struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2; - struct cvmx_npi_highp_ibuff_saddr_s cn50xx; - struct cvmx_npi_highp_ibuff_saddr_s cn58xx; - struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1; }; union cvmx_npi_input_control { @@ -745,12 +655,6 @@ union cvmx_npi_input_control { uint64_t reserved_22_63:42; #endif } cn30xx; - struct cvmx_npi_input_control_cn30xx cn31xx; - struct cvmx_npi_input_control_s cn38xx; - struct cvmx_npi_input_control_cn30xx cn38xxp2; - struct cvmx_npi_input_control_s cn50xx; - struct cvmx_npi_input_control_s cn58xx; - struct cvmx_npi_input_control_s cn58xxp1; }; union cvmx_npi_int_enb { @@ -1094,7 +998,6 @@ union cvmx_npi_int_enb { uint64_t reserved_62_63:2; #endif } cn31xx; - struct cvmx_npi_int_enb_s cn38xx; struct cvmx_npi_int_enb_cn38xxp2 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_42_63:22; @@ -1186,9 +1089,6 @@ union cvmx_npi_int_enb { uint64_t reserved_42_63:22; #endif } cn38xxp2; - struct cvmx_npi_int_enb_cn31xx cn50xx; - struct cvmx_npi_int_enb_s 
cn58xx; - struct cvmx_npi_int_enb_s cn58xxp1; }; union cvmx_npi_int_sum { @@ -1532,7 +1432,6 @@ union cvmx_npi_int_sum { uint64_t reserved_62_63:2; #endif } cn31xx; - struct cvmx_npi_int_sum_s cn38xx; struct cvmx_npi_int_sum_cn38xxp2 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_42_63:22; @@ -1624,9 +1523,6 @@ union cvmx_npi_int_sum { uint64_t reserved_42_63:22; #endif } cn38xxp2; - struct cvmx_npi_int_sum_cn31xx cn50xx; - struct cvmx_npi_int_sum_s cn58xx; - struct cvmx_npi_int_sum_s cn58xxp1; }; union cvmx_npi_lowp_dbell { @@ -1640,13 +1536,6 @@ union cvmx_npi_lowp_dbell { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_npi_lowp_dbell_s cn30xx; - struct cvmx_npi_lowp_dbell_s cn31xx; - struct cvmx_npi_lowp_dbell_s cn38xx; - struct cvmx_npi_lowp_dbell_s cn38xxp2; - struct cvmx_npi_lowp_dbell_s cn50xx; - struct cvmx_npi_lowp_dbell_s cn58xx; - struct cvmx_npi_lowp_dbell_s cn58xxp1; }; union cvmx_npi_lowp_ibuff_saddr { @@ -1660,13 +1549,6 @@ union cvmx_npi_lowp_ibuff_saddr { uint64_t reserved_36_63:28; #endif } s; - struct cvmx_npi_lowp_ibuff_saddr_s cn30xx; - struct cvmx_npi_lowp_ibuff_saddr_s cn31xx; - struct cvmx_npi_lowp_ibuff_saddr_s cn38xx; - struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2; - struct cvmx_npi_lowp_ibuff_saddr_s cn50xx; - struct cvmx_npi_lowp_ibuff_saddr_s cn58xx; - struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1; }; union cvmx_npi_mem_access_subidx { @@ -1696,7 +1578,6 @@ union cvmx_npi_mem_access_subidx { uint64_t reserved_38_63:26; #endif } s; - struct cvmx_npi_mem_access_subidx_s cn30xx; struct cvmx_npi_mem_access_subidx_cn31xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_36_63:28; @@ -1718,11 +1599,6 @@ union cvmx_npi_mem_access_subidx { uint64_t reserved_36_63:28; #endif } cn31xx; - struct cvmx_npi_mem_access_subidx_s cn38xx; - struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2; - struct cvmx_npi_mem_access_subidx_s cn50xx; - struct cvmx_npi_mem_access_subidx_s cn58xx; - struct cvmx_npi_mem_access_subidx_s cn58xxp1; }; union cvmx_npi_msi_rcv { @@ -1734,13 +1610,6 @@ union cvmx_npi_msi_rcv { uint64_t int_vec:64; #endif } s; - struct cvmx_npi_msi_rcv_s cn30xx; - struct cvmx_npi_msi_rcv_s cn31xx; - struct cvmx_npi_msi_rcv_s cn38xx; - struct cvmx_npi_msi_rcv_s cn38xxp2; - struct cvmx_npi_msi_rcv_s cn50xx; - struct cvmx_npi_msi_rcv_s cn58xx; - struct cvmx_npi_msi_rcv_s cn58xxp1; }; union cvmx_npi_num_desc_outputx { @@ -1754,13 +1623,6 @@ union cvmx_npi_num_desc_outputx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npi_num_desc_outputx_s cn30xx; - struct cvmx_npi_num_desc_outputx_s cn31xx; - struct cvmx_npi_num_desc_outputx_s cn38xx; - struct cvmx_npi_num_desc_outputx_s cn38xxp2; - struct cvmx_npi_num_desc_outputx_s cn50xx; - struct cvmx_npi_num_desc_outputx_s cn58xx; - struct cvmx_npi_num_desc_outputx_s cn58xxp1; }; union cvmx_npi_output_control { @@ -1932,7 +1794,6 @@ union cvmx_npi_output_control { uint64_t reserved_46_63:18; #endif } cn31xx; - struct cvmx_npi_output_control_s cn38xx; struct cvmx_npi_output_control_cn38xxp2 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; @@ -2069,8 +1930,6 @@ union cvmx_npi_output_control { uint64_t reserved_49_63:15; #endif } cn50xx; - struct cvmx_npi_output_control_s cn58xx; - struct cvmx_npi_output_control_s cn58xxp1; }; union cvmx_npi_px_dbpair_addr { @@ -2086,13 +1945,6 @@ union cvmx_npi_px_dbpair_addr { uint64_t reserved_63_63:1; #endif } s; - struct cvmx_npi_px_dbpair_addr_s cn30xx; - struct cvmx_npi_px_dbpair_addr_s cn31xx; - struct cvmx_npi_px_dbpair_addr_s cn38xx; - struct cvmx_npi_px_dbpair_addr_s 
cn38xxp2; - struct cvmx_npi_px_dbpair_addr_s cn50xx; - struct cvmx_npi_px_dbpair_addr_s cn58xx; - struct cvmx_npi_px_dbpair_addr_s cn58xxp1; }; union cvmx_npi_px_instr_addr { @@ -2106,13 +1958,6 @@ union cvmx_npi_px_instr_addr { uint64_t state:3; #endif } s; - struct cvmx_npi_px_instr_addr_s cn30xx; - struct cvmx_npi_px_instr_addr_s cn31xx; - struct cvmx_npi_px_instr_addr_s cn38xx; - struct cvmx_npi_px_instr_addr_s cn38xxp2; - struct cvmx_npi_px_instr_addr_s cn50xx; - struct cvmx_npi_px_instr_addr_s cn58xx; - struct cvmx_npi_px_instr_addr_s cn58xxp1; }; union cvmx_npi_px_instr_cnts { @@ -2128,13 +1973,6 @@ union cvmx_npi_px_instr_cnts { uint64_t reserved_38_63:26; #endif } s; - struct cvmx_npi_px_instr_cnts_s cn30xx; - struct cvmx_npi_px_instr_cnts_s cn31xx; - struct cvmx_npi_px_instr_cnts_s cn38xx; - struct cvmx_npi_px_instr_cnts_s cn38xxp2; - struct cvmx_npi_px_instr_cnts_s cn50xx; - struct cvmx_npi_px_instr_cnts_s cn58xx; - struct cvmx_npi_px_instr_cnts_s cn58xxp1; }; union cvmx_npi_px_pair_cnts { @@ -2150,13 +1988,6 @@ union cvmx_npi_px_pair_cnts { uint64_t reserved_37_63:27; #endif } s; - struct cvmx_npi_px_pair_cnts_s cn30xx; - struct cvmx_npi_px_pair_cnts_s cn31xx; - struct cvmx_npi_px_pair_cnts_s cn38xx; - struct cvmx_npi_px_pair_cnts_s cn38xxp2; - struct cvmx_npi_px_pair_cnts_s cn50xx; - struct cvmx_npi_px_pair_cnts_s cn58xx; - struct cvmx_npi_px_pair_cnts_s cn58xxp1; }; union cvmx_npi_pci_burst_size { @@ -2172,13 +2003,6 @@ union cvmx_npi_pci_burst_size { uint64_t reserved_14_63:50; #endif } s; - struct cvmx_npi_pci_burst_size_s cn30xx; - struct cvmx_npi_pci_burst_size_s cn31xx; - struct cvmx_npi_pci_burst_size_s cn38xx; - struct cvmx_npi_pci_burst_size_s cn38xxp2; - struct cvmx_npi_pci_burst_size_s cn50xx; - struct cvmx_npi_pci_burst_size_s cn58xx; - struct cvmx_npi_pci_burst_size_s cn58xxp1; }; union cvmx_npi_pci_int_arb_cfg { @@ -2215,12 +2039,6 @@ union cvmx_npi_pci_int_arb_cfg { uint64_t reserved_5_63:59; #endif } cn30xx; - struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx; - struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx; - struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2; - struct cvmx_npi_pci_int_arb_cfg_s cn50xx; - struct cvmx_npi_pci_int_arb_cfg_s cn58xx; - struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1; }; union cvmx_npi_pci_read_cmd { @@ -2234,13 +2052,6 @@ union cvmx_npi_pci_read_cmd { uint64_t reserved_11_63:53; #endif } s; - struct cvmx_npi_pci_read_cmd_s cn30xx; - struct cvmx_npi_pci_read_cmd_s cn31xx; - struct cvmx_npi_pci_read_cmd_s cn38xx; - struct cvmx_npi_pci_read_cmd_s cn38xxp2; - struct cvmx_npi_pci_read_cmd_s cn50xx; - struct cvmx_npi_pci_read_cmd_s cn58xx; - struct cvmx_npi_pci_read_cmd_s cn58xxp1; }; union cvmx_npi_port32_instr_hdr { @@ -2276,13 +2087,6 @@ union cvmx_npi_port32_instr_hdr { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npi_port32_instr_hdr_s cn30xx; - struct cvmx_npi_port32_instr_hdr_s cn31xx; - struct cvmx_npi_port32_instr_hdr_s cn38xx; - struct cvmx_npi_port32_instr_hdr_s cn38xxp2; - struct cvmx_npi_port32_instr_hdr_s cn50xx; - struct cvmx_npi_port32_instr_hdr_s cn58xx; - struct cvmx_npi_port32_instr_hdr_s cn58xxp1; }; union cvmx_npi_port33_instr_hdr { @@ -2318,12 +2122,6 @@ union cvmx_npi_port33_instr_hdr { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npi_port33_instr_hdr_s cn31xx; - struct cvmx_npi_port33_instr_hdr_s cn38xx; - struct cvmx_npi_port33_instr_hdr_s cn38xxp2; - struct cvmx_npi_port33_instr_hdr_s cn50xx; - struct cvmx_npi_port33_instr_hdr_s cn58xx; - struct cvmx_npi_port33_instr_hdr_s cn58xxp1; }; union 
cvmx_npi_port34_instr_hdr { @@ -2359,10 +2157,6 @@ union cvmx_npi_port34_instr_hdr { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npi_port34_instr_hdr_s cn38xx; - struct cvmx_npi_port34_instr_hdr_s cn38xxp2; - struct cvmx_npi_port34_instr_hdr_s cn58xx; - struct cvmx_npi_port34_instr_hdr_s cn58xxp1; }; union cvmx_npi_port35_instr_hdr { @@ -2398,10 +2192,6 @@ union cvmx_npi_port35_instr_hdr { uint64_t reserved_44_63:20; #endif } s; - struct cvmx_npi_port35_instr_hdr_s cn38xx; - struct cvmx_npi_port35_instr_hdr_s cn38xxp2; - struct cvmx_npi_port35_instr_hdr_s cn58xx; - struct cvmx_npi_port35_instr_hdr_s cn58xxp1; }; union cvmx_npi_port_bp_control { @@ -2417,13 +2207,6 @@ union cvmx_npi_port_bp_control { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_npi_port_bp_control_s cn30xx; - struct cvmx_npi_port_bp_control_s cn31xx; - struct cvmx_npi_port_bp_control_s cn38xx; - struct cvmx_npi_port_bp_control_s cn38xxp2; - struct cvmx_npi_port_bp_control_s cn50xx; - struct cvmx_npi_port_bp_control_s cn58xx; - struct cvmx_npi_port_bp_control_s cn58xxp1; }; union cvmx_npi_rsl_int_blocks { @@ -2566,7 +2349,6 @@ union cvmx_npi_rsl_int_blocks { uint64_t reserved_32_63:32; #endif } cn30xx; - struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx; struct cvmx_npi_rsl_int_blocks_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; @@ -2638,7 +2420,6 @@ union cvmx_npi_rsl_int_blocks { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2; struct cvmx_npi_rsl_int_blocks_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_31_63:33; @@ -2702,8 +2483,6 @@ union cvmx_npi_rsl_int_blocks { uint64_t reserved_31_63:33; #endif } cn50xx; - struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx; - struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1; }; union cvmx_npi_size_inputx { @@ -2717,13 +2496,6 @@ union cvmx_npi_size_inputx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npi_size_inputx_s cn30xx; - struct cvmx_npi_size_inputx_s cn31xx; - struct cvmx_npi_size_inputx_s cn38xx; - struct cvmx_npi_size_inputx_s cn38xxp2; - struct cvmx_npi_size_inputx_s cn50xx; - struct cvmx_npi_size_inputx_s cn58xx; - struct cvmx_npi_size_inputx_s cn58xxp1; }; union cvmx_npi_win_read_to { @@ -2737,13 +2509,6 @@ union cvmx_npi_win_read_to { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_npi_win_read_to_s cn30xx; - struct cvmx_npi_win_read_to_s cn31xx; - struct cvmx_npi_win_read_to_s cn38xx; - struct cvmx_npi_win_read_to_s cn38xxp2; - struct cvmx_npi_win_read_to_s cn50xx; - struct cvmx_npi_win_read_to_s cn58xx; - struct cvmx_npi_win_read_to_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h index 25d603f18298..be56b693b53b 100644 --- a/arch/mips/include/asm/octeon/cvmx-pci-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h @@ -131,13 +131,6 @@ union cvmx_pci_bar1_indexx { uint32_t reserved_18_31:14; #endif } s; - struct cvmx_pci_bar1_indexx_s cn30xx; - struct cvmx_pci_bar1_indexx_s cn31xx; - struct cvmx_pci_bar1_indexx_s cn38xx; - struct cvmx_pci_bar1_indexx_s cn38xxp2; - struct cvmx_pci_bar1_indexx_s cn50xx; - struct cvmx_pci_bar1_indexx_s cn58xx; - struct cvmx_pci_bar1_indexx_s cn58xxp1; }; union cvmx_pci_bist_reg { @@ -169,7 +162,6 @@ union cvmx_pci_bist_reg { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_pci_bist_reg_s cn50xx; }; union cvmx_pci_cfg00 { @@ -183,13 +175,6 @@ union cvmx_pci_cfg00 { uint32_t devid:16; #endif } s; - struct cvmx_pci_cfg00_s cn30xx; - struct cvmx_pci_cfg00_s 
cn31xx; - struct cvmx_pci_cfg00_s cn38xx; - struct cvmx_pci_cfg00_s cn38xxp2; - struct cvmx_pci_cfg00_s cn50xx; - struct cvmx_pci_cfg00_s cn58xx; - struct cvmx_pci_cfg00_s cn58xxp1; }; union cvmx_pci_cfg01 { @@ -247,13 +232,6 @@ union cvmx_pci_cfg01 { uint32_t dpe:1; #endif } s; - struct cvmx_pci_cfg01_s cn30xx; - struct cvmx_pci_cfg01_s cn31xx; - struct cvmx_pci_cfg01_s cn38xx; - struct cvmx_pci_cfg01_s cn38xxp2; - struct cvmx_pci_cfg01_s cn50xx; - struct cvmx_pci_cfg01_s cn58xx; - struct cvmx_pci_cfg01_s cn58xxp1; }; union cvmx_pci_cfg02 { @@ -267,13 +245,6 @@ union cvmx_pci_cfg02 { uint32_t cc:24; #endif } s; - struct cvmx_pci_cfg02_s cn30xx; - struct cvmx_pci_cfg02_s cn31xx; - struct cvmx_pci_cfg02_s cn38xx; - struct cvmx_pci_cfg02_s cn38xxp2; - struct cvmx_pci_cfg02_s cn50xx; - struct cvmx_pci_cfg02_s cn58xx; - struct cvmx_pci_cfg02_s cn58xxp1; }; union cvmx_pci_cfg03 { @@ -297,13 +268,6 @@ union cvmx_pci_cfg03 { uint32_t bcap:1; #endif } s; - struct cvmx_pci_cfg03_s cn30xx; - struct cvmx_pci_cfg03_s cn31xx; - struct cvmx_pci_cfg03_s cn38xx; - struct cvmx_pci_cfg03_s cn38xxp2; - struct cvmx_pci_cfg03_s cn50xx; - struct cvmx_pci_cfg03_s cn58xx; - struct cvmx_pci_cfg03_s cn58xxp1; }; union cvmx_pci_cfg04 { @@ -323,13 +287,6 @@ union cvmx_pci_cfg04 { uint32_t lbase:20; #endif } s; - struct cvmx_pci_cfg04_s cn30xx; - struct cvmx_pci_cfg04_s cn31xx; - struct cvmx_pci_cfg04_s cn38xx; - struct cvmx_pci_cfg04_s cn38xxp2; - struct cvmx_pci_cfg04_s cn50xx; - struct cvmx_pci_cfg04_s cn58xx; - struct cvmx_pci_cfg04_s cn58xxp1; }; union cvmx_pci_cfg05 { @@ -341,13 +298,6 @@ union cvmx_pci_cfg05 { uint32_t hbase:32; #endif } s; - struct cvmx_pci_cfg05_s cn30xx; - struct cvmx_pci_cfg05_s cn31xx; - struct cvmx_pci_cfg05_s cn38xx; - struct cvmx_pci_cfg05_s cn38xxp2; - struct cvmx_pci_cfg05_s cn50xx; - struct cvmx_pci_cfg05_s cn58xx; - struct cvmx_pci_cfg05_s cn58xxp1; }; union cvmx_pci_cfg06 { @@ -367,13 +317,6 @@ union cvmx_pci_cfg06 { uint32_t lbase:5; #endif } s; - struct cvmx_pci_cfg06_s cn30xx; - struct cvmx_pci_cfg06_s cn31xx; - struct cvmx_pci_cfg06_s cn38xx; - struct cvmx_pci_cfg06_s cn38xxp2; - struct cvmx_pci_cfg06_s cn50xx; - struct cvmx_pci_cfg06_s cn58xx; - struct cvmx_pci_cfg06_s cn58xxp1; }; union cvmx_pci_cfg07 { @@ -385,13 +328,6 @@ union cvmx_pci_cfg07 { uint32_t hbase:32; #endif } s; - struct cvmx_pci_cfg07_s cn30xx; - struct cvmx_pci_cfg07_s cn31xx; - struct cvmx_pci_cfg07_s cn38xx; - struct cvmx_pci_cfg07_s cn38xxp2; - struct cvmx_pci_cfg07_s cn50xx; - struct cvmx_pci_cfg07_s cn58xx; - struct cvmx_pci_cfg07_s cn58xxp1; }; union cvmx_pci_cfg08 { @@ -409,13 +345,6 @@ union cvmx_pci_cfg08 { uint32_t lbasez:28; #endif } s; - struct cvmx_pci_cfg08_s cn30xx; - struct cvmx_pci_cfg08_s cn31xx; - struct cvmx_pci_cfg08_s cn38xx; - struct cvmx_pci_cfg08_s cn38xxp2; - struct cvmx_pci_cfg08_s cn50xx; - struct cvmx_pci_cfg08_s cn58xx; - struct cvmx_pci_cfg08_s cn58xxp1; }; union cvmx_pci_cfg09 { @@ -429,13 +358,6 @@ union cvmx_pci_cfg09 { uint32_t hbase:25; #endif } s; - struct cvmx_pci_cfg09_s cn30xx; - struct cvmx_pci_cfg09_s cn31xx; - struct cvmx_pci_cfg09_s cn38xx; - struct cvmx_pci_cfg09_s cn38xxp2; - struct cvmx_pci_cfg09_s cn50xx; - struct cvmx_pci_cfg09_s cn58xx; - struct cvmx_pci_cfg09_s cn58xxp1; }; union cvmx_pci_cfg10 { @@ -447,13 +369,6 @@ union cvmx_pci_cfg10 { uint32_t cisp:32; #endif } s; - struct cvmx_pci_cfg10_s cn30xx; - struct cvmx_pci_cfg10_s cn31xx; - struct cvmx_pci_cfg10_s cn38xx; - struct cvmx_pci_cfg10_s cn38xxp2; - struct cvmx_pci_cfg10_s cn50xx; - struct 
cvmx_pci_cfg10_s cn58xx; - struct cvmx_pci_cfg10_s cn58xxp1; }; union cvmx_pci_cfg11 { @@ -467,13 +382,6 @@ union cvmx_pci_cfg11 { uint32_t ssid:16; #endif } s; - struct cvmx_pci_cfg11_s cn30xx; - struct cvmx_pci_cfg11_s cn31xx; - struct cvmx_pci_cfg11_s cn38xx; - struct cvmx_pci_cfg11_s cn38xxp2; - struct cvmx_pci_cfg11_s cn50xx; - struct cvmx_pci_cfg11_s cn58xx; - struct cvmx_pci_cfg11_s cn58xxp1; }; union cvmx_pci_cfg12 { @@ -491,13 +399,6 @@ union cvmx_pci_cfg12 { uint32_t erbar:16; #endif } s; - struct cvmx_pci_cfg12_s cn30xx; - struct cvmx_pci_cfg12_s cn31xx; - struct cvmx_pci_cfg12_s cn38xx; - struct cvmx_pci_cfg12_s cn38xxp2; - struct cvmx_pci_cfg12_s cn50xx; - struct cvmx_pci_cfg12_s cn58xx; - struct cvmx_pci_cfg12_s cn58xxp1; }; union cvmx_pci_cfg13 { @@ -511,13 +412,6 @@ union cvmx_pci_cfg13 { uint32_t reserved_8_31:24; #endif } s; - struct cvmx_pci_cfg13_s cn30xx; - struct cvmx_pci_cfg13_s cn31xx; - struct cvmx_pci_cfg13_s cn38xx; - struct cvmx_pci_cfg13_s cn38xxp2; - struct cvmx_pci_cfg13_s cn50xx; - struct cvmx_pci_cfg13_s cn58xx; - struct cvmx_pci_cfg13_s cn58xxp1; }; union cvmx_pci_cfg15 { @@ -535,13 +429,6 @@ union cvmx_pci_cfg15 { uint32_t ml:8; #endif } s; - struct cvmx_pci_cfg15_s cn30xx; - struct cvmx_pci_cfg15_s cn31xx; - struct cvmx_pci_cfg15_s cn38xx; - struct cvmx_pci_cfg15_s cn38xxp2; - struct cvmx_pci_cfg15_s cn50xx; - struct cvmx_pci_cfg15_s cn58xx; - struct cvmx_pci_cfg15_s cn58xxp1; }; union cvmx_pci_cfg16 { @@ -583,13 +470,6 @@ union cvmx_pci_cfg16 { uint32_t trdnpr:1; #endif } s; - struct cvmx_pci_cfg16_s cn30xx; - struct cvmx_pci_cfg16_s cn31xx; - struct cvmx_pci_cfg16_s cn38xx; - struct cvmx_pci_cfg16_s cn38xxp2; - struct cvmx_pci_cfg16_s cn50xx; - struct cvmx_pci_cfg16_s cn58xx; - struct cvmx_pci_cfg16_s cn58xxp1; }; union cvmx_pci_cfg17 { @@ -601,13 +481,6 @@ union cvmx_pci_cfg17 { uint32_t tscme:32; #endif } s; - struct cvmx_pci_cfg17_s cn30xx; - struct cvmx_pci_cfg17_s cn31xx; - struct cvmx_pci_cfg17_s cn38xx; - struct cvmx_pci_cfg17_s cn38xxp2; - struct cvmx_pci_cfg17_s cn50xx; - struct cvmx_pci_cfg17_s cn58xx; - struct cvmx_pci_cfg17_s cn58xxp1; }; union cvmx_pci_cfg18 { @@ -619,13 +492,6 @@ union cvmx_pci_cfg18 { uint32_t tdsrps:32; #endif } s; - struct cvmx_pci_cfg18_s cn30xx; - struct cvmx_pci_cfg18_s cn31xx; - struct cvmx_pci_cfg18_s cn38xx; - struct cvmx_pci_cfg18_s cn38xxp2; - struct cvmx_pci_cfg18_s cn50xx; - struct cvmx_pci_cfg18_s cn58xx; - struct cvmx_pci_cfg18_s cn58xxp1; }; union cvmx_pci_cfg19 { @@ -671,13 +537,6 @@ union cvmx_pci_cfg19 { uint32_t mrbcm:1; #endif } s; - struct cvmx_pci_cfg19_s cn30xx; - struct cvmx_pci_cfg19_s cn31xx; - struct cvmx_pci_cfg19_s cn38xx; - struct cvmx_pci_cfg19_s cn38xxp2; - struct cvmx_pci_cfg19_s cn50xx; - struct cvmx_pci_cfg19_s cn58xx; - struct cvmx_pci_cfg19_s cn58xxp1; }; union cvmx_pci_cfg20 { @@ -689,13 +548,6 @@ union cvmx_pci_cfg20 { uint32_t mdsp:32; #endif } s; - struct cvmx_pci_cfg20_s cn30xx; - struct cvmx_pci_cfg20_s cn31xx; - struct cvmx_pci_cfg20_s cn38xx; - struct cvmx_pci_cfg20_s cn38xxp2; - struct cvmx_pci_cfg20_s cn50xx; - struct cvmx_pci_cfg20_s cn58xx; - struct cvmx_pci_cfg20_s cn58xxp1; }; union cvmx_pci_cfg21 { @@ -707,13 +559,6 @@ union cvmx_pci_cfg21 { uint32_t scmre:32; #endif } s; - struct cvmx_pci_cfg21_s cn30xx; - struct cvmx_pci_cfg21_s cn31xx; - struct cvmx_pci_cfg21_s cn38xx; - struct cvmx_pci_cfg21_s cn38xxp2; - struct cvmx_pci_cfg21_s cn50xx; - struct cvmx_pci_cfg21_s cn58xx; - struct cvmx_pci_cfg21_s cn58xxp1; }; union cvmx_pci_cfg22 { @@ -737,13 +582,6 @@ union 
cvmx_pci_cfg22 { uint32_t mac:7; #endif } s; - struct cvmx_pci_cfg22_s cn30xx; - struct cvmx_pci_cfg22_s cn31xx; - struct cvmx_pci_cfg22_s cn38xx; - struct cvmx_pci_cfg22_s cn38xxp2; - struct cvmx_pci_cfg22_s cn50xx; - struct cvmx_pci_cfg22_s cn58xx; - struct cvmx_pci_cfg22_s cn58xxp1; }; union cvmx_pci_cfg56 { @@ -767,13 +605,6 @@ union cvmx_pci_cfg56 { uint32_t reserved_23_31:9; #endif } s; - struct cvmx_pci_cfg56_s cn30xx; - struct cvmx_pci_cfg56_s cn31xx; - struct cvmx_pci_cfg56_s cn38xx; - struct cvmx_pci_cfg56_s cn38xxp2; - struct cvmx_pci_cfg56_s cn50xx; - struct cvmx_pci_cfg56_s cn58xx; - struct cvmx_pci_cfg56_s cn58xxp1; }; union cvmx_pci_cfg57 { @@ -809,13 +640,6 @@ union cvmx_pci_cfg57 { uint32_t reserved_30_31:2; #endif } s; - struct cvmx_pci_cfg57_s cn30xx; - struct cvmx_pci_cfg57_s cn31xx; - struct cvmx_pci_cfg57_s cn38xx; - struct cvmx_pci_cfg57_s cn38xxp2; - struct cvmx_pci_cfg57_s cn50xx; - struct cvmx_pci_cfg57_s cn58xx; - struct cvmx_pci_cfg57_s cn58xxp1; }; union cvmx_pci_cfg58 { @@ -845,13 +669,6 @@ union cvmx_pci_cfg58 { uint32_t pmes:5; #endif } s; - struct cvmx_pci_cfg58_s cn30xx; - struct cvmx_pci_cfg58_s cn31xx; - struct cvmx_pci_cfg58_s cn38xx; - struct cvmx_pci_cfg58_s cn38xxp2; - struct cvmx_pci_cfg58_s cn50xx; - struct cvmx_pci_cfg58_s cn58xx; - struct cvmx_pci_cfg58_s cn58xxp1; }; union cvmx_pci_cfg59 { @@ -881,13 +698,6 @@ union cvmx_pci_cfg59 { uint32_t pmdia:8; #endif } s; - struct cvmx_pci_cfg59_s cn30xx; - struct cvmx_pci_cfg59_s cn31xx; - struct cvmx_pci_cfg59_s cn38xx; - struct cvmx_pci_cfg59_s cn38xxp2; - struct cvmx_pci_cfg59_s cn50xx; - struct cvmx_pci_cfg59_s cn58xx; - struct cvmx_pci_cfg59_s cn58xxp1; }; union cvmx_pci_cfg60 { @@ -911,13 +721,6 @@ union cvmx_pci_cfg60 { uint32_t reserved_24_31:8; #endif } s; - struct cvmx_pci_cfg60_s cn30xx; - struct cvmx_pci_cfg60_s cn31xx; - struct cvmx_pci_cfg60_s cn38xx; - struct cvmx_pci_cfg60_s cn38xxp2; - struct cvmx_pci_cfg60_s cn50xx; - struct cvmx_pci_cfg60_s cn58xx; - struct cvmx_pci_cfg60_s cn58xxp1; }; union cvmx_pci_cfg61 { @@ -931,13 +734,6 @@ union cvmx_pci_cfg61 { uint32_t msi31t2:30; #endif } s; - struct cvmx_pci_cfg61_s cn30xx; - struct cvmx_pci_cfg61_s cn31xx; - struct cvmx_pci_cfg61_s cn38xx; - struct cvmx_pci_cfg61_s cn38xxp2; - struct cvmx_pci_cfg61_s cn50xx; - struct cvmx_pci_cfg61_s cn58xx; - struct cvmx_pci_cfg61_s cn58xxp1; }; union cvmx_pci_cfg62 { @@ -949,13 +745,6 @@ union cvmx_pci_cfg62 { uint32_t msi:32; #endif } s; - struct cvmx_pci_cfg62_s cn30xx; - struct cvmx_pci_cfg62_s cn31xx; - struct cvmx_pci_cfg62_s cn38xx; - struct cvmx_pci_cfg62_s cn38xxp2; - struct cvmx_pci_cfg62_s cn50xx; - struct cvmx_pci_cfg62_s cn58xx; - struct cvmx_pci_cfg62_s cn58xxp1; }; union cvmx_pci_cfg63 { @@ -969,13 +758,6 @@ union cvmx_pci_cfg63 { uint32_t reserved_16_31:16; #endif } s; - struct cvmx_pci_cfg63_s cn30xx; - struct cvmx_pci_cfg63_s cn31xx; - struct cvmx_pci_cfg63_s cn38xx; - struct cvmx_pci_cfg63_s cn38xxp2; - struct cvmx_pci_cfg63_s cn50xx; - struct cvmx_pci_cfg63_s cn58xx; - struct cvmx_pci_cfg63_s cn58xxp1; }; union cvmx_pci_cnt_reg { @@ -997,9 +779,6 @@ union cvmx_pci_cnt_reg { uint64_t reserved_38_63:26; #endif } s; - struct cvmx_pci_cnt_reg_s cn50xx; - struct cvmx_pci_cnt_reg_s cn58xx; - struct cvmx_pci_cnt_reg_s cn58xxp1; }; union cvmx_pci_ctl_status_2 { @@ -1053,7 +832,6 @@ union cvmx_pci_ctl_status_2 { uint32_t reserved_29_31:3; #endif } s; - struct cvmx_pci_ctl_status_2_s cn30xx; struct cvmx_pci_ctl_status_2_cn31xx { #ifdef __BIG_ENDIAN_BITFIELD uint32_t reserved_20_31:12; @@ -1091,11 
+869,6 @@ union cvmx_pci_ctl_status_2 { uint32_t reserved_20_31:12; #endif } cn31xx; - struct cvmx_pci_ctl_status_2_s cn38xx; - struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2; - struct cvmx_pci_ctl_status_2_s cn50xx; - struct cvmx_pci_ctl_status_2_s cn58xx; - struct cvmx_pci_ctl_status_2_s cn58xxp1; }; union cvmx_pci_dbellx { @@ -1109,13 +882,6 @@ union cvmx_pci_dbellx { uint32_t reserved_16_31:16; #endif } s; - struct cvmx_pci_dbellx_s cn30xx; - struct cvmx_pci_dbellx_s cn31xx; - struct cvmx_pci_dbellx_s cn38xx; - struct cvmx_pci_dbellx_s cn38xxp2; - struct cvmx_pci_dbellx_s cn50xx; - struct cvmx_pci_dbellx_s cn58xx; - struct cvmx_pci_dbellx_s cn58xxp1; }; union cvmx_pci_dma_cntx { @@ -1127,13 +893,6 @@ union cvmx_pci_dma_cntx { uint32_t dma_cnt:32; #endif } s; - struct cvmx_pci_dma_cntx_s cn30xx; - struct cvmx_pci_dma_cntx_s cn31xx; - struct cvmx_pci_dma_cntx_s cn38xx; - struct cvmx_pci_dma_cntx_s cn38xxp2; - struct cvmx_pci_dma_cntx_s cn50xx; - struct cvmx_pci_dma_cntx_s cn58xx; - struct cvmx_pci_dma_cntx_s cn58xxp1; }; union cvmx_pci_dma_int_levx { @@ -1145,13 +904,6 @@ union cvmx_pci_dma_int_levx { uint32_t pkt_cnt:32; #endif } s; - struct cvmx_pci_dma_int_levx_s cn30xx; - struct cvmx_pci_dma_int_levx_s cn31xx; - struct cvmx_pci_dma_int_levx_s cn38xx; - struct cvmx_pci_dma_int_levx_s cn38xxp2; - struct cvmx_pci_dma_int_levx_s cn50xx; - struct cvmx_pci_dma_int_levx_s cn58xx; - struct cvmx_pci_dma_int_levx_s cn58xxp1; }; union cvmx_pci_dma_timex { @@ -1163,13 +915,6 @@ union cvmx_pci_dma_timex { uint32_t dma_time:32; #endif } s; - struct cvmx_pci_dma_timex_s cn30xx; - struct cvmx_pci_dma_timex_s cn31xx; - struct cvmx_pci_dma_timex_s cn38xx; - struct cvmx_pci_dma_timex_s cn38xxp2; - struct cvmx_pci_dma_timex_s cn50xx; - struct cvmx_pci_dma_timex_s cn58xx; - struct cvmx_pci_dma_timex_s cn58xxp1; }; union cvmx_pci_instr_countx { @@ -1181,13 +926,6 @@ union cvmx_pci_instr_countx { uint32_t icnt:32; #endif } s; - struct cvmx_pci_instr_countx_s cn30xx; - struct cvmx_pci_instr_countx_s cn31xx; - struct cvmx_pci_instr_countx_s cn38xx; - struct cvmx_pci_instr_countx_s cn38xxp2; - struct cvmx_pci_instr_countx_s cn50xx; - struct cvmx_pci_instr_countx_s cn58xx; - struct cvmx_pci_instr_countx_s cn58xxp1; }; union cvmx_pci_int_enb { @@ -1405,11 +1143,6 @@ union cvmx_pci_int_enb { uint64_t reserved_34_63:30; #endif } cn31xx; - struct cvmx_pci_int_enb_s cn38xx; - struct cvmx_pci_int_enb_s cn38xxp2; - struct cvmx_pci_int_enb_cn31xx cn50xx; - struct cvmx_pci_int_enb_s cn58xx; - struct cvmx_pci_int_enb_s cn58xxp1; }; union cvmx_pci_int_enb2 { @@ -1627,11 +1360,6 @@ union cvmx_pci_int_enb2 { uint64_t reserved_34_63:30; #endif } cn31xx; - struct cvmx_pci_int_enb2_s cn38xx; - struct cvmx_pci_int_enb2_s cn38xxp2; - struct cvmx_pci_int_enb2_cn31xx cn50xx; - struct cvmx_pci_int_enb2_s cn58xx; - struct cvmx_pci_int_enb2_s cn58xxp1; }; union cvmx_pci_int_sum { @@ -1849,11 +1577,6 @@ union cvmx_pci_int_sum { uint64_t reserved_34_63:30; #endif } cn31xx; - struct cvmx_pci_int_sum_s cn38xx; - struct cvmx_pci_int_sum_s cn38xxp2; - struct cvmx_pci_int_sum_cn31xx cn50xx; - struct cvmx_pci_int_sum_s cn58xx; - struct cvmx_pci_int_sum_s cn58xxp1; }; union cvmx_pci_int_sum2 { @@ -2071,11 +1794,6 @@ union cvmx_pci_int_sum2 { uint64_t reserved_34_63:30; #endif } cn31xx; - struct cvmx_pci_int_sum2_s cn38xx; - struct cvmx_pci_int_sum2_s cn38xxp2; - struct cvmx_pci_int_sum2_cn31xx cn50xx; - struct cvmx_pci_int_sum2_s cn58xx; - struct cvmx_pci_int_sum2_s cn58xxp1; }; union cvmx_pci_msi_rcv { @@ -2089,13 +1807,6 @@ union 
cvmx_pci_msi_rcv {
 		uint32_t reserved_6_31:26;
 #endif
 	} s;
-	struct cvmx_pci_msi_rcv_s cn30xx;
-	struct cvmx_pci_msi_rcv_s cn31xx;
-	struct cvmx_pci_msi_rcv_s cn38xx;
-	struct cvmx_pci_msi_rcv_s cn38xxp2;
-	struct cvmx_pci_msi_rcv_s cn50xx;
-	struct cvmx_pci_msi_rcv_s cn58xx;
-	struct cvmx_pci_msi_rcv_s cn58xxp1;
 };
 
 union cvmx_pci_pkt_creditsx {
@@ -2109,13 +1820,6 @@ union cvmx_pci_pkt_creditsx {
 		uint32_t pkt_cnt:16;
 #endif
 	} s;
-	struct cvmx_pci_pkt_creditsx_s cn30xx;
-	struct cvmx_pci_pkt_creditsx_s cn31xx;
-	struct cvmx_pci_pkt_creditsx_s cn38xx;
-	struct cvmx_pci_pkt_creditsx_s cn38xxp2;
-	struct cvmx_pci_pkt_creditsx_s cn50xx;
-	struct cvmx_pci_pkt_creditsx_s cn58xx;
-	struct cvmx_pci_pkt_creditsx_s cn58xxp1;
 };
 
 union cvmx_pci_pkts_sentx {
@@ -2127,13 +1831,6 @@ union cvmx_pci_pkts_sentx {
 		uint32_t pkt_cnt:32;
 #endif
 	} s;
-	struct cvmx_pci_pkts_sentx_s cn30xx;
-	struct cvmx_pci_pkts_sentx_s cn31xx;
-	struct cvmx_pci_pkts_sentx_s cn38xx;
-	struct cvmx_pci_pkts_sentx_s cn38xxp2;
-	struct cvmx_pci_pkts_sentx_s cn50xx;
-	struct cvmx_pci_pkts_sentx_s cn58xx;
-	struct cvmx_pci_pkts_sentx_s cn58xxp1;
 };
 
 union cvmx_pci_pkts_sent_int_levx {
@@ -2145,13 +1842,6 @@ union cvmx_pci_pkts_sent_int_levx {
 		uint32_t pkt_cnt:32;
 #endif
 	} s;
-	struct cvmx_pci_pkts_sent_int_levx_s cn30xx;
-	struct cvmx_pci_pkts_sent_int_levx_s cn31xx;
-	struct cvmx_pci_pkts_sent_int_levx_s cn38xx;
-	struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2;
-	struct cvmx_pci_pkts_sent_int_levx_s cn50xx;
-	struct cvmx_pci_pkts_sent_int_levx_s cn58xx;
-	struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1;
 };
 
 union cvmx_pci_pkts_sent_timex {
@@ -2163,13 +1853,6 @@ union cvmx_pci_pkts_sent_timex {
 		uint32_t pkt_time:32;
 #endif
 	} s;
-	struct cvmx_pci_pkts_sent_timex_s cn30xx;
-	struct cvmx_pci_pkts_sent_timex_s cn31xx;
-	struct cvmx_pci_pkts_sent_timex_s cn38xx;
-	struct cvmx_pci_pkts_sent_timex_s cn38xxp2;
-	struct cvmx_pci_pkts_sent_timex_s cn50xx;
-	struct cvmx_pci_pkts_sent_timex_s cn58xx;
-	struct cvmx_pci_pkts_sent_timex_s cn58xxp1;
 };
 
 union cvmx_pci_read_cmd_6 {
@@ -2185,13 +1868,6 @@ union cvmx_pci_read_cmd_6 {
 		uint32_t reserved_9_31:23;
 #endif
 	} s;
-	struct cvmx_pci_read_cmd_6_s cn30xx;
-	struct cvmx_pci_read_cmd_6_s cn31xx;
-	struct cvmx_pci_read_cmd_6_s cn38xx;
-	struct cvmx_pci_read_cmd_6_s cn38xxp2;
-	struct cvmx_pci_read_cmd_6_s cn50xx;
-	struct cvmx_pci_read_cmd_6_s cn58xx;
-	struct cvmx_pci_read_cmd_6_s cn58xxp1;
 };
 
 union cvmx_pci_read_cmd_c {
@@ -2207,13 +1883,6 @@ union cvmx_pci_read_cmd_c {
 		uint32_t reserved_9_31:23;
 #endif
 	} s;
-	struct cvmx_pci_read_cmd_c_s cn30xx;
-	struct cvmx_pci_read_cmd_c_s cn31xx;
-	struct cvmx_pci_read_cmd_c_s cn38xx;
-	struct cvmx_pci_read_cmd_c_s cn38xxp2;
-	struct cvmx_pci_read_cmd_c_s cn50xx;
-	struct cvmx_pci_read_cmd_c_s cn58xx;
-	struct cvmx_pci_read_cmd_c_s cn58xxp1;
 };
 
 union cvmx_pci_read_cmd_e {
@@ -2229,13 +1898,6 @@ union cvmx_pci_read_cmd_e {
 		uint32_t reserved_9_31:23;
 #endif
 	} s;
-	struct cvmx_pci_read_cmd_e_s cn30xx;
-	struct cvmx_pci_read_cmd_e_s cn31xx;
-	struct cvmx_pci_read_cmd_e_s cn38xx;
-	struct cvmx_pci_read_cmd_e_s cn38xxp2;
-	struct cvmx_pci_read_cmd_e_s cn50xx;
-	struct cvmx_pci_read_cmd_e_s cn58xx;
-	struct cvmx_pci_read_cmd_e_s cn58xxp1;
 };
 
 union cvmx_pci_read_timeout {
@@ -2251,13 +1913,6 @@ union cvmx_pci_read_timeout {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pci_read_timeout_s cn30xx;
-	struct cvmx_pci_read_timeout_s cn31xx;
-	struct cvmx_pci_read_timeout_s cn38xx;
-	struct cvmx_pci_read_timeout_s cn38xxp2;
-	struct cvmx_pci_read_timeout_s cn50xx;
-	struct cvmx_pci_read_timeout_s cn58xx;
-	struct cvmx_pci_read_timeout_s cn58xxp1;
 };
 
 union cvmx_pci_scm_reg {
@@ -2271,13 +1926,6 @@ union cvmx_pci_scm_reg {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pci_scm_reg_s cn30xx;
-	struct cvmx_pci_scm_reg_s cn31xx;
-	struct cvmx_pci_scm_reg_s cn38xx;
-	struct cvmx_pci_scm_reg_s cn38xxp2;
-	struct cvmx_pci_scm_reg_s cn50xx;
-	struct cvmx_pci_scm_reg_s cn58xx;
-	struct cvmx_pci_scm_reg_s cn58xxp1;
 };
 
 union cvmx_pci_tsr_reg {
@@ -2291,13 +1939,6 @@ union cvmx_pci_tsr_reg {
 		uint64_t reserved_36_63:28;
 #endif
 	} s;
-	struct cvmx_pci_tsr_reg_s cn30xx;
-	struct cvmx_pci_tsr_reg_s cn31xx;
-	struct cvmx_pci_tsr_reg_s cn38xx;
-	struct cvmx_pci_tsr_reg_s cn38xxp2;
-	struct cvmx_pci_tsr_reg_s cn50xx;
-	struct cvmx_pci_tsr_reg_s cn58xx;
-	struct cvmx_pci_tsr_reg_s cn58xxp1;
 };
 
 union cvmx_pci_win_rd_addr {
@@ -2326,7 +1967,6 @@ union cvmx_pci_win_rd_addr {
 		uint64_t reserved_49_63:15;
 #endif
 	} cn30xx;
-	struct cvmx_pci_win_rd_addr_cn30xx cn31xx;
 	struct cvmx_pci_win_rd_addr_cn38xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_49_63:15;
@@ -2340,10 +1980,6 @@ union cvmx_pci_win_rd_addr {
 		uint64_t reserved_49_63:15;
 #endif
 	} cn38xx;
-	struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2;
-	struct cvmx_pci_win_rd_addr_cn30xx cn50xx;
-	struct cvmx_pci_win_rd_addr_cn38xx cn58xx;
-	struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1;
 };
 
 union cvmx_pci_win_rd_data {
@@ -2355,13 +1991,6 @@ union cvmx_pci_win_rd_data {
 		uint64_t rd_data:64;
 #endif
 	} s;
-	struct cvmx_pci_win_rd_data_s cn30xx;
-	struct cvmx_pci_win_rd_data_s cn31xx;
-	struct cvmx_pci_win_rd_data_s cn38xx;
-	struct cvmx_pci_win_rd_data_s cn38xxp2;
-	struct cvmx_pci_win_rd_data_s cn50xx;
-	struct cvmx_pci_win_rd_data_s cn58xx;
-	struct cvmx_pci_win_rd_data_s cn58xxp1;
 };
 
 union cvmx_pci_win_wr_addr {
@@ -2379,13 +2008,6 @@ union cvmx_pci_win_wr_addr {
 		uint64_t reserved_49_63:15;
 #endif
 	} s;
-	struct cvmx_pci_win_wr_addr_s cn30xx;
-	struct cvmx_pci_win_wr_addr_s cn31xx;
-	struct cvmx_pci_win_wr_addr_s cn38xx;
-	struct cvmx_pci_win_wr_addr_s cn38xxp2;
-	struct cvmx_pci_win_wr_addr_s cn50xx;
-	struct cvmx_pci_win_wr_addr_s cn58xx;
-	struct cvmx_pci_win_wr_addr_s cn58xxp1;
 };
 
 union cvmx_pci_win_wr_data {
@@ -2397,13 +2019,6 @@ union cvmx_pci_win_wr_data {
 		uint64_t wr_data:64;
 #endif
 	} s;
-	struct cvmx_pci_win_wr_data_s cn30xx;
-	struct cvmx_pci_win_wr_data_s cn31xx;
-	struct cvmx_pci_win_wr_data_s cn38xx;
-	struct cvmx_pci_win_wr_data_s cn38xxp2;
-	struct cvmx_pci_win_wr_data_s cn50xx;
-	struct cvmx_pci_win_wr_data_s cn58xx;
-	struct cvmx_pci_win_wr_data_s cn58xxp1;
 };
 
 union cvmx_pci_win_wr_mask {
@@ -2417,13 +2032,6 @@ union cvmx_pci_win_wr_mask {
 		uint64_t reserved_8_63:56;
 #endif
 	} s;
-	struct cvmx_pci_win_wr_mask_s cn30xx;
-	struct cvmx_pci_win_wr_mask_s cn31xx;
-	struct cvmx_pci_win_wr_mask_s cn38xx;
-	struct cvmx_pci_win_wr_mask_s cn38xxp2;
-	struct cvmx_pci_win_wr_mask_s cn50xx;
-	struct cvmx_pci_win_wr_mask_s cn58xx;
-	struct cvmx_pci_win_wr_mask_s cn58xxp1;
 };
 
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
index 39da7f9d7b3f..5f013269a89d 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -361,17 +361,6 @@ union cvmx_pcsx_anx_adv_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_anx_adv_reg_s cn52xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
-	struct cvmx_pcsx_anx_adv_reg_s cn56xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
-	struct cvmx_pcsx_anx_adv_reg_s cn61xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn63xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn63xxp1;
-	struct cvmx_pcsx_anx_adv_reg_s cn66xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn68xx;
-	struct cvmx_pcsx_anx_adv_reg_s cn68xxp1;
-	struct cvmx_pcsx_anx_adv_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_anx_ext_st_reg {
@@ -393,17 +382,6 @@ union cvmx_pcsx_anx_ext_st_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn61xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn63xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn63xxp1;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn66xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn68xx;
-	struct cvmx_pcsx_anx_ext_st_reg_s cn68xxp1;
-	struct cvmx_pcsx_anx_ext_st_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_anx_lp_abil_reg {
@@ -431,17 +409,6 @@ union cvmx_pcsx_anx_lp_abil_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn61xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn63xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn63xxp1;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn66xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn68xx;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cn68xxp1;
-	struct cvmx_pcsx_anx_lp_abil_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_anx_results_reg {
@@ -463,17 +430,6 @@ union cvmx_pcsx_anx_results_reg {
 		uint64_t reserved_7_63:57;
 #endif
 	} s;
-	struct cvmx_pcsx_anx_results_reg_s cn52xx;
-	struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
-	struct cvmx_pcsx_anx_results_reg_s cn56xx;
-	struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
-	struct cvmx_pcsx_anx_results_reg_s cn61xx;
-	struct cvmx_pcsx_anx_results_reg_s cn63xx;
-	struct cvmx_pcsx_anx_results_reg_s cn63xxp1;
-	struct cvmx_pcsx_anx_results_reg_s cn66xx;
-	struct cvmx_pcsx_anx_results_reg_s cn68xx;
-	struct cvmx_pcsx_anx_results_reg_s cn68xxp1;
-	struct cvmx_pcsx_anx_results_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_intx_en_reg {
@@ -542,16 +498,6 @@ union cvmx_pcsx_intx_en_reg {
 		uint64_t reserved_12_63:52;
 #endif
 	} cn52xx;
-	struct cvmx_pcsx_intx_en_reg_cn52xx cn52xxp1;
-	struct cvmx_pcsx_intx_en_reg_cn52xx cn56xx;
-	struct cvmx_pcsx_intx_en_reg_cn52xx cn56xxp1;
-	struct cvmx_pcsx_intx_en_reg_s cn61xx;
-	struct cvmx_pcsx_intx_en_reg_s cn63xx;
-	struct cvmx_pcsx_intx_en_reg_s cn63xxp1;
-	struct cvmx_pcsx_intx_en_reg_s cn66xx;
-	struct cvmx_pcsx_intx_en_reg_s cn68xx;
-	struct cvmx_pcsx_intx_en_reg_s cn68xxp1;
-	struct cvmx_pcsx_intx_en_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_intx_reg {
@@ -620,16 +566,6 @@ union cvmx_pcsx_intx_reg {
 		uint64_t reserved_12_63:52;
 #endif
 	} cn52xx;
-	struct cvmx_pcsx_intx_reg_cn52xx cn52xxp1;
-	struct cvmx_pcsx_intx_reg_cn52xx cn56xx;
-	struct cvmx_pcsx_intx_reg_cn52xx cn56xxp1;
-	struct cvmx_pcsx_intx_reg_s cn61xx;
-	struct cvmx_pcsx_intx_reg_s cn63xx;
-	struct cvmx_pcsx_intx_reg_s cn63xxp1;
-	struct cvmx_pcsx_intx_reg_s cn66xx;
-	struct cvmx_pcsx_intx_reg_s cn68xx;
-	struct cvmx_pcsx_intx_reg_s cn68xxp1;
-	struct cvmx_pcsx_intx_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_linkx_timer_count_reg {
@@ -643,17 +579,6 @@ union cvmx_pcsx_linkx_timer_count_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn61xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn63xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn63xxp1;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn66xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn68xx;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cn68xxp1;
-	struct cvmx_pcsx_linkx_timer_count_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_log_anlx_reg {
@@ -671,17 +596,6 @@ union cvmx_pcsx_log_anlx_reg {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pcsx_log_anlx_reg_s cn52xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
-	struct cvmx_pcsx_log_anlx_reg_s cn56xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
-	struct cvmx_pcsx_log_anlx_reg_s cn61xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn63xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn63xxp1;
-	struct cvmx_pcsx_log_anlx_reg_s cn66xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn68xx;
-	struct cvmx_pcsx_log_anlx_reg_s cn68xxp1;
-	struct cvmx_pcsx_log_anlx_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_miscx_ctl_reg {
@@ -707,17 +621,6 @@ union cvmx_pcsx_miscx_ctl_reg {
 		uint64_t reserved_13_63:51;
 #endif
 	} s;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn61xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn63xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn63xxp1;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn66xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn68xx;
-	struct cvmx_pcsx_miscx_ctl_reg_s cn68xxp1;
-	struct cvmx_pcsx_miscx_ctl_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_mrx_control_reg {
@@ -753,17 +656,6 @@ union cvmx_pcsx_mrx_control_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_mrx_control_reg_s cn52xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
-	struct cvmx_pcsx_mrx_control_reg_s cn56xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
-	struct cvmx_pcsx_mrx_control_reg_s cn61xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn63xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn63xxp1;
-	struct cvmx_pcsx_mrx_control_reg_s cn66xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn68xx;
-	struct cvmx_pcsx_mrx_control_reg_s cn68xxp1;
-	struct cvmx_pcsx_mrx_control_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_mrx_status_reg {
@@ -807,17 +699,6 @@ union cvmx_pcsx_mrx_status_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_mrx_status_reg_s cn52xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
-	struct cvmx_pcsx_mrx_status_reg_s cn56xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
-	struct cvmx_pcsx_mrx_status_reg_s cn61xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn63xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn63xxp1;
-	struct cvmx_pcsx_mrx_status_reg_s cn66xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn68xx;
-	struct cvmx_pcsx_mrx_status_reg_s cn68xxp1;
-	struct cvmx_pcsx_mrx_status_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_rxx_states_reg {
@@ -841,17 +722,6 @@ union cvmx_pcsx_rxx_states_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_rxx_states_reg_s cn52xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
-	struct cvmx_pcsx_rxx_states_reg_s cn56xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
-	struct cvmx_pcsx_rxx_states_reg_s cn61xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn63xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn63xxp1;
-	struct cvmx_pcsx_rxx_states_reg_s cn66xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn68xx;
-	struct cvmx_pcsx_rxx_states_reg_s cn68xxp1;
-	struct cvmx_pcsx_rxx_states_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_rxx_sync_reg {
@@ -867,17 +737,6 @@ union cvmx_pcsx_rxx_sync_reg {
 		uint64_t reserved_2_63:62;
 #endif
 	} s;
-	struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
-	struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
-	struct cvmx_pcsx_rxx_sync_reg_s cn61xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn63xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn63xxp1;
-	struct cvmx_pcsx_rxx_sync_reg_s cn66xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn68xx;
-	struct cvmx_pcsx_rxx_sync_reg_s cn68xxp1;
-	struct cvmx_pcsx_rxx_sync_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_sgmx_an_adv_reg {
@@ -903,17 +762,6 @@ union cvmx_pcsx_sgmx_an_adv_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn61xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xxp1;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn66xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xx;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xxp1;
-	struct cvmx_pcsx_sgmx_an_adv_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_sgmx_lp_adv_reg {
@@ -937,17 +785,6 @@ union cvmx_pcsx_sgmx_lp_adv_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn61xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xxp1;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn66xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xx;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xxp1;
-	struct cvmx_pcsx_sgmx_lp_adv_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_txx_states_reg {
@@ -965,17 +802,6 @@ union cvmx_pcsx_txx_states_reg {
 		uint64_t reserved_7_63:57;
 #endif
 	} s;
-	struct cvmx_pcsx_txx_states_reg_s cn52xx;
-	struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
-	struct cvmx_pcsx_txx_states_reg_s cn56xx;
-	struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
-	struct cvmx_pcsx_txx_states_reg_s cn61xx;
-	struct cvmx_pcsx_txx_states_reg_s cn63xx;
-	struct cvmx_pcsx_txx_states_reg_s cn63xxp1;
-	struct cvmx_pcsx_txx_states_reg_s cn66xx;
-	struct cvmx_pcsx_txx_states_reg_s cn68xx;
-	struct cvmx_pcsx_txx_states_reg_s cn68xxp1;
-	struct cvmx_pcsx_txx_states_reg_s cnf71xx;
 };
 
 union cvmx_pcsx_tx_rxx_polarity_reg {
@@ -995,17 +821,6 @@ union cvmx_pcsx_tx_rxx_polarity_reg {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn61xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xxp1;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn66xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xx;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xxp1;
-	struct cvmx_pcsx_tx_rxx_polarity_reg_s cnf71xx;
 };
 
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
index 847dd9dca6ea..b353775eeeb6 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -293,16 +293,6 @@ union cvmx_pcsxx_10gbx_status_reg {
 		uint64_t reserved_13_63:51;
 #endif
 	} s;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
-	struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_bist_status_reg {
@@ -316,16 +306,6 @@ union cvmx_pcsxx_bist_status_reg {
 		uint64_t reserved_1_63:63;
 #endif
 	} s;
-	struct cvmx_pcsxx_bist_status_reg_s cn52xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
-	struct cvmx_pcsxx_bist_status_reg_s cn56xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
-	struct cvmx_pcsxx_bist_status_reg_s cn61xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn63xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
-	struct cvmx_pcsxx_bist_status_reg_s cn66xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn68xx;
-	struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_bit_lock_status_reg {
@@ -345,16 +325,6 @@ union cvmx_pcsxx_bit_lock_status_reg {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
-	struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_control1_reg {
@@ -384,16 +354,6 @@ union cvmx_pcsxx_control1_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsxx_control1_reg_s cn52xx;
-	struct cvmx_pcsxx_control1_reg_s cn52xxp1;
-	struct cvmx_pcsxx_control1_reg_s cn56xx;
-	struct cvmx_pcsxx_control1_reg_s cn56xxp1;
-	struct cvmx_pcsxx_control1_reg_s cn61xx;
-	struct cvmx_pcsxx_control1_reg_s cn63xx;
-	struct cvmx_pcsxx_control1_reg_s cn63xxp1;
-	struct cvmx_pcsxx_control1_reg_s cn66xx;
-	struct cvmx_pcsxx_control1_reg_s cn68xx;
-	struct cvmx_pcsxx_control1_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_control2_reg {
@@ -407,16 +367,6 @@ union cvmx_pcsxx_control2_reg {
 		uint64_t reserved_2_63:62;
 #endif
 	} s;
-	struct cvmx_pcsxx_control2_reg_s cn52xx;
-	struct cvmx_pcsxx_control2_reg_s cn52xxp1;
-	struct cvmx_pcsxx_control2_reg_s cn56xx;
-	struct cvmx_pcsxx_control2_reg_s cn56xxp1;
-	struct cvmx_pcsxx_control2_reg_s cn61xx;
-	struct cvmx_pcsxx_control2_reg_s cn63xx;
-	struct cvmx_pcsxx_control2_reg_s cn63xxp1;
-	struct cvmx_pcsxx_control2_reg_s cn66xx;
-	struct cvmx_pcsxx_control2_reg_s cn68xx;
-	struct cvmx_pcsxx_control2_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_int_en_reg {
@@ -461,15 +411,6 @@ union cvmx_pcsxx_int_en_reg {
 		uint64_t reserved_6_63:58;
 #endif
 	} cn52xx;
-	struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
-	struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
-	struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
-	struct cvmx_pcsxx_int_en_reg_s cn61xx;
-	struct cvmx_pcsxx_int_en_reg_s cn63xx;
-	struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
-	struct cvmx_pcsxx_int_en_reg_s cn66xx;
-	struct cvmx_pcsxx_int_en_reg_s cn68xx;
-	struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_int_reg {
@@ -514,15 +455,6 @@ union cvmx_pcsxx_int_reg {
 		uint64_t reserved_6_63:58;
 #endif
 	} cn52xx;
-	struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
-	struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
-	struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
-	struct cvmx_pcsxx_int_reg_s cn61xx;
-	struct cvmx_pcsxx_int_reg_s cn63xx;
-	struct cvmx_pcsxx_int_reg_s cn63xxp1;
-	struct cvmx_pcsxx_int_reg_s cn66xx;
-	struct cvmx_pcsxx_int_reg_s cn68xx;
-	struct cvmx_pcsxx_int_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_log_anl_reg {
@@ -544,16 +476,6 @@ union cvmx_pcsxx_log_anl_reg {
 		uint64_t reserved_7_63:57;
 #endif
 	} s;
-	struct cvmx_pcsxx_log_anl_reg_s cn52xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
-	struct cvmx_pcsxx_log_anl_reg_s cn56xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
-	struct cvmx_pcsxx_log_anl_reg_s cn61xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn63xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
-	struct cvmx_pcsxx_log_anl_reg_s cn66xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn68xx;
-	struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_misc_ctl_reg {
@@ -573,16 +495,6 @@ union cvmx_pcsxx_misc_ctl_reg {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
-	struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_rx_sync_states_reg {
@@ -602,16 +514,6 @@ union cvmx_pcsxx_rx_sync_states_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
-	struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_spd_abil_reg {
@@ -627,16 +529,6 @@ union cvmx_pcsxx_spd_abil_reg {
 		uint64_t reserved_2_63:62;
 #endif
 	} s;
-	struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
-	struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
-	struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
-	struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
-	struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_status1_reg {
@@ -658,16 +550,6 @@ union cvmx_pcsxx_status1_reg {
 		uint64_t reserved_8_63:56;
 #endif
 	} s;
-	struct cvmx_pcsxx_status1_reg_s cn52xx;
-	struct cvmx_pcsxx_status1_reg_s cn52xxp1;
-	struct cvmx_pcsxx_status1_reg_s cn56xx;
-	struct cvmx_pcsxx_status1_reg_s cn56xxp1;
-	struct cvmx_pcsxx_status1_reg_s cn61xx;
-	struct cvmx_pcsxx_status1_reg_s cn63xx;
-	struct cvmx_pcsxx_status1_reg_s cn63xxp1;
-	struct cvmx_pcsxx_status1_reg_s cn66xx;
-	struct cvmx_pcsxx_status1_reg_s cn68xx;
-	struct cvmx_pcsxx_status1_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_status2_reg {
@@ -695,16 +577,6 @@ union cvmx_pcsxx_status2_reg {
 		uint64_t reserved_16_63:48;
 #endif
 	} s;
-	struct cvmx_pcsxx_status2_reg_s cn52xx;
-	struct cvmx_pcsxx_status2_reg_s cn52xxp1;
-	struct cvmx_pcsxx_status2_reg_s cn56xx;
-	struct cvmx_pcsxx_status2_reg_s cn56xxp1;
-	struct cvmx_pcsxx_status2_reg_s cn61xx;
-	struct cvmx_pcsxx_status2_reg_s cn63xx;
-	struct cvmx_pcsxx_status2_reg_s cn63xxp1;
-	struct cvmx_pcsxx_status2_reg_s cn66xx;
-	struct cvmx_pcsxx_status2_reg_s cn68xx;
-	struct cvmx_pcsxx_status2_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_tx_rx_polarity_reg {
@@ -724,7 +596,6 @@ union cvmx_pcsxx_tx_rx_polarity_reg {
 		uint64_t reserved_10_63:54;
 #endif
 	} s;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
 	struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_2_63:62;
@@ -736,14 +607,6 @@ union cvmx_pcsxx_tx_rx_polarity_reg {
 		uint64_t reserved_2_63:62;
 #endif
 	} cn52xxp1;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
-	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
 };
 
 union cvmx_pcsxx_tx_rx_states_reg {
@@ -773,7 +636,6 @@ union cvmx_pcsxx_tx_rx_states_reg {
 		uint64_t reserved_14_63:50;
 #endif
 	} s;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
 	struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_13_63:51;
@@ -797,14 +659,6 @@ union cvmx_pcsxx_tx_rx_states_reg {
 		uint64_t reserved_13_63:51;
 #endif
 	} cn52xxp1;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
-	struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
-	struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
 };
 
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pemx-defs.h b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
index 50a916f892fa..d2d6dba938e9 100644
--- a/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
@@ -68,13 +68,6 @@ union cvmx_pemx_bar1_indexx {
 		uint64_t reserved_20_63:44;
 #endif
 	} s;
-	struct cvmx_pemx_bar1_indexx_s cn61xx;
-	struct cvmx_pemx_bar1_indexx_s cn63xx;
-	struct cvmx_pemx_bar1_indexx_s cn63xxp1;
-	struct cvmx_pemx_bar1_indexx_s cn66xx;
-	struct cvmx_pemx_bar1_indexx_s cn68xx;
-	struct cvmx_pemx_bar1_indexx_s cn68xxp1;
-	struct cvmx_pemx_bar1_indexx_s cnf71xx;
 };
 
 union cvmx_pemx_bar2_mask {
@@ -90,11 +83,6 @@ union cvmx_pemx_bar2_mask {
 		uint64_t reserved_38_63:26;
 #endif
 	} s;
-	struct cvmx_pemx_bar2_mask_s cn61xx;
-	struct cvmx_pemx_bar2_mask_s cn66xx;
-	struct cvmx_pemx_bar2_mask_s cn68xx;
-	struct cvmx_pemx_bar2_mask_s cn68xxp1;
-	struct cvmx_pemx_bar2_mask_s cnf71xx;
 };
 
 union cvmx_pemx_bar_ctl {
@@ -114,13 +102,6 @@ union cvmx_pemx_bar_ctl {
 		uint64_t reserved_7_63:57;
 #endif
 	} s;
-	struct cvmx_pemx_bar_ctl_s cn61xx;
-	struct cvmx_pemx_bar_ctl_s cn63xx;
-	struct cvmx_pemx_bar_ctl_s cn63xxp1;
-	struct cvmx_pemx_bar_ctl_s cn66xx;
-	struct cvmx_pemx_bar_ctl_s cn68xx;
-	struct cvmx_pemx_bar_ctl_s cn68xxp1;
-	struct cvmx_pemx_bar_ctl_s cnf71xx;
 };
 
 union cvmx_pemx_bist_status {
@@ -148,13 +129,6 @@ union cvmx_pemx_bist_status {
 		uint64_t reserved_8_63:56;
 #endif
 	} s;
-	struct cvmx_pemx_bist_status_s cn61xx;
-	struct cvmx_pemx_bist_status_s cn63xx;
-	struct cvmx_pemx_bist_status_s cn63xxp1;
-	struct cvmx_pemx_bist_status_s cn66xx;
-	struct cvmx_pemx_bist_status_s cn68xx;
-	struct cvmx_pemx_bist_status_s cn68xxp1;
-	struct cvmx_pemx_bist_status_s cnf71xx;
 };
 
 union cvmx_pemx_bist_status2 {
@@ -186,13 +160,6 @@ union cvmx_pemx_bist_status2 {
 		uint64_t reserved_10_63:54;
 #endif
 	} s;
-	struct cvmx_pemx_bist_status2_s cn61xx;
-	struct cvmx_pemx_bist_status2_s cn63xx;
-	struct cvmx_pemx_bist_status2_s cn63xxp1;
-	struct cvmx_pemx_bist_status2_s cn66xx;
-	struct cvmx_pemx_bist_status2_s cn68xx;
-	struct cvmx_pemx_bist_status2_s cn68xxp1;
-	struct cvmx_pemx_bist_status2_s cnf71xx;
 };
 
 union cvmx_pemx_cfg_rd {
@@ -206,13 +173,6 @@ union cvmx_pemx_cfg_rd {
 		uint64_t data:32;
 #endif
 	} s;
-	struct cvmx_pemx_cfg_rd_s cn61xx;
-	struct cvmx_pemx_cfg_rd_s cn63xx;
-	struct cvmx_pemx_cfg_rd_s cn63xxp1;
-	struct cvmx_pemx_cfg_rd_s cn66xx;
-	struct cvmx_pemx_cfg_rd_s cn68xx;
-	struct cvmx_pemx_cfg_rd_s cn68xxp1;
-	struct cvmx_pemx_cfg_rd_s cnf71xx;
 };
 
 union cvmx_pemx_cfg_wr {
@@ -226,13 +186,6 @@ union cvmx_pemx_cfg_wr {
 		uint64_t data:32;
 #endif
 	} s;
-	struct cvmx_pemx_cfg_wr_s cn61xx;
-	struct cvmx_pemx_cfg_wr_s cn63xx;
-	struct cvmx_pemx_cfg_wr_s cn63xxp1;
-	struct cvmx_pemx_cfg_wr_s cn66xx;
-	struct cvmx_pemx_cfg_wr_s cn68xx;
-	struct cvmx_pemx_cfg_wr_s cn68xxp1;
-	struct cvmx_pemx_cfg_wr_s cnf71xx;
 };
 
 union cvmx_pemx_cpl_lut_valid {
@@ -246,13 +199,6 @@ union cvmx_pemx_cpl_lut_valid {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pemx_cpl_lut_valid_s cn61xx;
-	struct cvmx_pemx_cpl_lut_valid_s cn63xx;
-	struct cvmx_pemx_cpl_lut_valid_s cn63xxp1;
-	struct cvmx_pemx_cpl_lut_valid_s cn66xx;
-	struct cvmx_pemx_cpl_lut_valid_s cn68xx;
-	struct cvmx_pemx_cpl_lut_valid_s cn68xxp1;
-	struct cvmx_pemx_cpl_lut_valid_s cnf71xx;
 };
 
 union cvmx_pemx_ctl_status {
@@ -298,13 +244,6 @@ union cvmx_pemx_ctl_status {
 		uint64_t reserved_48_63:16;
 #endif
 	} s;
-	struct cvmx_pemx_ctl_status_s cn61xx;
-	struct cvmx_pemx_ctl_status_s cn63xx;
-	struct cvmx_pemx_ctl_status_s cn63xxp1;
-	struct cvmx_pemx_ctl_status_s cn66xx;
-	struct cvmx_pemx_ctl_status_s cn68xx;
-	struct cvmx_pemx_ctl_status_s cn68xxp1;
-	struct cvmx_pemx_ctl_status_s cnf71xx;
 };
 
 union cvmx_pemx_dbg_info {
@@ -378,13 +317,6 @@ union cvmx_pemx_dbg_info {
 		uint64_t reserved_31_63:33;
 #endif
 	} s;
-	struct cvmx_pemx_dbg_info_s cn61xx;
-	struct cvmx_pemx_dbg_info_s cn63xx;
-	struct cvmx_pemx_dbg_info_s cn63xxp1;
-	struct cvmx_pemx_dbg_info_s cn66xx;
-	struct cvmx_pemx_dbg_info_s cn68xx;
-	struct cvmx_pemx_dbg_info_s cn68xxp1;
-	struct cvmx_pemx_dbg_info_s cnf71xx;
 };
 
 union cvmx_pemx_dbg_info_en {
@@ -458,13 +390,6 @@ union cvmx_pemx_dbg_info_en {
 		uint64_t reserved_31_63:33;
 #endif
 	} s;
-	struct cvmx_pemx_dbg_info_en_s cn61xx;
-	struct cvmx_pemx_dbg_info_en_s cn63xx;
-	struct cvmx_pemx_dbg_info_en_s cn63xxp1;
-	struct cvmx_pemx_dbg_info_en_s cn66xx;
-	struct cvmx_pemx_dbg_info_en_s cn68xx;
-	struct cvmx_pemx_dbg_info_en_s cn68xxp1;
-	struct cvmx_pemx_dbg_info_en_s cnf71xx;
 };
 
 union cvmx_pemx_diag_status {
@@ -484,13 +409,6 @@ union cvmx_pemx_diag_status {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pemx_diag_status_s cn61xx;
-	struct cvmx_pemx_diag_status_s cn63xx;
-	struct cvmx_pemx_diag_status_s cn63xxp1;
-	struct cvmx_pemx_diag_status_s cn66xx;
-	struct cvmx_pemx_diag_status_s cn68xx;
-	struct cvmx_pemx_diag_status_s cn68xxp1;
-	struct cvmx_pemx_diag_status_s cnf71xx;
 };
 
 union cvmx_pemx_inb_read_credits {
@@ -504,10 +422,6 @@ union cvmx_pemx_inb_read_credits {
 		uint64_t reserved_6_63:58;
 #endif
 	} s;
-	struct cvmx_pemx_inb_read_credits_s cn61xx;
-	struct cvmx_pemx_inb_read_credits_s cn66xx;
-	struct cvmx_pemx_inb_read_credits_s cn68xx;
-	struct cvmx_pemx_inb_read_credits_s cnf71xx;
 };
 
 union cvmx_pemx_int_enb {
@@ -547,13 +461,6 @@ union cvmx_pemx_int_enb {
 		uint64_t reserved_14_63:50;
 #endif
 	} s;
-	struct cvmx_pemx_int_enb_s cn61xx;
-	struct cvmx_pemx_int_enb_s cn63xx;
-	struct cvmx_pemx_int_enb_s cn63xxp1;
-	struct cvmx_pemx_int_enb_s cn66xx;
-	struct cvmx_pemx_int_enb_s cn68xx;
-	struct cvmx_pemx_int_enb_s cn68xxp1;
-	struct cvmx_pemx_int_enb_s cnf71xx;
 };
 
 union cvmx_pemx_int_enb_int {
@@ -593,13 +500,6 @@ union cvmx_pemx_int_enb_int {
 		uint64_t reserved_14_63:50;
 #endif
 	} s;
-	struct cvmx_pemx_int_enb_int_s cn61xx;
-	struct cvmx_pemx_int_enb_int_s cn63xx;
-	struct cvmx_pemx_int_enb_int_s cn63xxp1;
-	struct cvmx_pemx_int_enb_int_s cn66xx;
-	struct cvmx_pemx_int_enb_int_s cn68xx;
-	struct cvmx_pemx_int_enb_int_s cn68xxp1;
-	struct cvmx_pemx_int_enb_int_s cnf71xx;
 };
 
 union cvmx_pemx_int_sum {
@@ -639,13 +539,6 @@ union cvmx_pemx_int_sum {
 		uint64_t reserved_14_63:50;
 #endif
 	} s;
-	struct cvmx_pemx_int_sum_s cn61xx;
-	struct cvmx_pemx_int_sum_s cn63xx;
-	struct cvmx_pemx_int_sum_s cn63xxp1;
-	struct cvmx_pemx_int_sum_s cn66xx;
-	struct cvmx_pemx_int_sum_s cn68xx;
-	struct cvmx_pemx_int_sum_s cn68xxp1;
-	struct cvmx_pemx_int_sum_s cnf71xx;
 };
 
 union cvmx_pemx_p2n_bar0_start {
@@ -659,13 +552,6 @@ union cvmx_pemx_p2n_bar0_start {
 		uint64_t addr:50;
 #endif
 	} s;
-	struct cvmx_pemx_p2n_bar0_start_s cn61xx;
-	struct cvmx_pemx_p2n_bar0_start_s cn63xx;
-	struct cvmx_pemx_p2n_bar0_start_s cn63xxp1;
-	struct cvmx_pemx_p2n_bar0_start_s cn66xx;
-	struct cvmx_pemx_p2n_bar0_start_s cn68xx;
-	struct cvmx_pemx_p2n_bar0_start_s cn68xxp1;
-	struct cvmx_pemx_p2n_bar0_start_s cnf71xx;
 };
 
 union cvmx_pemx_p2n_bar1_start {
@@ -679,13 +565,6 @@ union cvmx_pemx_p2n_bar1_start {
 		uint64_t addr:38;
 #endif
 	} s;
-	struct cvmx_pemx_p2n_bar1_start_s cn61xx;
-	struct cvmx_pemx_p2n_bar1_start_s cn63xx;
-	struct cvmx_pemx_p2n_bar1_start_s cn63xxp1;
-	struct cvmx_pemx_p2n_bar1_start_s cn66xx;
-	struct cvmx_pemx_p2n_bar1_start_s cn68xx;
-	struct cvmx_pemx_p2n_bar1_start_s cn68xxp1;
-	struct cvmx_pemx_p2n_bar1_start_s cnf71xx;
 };
 
 union cvmx_pemx_p2n_bar2_start {
@@ -699,13 +578,6 @@ union cvmx_pemx_p2n_bar2_start {
 		uint64_t addr:23;
 #endif
 	} s;
-	struct cvmx_pemx_p2n_bar2_start_s cn61xx;
-	struct cvmx_pemx_p2n_bar2_start_s cn63xx;
-	struct cvmx_pemx_p2n_bar2_start_s cn63xxp1;
-	struct cvmx_pemx_p2n_bar2_start_s cn66xx;
-	struct cvmx_pemx_p2n_bar2_start_s cn68xx;
-	struct cvmx_pemx_p2n_bar2_start_s cn68xxp1;
-	struct cvmx_pemx_p2n_bar2_start_s cnf71xx;
 };
 
 union cvmx_pemx_p2p_barx_end {
@@ -719,11 +591,6 @@ union cvmx_pemx_p2p_barx_end {
 		uint64_t addr:52;
 #endif
 	} s;
-	struct cvmx_pemx_p2p_barx_end_s cn63xx;
-	struct cvmx_pemx_p2p_barx_end_s cn63xxp1;
-	struct cvmx_pemx_p2p_barx_end_s cn66xx;
-	struct cvmx_pemx_p2p_barx_end_s cn68xx;
-	struct cvmx_pemx_p2p_barx_end_s cn68xxp1;
 };
 
 union cvmx_pemx_p2p_barx_start {
@@ -737,11 +604,6 @@ union cvmx_pemx_p2p_barx_start {
 		uint64_t addr:52;
 #endif
 	} s;
-	struct cvmx_pemx_p2p_barx_start_s cn63xx;
-	struct cvmx_pemx_p2p_barx_start_s cn63xxp1;
-	struct cvmx_pemx_p2p_barx_start_s cn66xx;
-	struct cvmx_pemx_p2p_barx_start_s cn68xx;
-	struct cvmx_pemx_p2p_barx_start_s cn68xxp1;
 };
 
 union cvmx_pemx_tlp_credits {
@@ -784,12 +646,6 @@ union cvmx_pemx_tlp_credits {
 		uint64_t reserved_56_63:8;
 #endif
 	} cn61xx;
-	struct cvmx_pemx_tlp_credits_s cn63xx;
-	struct cvmx_pemx_tlp_credits_s cn63xxp1;
-	struct cvmx_pemx_tlp_credits_s cn66xx;
-	struct cvmx_pemx_tlp_credits_s cn68xx;
-	struct cvmx_pemx_tlp_credits_s cn68xxp1;
-	struct cvmx_pemx_tlp_credits_cn61xx cnf71xx;
 };
 
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
index 59b3dc565442..66561082529e 100644
--- a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
@@ -80,7 +80,6 @@ union cvmx_pescx_bist_status {
 		uint64_t reserved_13_63:51;
 #endif
 	} s;
-	struct cvmx_pescx_bist_status_s cn52xx;
 	struct cvmx_pescx_bist_status_cn52xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_12_63:52;
@@ -112,8 +111,6 @@ union cvmx_pescx_bist_status {
 		uint64_t reserved_12_63:52;
 #endif
 	} cn52xxp1;
-	struct cvmx_pescx_bist_status_s cn56xx;
-	struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1;
 };
 
 union cvmx_pescx_bist_status2 {
@@ -153,10 +150,6 @@ union cvmx_pescx_bist_status2 {
 		uint64_t reserved_14_63:50;
 #endif
 	} s;
-	struct cvmx_pescx_bist_status2_s cn52xx;
-	struct cvmx_pescx_bist_status2_s cn52xxp1;
-	struct cvmx_pescx_bist_status2_s cn56xx;
-	struct cvmx_pescx_bist_status2_s cn56xxp1;
 };
 
 union cvmx_pescx_cfg_rd {
@@ -170,10 +163,6 @@ union cvmx_pescx_cfg_rd {
 		uint64_t data:32;
 #endif
 	} s;
-	struct cvmx_pescx_cfg_rd_s cn52xx;
-	struct cvmx_pescx_cfg_rd_s cn52xxp1;
-	struct cvmx_pescx_cfg_rd_s cn56xx;
-	struct cvmx_pescx_cfg_rd_s cn56xxp1;
 };
 
 union cvmx_pescx_cfg_wr {
@@ -187,10 +176,6 @@ union cvmx_pescx_cfg_wr {
 		uint64_t data:32;
 #endif
 	} s;
-	struct cvmx_pescx_cfg_wr_s cn52xx;
-	struct cvmx_pescx_cfg_wr_s cn52xxp1;
-	struct cvmx_pescx_cfg_wr_s cn56xx;
-	struct cvmx_pescx_cfg_wr_s cn56xxp1;
 };
 
 union cvmx_pescx_cpl_lut_valid {
@@ -204,10 +189,6 @@ union cvmx_pescx_cpl_lut_valid {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pescx_cpl_lut_valid_s cn52xx;
-	struct cvmx_pescx_cpl_lut_valid_s cn52xxp1;
-	struct cvmx_pescx_cpl_lut_valid_s cn56xx;
-	struct cvmx_pescx_cpl_lut_valid_s cn56xxp1;
 };
 
 union cvmx_pescx_ctl_status {
@@ -249,8 +230,6 @@ union cvmx_pescx_ctl_status {
 		uint64_t reserved_28_63:36;
 #endif
 	} s;
-	struct cvmx_pescx_ctl_status_s cn52xx;
-	struct cvmx_pescx_ctl_status_s cn52xxp1;
 	struct cvmx_pescx_ctl_status_cn56xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_28_63:36;
@@ -288,7 +267,6 @@ union cvmx_pescx_ctl_status {
 		uint64_t reserved_28_63:36;
 #endif
 	} cn56xx;
-	struct cvmx_pescx_ctl_status_cn56xx cn56xxp1;
 };
 
 union cvmx_pescx_ctl_status2 {
@@ -304,7 +282,6 @@ union cvmx_pescx_ctl_status2 {
 		uint64_t reserved_2_63:62;
 #endif
 	} s;
-	struct cvmx_pescx_ctl_status2_s cn52xx;
 	struct cvmx_pescx_ctl_status2_cn52xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_1_63:63;
@@ -314,8 +291,6 @@ union cvmx_pescx_ctl_status2 {
 		uint64_t reserved_1_63:63;
 #endif
 	} cn52xxp1;
-	struct cvmx_pescx_ctl_status2_s cn56xx;
-	struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1;
 };
 
 union cvmx_pescx_dbg_info {
@@ -389,10 +364,6 @@ union cvmx_pescx_dbg_info {
 		uint64_t reserved_31_63:33;
 #endif
 	} s;
-	struct cvmx_pescx_dbg_info_s cn52xx;
-	struct cvmx_pescx_dbg_info_s cn52xxp1;
-	struct cvmx_pescx_dbg_info_s cn56xx;
-	struct cvmx_pescx_dbg_info_s cn56xxp1;
 };
 
 union cvmx_pescx_dbg_info_en {
@@ -466,10 +437,6 @@ union cvmx_pescx_dbg_info_en {
 		uint64_t reserved_31_63:33;
 #endif
 	} s;
-	struct cvmx_pescx_dbg_info_en_s cn52xx;
-	struct cvmx_pescx_dbg_info_en_s cn52xxp1;
-	struct cvmx_pescx_dbg_info_en_s cn56xx;
-	struct cvmx_pescx_dbg_info_en_s cn56xxp1;
 };
 
 union cvmx_pescx_diag_status {
@@ -489,10 +456,6 @@ union cvmx_pescx_diag_status {
 		uint64_t reserved_4_63:60;
 #endif
 	} s;
-	struct cvmx_pescx_diag_status_s cn52xx;
-	struct cvmx_pescx_diag_status_s cn52xxp1;
-	struct cvmx_pescx_diag_status_s cn56xx;
-	struct cvmx_pescx_diag_status_s cn56xxp1;
 };
 
 union cvmx_pescx_p2n_bar0_start {
@@ -506,10 +469,6 @@ union cvmx_pescx_p2n_bar0_start {
 		uint64_t addr:50;
 #endif
 	} s;
-	struct cvmx_pescx_p2n_bar0_start_s cn52xx;
-	struct cvmx_pescx_p2n_bar0_start_s cn52xxp1;
-	struct cvmx_pescx_p2n_bar0_start_s cn56xx;
-	struct cvmx_pescx_p2n_bar0_start_s cn56xxp1;
 };
 
 union cvmx_pescx_p2n_bar1_start {
@@ -523,10 +482,6 @@ union cvmx_pescx_p2n_bar1_start {
 		uint64_t addr:38;
 #endif
 	} s;
-	struct cvmx_pescx_p2n_bar1_start_s cn52xx;
-	struct cvmx_pescx_p2n_bar1_start_s cn52xxp1;
-	struct cvmx_pescx_p2n_bar1_start_s cn56xx;
-	struct cvmx_pescx_p2n_bar1_start_s cn56xxp1;
 };
 
 union cvmx_pescx_p2n_bar2_start {
@@ -540,10 +495,6 @@ union cvmx_pescx_p2n_bar2_start {
 		uint64_t addr:25;
 #endif
 	} s;
-	struct cvmx_pescx_p2n_bar2_start_s cn52xx;
-	struct cvmx_pescx_p2n_bar2_start_s cn52xxp1;
-	struct cvmx_pescx_p2n_bar2_start_s cn56xx;
-	struct cvmx_pescx_p2n_bar2_start_s cn56xxp1;
 };
 
 union cvmx_pescx_p2p_barx_end {
@@ -557,10 +508,6 @@ union cvmx_pescx_p2p_barx_end {
 		uint64_t addr:52;
 #endif
 	} s;
-	struct cvmx_pescx_p2p_barx_end_s cn52xx;
-	struct cvmx_pescx_p2p_barx_end_s cn52xxp1;
-	struct cvmx_pescx_p2p_barx_end_s cn56xx;
-	struct cvmx_pescx_p2p_barx_end_s cn56xxp1;
 };
 
 union cvmx_pescx_p2p_barx_start {
@@ -574,10 +521,6 @@ union cvmx_pescx_p2p_barx_start {
 		uint64_t addr:52;
 #endif
 	} s;
-	struct cvmx_pescx_p2p_barx_start_s cn52xx;
-	struct cvmx_pescx_p2p_barx_start_s cn52xxp1;
-	struct cvmx_pescx_p2p_barx_start_s cn56xx;
-	struct cvmx_pescx_p2p_barx_start_s cn56xxp1;
 };
 
 union cvmx_pescx_tlp_credits {
@@ -631,8 +574,6 @@ union cvmx_pescx_tlp_credits {
 		uint64_t reserved_38_63:26;
 #endif
 	} cn52xxp1;
-	struct cvmx_pescx_tlp_credits_cn52xx cn56xx;
-	struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1;
 };
 
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
index e975c7d2e485..e42f411bd2de 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -160,10 +160,6 @@ union cvmx_pip_alt_skip_cfgx {
 		uint64_t reserved_57_63:7;
 #endif
 	} s;
-	struct cvmx_pip_alt_skip_cfgx_s cn61xx;
-	struct cvmx_pip_alt_skip_cfgx_s cn66xx;
-	struct cvmx_pip_alt_skip_cfgx_s cn68xx;
-	struct cvmx_pip_alt_skip_cfgx_s cnf71xx;
 };
 
 union cvmx_pip_bck_prs {
@@ -183,19 +179,6 @@ union cvmx_pip_bck_prs {
 		uint64_t bckprs:1;
 #endif
 	} s;
-	struct cvmx_pip_bck_prs_s cn38xx;
-	struct cvmx_pip_bck_prs_s cn38xxp2;
-	struct cvmx_pip_bck_prs_s cn56xx;
-	struct cvmx_pip_bck_prs_s cn56xxp1;
-	struct cvmx_pip_bck_prs_s cn58xx;
-	struct cvmx_pip_bck_prs_s cn58xxp1;
-	struct cvmx_pip_bck_prs_s cn61xx;
-	struct cvmx_pip_bck_prs_s cn63xx;
-	struct cvmx_pip_bck_prs_s cn63xxp1;
-	struct cvmx_pip_bck_prs_s cn66xx;
-	struct cvmx_pip_bck_prs_s cn68xx;
-	struct cvmx_pip_bck_prs_s cn68xxp1;
-	struct cvmx_pip_bck_prs_s cnf71xx;
 };
 
 union cvmx_pip_bist_status {
@@ -218,9 +201,6 @@ union cvmx_pip_bist_status {
 		uint64_t reserved_18_63:46;
 #endif
 	} cn30xx;
-	struct cvmx_pip_bist_status_cn30xx cn31xx;
-	struct cvmx_pip_bist_status_cn30xx cn38xx;
-	struct cvmx_pip_bist_status_cn30xx cn38xxp2;
 	struct cvmx_pip_bist_status_cn50xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_17_63:47;
@@ -230,12 +210,6 @@ union cvmx_pip_bist_status {
 		uint64_t reserved_17_63:47;
 #endif
 	} cn50xx;
-	struct cvmx_pip_bist_status_cn30xx cn52xx;
-	struct cvmx_pip_bist_status_cn30xx cn52xxp1;
-	struct cvmx_pip_bist_status_cn30xx cn56xx;
-	struct cvmx_pip_bist_status_cn30xx cn56xxp1;
-	struct cvmx_pip_bist_status_cn30xx cn58xx;
-	struct cvmx_pip_bist_status_cn30xx cn58xxp1;
 	struct cvmx_pip_bist_status_cn61xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_20_63:44;
@@ -245,12 +219,6 @@ union cvmx_pip_bist_status {
 		uint64_t reserved_20_63:44;
 #endif
 	} cn61xx;
-	struct cvmx_pip_bist_status_cn30xx cn63xx;
-	struct cvmx_pip_bist_status_cn30xx cn63xxp1;
-	struct cvmx_pip_bist_status_cn61xx cn66xx;
-	struct cvmx_pip_bist_status_s cn68xx;
-	struct cvmx_pip_bist_status_cn61xx cn68xxp1;
-	struct cvmx_pip_bist_status_cn61xx cnf71xx;
 };
 
 union cvmx_pip_bsel_ext_cfgx {
@@ -274,9 +242,6 @@ union cvmx_pip_bsel_ext_cfgx {
 		uint64_t reserved_56_63:8;
 #endif
 	} s;
-	struct cvmx_pip_bsel_ext_cfgx_s cn61xx;
-	struct cvmx_pip_bsel_ext_cfgx_s cn68xx;
-	struct cvmx_pip_bsel_ext_cfgx_s cnf71xx;
 };
 
 union cvmx_pip_bsel_ext_posx {
@@ -318,9 +283,6 @@ union cvmx_pip_bsel_ext_posx {
 		uint64_t pos7_val:1;
 #endif
 	} s;
-	struct cvmx_pip_bsel_ext_posx_s cn61xx;
-	struct cvmx_pip_bsel_ext_posx_s cn68xx;
-	struct cvmx_pip_bsel_ext_posx_s cnf71xx;
 };
 
 union cvmx_pip_bsel_tbl_entx {
@@ -383,8 +345,6 @@ union cvmx_pip_bsel_tbl_entx {
 		uint64_t tag_en:1;
 #endif
 	} cn61xx;
-	struct cvmx_pip_bsel_tbl_entx_s cn68xx;
-	struct cvmx_pip_bsel_tbl_entx_cn61xx cnf71xx;
 };
 
 union cvmx_pip_clken {
@@ -398,13 +358,6 @@ union cvmx_pip_clken {
 		uint64_t reserved_1_63:63;
 #endif
 	} s;
-	struct cvmx_pip_clken_s cn61xx;
-	struct cvmx_pip_clken_s cn63xx;
-	struct cvmx_pip_clken_s cn63xxp1;
-	struct cvmx_pip_clken_s cn66xx;
-	struct cvmx_pip_clken_s cn68xx;
-	struct cvmx_pip_clken_s cn68xxp1;
-	struct cvmx_pip_clken_s cnf71xx;
 };
 
 union cvmx_pip_crc_ctlx {
@@ -420,10 +373,6 @@ union cvmx_pip_crc_ctlx {
 		uint64_t reserved_2_63:62;
 #endif
 	} s;
-	struct cvmx_pip_crc_ctlx_s cn38xx;
-	struct cvmx_pip_crc_ctlx_s cn38xxp2;
-	struct cvmx_pip_crc_ctlx_s cn58xx;
-	struct cvmx_pip_crc_ctlx_s cn58xxp1;
 };
 
 union cvmx_pip_crc_ivx {
@@ -437,10 +386,6 @@ union cvmx_pip_crc_ivx {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pip_crc_ivx_s cn38xx;
-	struct cvmx_pip_crc_ivx_s cn38xxp2;
-	struct cvmx_pip_crc_ivx_s cn58xx;
-	struct cvmx_pip_crc_ivx_s cn58xxp1;
 };
 
 union cvmx_pip_dec_ipsecx {
@@ -458,24 +403,6 @@ union cvmx_pip_dec_ipsecx {
 		uint64_t reserved_18_63:46;
 #endif
 	} s;
-	struct cvmx_pip_dec_ipsecx_s cn30xx;
-	struct cvmx_pip_dec_ipsecx_s cn31xx;
-	struct cvmx_pip_dec_ipsecx_s cn38xx;
-	struct cvmx_pip_dec_ipsecx_s cn38xxp2;
-	struct cvmx_pip_dec_ipsecx_s cn50xx;
-	struct cvmx_pip_dec_ipsecx_s cn52xx;
-	struct cvmx_pip_dec_ipsecx_s cn52xxp1;
-	struct cvmx_pip_dec_ipsecx_s cn56xx;
-	struct cvmx_pip_dec_ipsecx_s cn56xxp1;
-	struct cvmx_pip_dec_ipsecx_s cn58xx;
-	struct cvmx_pip_dec_ipsecx_s cn58xxp1;
-	struct cvmx_pip_dec_ipsecx_s cn61xx;
-	struct cvmx_pip_dec_ipsecx_s cn63xx;
-	struct cvmx_pip_dec_ipsecx_s cn63xxp1;
-	struct cvmx_pip_dec_ipsecx_s cn66xx;
-	struct cvmx_pip_dec_ipsecx_s cn68xx;
-	struct cvmx_pip_dec_ipsecx_s cn68xxp1;
-	struct cvmx_pip_dec_ipsecx_s cnf71xx;
 };
 
 union cvmx_pip_dsa_src_grp {
@@ -517,16 +444,6 @@ union cvmx_pip_dsa_src_grp {
 		uint64_t map15:4;
 #endif
 	} s;
-	struct cvmx_pip_dsa_src_grp_s cn52xx;
-	struct cvmx_pip_dsa_src_grp_s cn52xxp1;
-	struct cvmx_pip_dsa_src_grp_s cn56xx;
-	struct cvmx_pip_dsa_src_grp_s cn61xx;
-	struct cvmx_pip_dsa_src_grp_s cn63xx;
-	struct cvmx_pip_dsa_src_grp_s cn63xxp1;
-	struct cvmx_pip_dsa_src_grp_s cn66xx;
-	struct cvmx_pip_dsa_src_grp_s cn68xx;
-	struct cvmx_pip_dsa_src_grp_s cn68xxp1;
-	struct cvmx_pip_dsa_src_grp_s cnf71xx;
 };
 
 union cvmx_pip_dsa_vid_grp {
@@ -568,16 +485,6 @@ union cvmx_pip_dsa_vid_grp {
 		uint64_t map15:4;
 #endif
 	} s;
-	struct cvmx_pip_dsa_vid_grp_s cn52xx;
-	struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
-	struct cvmx_pip_dsa_vid_grp_s cn56xx;
-	struct cvmx_pip_dsa_vid_grp_s cn61xx;
-	struct cvmx_pip_dsa_vid_grp_s cn63xx;
-	struct cvmx_pip_dsa_vid_grp_s cn63xxp1;
-	struct cvmx_pip_dsa_vid_grp_s cn66xx;
-	struct cvmx_pip_dsa_vid_grp_s cn68xx;
-	struct cvmx_pip_dsa_vid_grp_s cn68xxp1;
-	struct cvmx_pip_dsa_vid_grp_s cnf71xx;
 };
 
 union cvmx_pip_frm_len_chkx {
@@ -593,18 +500,6 @@ union cvmx_pip_frm_len_chkx {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_pip_frm_len_chkx_s cn50xx;
-	struct cvmx_pip_frm_len_chkx_s cn52xx;
-	struct cvmx_pip_frm_len_chkx_s cn52xxp1;
-	struct cvmx_pip_frm_len_chkx_s cn56xx;
-	struct cvmx_pip_frm_len_chkx_s cn56xxp1;
-	struct cvmx_pip_frm_len_chkx_s cn61xx;
-	struct cvmx_pip_frm_len_chkx_s cn63xx;
-	struct cvmx_pip_frm_len_chkx_s cn63xxp1;
-	struct cvmx_pip_frm_len_chkx_s cn66xx;
-	struct cvmx_pip_frm_len_chkx_s cn68xx;
-	struct cvmx_pip_frm_len_chkx_s cn68xxp1;
-	struct cvmx_pip_frm_len_chkx_s cnf71xx;
 };
 
 union cvmx_pip_gbl_cfg {
@@ -630,24 +525,6 @@ union cvmx_pip_gbl_cfg {
 		uint64_t reserved_19_63:45;
 #endif
 	} s;
-	struct cvmx_pip_gbl_cfg_s cn30xx;
-	struct cvmx_pip_gbl_cfg_s cn31xx;
-	struct cvmx_pip_gbl_cfg_s cn38xx;
-	struct cvmx_pip_gbl_cfg_s cn38xxp2;
-	struct cvmx_pip_gbl_cfg_s cn50xx;
-	struct cvmx_pip_gbl_cfg_s cn52xx;
-	struct cvmx_pip_gbl_cfg_s cn52xxp1;
-	struct cvmx_pip_gbl_cfg_s cn56xx;
-	struct cvmx_pip_gbl_cfg_s cn56xxp1;
-	struct cvmx_pip_gbl_cfg_s cn58xx;
-	struct cvmx_pip_gbl_cfg_s cn58xxp1;
-	struct cvmx_pip_gbl_cfg_s cn61xx;
-	struct cvmx_pip_gbl_cfg_s cn63xx;
-	struct cvmx_pip_gbl_cfg_s cn63xxp1;
-	struct cvmx_pip_gbl_cfg_s cn66xx;
-	struct cvmx_pip_gbl_cfg_s cn68xx;
-	struct cvmx_pip_gbl_cfg_s cn68xxp1;
-	struct cvmx_pip_gbl_cfg_s cnf71xx;
 };
 
 union cvmx_pip_gbl_ctl {
@@ -742,10 +619,6 @@ union cvmx_pip_gbl_ctl {
 		uint64_t reserved_17_63:47;
 #endif
 	} cn30xx;
-	struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
-	struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
-	struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
-	struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
 	struct cvmx_pip_gbl_ctl_cn52xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_27_63:37;
@@ -795,8 +668,6 @@ union cvmx_pip_gbl_ctl {
 		uint64_t reserved_27_63:37;
 #endif
 	} cn52xx;
-	struct cvmx_pip_gbl_ctl_cn52xx cn52xxp1;
-	struct cvmx_pip_gbl_ctl_cn52xx cn56xx;
 	struct cvmx_pip_gbl_ctl_cn56xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_21_63:43;
@@ -838,8 +709,6 @@ union cvmx_pip_gbl_ctl {
 		uint64_t reserved_21_63:43;
 #endif
 	} cn56xxp1;
-	struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
-	struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
 	struct cvmx_pip_gbl_ctl_cn61xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_28_63:36;
@@ -891,9 +760,6 @@ union cvmx_pip_gbl_ctl {
 		uint64_t reserved_28_63:36;
 #endif
 	} cn61xx;
-	struct cvmx_pip_gbl_ctl_cn61xx cn63xx;
-	struct cvmx_pip_gbl_ctl_cn61xx cn63xxp1;
-	struct cvmx_pip_gbl_ctl_cn61xx cn66xx;
 	struct cvmx_pip_gbl_ctl_cn68xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_29_63:35;
@@ -990,7 +856,6 @@ union cvmx_pip_gbl_ctl {
 		uint64_t reserved_28_63:36;
 #endif
 	} cn68xxp1;
-	struct cvmx_pip_gbl_ctl_cn61xx cnf71xx;
 };
 
 union cvmx_pip_hg_pri_qos {
@@ -1012,14 +877,6 @@ union cvmx_pip_hg_pri_qos {
 		uint64_t reserved_13_63:51;
 #endif
 	} s;
-	struct cvmx_pip_hg_pri_qos_s cn52xx;
-	struct cvmx_pip_hg_pri_qos_s cn52xxp1;
-	struct cvmx_pip_hg_pri_qos_s cn56xx;
-	struct cvmx_pip_hg_pri_qos_s cn61xx;
-	struct cvmx_pip_hg_pri_qos_s cn63xx;
-	struct cvmx_pip_hg_pri_qos_s cn63xxp1;
-	struct cvmx_pip_hg_pri_qos_s cn66xx;
-	struct cvmx_pip_hg_pri_qos_s cnf71xx;
 };
 
 union cvmx_pip_int_en {
@@ -1082,9 +939,6 @@ union cvmx_pip_int_en {
 		uint64_t reserved_9_63:55;
 #endif
 	} cn30xx;
-	struct cvmx_pip_int_en_cn30xx cn31xx;
-	struct cvmx_pip_int_en_cn30xx cn38xx;
-	struct cvmx_pip_int_en_cn30xx cn38xxp2;
 	struct cvmx_pip_int_en_cn50xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_12_63:52;
@@ -1149,8 +1003,6 @@ union cvmx_pip_int_en {
 		uint64_t reserved_13_63:51;
 #endif
 	} cn52xx;
-	struct cvmx_pip_int_en_cn52xx cn52xxp1;
-	struct cvmx_pip_int_en_s cn56xx;
 	struct cvmx_pip_int_en_cn56xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_12_63:52;
@@ -1211,14 +1063,6 @@ union cvmx_pip_int_en {
 		uint64_t reserved_13_63:51;
 #endif
 	} cn58xx;
-	struct cvmx_pip_int_en_cn30xx cn58xxp1;
-	struct cvmx_pip_int_en_s cn61xx;
-	struct cvmx_pip_int_en_s cn63xx;
-	struct cvmx_pip_int_en_s cn63xxp1;
-	struct cvmx_pip_int_en_s cn66xx;
-	struct cvmx_pip_int_en_s cn68xx;
-	struct cvmx_pip_int_en_s cn68xxp1;
-	struct cvmx_pip_int_en_s cnf71xx;
 };
 
 union cvmx_pip_int_reg {
@@ -1281,9 +1125,6 @@ union cvmx_pip_int_reg {
 		uint64_t reserved_9_63:55;
 #endif
 	} cn30xx;
-	struct cvmx_pip_int_reg_cn30xx cn31xx;
-	struct cvmx_pip_int_reg_cn30xx cn38xx;
-	struct cvmx_pip_int_reg_cn30xx cn38xxp2;
 	struct cvmx_pip_int_reg_cn50xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_12_63:52;
@@ -1348,8 +1189,6 @@ union cvmx_pip_int_reg {
 		uint64_t reserved_13_63:51;
 #endif
 	} cn52xx;
-	struct cvmx_pip_int_reg_cn52xx cn52xxp1;
-	struct cvmx_pip_int_reg_s cn56xx;
 	struct cvmx_pip_int_reg_cn56xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_12_63:52;
@@ -1410,14 +1249,6 @@ union cvmx_pip_int_reg {
 		uint64_t reserved_13_63:51;
 #endif
 	} cn58xx;
-	struct cvmx_pip_int_reg_cn30xx cn58xxp1;
-	struct cvmx_pip_int_reg_s cn61xx;
-	struct cvmx_pip_int_reg_s cn63xx;
-	struct cvmx_pip_int_reg_s cn63xxp1;
-	struct cvmx_pip_int_reg_s cn66xx;
-	struct cvmx_pip_int_reg_s cn68xx;
-	struct cvmx_pip_int_reg_s cn68xxp1;
-	struct cvmx_pip_int_reg_s cnf71xx;
 };
 
 union cvmx_pip_ip_offset {
@@ -1431,24 +1262,6 @@ union cvmx_pip_ip_offset {
 		uint64_t reserved_3_63:61;
 #endif
 	} s;
-	struct cvmx_pip_ip_offset_s cn30xx;
-	struct cvmx_pip_ip_offset_s cn31xx;
-	struct cvmx_pip_ip_offset_s cn38xx;
-	struct cvmx_pip_ip_offset_s cn38xxp2;
-	struct cvmx_pip_ip_offset_s cn50xx;
-	struct cvmx_pip_ip_offset_s cn52xx;
-	struct cvmx_pip_ip_offset_s cn52xxp1;
-	struct cvmx_pip_ip_offset_s cn56xx;
-	struct cvmx_pip_ip_offset_s cn56xxp1;
-	struct cvmx_pip_ip_offset_s cn58xx;
-	struct cvmx_pip_ip_offset_s cn58xxp1;
-	struct cvmx_pip_ip_offset_s cn61xx;
-	struct cvmx_pip_ip_offset_s cn63xx;
-	struct cvmx_pip_ip_offset_s cn63xxp1;
-	struct cvmx_pip_ip_offset_s cn66xx;
-	struct cvmx_pip_ip_offset_s cn68xx;
-	struct cvmx_pip_ip_offset_s cn68xxp1;
-	struct cvmx_pip_ip_offset_s cnf71xx;
 };
 
 union cvmx_pip_pri_tblx {
@@ -1488,8 +1301,6 @@ union cvmx_pip_pri_tblx {
 		uint64_t diff2_padd:8;
 #endif
 	} s;
-	struct cvmx_pip_pri_tblx_s cn68xx;
-	struct cvmx_pip_pri_tblx_s cn68xxp1;
 };
 
 union cvmx_pip_prt_cfgx {
@@ -1596,7 +1407,6 @@ union cvmx_pip_prt_cfgx {
 		uint64_t reserved_37_63:27;
 #endif
 	} cn30xx;
-	struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
 	struct cvmx_pip_prt_cfgx_cn38xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_37_63:27;
@@ -1638,7 +1448,6 @@ union cvmx_pip_prt_cfgx {
 		uint64_t reserved_37_63:27;
 #endif
 	} cn38xx;
-	struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
 	struct cvmx_pip_prt_cfgx_cn50xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_53_63:11;
@@ -1759,9 +1568,6 @@ union cvmx_pip_prt_cfgx {
 		uint64_t reserved_53_63:11;
 #endif
 	} cn52xx;
-	struct cvmx_pip_prt_cfgx_cn52xx cn52xxp1;
-	struct cvmx_pip_prt_cfgx_cn52xx cn56xx;
-	struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
 	struct cvmx_pip_prt_cfgx_cn58xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_37_63:27;
@@ -1805,11 +1611,6 @@ union cvmx_pip_prt_cfgx {
 		uint64_t reserved_37_63:27;
 #endif
 	} cn58xx;
-	struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
-	struct cvmx_pip_prt_cfgx_cn52xx cn61xx;
-	struct cvmx_pip_prt_cfgx_cn52xx cn63xx;
-	struct cvmx_pip_prt_cfgx_cn52xx cn63xxp1;
-	struct cvmx_pip_prt_cfgx_cn52xx cn66xx;
 	struct cvmx_pip_prt_cfgx_cn68xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_55_63:9;
@@ -1875,8 +1676,6 @@ union cvmx_pip_prt_cfgx {
 		uint64_t reserved_55_63:9;
 #endif
 	} cn68xx;
-	struct cvmx_pip_prt_cfgx_cn68xx cn68xxp1;
-	struct cvmx_pip_prt_cfgx_cn52xx cnf71xx;
 };
 
 union cvmx_pip_prt_cfgbx {
@@ -1938,7 +1737,6 @@ union cvmx_pip_prt_cfgbx {
 		uint64_t reserved_39_63:25;
 #endif
 	} cn66xx;
-	struct cvmx_pip_prt_cfgbx_s cn68xx;
 	struct cvmx_pip_prt_cfgbx_cn68xxp1 {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_24_63:40;
@@ -1952,7 +1750,6 @@ union cvmx_pip_prt_cfgbx {
 		uint64_t reserved_24_63:40;
 #endif
 	} cn68xxp1;
-	struct cvmx_pip_prt_cfgbx_cn61xx cnf71xx;
 };
 
 union cvmx_pip_prt_tagx {
@@ -2083,9 +1880,6 @@ union cvmx_pip_prt_tagx {
 		uint64_t reserved_40_63:24;
 #endif
 	} cn30xx;
-	struct cvmx_pip_prt_tagx_cn30xx cn31xx;
-	struct cvmx_pip_prt_tagx_cn30xx cn38xx;
-	struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
 	struct cvmx_pip_prt_tagx_cn50xx {
 #ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_40_63:24;
@@ -2141,19 +1935,6 @@ union cvmx_pip_prt_tagx {
 		uint64_t reserved_40_63:24;
 #endif
 	} cn50xx;
-	struct cvmx_pip_prt_tagx_cn50xx cn52xx;
-	struct cvmx_pip_prt_tagx_cn50xx cn52xxp1;
-	struct cvmx_pip_prt_tagx_cn50xx cn56xx;
-	struct cvmx_pip_prt_tagx_cn50xx cn56xxp1;
-	struct cvmx_pip_prt_tagx_cn30xx cn58xx;
-	struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
-	struct cvmx_pip_prt_tagx_cn50xx cn61xx;
-	struct cvmx_pip_prt_tagx_cn50xx cn63xx;
-	struct cvmx_pip_prt_tagx_cn50xx cn63xxp1;
-	struct cvmx_pip_prt_tagx_cn50xx cn66xx;
-	struct cvmx_pip_prt_tagx_s cn68xx;
-	struct cvmx_pip_prt_tagx_s cn68xxp1;
-	struct cvmx_pip_prt_tagx_cn50xx cnf71xx;
 };
 
 union cvmx_pip_qos_diffx {
@@ -2167,22 +1948,6 @@ union cvmx_pip_qos_diffx {
 		uint64_t reserved_3_63:61;
 #endif
 	} s;
-	struct cvmx_pip_qos_diffx_s cn30xx;
-	struct cvmx_pip_qos_diffx_s cn31xx;
-	struct cvmx_pip_qos_diffx_s cn38xx;
-	struct cvmx_pip_qos_diffx_s cn38xxp2;
-	struct cvmx_pip_qos_diffx_s cn50xx;
-	struct cvmx_pip_qos_diffx_s cn52xx;
-	struct cvmx_pip_qos_diffx_s cn52xxp1;
-	struct cvmx_pip_qos_diffx_s cn56xx;
-	struct cvmx_pip_qos_diffx_s cn56xxp1;
-	struct cvmx_pip_qos_diffx_s cn58xx;
-	struct cvmx_pip_qos_diffx_s cn58xxp1;
-	struct cvmx_pip_qos_diffx_s cn61xx;
-	struct cvmx_pip_qos_diffx_s cn63xx;
-	struct cvmx_pip_qos_diffx_s cn63xxp1;
-	struct cvmx_pip_qos_diffx_s cn66xx;
-	struct cvmx_pip_qos_diffx_s cnf71xx;
 };
 
 union cvmx_pip_qos_vlanx {
@@ -2209,21 +1974,6 @@ union cvmx_pip_qos_vlanx {
 		uint64_t reserved_3_63:61;
 #endif
 	} cn30xx;
-	struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
-	struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
-	struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
-	struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
-	struct cvmx_pip_qos_vlanx_s cn52xx;
-	struct cvmx_pip_qos_vlanx_s cn52xxp1;
-	struct cvmx_pip_qos_vlanx_s cn56xx;
-	struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
-	struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
-	struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
-	struct cvmx_pip_qos_vlanx_s cn61xx;
-	struct cvmx_pip_qos_vlanx_s cn63xx;
-	struct cvmx_pip_qos_vlanx_s cn63xxp1;
-	struct cvmx_pip_qos_vlanx_s cn66xx;
-	struct cvmx_pip_qos_vlanx_s cnf71xx;
 };
 
 union cvmx_pip_qos_watchx {
@@ -2274,9 +2024,6 @@ union cvmx_pip_qos_watchx {
 		uint64_t reserved_48_63:16;
 #endif
 	} cn30xx;
-	struct cvmx_pip_qos_watchx_cn30xx cn31xx;
-	struct cvmx_pip_qos_watchx_cn30xx cn38xx;
-	struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
 	struct cvmx_pip_qos_watchx_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
 		uint64_t reserved_48_63:16;
@@ -2300,19 +2047,6 @@ union cvmx_pip_qos_watchx {
 		uint64_t reserved_48_63:16;
 #endif
 	} cn50xx;
-	struct cvmx_pip_qos_watchx_cn50xx cn52xx;
-	struct cvmx_pip_qos_watchx_cn50xx cn52xxp1;
-	struct cvmx_pip_qos_watchx_cn50xx cn56xx;
-	struct cvmx_pip_qos_watchx_cn50xx cn56xxp1;
-	struct cvmx_pip_qos_watchx_cn30xx cn58xx;
-	struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
-	struct cvmx_pip_qos_watchx_cn50xx cn61xx;
-	struct cvmx_pip_qos_watchx_cn50xx cn63xx;
-	struct cvmx_pip_qos_watchx_cn50xx cn63xxp1;
-	struct cvmx_pip_qos_watchx_cn50xx cn66xx;
-	struct cvmx_pip_qos_watchx_s cn68xx;
-	struct cvmx_pip_qos_watchx_s cn68xxp1;
-	struct cvmx_pip_qos_watchx_cn50xx cnf71xx;
 };
 
 union cvmx_pip_raw_word {
@@ -2326,24 +2060,6 @@ union cvmx_pip_raw_word {
 		uint64_t reserved_56_63:8;
 #endif
 	} s;
-	struct cvmx_pip_raw_word_s cn30xx;
-	struct cvmx_pip_raw_word_s cn31xx;
-	struct cvmx_pip_raw_word_s cn38xx;
-	struct cvmx_pip_raw_word_s cn38xxp2;
-	struct cvmx_pip_raw_word_s cn50xx;
-	struct cvmx_pip_raw_word_s cn52xx;
-	struct cvmx_pip_raw_word_s cn52xxp1;
-	struct cvmx_pip_raw_word_s cn56xx;
-	struct cvmx_pip_raw_word_s cn56xxp1;
-	struct cvmx_pip_raw_word_s cn58xx;
-	struct cvmx_pip_raw_word_s cn58xxp1;
-	struct cvmx_pip_raw_word_s cn61xx;
-	struct cvmx_pip_raw_word_s cn63xx;
-	struct cvmx_pip_raw_word_s cn63xxp1;
-	struct cvmx_pip_raw_word_s cn66xx;
-	struct cvmx_pip_raw_word_s cn68xx;
-	struct cvmx_pip_raw_word_s cn68xxp1;
-	struct cvmx_pip_raw_word_s cnf71xx;
 };
 
 union cvmx_pip_sft_rst {
@@ -2357,23 +2073,6 @@ union cvmx_pip_sft_rst {
 		uint64_t reserved_1_63:63;
 #endif
 	} s;
-	struct cvmx_pip_sft_rst_s cn30xx;
-	struct cvmx_pip_sft_rst_s cn31xx;
-	struct cvmx_pip_sft_rst_s cn38xx;
-	struct cvmx_pip_sft_rst_s cn50xx;
-	struct cvmx_pip_sft_rst_s cn52xx;
-	struct cvmx_pip_sft_rst_s cn52xxp1;
-	struct cvmx_pip_sft_rst_s cn56xx;
-	struct cvmx_pip_sft_rst_s cn56xxp1;
-	struct cvmx_pip_sft_rst_s cn58xx;
-	struct cvmx_pip_sft_rst_s cn58xxp1;
-	struct cvmx_pip_sft_rst_s cn61xx;
-	struct cvmx_pip_sft_rst_s cn63xx;
-	struct cvmx_pip_sft_rst_s cn63xxp1;
-	struct cvmx_pip_sft_rst_s cn66xx;
-	struct cvmx_pip_sft_rst_s cn68xx;
-	struct cvmx_pip_sft_rst_s cn68xxp1;
-	struct cvmx_pip_sft_rst_s cnf71xx;
 };
 
 union cvmx_pip_stat0_x {
@@ -2387,8 +2086,6 @@ union cvmx_pip_stat0_x {
 		uint64_t drp_pkts:32;
 #endif
 	} s;
-	struct cvmx_pip_stat0_x_s cn68xx;
-	struct cvmx_pip_stat0_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat0_prtx {
@@ -2402,22 +2099,6 @@ union cvmx_pip_stat0_prtx {
 		uint64_t drp_pkts:32;
 #endif
 	} s;
-	struct cvmx_pip_stat0_prtx_s cn30xx;
-	struct cvmx_pip_stat0_prtx_s cn31xx;
-	struct cvmx_pip_stat0_prtx_s cn38xx;
-	struct cvmx_pip_stat0_prtx_s cn38xxp2;
-	struct cvmx_pip_stat0_prtx_s cn50xx;
-	struct cvmx_pip_stat0_prtx_s cn52xx;
-	struct cvmx_pip_stat0_prtx_s cn52xxp1;
-	struct cvmx_pip_stat0_prtx_s cn56xx;
-	struct cvmx_pip_stat0_prtx_s cn56xxp1;
-	struct cvmx_pip_stat0_prtx_s cn58xx;
-	struct cvmx_pip_stat0_prtx_s cn58xxp1;
-	struct cvmx_pip_stat0_prtx_s cn61xx;
-	struct cvmx_pip_stat0_prtx_s cn63xx;
-	struct cvmx_pip_stat0_prtx_s cn63xxp1;
-	struct cvmx_pip_stat0_prtx_s cn66xx;
-	struct cvmx_pip_stat0_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat10_x {
@@ -2431,8 +2112,6 @@ union cvmx_pip_stat10_x {
 		uint64_t bcast:32;
 #endif
 	} s;
-	struct cvmx_pip_stat10_x_s cn68xx;
-	struct cvmx_pip_stat10_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat10_prtx {
@@ -2446,15 +2125,6 @@ union cvmx_pip_stat10_prtx {
 		uint64_t bcast:32;
 #endif
 	} s;
-	struct cvmx_pip_stat10_prtx_s cn52xx;
-	struct cvmx_pip_stat10_prtx_s cn52xxp1;
-	struct cvmx_pip_stat10_prtx_s cn56xx;
-	struct cvmx_pip_stat10_prtx_s cn56xxp1;
-	struct cvmx_pip_stat10_prtx_s cn61xx;
-	struct cvmx_pip_stat10_prtx_s cn63xx;
-	struct cvmx_pip_stat10_prtx_s cn63xxp1;
-	struct cvmx_pip_stat10_prtx_s cn66xx;
-	struct cvmx_pip_stat10_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat11_x {
@@ -2468,8 +2138,6 @@ union cvmx_pip_stat11_x {
 		uint64_t bcast:32;
 #endif
 	} s;
-	struct cvmx_pip_stat11_x_s cn68xx;
-	struct cvmx_pip_stat11_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat11_prtx {
@@ -2483,15 +2151,6 @@ union cvmx_pip_stat11_prtx {
 		uint64_t bcast:32;
 #endif
 	} s;
-	struct cvmx_pip_stat11_prtx_s cn52xx;
-	struct cvmx_pip_stat11_prtx_s cn52xxp1;
-	struct cvmx_pip_stat11_prtx_s cn56xx;
-	struct cvmx_pip_stat11_prtx_s cn56xxp1;
-	struct cvmx_pip_stat11_prtx_s cn61xx;
-	struct cvmx_pip_stat11_prtx_s cn63xx;
-	struct cvmx_pip_stat11_prtx_s cn63xxp1;
-	struct cvmx_pip_stat11_prtx_s cn66xx;
-	struct cvmx_pip_stat11_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat1_x {
@@ -2505,8 +2164,6 @@ union cvmx_pip_stat1_x {
 		uint64_t reserved_48_63:16;
 #endif
 	} s;
-	struct cvmx_pip_stat1_x_s cn68xx;
-	struct cvmx_pip_stat1_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat1_prtx {
@@ -2520,22 +2177,6 @@ union cvmx_pip_stat1_prtx {
 		uint64_t reserved_48_63:16;
 #endif
 	} s;
-	struct cvmx_pip_stat1_prtx_s cn30xx;
-	struct cvmx_pip_stat1_prtx_s cn31xx;
-	struct cvmx_pip_stat1_prtx_s cn38xx;
-	struct cvmx_pip_stat1_prtx_s cn38xxp2;
-	struct cvmx_pip_stat1_prtx_s cn50xx;
-	struct cvmx_pip_stat1_prtx_s cn52xx;
-	struct cvmx_pip_stat1_prtx_s cn52xxp1;
-	struct cvmx_pip_stat1_prtx_s cn56xx;
-	struct cvmx_pip_stat1_prtx_s cn56xxp1;
-	struct cvmx_pip_stat1_prtx_s cn58xx;
-	struct cvmx_pip_stat1_prtx_s cn58xxp1;
-	struct cvmx_pip_stat1_prtx_s cn61xx;
-	struct cvmx_pip_stat1_prtx_s cn63xx;
-	struct cvmx_pip_stat1_prtx_s cn63xxp1;
-	struct cvmx_pip_stat1_prtx_s cn66xx;
-	struct cvmx_pip_stat1_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat2_x {
@@ -2549,8 +2190,6 @@ union cvmx_pip_stat2_x {
 		uint64_t pkts:32;
 #endif
 	} s;
-	struct cvmx_pip_stat2_x_s cn68xx;
-	struct cvmx_pip_stat2_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat2_prtx {
@@ -2564,22 +2203,6 @@ union cvmx_pip_stat2_prtx {
 		uint64_t pkts:32;
 #endif
 	} s;
-	struct cvmx_pip_stat2_prtx_s cn30xx;
-	struct cvmx_pip_stat2_prtx_s cn31xx;
-	struct cvmx_pip_stat2_prtx_s cn38xx;
-	struct cvmx_pip_stat2_prtx_s cn38xxp2;
-	struct cvmx_pip_stat2_prtx_s cn50xx;
-	struct cvmx_pip_stat2_prtx_s cn52xx;
-	struct cvmx_pip_stat2_prtx_s cn52xxp1;
-	struct cvmx_pip_stat2_prtx_s cn56xx;
-	struct cvmx_pip_stat2_prtx_s cn56xxp1;
-	struct cvmx_pip_stat2_prtx_s cn58xx;
-	struct cvmx_pip_stat2_prtx_s cn58xxp1;
-	struct cvmx_pip_stat2_prtx_s cn61xx;
-	struct cvmx_pip_stat2_prtx_s cn63xx;
-	struct cvmx_pip_stat2_prtx_s cn63xxp1;
-	struct cvmx_pip_stat2_prtx_s cn66xx;
-	struct cvmx_pip_stat2_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat3_x {
@@ -2593,8 +2216,6 @@ union cvmx_pip_stat3_x {
 		uint64_t bcst:32;
 #endif
 	} s;
-	struct cvmx_pip_stat3_x_s cn68xx;
-	struct cvmx_pip_stat3_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat3_prtx {
@@ -2608,22 +2229,6 @@ union cvmx_pip_stat3_prtx {
 		uint64_t bcst:32;
 #endif
 	} s;
-	struct cvmx_pip_stat3_prtx_s cn30xx;
-	struct cvmx_pip_stat3_prtx_s cn31xx;
-	struct cvmx_pip_stat3_prtx_s cn38xx;
-	struct cvmx_pip_stat3_prtx_s cn38xxp2;
-	struct cvmx_pip_stat3_prtx_s cn50xx;
-	struct cvmx_pip_stat3_prtx_s cn52xx;
-	struct cvmx_pip_stat3_prtx_s cn52xxp1;
-	struct cvmx_pip_stat3_prtx_s cn56xx;
-	struct cvmx_pip_stat3_prtx_s cn56xxp1;
-	struct cvmx_pip_stat3_prtx_s cn58xx;
-	struct cvmx_pip_stat3_prtx_s cn58xxp1;
-	struct cvmx_pip_stat3_prtx_s cn61xx;
-	struct cvmx_pip_stat3_prtx_s cn63xx;
-	struct cvmx_pip_stat3_prtx_s cn63xxp1;
-	struct cvmx_pip_stat3_prtx_s cn66xx;
-	struct cvmx_pip_stat3_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat4_x {
@@ -2637,8 +2242,6 @@ union cvmx_pip_stat4_x {
 		uint64_t h65to127:32;
 #endif
 	} s;
-	struct cvmx_pip_stat4_x_s cn68xx;
-	struct cvmx_pip_stat4_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat4_prtx {
@@ -2652,22 +2255,6 @@ union cvmx_pip_stat4_prtx {
 		uint64_t h65to127:32;
 #endif
 	} s;
-	struct cvmx_pip_stat4_prtx_s cn30xx;
-	struct cvmx_pip_stat4_prtx_s cn31xx;
-	struct cvmx_pip_stat4_prtx_s cn38xx;
-	struct cvmx_pip_stat4_prtx_s cn38xxp2;
-	struct cvmx_pip_stat4_prtx_s cn50xx;
-	struct cvmx_pip_stat4_prtx_s cn52xx;
-	struct cvmx_pip_stat4_prtx_s cn52xxp1;
-	struct cvmx_pip_stat4_prtx_s cn56xx;
-	struct cvmx_pip_stat4_prtx_s cn56xxp1;
-	struct cvmx_pip_stat4_prtx_s cn58xx;
-	struct cvmx_pip_stat4_prtx_s cn58xxp1;
-	struct cvmx_pip_stat4_prtx_s cn61xx;
-	struct cvmx_pip_stat4_prtx_s cn63xx;
-	struct cvmx_pip_stat4_prtx_s cn63xxp1;
-	struct cvmx_pip_stat4_prtx_s cn66xx;
-	struct cvmx_pip_stat4_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat5_x {
@@ -2681,8 +2268,6 @@ union cvmx_pip_stat5_x {
 		uint64_t h256to511:32;
 #endif
 	} s;
-	struct cvmx_pip_stat5_x_s cn68xx;
-	struct cvmx_pip_stat5_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat5_prtx {
@@ -2696,22 +2281,6 @@ union cvmx_pip_stat5_prtx {
 		uint64_t h256to511:32;
 #endif
 	} s;
-	struct cvmx_pip_stat5_prtx_s cn30xx;
-	struct cvmx_pip_stat5_prtx_s cn31xx;
-	struct cvmx_pip_stat5_prtx_s cn38xx;
-	struct cvmx_pip_stat5_prtx_s cn38xxp2;
-	struct cvmx_pip_stat5_prtx_s cn50xx;
-	struct cvmx_pip_stat5_prtx_s cn52xx;
-	struct cvmx_pip_stat5_prtx_s cn52xxp1;
-	struct cvmx_pip_stat5_prtx_s cn56xx;
-	struct cvmx_pip_stat5_prtx_s cn56xxp1;
-	struct cvmx_pip_stat5_prtx_s cn58xx;
-	struct cvmx_pip_stat5_prtx_s cn58xxp1;
-	struct cvmx_pip_stat5_prtx_s cn61xx;
-	struct cvmx_pip_stat5_prtx_s cn63xx;
-	struct cvmx_pip_stat5_prtx_s cn63xxp1;
-	struct cvmx_pip_stat5_prtx_s cn66xx;
-	struct cvmx_pip_stat5_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat6_x {
@@ -2725,8 +2294,6 @@ union cvmx_pip_stat6_x {
 		uint64_t h1024to1518:32;
 #endif
 	} s;
-	struct cvmx_pip_stat6_x_s cn68xx;
-	struct cvmx_pip_stat6_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat6_prtx {
@@ -2740,22 +2307,6 @@ union cvmx_pip_stat6_prtx {
 		uint64_t h1024to1518:32;
 #endif
 	} s;
-	struct cvmx_pip_stat6_prtx_s cn30xx;
-	struct cvmx_pip_stat6_prtx_s cn31xx;
-	struct cvmx_pip_stat6_prtx_s cn38xx;
-	struct cvmx_pip_stat6_prtx_s cn38xxp2;
-	struct cvmx_pip_stat6_prtx_s cn50xx;
-	struct cvmx_pip_stat6_prtx_s cn52xx;
-	struct cvmx_pip_stat6_prtx_s cn52xxp1;
-	struct cvmx_pip_stat6_prtx_s cn56xx;
-	struct cvmx_pip_stat6_prtx_s cn56xxp1;
-	struct cvmx_pip_stat6_prtx_s cn58xx;
-	struct cvmx_pip_stat6_prtx_s cn58xxp1;
-	struct cvmx_pip_stat6_prtx_s cn61xx;
-	struct cvmx_pip_stat6_prtx_s cn63xx;
-	struct cvmx_pip_stat6_prtx_s cn63xxp1;
-	struct cvmx_pip_stat6_prtx_s cn66xx;
-	struct cvmx_pip_stat6_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat7_x {
@@ -2769,8 +2320,6 @@ union cvmx_pip_stat7_x {
 		uint64_t fcs:32;
 #endif
 	} s;
-	struct cvmx_pip_stat7_x_s cn68xx;
-	struct cvmx_pip_stat7_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat7_prtx {
@@ -2784,22 +2333,6 @@ union cvmx_pip_stat7_prtx {
 		uint64_t fcs:32;
 #endif
 	} s;
-	struct cvmx_pip_stat7_prtx_s cn30xx;
-	struct cvmx_pip_stat7_prtx_s cn31xx;
-	struct cvmx_pip_stat7_prtx_s cn38xx;
-	struct cvmx_pip_stat7_prtx_s cn38xxp2;
-	struct cvmx_pip_stat7_prtx_s cn50xx;
-	struct cvmx_pip_stat7_prtx_s cn52xx;
-	struct cvmx_pip_stat7_prtx_s cn52xxp1;
-	struct cvmx_pip_stat7_prtx_s cn56xx;
-	struct cvmx_pip_stat7_prtx_s cn56xxp1;
-	struct cvmx_pip_stat7_prtx_s cn58xx;
-	struct cvmx_pip_stat7_prtx_s cn58xxp1;
-	struct cvmx_pip_stat7_prtx_s cn61xx;
-	struct cvmx_pip_stat7_prtx_s cn63xx;
-	struct cvmx_pip_stat7_prtx_s cn63xxp1;
-	struct cvmx_pip_stat7_prtx_s cn66xx;
-	struct cvmx_pip_stat7_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat8_x {
@@ -2813,8 +2346,6 @@ union cvmx_pip_stat8_x {
 		uint64_t frag:32;
 #endif
 	} s;
-	struct cvmx_pip_stat8_x_s cn68xx;
-	struct cvmx_pip_stat8_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat8_prtx {
@@ -2828,22 +2359,6 @@ union cvmx_pip_stat8_prtx {
 		uint64_t frag:32;
 #endif
 	} s;
-	struct cvmx_pip_stat8_prtx_s cn30xx;
-	struct cvmx_pip_stat8_prtx_s cn31xx;
-	struct cvmx_pip_stat8_prtx_s cn38xx;
-	struct cvmx_pip_stat8_prtx_s cn38xxp2;
-	struct cvmx_pip_stat8_prtx_s cn50xx;
-	struct cvmx_pip_stat8_prtx_s cn52xx;
-	struct cvmx_pip_stat8_prtx_s cn52xxp1;
-	struct cvmx_pip_stat8_prtx_s cn56xx;
-	struct cvmx_pip_stat8_prtx_s cn56xxp1;
-	struct cvmx_pip_stat8_prtx_s cn58xx;
-	struct cvmx_pip_stat8_prtx_s cn58xxp1;
-	struct cvmx_pip_stat8_prtx_s cn61xx;
-	struct cvmx_pip_stat8_prtx_s cn63xx;
-	struct cvmx_pip_stat8_prtx_s cn63xxp1;
-	struct cvmx_pip_stat8_prtx_s cn66xx;
-	struct cvmx_pip_stat8_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat9_x {
@@ -2857,8 +2372,6 @@ union cvmx_pip_stat9_x {
 		uint64_t jabber:32;
 #endif
 	} s;
-	struct cvmx_pip_stat9_x_s cn68xx;
-	struct cvmx_pip_stat9_x_s cn68xxp1;
 };
 
 union cvmx_pip_stat9_prtx {
@@ -2872,22 +2385,6 @@ union cvmx_pip_stat9_prtx {
 		uint64_t jabber:32;
 #endif
 	} s;
-	struct cvmx_pip_stat9_prtx_s cn30xx;
-	struct cvmx_pip_stat9_prtx_s cn31xx;
-	struct cvmx_pip_stat9_prtx_s cn38xx;
-	struct cvmx_pip_stat9_prtx_s cn38xxp2;
-	struct cvmx_pip_stat9_prtx_s cn50xx;
-	struct cvmx_pip_stat9_prtx_s cn52xx;
-	struct cvmx_pip_stat9_prtx_s cn52xxp1;
-	struct cvmx_pip_stat9_prtx_s cn56xx;
-	struct cvmx_pip_stat9_prtx_s cn56xxp1;
-	struct cvmx_pip_stat9_prtx_s cn58xx;
-	struct cvmx_pip_stat9_prtx_s cn58xxp1;
-	struct cvmx_pip_stat9_prtx_s cn61xx;
-	struct cvmx_pip_stat9_prtx_s cn63xx;
-	struct cvmx_pip_stat9_prtx_s cn63xxp1;
-	struct cvmx_pip_stat9_prtx_s cn66xx;
-	struct cvmx_pip_stat9_prtx_s cnf71xx;
 };
 
 union cvmx_pip_stat_ctl {
@@ -2914,23 +2411,6 @@ union cvmx_pip_stat_ctl {
 		uint64_t reserved_1_63:63;
 #endif
 	} cn30xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn31xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn38xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn38xxp2;
-	struct cvmx_pip_stat_ctl_cn30xx cn50xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn52xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn52xxp1;
-	struct cvmx_pip_stat_ctl_cn30xx cn56xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn56xxp1;
-	struct cvmx_pip_stat_ctl_cn30xx cn58xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn58xxp1;
-	struct cvmx_pip_stat_ctl_cn30xx cn61xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn63xx;
-	struct cvmx_pip_stat_ctl_cn30xx cn63xxp1;
-	struct cvmx_pip_stat_ctl_cn30xx cn66xx;
-	struct cvmx_pip_stat_ctl_s cn68xx;
-	struct cvmx_pip_stat_ctl_s cn68xxp1;
-	struct cvmx_pip_stat_ctl_cn30xx cnf71xx;
 };
 
 union cvmx_pip_stat_inb_errsx {
@@ -2944,22
+2424,6 @@ union cvmx_pip_stat_inb_errsx { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pip_stat_inb_errsx_s cn30xx; - struct cvmx_pip_stat_inb_errsx_s cn31xx; - struct cvmx_pip_stat_inb_errsx_s cn38xx; - struct cvmx_pip_stat_inb_errsx_s cn38xxp2; - struct cvmx_pip_stat_inb_errsx_s cn50xx; - struct cvmx_pip_stat_inb_errsx_s cn52xx; - struct cvmx_pip_stat_inb_errsx_s cn52xxp1; - struct cvmx_pip_stat_inb_errsx_s cn56xx; - struct cvmx_pip_stat_inb_errsx_s cn56xxp1; - struct cvmx_pip_stat_inb_errsx_s cn58xx; - struct cvmx_pip_stat_inb_errsx_s cn58xxp1; - struct cvmx_pip_stat_inb_errsx_s cn61xx; - struct cvmx_pip_stat_inb_errsx_s cn63xx; - struct cvmx_pip_stat_inb_errsx_s cn63xxp1; - struct cvmx_pip_stat_inb_errsx_s cn66xx; - struct cvmx_pip_stat_inb_errsx_s cnf71xx; }; union cvmx_pip_stat_inb_errs_pkndx { @@ -2973,8 +2437,6 @@ union cvmx_pip_stat_inb_errs_pkndx { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pip_stat_inb_errs_pkndx_s cn68xx; - struct cvmx_pip_stat_inb_errs_pkndx_s cn68xxp1; }; union cvmx_pip_stat_inb_octsx { @@ -2988,22 +2450,6 @@ union cvmx_pip_stat_inb_octsx { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_pip_stat_inb_octsx_s cn30xx; - struct cvmx_pip_stat_inb_octsx_s cn31xx; - struct cvmx_pip_stat_inb_octsx_s cn38xx; - struct cvmx_pip_stat_inb_octsx_s cn38xxp2; - struct cvmx_pip_stat_inb_octsx_s cn50xx; - struct cvmx_pip_stat_inb_octsx_s cn52xx; - struct cvmx_pip_stat_inb_octsx_s cn52xxp1; - struct cvmx_pip_stat_inb_octsx_s cn56xx; - struct cvmx_pip_stat_inb_octsx_s cn56xxp1; - struct cvmx_pip_stat_inb_octsx_s cn58xx; - struct cvmx_pip_stat_inb_octsx_s cn58xxp1; - struct cvmx_pip_stat_inb_octsx_s cn61xx; - struct cvmx_pip_stat_inb_octsx_s cn63xx; - struct cvmx_pip_stat_inb_octsx_s cn63xxp1; - struct cvmx_pip_stat_inb_octsx_s cn66xx; - struct cvmx_pip_stat_inb_octsx_s cnf71xx; }; union cvmx_pip_stat_inb_octs_pkndx { @@ -3017,8 +2463,6 @@ union cvmx_pip_stat_inb_octs_pkndx { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_pip_stat_inb_octs_pkndx_s cn68xx; - struct cvmx_pip_stat_inb_octs_pkndx_s cn68xxp1; }; union cvmx_pip_stat_inb_pktsx { @@ -3032,22 +2476,6 @@ union cvmx_pip_stat_inb_pktsx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pip_stat_inb_pktsx_s cn30xx; - struct cvmx_pip_stat_inb_pktsx_s cn31xx; - struct cvmx_pip_stat_inb_pktsx_s cn38xx; - struct cvmx_pip_stat_inb_pktsx_s cn38xxp2; - struct cvmx_pip_stat_inb_pktsx_s cn50xx; - struct cvmx_pip_stat_inb_pktsx_s cn52xx; - struct cvmx_pip_stat_inb_pktsx_s cn52xxp1; - struct cvmx_pip_stat_inb_pktsx_s cn56xx; - struct cvmx_pip_stat_inb_pktsx_s cn56xxp1; - struct cvmx_pip_stat_inb_pktsx_s cn58xx; - struct cvmx_pip_stat_inb_pktsx_s cn58xxp1; - struct cvmx_pip_stat_inb_pktsx_s cn61xx; - struct cvmx_pip_stat_inb_pktsx_s cn63xx; - struct cvmx_pip_stat_inb_pktsx_s cn63xxp1; - struct cvmx_pip_stat_inb_pktsx_s cn66xx; - struct cvmx_pip_stat_inb_pktsx_s cnf71xx; }; union cvmx_pip_stat_inb_pkts_pkndx { @@ -3061,8 +2489,6 @@ union cvmx_pip_stat_inb_pkts_pkndx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xx; - struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xxp1; }; union cvmx_pip_sub_pkind_fcsx { @@ -3074,8 +2500,6 @@ union cvmx_pip_sub_pkind_fcsx { uint64_t port_bit:64; #endif } s; - struct cvmx_pip_sub_pkind_fcsx_s cn68xx; - struct cvmx_pip_sub_pkind_fcsx_s cn68xxp1; }; union cvmx_pip_tag_incx { @@ -3089,24 +2513,6 @@ union cvmx_pip_tag_incx { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_pip_tag_incx_s cn30xx; - struct cvmx_pip_tag_incx_s cn31xx; - 
struct cvmx_pip_tag_incx_s cn38xx; - struct cvmx_pip_tag_incx_s cn38xxp2; - struct cvmx_pip_tag_incx_s cn50xx; - struct cvmx_pip_tag_incx_s cn52xx; - struct cvmx_pip_tag_incx_s cn52xxp1; - struct cvmx_pip_tag_incx_s cn56xx; - struct cvmx_pip_tag_incx_s cn56xxp1; - struct cvmx_pip_tag_incx_s cn58xx; - struct cvmx_pip_tag_incx_s cn58xxp1; - struct cvmx_pip_tag_incx_s cn61xx; - struct cvmx_pip_tag_incx_s cn63xx; - struct cvmx_pip_tag_incx_s cn63xxp1; - struct cvmx_pip_tag_incx_s cn66xx; - struct cvmx_pip_tag_incx_s cn68xx; - struct cvmx_pip_tag_incx_s cn68xxp1; - struct cvmx_pip_tag_incx_s cnf71xx; }; union cvmx_pip_tag_mask { @@ -3120,24 +2526,6 @@ union cvmx_pip_tag_mask { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pip_tag_mask_s cn30xx; - struct cvmx_pip_tag_mask_s cn31xx; - struct cvmx_pip_tag_mask_s cn38xx; - struct cvmx_pip_tag_mask_s cn38xxp2; - struct cvmx_pip_tag_mask_s cn50xx; - struct cvmx_pip_tag_mask_s cn52xx; - struct cvmx_pip_tag_mask_s cn52xxp1; - struct cvmx_pip_tag_mask_s cn56xx; - struct cvmx_pip_tag_mask_s cn56xxp1; - struct cvmx_pip_tag_mask_s cn58xx; - struct cvmx_pip_tag_mask_s cn58xxp1; - struct cvmx_pip_tag_mask_s cn61xx; - struct cvmx_pip_tag_mask_s cn63xx; - struct cvmx_pip_tag_mask_s cn63xxp1; - struct cvmx_pip_tag_mask_s cn66xx; - struct cvmx_pip_tag_mask_s cn68xx; - struct cvmx_pip_tag_mask_s cn68xxp1; - struct cvmx_pip_tag_mask_s cnf71xx; }; union cvmx_pip_tag_secret { @@ -3153,24 +2541,6 @@ union cvmx_pip_tag_secret { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pip_tag_secret_s cn30xx; - struct cvmx_pip_tag_secret_s cn31xx; - struct cvmx_pip_tag_secret_s cn38xx; - struct cvmx_pip_tag_secret_s cn38xxp2; - struct cvmx_pip_tag_secret_s cn50xx; - struct cvmx_pip_tag_secret_s cn52xx; - struct cvmx_pip_tag_secret_s cn52xxp1; - struct cvmx_pip_tag_secret_s cn56xx; - struct cvmx_pip_tag_secret_s cn56xxp1; - struct cvmx_pip_tag_secret_s cn58xx; - struct cvmx_pip_tag_secret_s cn58xxp1; - struct cvmx_pip_tag_secret_s cn61xx; - struct cvmx_pip_tag_secret_s cn63xx; - struct cvmx_pip_tag_secret_s cn63xxp1; - struct cvmx_pip_tag_secret_s cn66xx; - struct cvmx_pip_tag_secret_s cn68xx; - struct cvmx_pip_tag_secret_s cn68xxp1; - struct cvmx_pip_tag_secret_s cnf71xx; }; union cvmx_pip_todo_entry { @@ -3186,24 +2556,6 @@ union cvmx_pip_todo_entry { uint64_t val:1; #endif } s; - struct cvmx_pip_todo_entry_s cn30xx; - struct cvmx_pip_todo_entry_s cn31xx; - struct cvmx_pip_todo_entry_s cn38xx; - struct cvmx_pip_todo_entry_s cn38xxp2; - struct cvmx_pip_todo_entry_s cn50xx; - struct cvmx_pip_todo_entry_s cn52xx; - struct cvmx_pip_todo_entry_s cn52xxp1; - struct cvmx_pip_todo_entry_s cn56xx; - struct cvmx_pip_todo_entry_s cn56xxp1; - struct cvmx_pip_todo_entry_s cn58xx; - struct cvmx_pip_todo_entry_s cn58xxp1; - struct cvmx_pip_todo_entry_s cn61xx; - struct cvmx_pip_todo_entry_s cn63xx; - struct cvmx_pip_todo_entry_s cn63xxp1; - struct cvmx_pip_todo_entry_s cn66xx; - struct cvmx_pip_todo_entry_s cn68xx; - struct cvmx_pip_todo_entry_s cn68xxp1; - struct cvmx_pip_todo_entry_s cnf71xx; }; union cvmx_pip_vlan_etypesx { @@ -3221,10 +2573,6 @@ union cvmx_pip_vlan_etypesx { uint64_t type3:16; #endif } s; - struct cvmx_pip_vlan_etypesx_s cn61xx; - struct cvmx_pip_vlan_etypesx_s cn66xx; - struct cvmx_pip_vlan_etypesx_s cn68xx; - struct cvmx_pip_vlan_etypesx_s cnf71xx; }; union cvmx_pip_xstat0_prtx { @@ -3238,9 +2586,6 @@ union cvmx_pip_xstat0_prtx { uint64_t drp_pkts:32; #endif } s; - struct cvmx_pip_xstat0_prtx_s cn63xx; - struct cvmx_pip_xstat0_prtx_s cn63xxp1; - struct 
cvmx_pip_xstat0_prtx_s cn66xx; }; union cvmx_pip_xstat10_prtx { @@ -3254,9 +2599,6 @@ union cvmx_pip_xstat10_prtx { uint64_t bcast:32; #endif } s; - struct cvmx_pip_xstat10_prtx_s cn63xx; - struct cvmx_pip_xstat10_prtx_s cn63xxp1; - struct cvmx_pip_xstat10_prtx_s cn66xx; }; union cvmx_pip_xstat11_prtx { @@ -3270,9 +2612,6 @@ union cvmx_pip_xstat11_prtx { uint64_t bcast:32; #endif } s; - struct cvmx_pip_xstat11_prtx_s cn63xx; - struct cvmx_pip_xstat11_prtx_s cn63xxp1; - struct cvmx_pip_xstat11_prtx_s cn66xx; }; union cvmx_pip_xstat1_prtx { @@ -3286,9 +2625,6 @@ union cvmx_pip_xstat1_prtx { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_pip_xstat1_prtx_s cn63xx; - struct cvmx_pip_xstat1_prtx_s cn63xxp1; - struct cvmx_pip_xstat1_prtx_s cn66xx; }; union cvmx_pip_xstat2_prtx { @@ -3302,9 +2638,6 @@ union cvmx_pip_xstat2_prtx { uint64_t pkts:32; #endif } s; - struct cvmx_pip_xstat2_prtx_s cn63xx; - struct cvmx_pip_xstat2_prtx_s cn63xxp1; - struct cvmx_pip_xstat2_prtx_s cn66xx; }; union cvmx_pip_xstat3_prtx { @@ -3318,9 +2651,6 @@ union cvmx_pip_xstat3_prtx { uint64_t bcst:32; #endif } s; - struct cvmx_pip_xstat3_prtx_s cn63xx; - struct cvmx_pip_xstat3_prtx_s cn63xxp1; - struct cvmx_pip_xstat3_prtx_s cn66xx; }; union cvmx_pip_xstat4_prtx { @@ -3334,9 +2664,6 @@ union cvmx_pip_xstat4_prtx { uint64_t h65to127:32; #endif } s; - struct cvmx_pip_xstat4_prtx_s cn63xx; - struct cvmx_pip_xstat4_prtx_s cn63xxp1; - struct cvmx_pip_xstat4_prtx_s cn66xx; }; union cvmx_pip_xstat5_prtx { @@ -3350,9 +2677,6 @@ union cvmx_pip_xstat5_prtx { uint64_t h256to511:32; #endif } s; - struct cvmx_pip_xstat5_prtx_s cn63xx; - struct cvmx_pip_xstat5_prtx_s cn63xxp1; - struct cvmx_pip_xstat5_prtx_s cn66xx; }; union cvmx_pip_xstat6_prtx { @@ -3366,9 +2690,6 @@ union cvmx_pip_xstat6_prtx { uint64_t h1024to1518:32; #endif } s; - struct cvmx_pip_xstat6_prtx_s cn63xx; - struct cvmx_pip_xstat6_prtx_s cn63xxp1; - struct cvmx_pip_xstat6_prtx_s cn66xx; }; union cvmx_pip_xstat7_prtx { @@ -3382,9 +2703,6 @@ union cvmx_pip_xstat7_prtx { uint64_t fcs:32; #endif } s; - struct cvmx_pip_xstat7_prtx_s cn63xx; - struct cvmx_pip_xstat7_prtx_s cn63xxp1; - struct cvmx_pip_xstat7_prtx_s cn66xx; }; union cvmx_pip_xstat8_prtx { @@ -3398,9 +2716,6 @@ union cvmx_pip_xstat8_prtx { uint64_t frag:32; #endif } s; - struct cvmx_pip_xstat8_prtx_s cn63xx; - struct cvmx_pip_xstat8_prtx_s cn63xxp1; - struct cvmx_pip_xstat8_prtx_s cn66xx; }; union cvmx_pip_xstat9_prtx { @@ -3414,9 +2729,6 @@ union cvmx_pip_xstat9_prtx { uint64_t jabber:32; #endif } s; - struct cvmx_pip_xstat9_prtx_s cn63xx; - struct cvmx_pip_xstat9_prtx_s cn63xxp1; - struct cvmx_pip_xstat9_prtx_s cn66xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-pko-defs.h b/arch/mips/include/asm/octeon/cvmx-pko-defs.h index 87c3b970cad4..7e14c0d328f1 100644 --- a/arch/mips/include/asm/octeon/cvmx-pko-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-pko-defs.h @@ -97,24 +97,6 @@ union cvmx_pko_mem_count0 { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pko_mem_count0_s cn30xx; - struct cvmx_pko_mem_count0_s cn31xx; - struct cvmx_pko_mem_count0_s cn38xx; - struct cvmx_pko_mem_count0_s cn38xxp2; - struct cvmx_pko_mem_count0_s cn50xx; - struct cvmx_pko_mem_count0_s cn52xx; - struct cvmx_pko_mem_count0_s cn52xxp1; - struct cvmx_pko_mem_count0_s cn56xx; - struct cvmx_pko_mem_count0_s cn56xxp1; - struct cvmx_pko_mem_count0_s cn58xx; - struct cvmx_pko_mem_count0_s cn58xxp1; - struct cvmx_pko_mem_count0_s cn61xx; - struct cvmx_pko_mem_count0_s cn63xx; - struct cvmx_pko_mem_count0_s cn63xxp1; 
- struct cvmx_pko_mem_count0_s cn66xx; - struct cvmx_pko_mem_count0_s cn68xx; - struct cvmx_pko_mem_count0_s cn68xxp1; - struct cvmx_pko_mem_count0_s cnf71xx; }; union cvmx_pko_mem_count1 { @@ -128,24 +110,6 @@ union cvmx_pko_mem_count1 { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_pko_mem_count1_s cn30xx; - struct cvmx_pko_mem_count1_s cn31xx; - struct cvmx_pko_mem_count1_s cn38xx; - struct cvmx_pko_mem_count1_s cn38xxp2; - struct cvmx_pko_mem_count1_s cn50xx; - struct cvmx_pko_mem_count1_s cn52xx; - struct cvmx_pko_mem_count1_s cn52xxp1; - struct cvmx_pko_mem_count1_s cn56xx; - struct cvmx_pko_mem_count1_s cn56xxp1; - struct cvmx_pko_mem_count1_s cn58xx; - struct cvmx_pko_mem_count1_s cn58xxp1; - struct cvmx_pko_mem_count1_s cn61xx; - struct cvmx_pko_mem_count1_s cn63xx; - struct cvmx_pko_mem_count1_s cn63xxp1; - struct cvmx_pko_mem_count1_s cn66xx; - struct cvmx_pko_mem_count1_s cn68xx; - struct cvmx_pko_mem_count1_s cn68xxp1; - struct cvmx_pko_mem_count1_s cnf71xx; }; union cvmx_pko_mem_debug0 { @@ -163,24 +127,6 @@ union cvmx_pko_mem_debug0 { uint64_t fau:28; #endif } s; - struct cvmx_pko_mem_debug0_s cn30xx; - struct cvmx_pko_mem_debug0_s cn31xx; - struct cvmx_pko_mem_debug0_s cn38xx; - struct cvmx_pko_mem_debug0_s cn38xxp2; - struct cvmx_pko_mem_debug0_s cn50xx; - struct cvmx_pko_mem_debug0_s cn52xx; - struct cvmx_pko_mem_debug0_s cn52xxp1; - struct cvmx_pko_mem_debug0_s cn56xx; - struct cvmx_pko_mem_debug0_s cn56xxp1; - struct cvmx_pko_mem_debug0_s cn58xx; - struct cvmx_pko_mem_debug0_s cn58xxp1; - struct cvmx_pko_mem_debug0_s cn61xx; - struct cvmx_pko_mem_debug0_s cn63xx; - struct cvmx_pko_mem_debug0_s cn63xxp1; - struct cvmx_pko_mem_debug0_s cn66xx; - struct cvmx_pko_mem_debug0_s cn68xx; - struct cvmx_pko_mem_debug0_s cn68xxp1; - struct cvmx_pko_mem_debug0_s cnf71xx; }; union cvmx_pko_mem_debug1 { @@ -200,24 +146,6 @@ union cvmx_pko_mem_debug1 { uint64_t i:1; #endif } s; - struct cvmx_pko_mem_debug1_s cn30xx; - struct cvmx_pko_mem_debug1_s cn31xx; - struct cvmx_pko_mem_debug1_s cn38xx; - struct cvmx_pko_mem_debug1_s cn38xxp2; - struct cvmx_pko_mem_debug1_s cn50xx; - struct cvmx_pko_mem_debug1_s cn52xx; - struct cvmx_pko_mem_debug1_s cn52xxp1; - struct cvmx_pko_mem_debug1_s cn56xx; - struct cvmx_pko_mem_debug1_s cn56xxp1; - struct cvmx_pko_mem_debug1_s cn58xx; - struct cvmx_pko_mem_debug1_s cn58xxp1; - struct cvmx_pko_mem_debug1_s cn61xx; - struct cvmx_pko_mem_debug1_s cn63xx; - struct cvmx_pko_mem_debug1_s cn63xxp1; - struct cvmx_pko_mem_debug1_s cn66xx; - struct cvmx_pko_mem_debug1_s cn68xx; - struct cvmx_pko_mem_debug1_s cn68xxp1; - struct cvmx_pko_mem_debug1_s cnf71xx; }; union cvmx_pko_mem_debug10 { @@ -242,9 +170,6 @@ union cvmx_pko_mem_debug10 { uint64_t fau:28; #endif } cn30xx; - struct cvmx_pko_mem_debug10_cn30xx cn31xx; - struct cvmx_pko_mem_debug10_cn30xx cn38xx; - struct cvmx_pko_mem_debug10_cn30xx cn38xxp2; struct cvmx_pko_mem_debug10_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_49_63:15; @@ -258,19 +183,6 @@ union cvmx_pko_mem_debug10 { uint64_t reserved_49_63:15; #endif } cn50xx; - struct cvmx_pko_mem_debug10_cn50xx cn52xx; - struct cvmx_pko_mem_debug10_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug10_cn50xx cn56xx; - struct cvmx_pko_mem_debug10_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug10_cn50xx cn58xx; - struct cvmx_pko_mem_debug10_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug10_cn50xx cn61xx; - struct cvmx_pko_mem_debug10_cn50xx cn63xx; - struct cvmx_pko_mem_debug10_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug10_cn50xx cn66xx; - struct 
cvmx_pko_mem_debug10_cn50xx cn68xx; - struct cvmx_pko_mem_debug10_cn50xx cn68xxp1; - struct cvmx_pko_mem_debug10_cn50xx cnf71xx; }; union cvmx_pko_mem_debug11 { @@ -305,9 +217,6 @@ union cvmx_pko_mem_debug11 { uint64_t i:1; #endif } cn30xx; - struct cvmx_pko_mem_debug11_cn30xx cn31xx; - struct cvmx_pko_mem_debug11_cn30xx cn38xx; - struct cvmx_pko_mem_debug11_cn30xx cn38xxp2; struct cvmx_pko_mem_debug11_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_23_63:41; @@ -329,19 +238,6 @@ union cvmx_pko_mem_debug11 { uint64_t reserved_23_63:41; #endif } cn50xx; - struct cvmx_pko_mem_debug11_cn50xx cn52xx; - struct cvmx_pko_mem_debug11_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug11_cn50xx cn56xx; - struct cvmx_pko_mem_debug11_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug11_cn50xx cn58xx; - struct cvmx_pko_mem_debug11_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug11_cn50xx cn61xx; - struct cvmx_pko_mem_debug11_cn50xx cn63xx; - struct cvmx_pko_mem_debug11_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug11_cn50xx cn66xx; - struct cvmx_pko_mem_debug11_cn50xx cn68xx; - struct cvmx_pko_mem_debug11_cn50xx cn68xxp1; - struct cvmx_pko_mem_debug11_cn50xx cnf71xx; }; union cvmx_pko_mem_debug12 { @@ -360,9 +256,6 @@ union cvmx_pko_mem_debug12 { uint64_t data:64; #endif } cn30xx; - struct cvmx_pko_mem_debug12_cn30xx cn31xx; - struct cvmx_pko_mem_debug12_cn30xx cn38xx; - struct cvmx_pko_mem_debug12_cn30xx cn38xxp2; struct cvmx_pko_mem_debug12_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t fau:28; @@ -376,16 +269,6 @@ union cvmx_pko_mem_debug12 { uint64_t fau:28; #endif } cn50xx; - struct cvmx_pko_mem_debug12_cn50xx cn52xx; - struct cvmx_pko_mem_debug12_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug12_cn50xx cn56xx; - struct cvmx_pko_mem_debug12_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug12_cn50xx cn58xx; - struct cvmx_pko_mem_debug12_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug12_cn50xx cn61xx; - struct cvmx_pko_mem_debug12_cn50xx cn63xx; - struct cvmx_pko_mem_debug12_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug12_cn50xx cn66xx; struct cvmx_pko_mem_debug12_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t state:64; @@ -393,8 +276,6 @@ union cvmx_pko_mem_debug12 { uint64_t state:64; #endif } cn68xx; - struct cvmx_pko_mem_debug12_cn68xx cn68xxp1; - struct cvmx_pko_mem_debug12_cn50xx cnf71xx; }; union cvmx_pko_mem_debug13 { @@ -419,9 +300,6 @@ union cvmx_pko_mem_debug13 { uint64_t reserved_51_63:13; #endif } cn30xx; - struct cvmx_pko_mem_debug13_cn30xx cn31xx; - struct cvmx_pko_mem_debug13_cn30xx cn38xx; - struct cvmx_pko_mem_debug13_cn30xx cn38xxp2; struct cvmx_pko_mem_debug13_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t i:1; @@ -437,16 +315,6 @@ union cvmx_pko_mem_debug13 { uint64_t i:1; #endif } cn50xx; - struct cvmx_pko_mem_debug13_cn50xx cn52xx; - struct cvmx_pko_mem_debug13_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug13_cn50xx cn56xx; - struct cvmx_pko_mem_debug13_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug13_cn50xx cn58xx; - struct cvmx_pko_mem_debug13_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug13_cn50xx cn61xx; - struct cvmx_pko_mem_debug13_cn50xx cn63xx; - struct cvmx_pko_mem_debug13_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug13_cn50xx cn66xx; struct cvmx_pko_mem_debug13_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t state:64; @@ -454,8 +322,6 @@ union cvmx_pko_mem_debug13 { uint64_t state:64; #endif } cn68xx; - struct cvmx_pko_mem_debug13_cn68xx cn68xxp1; - struct cvmx_pko_mem_debug13_cn50xx cnf71xx; }; union cvmx_pko_mem_debug14 { @@ -476,9 +342,6 @@ union cvmx_pko_mem_debug14 { uint64_t 
reserved_17_63:47; #endif } cn30xx; - struct cvmx_pko_mem_debug14_cn30xx cn31xx; - struct cvmx_pko_mem_debug14_cn30xx cn38xx; - struct cvmx_pko_mem_debug14_cn30xx cn38xxp2; struct cvmx_pko_mem_debug14_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t data:64; @@ -486,14 +349,6 @@ union cvmx_pko_mem_debug14 { uint64_t data:64; #endif } cn52xx; - struct cvmx_pko_mem_debug14_cn52xx cn52xxp1; - struct cvmx_pko_mem_debug14_cn52xx cn56xx; - struct cvmx_pko_mem_debug14_cn52xx cn56xxp1; - struct cvmx_pko_mem_debug14_cn52xx cn61xx; - struct cvmx_pko_mem_debug14_cn52xx cn63xx; - struct cvmx_pko_mem_debug14_cn52xx cn63xxp1; - struct cvmx_pko_mem_debug14_cn52xx cn66xx; - struct cvmx_pko_mem_debug14_cn52xx cnf71xx; }; union cvmx_pko_mem_debug2 { @@ -513,24 +368,6 @@ union cvmx_pko_mem_debug2 { uint64_t i:1; #endif } s; - struct cvmx_pko_mem_debug2_s cn30xx; - struct cvmx_pko_mem_debug2_s cn31xx; - struct cvmx_pko_mem_debug2_s cn38xx; - struct cvmx_pko_mem_debug2_s cn38xxp2; - struct cvmx_pko_mem_debug2_s cn50xx; - struct cvmx_pko_mem_debug2_s cn52xx; - struct cvmx_pko_mem_debug2_s cn52xxp1; - struct cvmx_pko_mem_debug2_s cn56xx; - struct cvmx_pko_mem_debug2_s cn56xxp1; - struct cvmx_pko_mem_debug2_s cn58xx; - struct cvmx_pko_mem_debug2_s cn58xxp1; - struct cvmx_pko_mem_debug2_s cn61xx; - struct cvmx_pko_mem_debug2_s cn63xx; - struct cvmx_pko_mem_debug2_s cn63xxp1; - struct cvmx_pko_mem_debug2_s cn66xx; - struct cvmx_pko_mem_debug2_s cn68xx; - struct cvmx_pko_mem_debug2_s cn68xxp1; - struct cvmx_pko_mem_debug2_s cnf71xx; }; union cvmx_pko_mem_debug3 { @@ -557,9 +394,6 @@ union cvmx_pko_mem_debug3 { uint64_t i:1; #endif } cn30xx; - struct cvmx_pko_mem_debug3_cn30xx cn31xx; - struct cvmx_pko_mem_debug3_cn30xx cn38xx; - struct cvmx_pko_mem_debug3_cn30xx cn38xxp2; struct cvmx_pko_mem_debug3_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t data:64; @@ -567,19 +401,6 @@ union cvmx_pko_mem_debug3 { uint64_t data:64; #endif } cn50xx; - struct cvmx_pko_mem_debug3_cn50xx cn52xx; - struct cvmx_pko_mem_debug3_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug3_cn50xx cn56xx; - struct cvmx_pko_mem_debug3_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug3_cn50xx cn58xx; - struct cvmx_pko_mem_debug3_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug3_cn50xx cn61xx; - struct cvmx_pko_mem_debug3_cn50xx cn63xx; - struct cvmx_pko_mem_debug3_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug3_cn50xx cn66xx; - struct cvmx_pko_mem_debug3_cn50xx cn68xx; - struct cvmx_pko_mem_debug3_cn50xx cn68xxp1; - struct cvmx_pko_mem_debug3_cn50xx cnf71xx; }; union cvmx_pko_mem_debug4 { @@ -598,9 +419,6 @@ union cvmx_pko_mem_debug4 { uint64_t data:64; #endif } cn30xx; - struct cvmx_pko_mem_debug4_cn30xx cn31xx; - struct cvmx_pko_mem_debug4_cn30xx cn38xx; - struct cvmx_pko_mem_debug4_cn30xx cn38xxp2; struct cvmx_pko_mem_debug4_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t cmnd_segs:3; @@ -673,18 +491,6 @@ union cvmx_pko_mem_debug4 { uint64_t curr_siz:8; #endif } cn52xx; - struct cvmx_pko_mem_debug4_cn52xx cn52xxp1; - struct cvmx_pko_mem_debug4_cn52xx cn56xx; - struct cvmx_pko_mem_debug4_cn52xx cn56xxp1; - struct cvmx_pko_mem_debug4_cn50xx cn58xx; - struct cvmx_pko_mem_debug4_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug4_cn52xx cn61xx; - struct cvmx_pko_mem_debug4_cn52xx cn63xx; - struct cvmx_pko_mem_debug4_cn52xx cn63xxp1; - struct cvmx_pko_mem_debug4_cn52xx cn66xx; - struct cvmx_pko_mem_debug4_cn52xx cn68xx; - struct cvmx_pko_mem_debug4_cn52xx cn68xxp1; - struct cvmx_pko_mem_debug4_cn52xx cnf71xx; }; union cvmx_pko_mem_debug5 { @@ -739,9 +545,6 @@ union 
cvmx_pko_mem_debug5 { uint64_t dwri_mod:1; #endif } cn30xx; - struct cvmx_pko_mem_debug5_cn30xx cn31xx; - struct cvmx_pko_mem_debug5_cn30xx cn38xx; - struct cvmx_pko_mem_debug5_cn30xx cn38xxp2; struct cvmx_pko_mem_debug5_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t curr_ptr:29; @@ -768,11 +571,6 @@ union cvmx_pko_mem_debug5 { uint64_t reserved_54_63:10; #endif } cn52xx; - struct cvmx_pko_mem_debug5_cn52xx cn52xxp1; - struct cvmx_pko_mem_debug5_cn52xx cn56xx; - struct cvmx_pko_mem_debug5_cn52xx cn56xxp1; - struct cvmx_pko_mem_debug5_cn50xx cn58xx; - struct cvmx_pko_mem_debug5_cn50xx cn58xxp1; struct cvmx_pko_mem_debug5_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_56_63:8; @@ -790,9 +588,6 @@ union cvmx_pko_mem_debug5 { uint64_t reserved_56_63:8; #endif } cn61xx; - struct cvmx_pko_mem_debug5_cn61xx cn63xx; - struct cvmx_pko_mem_debug5_cn61xx cn63xxp1; - struct cvmx_pko_mem_debug5_cn61xx cn66xx; struct cvmx_pko_mem_debug5_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_57_63:7; @@ -812,8 +607,6 @@ union cvmx_pko_mem_debug5 { uint64_t reserved_57_63:7; #endif } cn68xx; - struct cvmx_pko_mem_debug5_cn68xx cn68xxp1; - struct cvmx_pko_mem_debug5_cn61xx cnf71xx; }; union cvmx_pko_mem_debug6 { @@ -866,9 +659,6 @@ union cvmx_pko_mem_debug6 { uint64_t reserved_11_63:53; #endif } cn30xx; - struct cvmx_pko_mem_debug6_cn30xx cn31xx; - struct cvmx_pko_mem_debug6_cn30xx cn38xx; - struct cvmx_pko_mem_debug6_cn30xx cn38xxp2; struct cvmx_pko_mem_debug6_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -909,18 +699,6 @@ union cvmx_pko_mem_debug6 { uint64_t reserved_37_63:27; #endif } cn52xx; - struct cvmx_pko_mem_debug6_cn52xx cn52xxp1; - struct cvmx_pko_mem_debug6_cn52xx cn56xx; - struct cvmx_pko_mem_debug6_cn52xx cn56xxp1; - struct cvmx_pko_mem_debug6_cn50xx cn58xx; - struct cvmx_pko_mem_debug6_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug6_cn52xx cn61xx; - struct cvmx_pko_mem_debug6_cn52xx cn63xx; - struct cvmx_pko_mem_debug6_cn52xx cn63xxp1; - struct cvmx_pko_mem_debug6_cn52xx cn66xx; - struct cvmx_pko_mem_debug6_cn52xx cn68xx; - struct cvmx_pko_mem_debug6_cn52xx cn68xxp1; - struct cvmx_pko_mem_debug6_cn52xx cnf71xx; }; union cvmx_pko_mem_debug7 { @@ -945,9 +723,6 @@ union cvmx_pko_mem_debug7 { uint64_t reserved_58_63:6; #endif } cn30xx; - struct cvmx_pko_mem_debug7_cn30xx cn31xx; - struct cvmx_pko_mem_debug7_cn30xx cn38xx; - struct cvmx_pko_mem_debug7_cn30xx cn38xxp2; struct cvmx_pko_mem_debug7_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t qos:5; @@ -965,16 +740,6 @@ union cvmx_pko_mem_debug7 { uint64_t qos:5; #endif } cn50xx; - struct cvmx_pko_mem_debug7_cn50xx cn52xx; - struct cvmx_pko_mem_debug7_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug7_cn50xx cn56xx; - struct cvmx_pko_mem_debug7_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug7_cn50xx cn58xx; - struct cvmx_pko_mem_debug7_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug7_cn50xx cn61xx; - struct cvmx_pko_mem_debug7_cn50xx cn63xx; - struct cvmx_pko_mem_debug7_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug7_cn50xx cn66xx; struct cvmx_pko_mem_debug7_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t qos:3; @@ -992,8 +757,6 @@ union cvmx_pko_mem_debug7 { uint64_t qos:3; #endif } cn68xx; - struct cvmx_pko_mem_debug7_cn68xx cn68xxp1; - struct cvmx_pko_mem_debug7_cn50xx cnf71xx; }; union cvmx_pko_mem_debug8 { @@ -1028,9 +791,6 @@ union cvmx_pko_mem_debug8 { uint64_t qos:5; #endif } cn30xx; - struct cvmx_pko_mem_debug8_cn30xx cn31xx; - struct cvmx_pko_mem_debug8_cn30xx cn38xx; - struct cvmx_pko_mem_debug8_cn30xx cn38xxp2; struct 
cvmx_pko_mem_debug8_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -1073,11 +833,6 @@ union cvmx_pko_mem_debug8 { uint64_t reserved_29_63:35; #endif } cn52xx; - struct cvmx_pko_mem_debug8_cn52xx cn52xxp1; - struct cvmx_pko_mem_debug8_cn52xx cn56xx; - struct cvmx_pko_mem_debug8_cn52xx cn56xxp1; - struct cvmx_pko_mem_debug8_cn50xx cn58xx; - struct cvmx_pko_mem_debug8_cn50xx cn58xxp1; struct cvmx_pko_mem_debug8_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_42_63:22; @@ -1107,9 +862,6 @@ union cvmx_pko_mem_debug8 { uint64_t reserved_42_63:22; #endif } cn61xx; - struct cvmx_pko_mem_debug8_cn52xx cn63xx; - struct cvmx_pko_mem_debug8_cn52xx cn63xxp1; - struct cvmx_pko_mem_debug8_cn61xx cn66xx; struct cvmx_pko_mem_debug8_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_37_63:27; @@ -1133,8 +885,6 @@ union cvmx_pko_mem_debug8 { uint64_t reserved_37_63:27; #endif } cn68xx; - struct cvmx_pko_mem_debug8_cn68xx cn68xxp1; - struct cvmx_pko_mem_debug8_cn61xx cnf71xx; }; union cvmx_pko_mem_debug9 { @@ -1167,7 +917,6 @@ union cvmx_pko_mem_debug9 { uint64_t reserved_28_63:36; #endif } cn30xx; - struct cvmx_pko_mem_debug9_cn30xx cn31xx; struct cvmx_pko_mem_debug9_cn38xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -1187,7 +936,6 @@ union cvmx_pko_mem_debug9 { uint64_t reserved_28_63:36; #endif } cn38xx; - struct cvmx_pko_mem_debug9_cn38xx cn38xxp2; struct cvmx_pko_mem_debug9_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_49_63:15; @@ -1201,19 +949,6 @@ union cvmx_pko_mem_debug9 { uint64_t reserved_49_63:15; #endif } cn50xx; - struct cvmx_pko_mem_debug9_cn50xx cn52xx; - struct cvmx_pko_mem_debug9_cn50xx cn52xxp1; - struct cvmx_pko_mem_debug9_cn50xx cn56xx; - struct cvmx_pko_mem_debug9_cn50xx cn56xxp1; - struct cvmx_pko_mem_debug9_cn50xx cn58xx; - struct cvmx_pko_mem_debug9_cn50xx cn58xxp1; - struct cvmx_pko_mem_debug9_cn50xx cn61xx; - struct cvmx_pko_mem_debug9_cn50xx cn63xx; - struct cvmx_pko_mem_debug9_cn50xx cn63xxp1; - struct cvmx_pko_mem_debug9_cn50xx cn66xx; - struct cvmx_pko_mem_debug9_cn50xx cn68xx; - struct cvmx_pko_mem_debug9_cn50xx cn68xxp1; - struct cvmx_pko_mem_debug9_cn50xx cnf71xx; }; union cvmx_pko_mem_iport_ptrs { @@ -1249,8 +984,6 @@ union cvmx_pko_mem_iport_ptrs { uint64_t reserved_63_63:1; #endif } s; - struct cvmx_pko_mem_iport_ptrs_s cn68xx; - struct cvmx_pko_mem_iport_ptrs_s cn68xxp1; }; union cvmx_pko_mem_iport_qos { @@ -1272,8 +1005,6 @@ union cvmx_pko_mem_iport_qos { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_pko_mem_iport_qos_s cn68xx; - struct cvmx_pko_mem_iport_qos_s cn68xxp1; }; union cvmx_pko_mem_iqueue_ptrs { @@ -1303,8 +1034,6 @@ union cvmx_pko_mem_iqueue_ptrs { uint64_t s_tail:1; #endif } s; - struct cvmx_pko_mem_iqueue_ptrs_s cn68xx; - struct cvmx_pko_mem_iqueue_ptrs_s cn68xxp1; }; union cvmx_pko_mem_iqueue_qos { @@ -1324,8 +1053,6 @@ union cvmx_pko_mem_iqueue_qos { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_pko_mem_iqueue_qos_s cn68xx; - struct cvmx_pko_mem_iqueue_qos_s cn68xxp1; }; union cvmx_pko_mem_port_ptrs { @@ -1349,15 +1076,6 @@ union cvmx_pko_mem_port_ptrs { uint64_t reserved_62_63:2; #endif } s; - struct cvmx_pko_mem_port_ptrs_s cn52xx; - struct cvmx_pko_mem_port_ptrs_s cn52xxp1; - struct cvmx_pko_mem_port_ptrs_s cn56xx; - struct cvmx_pko_mem_port_ptrs_s cn56xxp1; - struct cvmx_pko_mem_port_ptrs_s cn61xx; - struct cvmx_pko_mem_port_ptrs_s cn63xx; - struct cvmx_pko_mem_port_ptrs_s cn63xxp1; - struct cvmx_pko_mem_port_ptrs_s cn66xx; - struct cvmx_pko_mem_port_ptrs_s cnf71xx; }; union 
cvmx_pko_mem_port_qos { @@ -1377,15 +1095,6 @@ union cvmx_pko_mem_port_qos { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_pko_mem_port_qos_s cn52xx; - struct cvmx_pko_mem_port_qos_s cn52xxp1; - struct cvmx_pko_mem_port_qos_s cn56xx; - struct cvmx_pko_mem_port_qos_s cn56xxp1; - struct cvmx_pko_mem_port_qos_s cn61xx; - struct cvmx_pko_mem_port_qos_s cn63xx; - struct cvmx_pko_mem_port_qos_s cn63xxp1; - struct cvmx_pko_mem_port_qos_s cn66xx; - struct cvmx_pko_mem_port_qos_s cnf71xx; }; union cvmx_pko_mem_port_rate0 { @@ -1420,16 +1129,6 @@ union cvmx_pko_mem_port_rate0 { uint64_t reserved_51_63:13; #endif } cn52xx; - struct cvmx_pko_mem_port_rate0_cn52xx cn52xxp1; - struct cvmx_pko_mem_port_rate0_cn52xx cn56xx; - struct cvmx_pko_mem_port_rate0_cn52xx cn56xxp1; - struct cvmx_pko_mem_port_rate0_cn52xx cn61xx; - struct cvmx_pko_mem_port_rate0_cn52xx cn63xx; - struct cvmx_pko_mem_port_rate0_cn52xx cn63xxp1; - struct cvmx_pko_mem_port_rate0_cn52xx cn66xx; - struct cvmx_pko_mem_port_rate0_s cn68xx; - struct cvmx_pko_mem_port_rate0_s cn68xxp1; - struct cvmx_pko_mem_port_rate0_cn52xx cnf71xx; }; union cvmx_pko_mem_port_rate1 { @@ -1460,16 +1159,6 @@ union cvmx_pko_mem_port_rate1 { uint64_t reserved_32_63:32; #endif } cn52xx; - struct cvmx_pko_mem_port_rate1_cn52xx cn52xxp1; - struct cvmx_pko_mem_port_rate1_cn52xx cn56xx; - struct cvmx_pko_mem_port_rate1_cn52xx cn56xxp1; - struct cvmx_pko_mem_port_rate1_cn52xx cn61xx; - struct cvmx_pko_mem_port_rate1_cn52xx cn63xx; - struct cvmx_pko_mem_port_rate1_cn52xx cn63xxp1; - struct cvmx_pko_mem_port_rate1_cn52xx cn66xx; - struct cvmx_pko_mem_port_rate1_s cn68xx; - struct cvmx_pko_mem_port_rate1_s cn68xxp1; - struct cvmx_pko_mem_port_rate1_cn52xx cnf71xx; }; union cvmx_pko_mem_queue_ptrs { @@ -1497,22 +1186,6 @@ union cvmx_pko_mem_queue_ptrs { uint64_t s_tail:1; #endif } s; - struct cvmx_pko_mem_queue_ptrs_s cn30xx; - struct cvmx_pko_mem_queue_ptrs_s cn31xx; - struct cvmx_pko_mem_queue_ptrs_s cn38xx; - struct cvmx_pko_mem_queue_ptrs_s cn38xxp2; - struct cvmx_pko_mem_queue_ptrs_s cn50xx; - struct cvmx_pko_mem_queue_ptrs_s cn52xx; - struct cvmx_pko_mem_queue_ptrs_s cn52xxp1; - struct cvmx_pko_mem_queue_ptrs_s cn56xx; - struct cvmx_pko_mem_queue_ptrs_s cn56xxp1; - struct cvmx_pko_mem_queue_ptrs_s cn58xx; - struct cvmx_pko_mem_queue_ptrs_s cn58xxp1; - struct cvmx_pko_mem_queue_ptrs_s cn61xx; - struct cvmx_pko_mem_queue_ptrs_s cn63xx; - struct cvmx_pko_mem_queue_ptrs_s cn63xxp1; - struct cvmx_pko_mem_queue_ptrs_s cn66xx; - struct cvmx_pko_mem_queue_ptrs_s cnf71xx; }; union cvmx_pko_mem_queue_qos { @@ -1532,22 +1205,6 @@ union cvmx_pko_mem_queue_qos { uint64_t reserved_61_63:3; #endif } s; - struct cvmx_pko_mem_queue_qos_s cn30xx; - struct cvmx_pko_mem_queue_qos_s cn31xx; - struct cvmx_pko_mem_queue_qos_s cn38xx; - struct cvmx_pko_mem_queue_qos_s cn38xxp2; - struct cvmx_pko_mem_queue_qos_s cn50xx; - struct cvmx_pko_mem_queue_qos_s cn52xx; - struct cvmx_pko_mem_queue_qos_s cn52xxp1; - struct cvmx_pko_mem_queue_qos_s cn56xx; - struct cvmx_pko_mem_queue_qos_s cn56xxp1; - struct cvmx_pko_mem_queue_qos_s cn58xx; - struct cvmx_pko_mem_queue_qos_s cn58xxp1; - struct cvmx_pko_mem_queue_qos_s cn61xx; - struct cvmx_pko_mem_queue_qos_s cn63xx; - struct cvmx_pko_mem_queue_qos_s cn63xxp1; - struct cvmx_pko_mem_queue_qos_s cn66xx; - struct cvmx_pko_mem_queue_qos_s cnf71xx; }; union cvmx_pko_mem_throttle_int { @@ -1569,8 +1226,6 @@ union cvmx_pko_mem_throttle_int { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_pko_mem_throttle_int_s cn68xx; - struct 
cvmx_pko_mem_throttle_int_s cn68xxp1; }; union cvmx_pko_mem_throttle_pipe { @@ -1592,8 +1247,6 @@ union cvmx_pko_mem_throttle_pipe { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_pko_mem_throttle_pipe_s cn68xx; - struct cvmx_pko_mem_throttle_pipe_s cn68xxp1; }; union cvmx_pko_reg_bist_result { @@ -1636,9 +1289,6 @@ union cvmx_pko_reg_bist_result { uint64_t reserved_27_63:37; #endif } cn30xx; - struct cvmx_pko_reg_bist_result_cn30xx cn31xx; - struct cvmx_pko_reg_bist_result_cn30xx cn38xx; - struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2; struct cvmx_pko_reg_bist_result_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_33_63:31; @@ -1711,15 +1361,6 @@ union cvmx_pko_reg_bist_result { uint64_t reserved_35_63:29; #endif } cn52xx; - struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1; - struct cvmx_pko_reg_bist_result_cn52xx cn56xx; - struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1; - struct cvmx_pko_reg_bist_result_cn50xx cn58xx; - struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1; - struct cvmx_pko_reg_bist_result_cn52xx cn61xx; - struct cvmx_pko_reg_bist_result_cn52xx cn63xx; - struct cvmx_pko_reg_bist_result_cn52xx cn63xxp1; - struct cvmx_pko_reg_bist_result_cn52xx cn66xx; struct cvmx_pko_reg_bist_result_cn68xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_36_63:28; @@ -1808,7 +1449,6 @@ union cvmx_pko_reg_bist_result { uint64_t reserved_35_63:29; #endif } cn68xxp1; - struct cvmx_pko_reg_bist_result_cn52xx cnf71xx; }; union cvmx_pko_reg_cmd_buf { @@ -1826,24 +1466,6 @@ union cvmx_pko_reg_cmd_buf { uint64_t reserved_23_63:41; #endif } s; - struct cvmx_pko_reg_cmd_buf_s cn30xx; - struct cvmx_pko_reg_cmd_buf_s cn31xx; - struct cvmx_pko_reg_cmd_buf_s cn38xx; - struct cvmx_pko_reg_cmd_buf_s cn38xxp2; - struct cvmx_pko_reg_cmd_buf_s cn50xx; - struct cvmx_pko_reg_cmd_buf_s cn52xx; - struct cvmx_pko_reg_cmd_buf_s cn52xxp1; - struct cvmx_pko_reg_cmd_buf_s cn56xx; - struct cvmx_pko_reg_cmd_buf_s cn56xxp1; - struct cvmx_pko_reg_cmd_buf_s cn58xx; - struct cvmx_pko_reg_cmd_buf_s cn58xxp1; - struct cvmx_pko_reg_cmd_buf_s cn61xx; - struct cvmx_pko_reg_cmd_buf_s cn63xx; - struct cvmx_pko_reg_cmd_buf_s cn63xxp1; - struct cvmx_pko_reg_cmd_buf_s cn66xx; - struct cvmx_pko_reg_cmd_buf_s cn68xx; - struct cvmx_pko_reg_cmd_buf_s cn68xxp1; - struct cvmx_pko_reg_cmd_buf_s cnf71xx; }; union cvmx_pko_reg_crc_ctlx { @@ -1859,10 +1481,6 @@ union cvmx_pko_reg_crc_ctlx { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_pko_reg_crc_ctlx_s cn38xx; - struct cvmx_pko_reg_crc_ctlx_s cn38xxp2; - struct cvmx_pko_reg_crc_ctlx_s cn58xx; - struct cvmx_pko_reg_crc_ctlx_s cn58xxp1; }; union cvmx_pko_reg_crc_enable { @@ -1876,10 +1494,6 @@ union cvmx_pko_reg_crc_enable { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pko_reg_crc_enable_s cn38xx; - struct cvmx_pko_reg_crc_enable_s cn38xxp2; - struct cvmx_pko_reg_crc_enable_s cn58xx; - struct cvmx_pko_reg_crc_enable_s cn58xxp1; }; union cvmx_pko_reg_crc_ivx { @@ -1893,10 +1507,6 @@ union cvmx_pko_reg_crc_ivx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pko_reg_crc_ivx_s cn38xx; - struct cvmx_pko_reg_crc_ivx_s cn38xxp2; - struct cvmx_pko_reg_crc_ivx_s cn58xx; - struct cvmx_pko_reg_crc_ivx_s cn58xxp1; }; union cvmx_pko_reg_debug0 { @@ -1917,23 +1527,6 @@ union cvmx_pko_reg_debug0 { uint64_t reserved_17_63:47; #endif } cn30xx; - struct cvmx_pko_reg_debug0_cn30xx cn31xx; - struct cvmx_pko_reg_debug0_cn30xx cn38xx; - struct cvmx_pko_reg_debug0_cn30xx cn38xxp2; - struct cvmx_pko_reg_debug0_s cn50xx; - struct cvmx_pko_reg_debug0_s cn52xx; - struct 
cvmx_pko_reg_debug0_s cn52xxp1; - struct cvmx_pko_reg_debug0_s cn56xx; - struct cvmx_pko_reg_debug0_s cn56xxp1; - struct cvmx_pko_reg_debug0_s cn58xx; - struct cvmx_pko_reg_debug0_s cn58xxp1; - struct cvmx_pko_reg_debug0_s cn61xx; - struct cvmx_pko_reg_debug0_s cn63xx; - struct cvmx_pko_reg_debug0_s cn63xxp1; - struct cvmx_pko_reg_debug0_s cn66xx; - struct cvmx_pko_reg_debug0_s cn68xx; - struct cvmx_pko_reg_debug0_s cn68xxp1; - struct cvmx_pko_reg_debug0_s cnf71xx; }; union cvmx_pko_reg_debug1 { @@ -1945,20 +1538,6 @@ union cvmx_pko_reg_debug1 { uint64_t asserts:64; #endif } s; - struct cvmx_pko_reg_debug1_s cn50xx; - struct cvmx_pko_reg_debug1_s cn52xx; - struct cvmx_pko_reg_debug1_s cn52xxp1; - struct cvmx_pko_reg_debug1_s cn56xx; - struct cvmx_pko_reg_debug1_s cn56xxp1; - struct cvmx_pko_reg_debug1_s cn58xx; - struct cvmx_pko_reg_debug1_s cn58xxp1; - struct cvmx_pko_reg_debug1_s cn61xx; - struct cvmx_pko_reg_debug1_s cn63xx; - struct cvmx_pko_reg_debug1_s cn63xxp1; - struct cvmx_pko_reg_debug1_s cn66xx; - struct cvmx_pko_reg_debug1_s cn68xx; - struct cvmx_pko_reg_debug1_s cn68xxp1; - struct cvmx_pko_reg_debug1_s cnf71xx; }; union cvmx_pko_reg_debug2 { @@ -1970,20 +1549,6 @@ union cvmx_pko_reg_debug2 { uint64_t asserts:64; #endif } s; - struct cvmx_pko_reg_debug2_s cn50xx; - struct cvmx_pko_reg_debug2_s cn52xx; - struct cvmx_pko_reg_debug2_s cn52xxp1; - struct cvmx_pko_reg_debug2_s cn56xx; - struct cvmx_pko_reg_debug2_s cn56xxp1; - struct cvmx_pko_reg_debug2_s cn58xx; - struct cvmx_pko_reg_debug2_s cn58xxp1; - struct cvmx_pko_reg_debug2_s cn61xx; - struct cvmx_pko_reg_debug2_s cn63xx; - struct cvmx_pko_reg_debug2_s cn63xxp1; - struct cvmx_pko_reg_debug2_s cn66xx; - struct cvmx_pko_reg_debug2_s cn68xx; - struct cvmx_pko_reg_debug2_s cn68xxp1; - struct cvmx_pko_reg_debug2_s cnf71xx; }; union cvmx_pko_reg_debug3 { @@ -1995,20 +1560,6 @@ union cvmx_pko_reg_debug3 { uint64_t asserts:64; #endif } s; - struct cvmx_pko_reg_debug3_s cn50xx; - struct cvmx_pko_reg_debug3_s cn52xx; - struct cvmx_pko_reg_debug3_s cn52xxp1; - struct cvmx_pko_reg_debug3_s cn56xx; - struct cvmx_pko_reg_debug3_s cn56xxp1; - struct cvmx_pko_reg_debug3_s cn58xx; - struct cvmx_pko_reg_debug3_s cn58xxp1; - struct cvmx_pko_reg_debug3_s cn61xx; - struct cvmx_pko_reg_debug3_s cn63xx; - struct cvmx_pko_reg_debug3_s cn63xxp1; - struct cvmx_pko_reg_debug3_s cn66xx; - struct cvmx_pko_reg_debug3_s cn68xx; - struct cvmx_pko_reg_debug3_s cn68xxp1; - struct cvmx_pko_reg_debug3_s cnf71xx; }; union cvmx_pko_reg_debug4 { @@ -2020,8 +1571,6 @@ union cvmx_pko_reg_debug4 { uint64_t asserts:64; #endif } s; - struct cvmx_pko_reg_debug4_s cn68xx; - struct cvmx_pko_reg_debug4_s cn68xxp1; }; union cvmx_pko_reg_engine_inflight { @@ -2090,9 +1639,6 @@ union cvmx_pko_reg_engine_inflight { uint64_t reserved_40_63:24; #endif } cn52xx; - struct cvmx_pko_reg_engine_inflight_cn52xx cn52xxp1; - struct cvmx_pko_reg_engine_inflight_cn52xx cn56xx; - struct cvmx_pko_reg_engine_inflight_cn52xx cn56xxp1; struct cvmx_pko_reg_engine_inflight_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_56_63:8; @@ -2159,11 +1705,6 @@ union cvmx_pko_reg_engine_inflight { uint64_t reserved_48_63:16; #endif } cn63xx; - struct cvmx_pko_reg_engine_inflight_cn63xx cn63xxp1; - struct cvmx_pko_reg_engine_inflight_cn61xx cn66xx; - struct cvmx_pko_reg_engine_inflight_s cn68xx; - struct cvmx_pko_reg_engine_inflight_s cn68xxp1; - struct cvmx_pko_reg_engine_inflight_cn61xx cnf71xx; }; union cvmx_pko_reg_engine_inflight1 { @@ -2183,8 +1724,6 @@ union cvmx_pko_reg_engine_inflight1 { 
uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pko_reg_engine_inflight1_s cn68xx; - struct cvmx_pko_reg_engine_inflight1_s cn68xxp1; }; union cvmx_pko_reg_engine_storagex { @@ -2226,8 +1765,6 @@ union cvmx_pko_reg_engine_storagex { uint64_t engine15:4; #endif } s; - struct cvmx_pko_reg_engine_storagex_s cn68xx; - struct cvmx_pko_reg_engine_storagex_s cn68xxp1; }; union cvmx_pko_reg_engine_thresh { @@ -2250,9 +1787,6 @@ union cvmx_pko_reg_engine_thresh { uint64_t reserved_10_63:54; #endif } cn52xx; - struct cvmx_pko_reg_engine_thresh_cn52xx cn52xxp1; - struct cvmx_pko_reg_engine_thresh_cn52xx cn56xx; - struct cvmx_pko_reg_engine_thresh_cn52xx cn56xxp1; struct cvmx_pko_reg_engine_thresh_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; @@ -2271,11 +1805,6 @@ union cvmx_pko_reg_engine_thresh { uint64_t reserved_12_63:52; #endif } cn63xx; - struct cvmx_pko_reg_engine_thresh_cn63xx cn63xxp1; - struct cvmx_pko_reg_engine_thresh_cn61xx cn66xx; - struct cvmx_pko_reg_engine_thresh_s cn68xx; - struct cvmx_pko_reg_engine_thresh_s cn68xxp1; - struct cvmx_pko_reg_engine_thresh_cn61xx cnf71xx; }; union cvmx_pko_reg_error { @@ -2306,9 +1835,6 @@ union cvmx_pko_reg_error { uint64_t reserved_2_63:62; #endif } cn30xx; - struct cvmx_pko_reg_error_cn30xx cn31xx; - struct cvmx_pko_reg_error_cn30xx cn38xx; - struct cvmx_pko_reg_error_cn30xx cn38xxp2; struct cvmx_pko_reg_error_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_3_63:61; @@ -2322,19 +1848,6 @@ union cvmx_pko_reg_error { uint64_t reserved_3_63:61; #endif } cn50xx; - struct cvmx_pko_reg_error_cn50xx cn52xx; - struct cvmx_pko_reg_error_cn50xx cn52xxp1; - struct cvmx_pko_reg_error_cn50xx cn56xx; - struct cvmx_pko_reg_error_cn50xx cn56xxp1; - struct cvmx_pko_reg_error_cn50xx cn58xx; - struct cvmx_pko_reg_error_cn50xx cn58xxp1; - struct cvmx_pko_reg_error_cn50xx cn61xx; - struct cvmx_pko_reg_error_cn50xx cn63xx; - struct cvmx_pko_reg_error_cn50xx cn63xxp1; - struct cvmx_pko_reg_error_cn50xx cn66xx; - struct cvmx_pko_reg_error_s cn68xx; - struct cvmx_pko_reg_error_s cn68xxp1; - struct cvmx_pko_reg_error_cn50xx cnf71xx; }; union cvmx_pko_reg_flags { @@ -2379,16 +1892,6 @@ union cvmx_pko_reg_flags { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_pko_reg_flags_cn30xx cn31xx; - struct cvmx_pko_reg_flags_cn30xx cn38xx; - struct cvmx_pko_reg_flags_cn30xx cn38xxp2; - struct cvmx_pko_reg_flags_cn30xx cn50xx; - struct cvmx_pko_reg_flags_cn30xx cn52xx; - struct cvmx_pko_reg_flags_cn30xx cn52xxp1; - struct cvmx_pko_reg_flags_cn30xx cn56xx; - struct cvmx_pko_reg_flags_cn30xx cn56xxp1; - struct cvmx_pko_reg_flags_cn30xx cn58xx; - struct cvmx_pko_reg_flags_cn30xx cn58xxp1; struct cvmx_pko_reg_flags_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -2410,10 +1913,6 @@ union cvmx_pko_reg_flags { uint64_t reserved_9_63:55; #endif } cn61xx; - struct cvmx_pko_reg_flags_cn30xx cn63xx; - struct cvmx_pko_reg_flags_cn30xx cn63xxp1; - struct cvmx_pko_reg_flags_cn61xx cn66xx; - struct cvmx_pko_reg_flags_s cn68xx; struct cvmx_pko_reg_flags_cn68xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_7_63:57; @@ -2435,7 +1934,6 @@ union cvmx_pko_reg_flags { uint64_t reserved_7_63:57; #endif } cn68xxp1; - struct cvmx_pko_reg_flags_cn61xx cnf71xx; }; union cvmx_pko_reg_gmx_port_mode { @@ -2451,22 +1949,6 @@ union cvmx_pko_reg_gmx_port_mode { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_pko_reg_gmx_port_mode_s cn30xx; - struct cvmx_pko_reg_gmx_port_mode_s cn31xx; - struct cvmx_pko_reg_gmx_port_mode_s cn38xx; - struct 
cvmx_pko_reg_gmx_port_mode_s cn38xxp2; - struct cvmx_pko_reg_gmx_port_mode_s cn50xx; - struct cvmx_pko_reg_gmx_port_mode_s cn52xx; - struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1; - struct cvmx_pko_reg_gmx_port_mode_s cn56xx; - struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1; - struct cvmx_pko_reg_gmx_port_mode_s cn58xx; - struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1; - struct cvmx_pko_reg_gmx_port_mode_s cn61xx; - struct cvmx_pko_reg_gmx_port_mode_s cn63xx; - struct cvmx_pko_reg_gmx_port_mode_s cn63xxp1; - struct cvmx_pko_reg_gmx_port_mode_s cn66xx; - struct cvmx_pko_reg_gmx_port_mode_s cnf71xx; }; union cvmx_pko_reg_int_mask { @@ -2497,9 +1979,6 @@ union cvmx_pko_reg_int_mask { uint64_t reserved_2_63:62; #endif } cn30xx; - struct cvmx_pko_reg_int_mask_cn30xx cn31xx; - struct cvmx_pko_reg_int_mask_cn30xx cn38xx; - struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2; struct cvmx_pko_reg_int_mask_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_3_63:61; @@ -2513,19 +1992,6 @@ union cvmx_pko_reg_int_mask { uint64_t reserved_3_63:61; #endif } cn50xx; - struct cvmx_pko_reg_int_mask_cn50xx cn52xx; - struct cvmx_pko_reg_int_mask_cn50xx cn52xxp1; - struct cvmx_pko_reg_int_mask_cn50xx cn56xx; - struct cvmx_pko_reg_int_mask_cn50xx cn56xxp1; - struct cvmx_pko_reg_int_mask_cn50xx cn58xx; - struct cvmx_pko_reg_int_mask_cn50xx cn58xxp1; - struct cvmx_pko_reg_int_mask_cn50xx cn61xx; - struct cvmx_pko_reg_int_mask_cn50xx cn63xx; - struct cvmx_pko_reg_int_mask_cn50xx cn63xxp1; - struct cvmx_pko_reg_int_mask_cn50xx cn66xx; - struct cvmx_pko_reg_int_mask_s cn68xx; - struct cvmx_pko_reg_int_mask_s cn68xxp1; - struct cvmx_pko_reg_int_mask_cn50xx cnf71xx; }; union cvmx_pko_reg_loopback_bpid { @@ -2569,8 +2035,6 @@ union cvmx_pko_reg_loopback_bpid { uint64_t reserved_59_63:5; #endif } s; - struct cvmx_pko_reg_loopback_bpid_s cn68xx; - struct cvmx_pko_reg_loopback_bpid_s cn68xxp1; }; union cvmx_pko_reg_loopback_pkind { @@ -2614,8 +2078,6 @@ union cvmx_pko_reg_loopback_pkind { uint64_t reserved_59_63:5; #endif } s; - struct cvmx_pko_reg_loopback_pkind_s cn68xx; - struct cvmx_pko_reg_loopback_pkind_s cn68xxp1; }; union cvmx_pko_reg_min_pkt { @@ -2641,8 +2103,6 @@ union cvmx_pko_reg_min_pkt { uint64_t size7:8; #endif } s; - struct cvmx_pko_reg_min_pkt_s cn68xx; - struct cvmx_pko_reg_min_pkt_s cn68xxp1; }; union cvmx_pko_reg_preempt { @@ -2656,17 +2116,6 @@ union cvmx_pko_reg_preempt { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pko_reg_preempt_s cn52xx; - struct cvmx_pko_reg_preempt_s cn52xxp1; - struct cvmx_pko_reg_preempt_s cn56xx; - struct cvmx_pko_reg_preempt_s cn56xxp1; - struct cvmx_pko_reg_preempt_s cn61xx; - struct cvmx_pko_reg_preempt_s cn63xx; - struct cvmx_pko_reg_preempt_s cn63xxp1; - struct cvmx_pko_reg_preempt_s cn66xx; - struct cvmx_pko_reg_preempt_s cn68xx; - struct cvmx_pko_reg_preempt_s cn68xxp1; - struct cvmx_pko_reg_preempt_s cnf71xx; }; union cvmx_pko_reg_queue_mode { @@ -2680,24 +2129,6 @@ union cvmx_pko_reg_queue_mode { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_pko_reg_queue_mode_s cn30xx; - struct cvmx_pko_reg_queue_mode_s cn31xx; - struct cvmx_pko_reg_queue_mode_s cn38xx; - struct cvmx_pko_reg_queue_mode_s cn38xxp2; - struct cvmx_pko_reg_queue_mode_s cn50xx; - struct cvmx_pko_reg_queue_mode_s cn52xx; - struct cvmx_pko_reg_queue_mode_s cn52xxp1; - struct cvmx_pko_reg_queue_mode_s cn56xx; - struct cvmx_pko_reg_queue_mode_s cn56xxp1; - struct cvmx_pko_reg_queue_mode_s cn58xx; - struct cvmx_pko_reg_queue_mode_s cn58xxp1; - struct cvmx_pko_reg_queue_mode_s cn61xx; - struct 
cvmx_pko_reg_queue_mode_s cn63xx; - struct cvmx_pko_reg_queue_mode_s cn63xxp1; - struct cvmx_pko_reg_queue_mode_s cn66xx; - struct cvmx_pko_reg_queue_mode_s cn68xx; - struct cvmx_pko_reg_queue_mode_s cn68xxp1; - struct cvmx_pko_reg_queue_mode_s cnf71xx; }; union cvmx_pko_reg_queue_preempt { @@ -2713,17 +2144,6 @@ union cvmx_pko_reg_queue_preempt { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_pko_reg_queue_preempt_s cn52xx; - struct cvmx_pko_reg_queue_preempt_s cn52xxp1; - struct cvmx_pko_reg_queue_preempt_s cn56xx; - struct cvmx_pko_reg_queue_preempt_s cn56xxp1; - struct cvmx_pko_reg_queue_preempt_s cn61xx; - struct cvmx_pko_reg_queue_preempt_s cn63xx; - struct cvmx_pko_reg_queue_preempt_s cn63xxp1; - struct cvmx_pko_reg_queue_preempt_s cn66xx; - struct cvmx_pko_reg_queue_preempt_s cn68xx; - struct cvmx_pko_reg_queue_preempt_s cn68xxp1; - struct cvmx_pko_reg_queue_preempt_s cnf71xx; }; union cvmx_pko_reg_queue_ptrs1 { @@ -2739,18 +2159,6 @@ union cvmx_pko_reg_queue_ptrs1 { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_pko_reg_queue_ptrs1_s cn50xx; - struct cvmx_pko_reg_queue_ptrs1_s cn52xx; - struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1; - struct cvmx_pko_reg_queue_ptrs1_s cn56xx; - struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1; - struct cvmx_pko_reg_queue_ptrs1_s cn58xx; - struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1; - struct cvmx_pko_reg_queue_ptrs1_s cn61xx; - struct cvmx_pko_reg_queue_ptrs1_s cn63xx; - struct cvmx_pko_reg_queue_ptrs1_s cn63xxp1; - struct cvmx_pko_reg_queue_ptrs1_s cn66xx; - struct cvmx_pko_reg_queue_ptrs1_s cnf71xx; }; union cvmx_pko_reg_read_idx { @@ -2766,24 +2174,6 @@ union cvmx_pko_reg_read_idx { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_pko_reg_read_idx_s cn30xx; - struct cvmx_pko_reg_read_idx_s cn31xx; - struct cvmx_pko_reg_read_idx_s cn38xx; - struct cvmx_pko_reg_read_idx_s cn38xxp2; - struct cvmx_pko_reg_read_idx_s cn50xx; - struct cvmx_pko_reg_read_idx_s cn52xx; - struct cvmx_pko_reg_read_idx_s cn52xxp1; - struct cvmx_pko_reg_read_idx_s cn56xx; - struct cvmx_pko_reg_read_idx_s cn56xxp1; - struct cvmx_pko_reg_read_idx_s cn58xx; - struct cvmx_pko_reg_read_idx_s cn58xxp1; - struct cvmx_pko_reg_read_idx_s cn61xx; - struct cvmx_pko_reg_read_idx_s cn63xx; - struct cvmx_pko_reg_read_idx_s cn63xxp1; - struct cvmx_pko_reg_read_idx_s cn66xx; - struct cvmx_pko_reg_read_idx_s cn68xx; - struct cvmx_pko_reg_read_idx_s cn68xxp1; - struct cvmx_pko_reg_read_idx_s cnf71xx; }; union cvmx_pko_reg_throttle { @@ -2797,8 +2187,6 @@ union cvmx_pko_reg_throttle { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pko_reg_throttle_s cn68xx; - struct cvmx_pko_reg_throttle_s cn68xxp1; }; union cvmx_pko_reg_timestamp { @@ -2812,13 +2200,6 @@ union cvmx_pko_reg_timestamp { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_pko_reg_timestamp_s cn61xx; - struct cvmx_pko_reg_timestamp_s cn63xx; - struct cvmx_pko_reg_timestamp_s cn63xxp1; - struct cvmx_pko_reg_timestamp_s cn66xx; - struct cvmx_pko_reg_timestamp_s cn68xx; - struct cvmx_pko_reg_timestamp_s cn68xxp1; - struct cvmx_pko_reg_timestamp_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h index 5f47f76ed510..20eb9c46a75a 100644 --- a/arch/mips/include/asm/octeon/cvmx-pko.h +++ b/arch/mips/include/asm/octeon/cvmx-pko.h @@ -611,7 +611,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num); cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64); debug8.u64 
= cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); - status->doorbell = debug8.cn58xx.doorbell; + status->doorbell = debug8.cn50xx.doorbell; } } diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h index 6a3db4b068ff..474dd544314b 100644 --- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h @@ -160,8 +160,6 @@ union cvmx_pow_bist_stat { uint64_t reserved_32_63:32; #endif } cn38xx; - struct cvmx_pow_bist_stat_cn38xx cn38xxp2; - struct cvmx_pow_bist_stat_cn31xx cn50xx; struct cvmx_pow_bist_stat_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -191,7 +189,6 @@ union cvmx_pow_bist_stat { uint64_t reserved_20_63:44; #endif } cn52xx; - struct cvmx_pow_bist_stat_cn52xx cn52xxp1; struct cvmx_pow_bist_stat_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -223,9 +220,6 @@ union cvmx_pow_bist_stat { uint64_t reserved_28_63:36; #endif } cn56xx; - struct cvmx_pow_bist_stat_cn56xx cn56xxp1; - struct cvmx_pow_bist_stat_cn38xx cn58xx; - struct cvmx_pow_bist_stat_cn38xx cn58xxp1; struct cvmx_pow_bist_stat_cn61xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; @@ -276,7 +270,6 @@ union cvmx_pow_bist_stat { uint64_t reserved_22_63:42; #endif } cn63xx; - struct cvmx_pow_bist_stat_cn63xx cn63xxp1; struct cvmx_pow_bist_stat_cn66xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_26_63:38; @@ -302,7 +295,6 @@ union cvmx_pow_bist_stat { uint64_t reserved_26_63:38; #endif } cn66xx; - struct cvmx_pow_bist_stat_cn61xx cnf71xx; }; union cvmx_pow_ds_pc { @@ -316,22 +308,6 @@ union cvmx_pow_ds_pc { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_ds_pc_s cn30xx; - struct cvmx_pow_ds_pc_s cn31xx; - struct cvmx_pow_ds_pc_s cn38xx; - struct cvmx_pow_ds_pc_s cn38xxp2; - struct cvmx_pow_ds_pc_s cn50xx; - struct cvmx_pow_ds_pc_s cn52xx; - struct cvmx_pow_ds_pc_s cn52xxp1; - struct cvmx_pow_ds_pc_s cn56xx; - struct cvmx_pow_ds_pc_s cn56xxp1; - struct cvmx_pow_ds_pc_s cn58xx; - struct cvmx_pow_ds_pc_s cn58xxp1; - struct cvmx_pow_ds_pc_s cn61xx; - struct cvmx_pow_ds_pc_s cn63xx; - struct cvmx_pow_ds_pc_s cn63xxp1; - struct cvmx_pow_ds_pc_s cn66xx; - struct cvmx_pow_ds_pc_s cnf71xx; }; union cvmx_pow_ecc_err { @@ -367,7 +343,6 @@ union cvmx_pow_ecc_err { uint64_t reserved_45_63:19; #endif } s; - struct cvmx_pow_ecc_err_s cn30xx; struct cvmx_pow_ecc_err_cn31xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; @@ -391,20 +366,6 @@ union cvmx_pow_ecc_err { uint64_t reserved_14_63:50; #endif } cn31xx; - struct cvmx_pow_ecc_err_s cn38xx; - struct cvmx_pow_ecc_err_cn31xx cn38xxp2; - struct cvmx_pow_ecc_err_s cn50xx; - struct cvmx_pow_ecc_err_s cn52xx; - struct cvmx_pow_ecc_err_s cn52xxp1; - struct cvmx_pow_ecc_err_s cn56xx; - struct cvmx_pow_ecc_err_s cn56xxp1; - struct cvmx_pow_ecc_err_s cn58xx; - struct cvmx_pow_ecc_err_s cn58xxp1; - struct cvmx_pow_ecc_err_s cn61xx; - struct cvmx_pow_ecc_err_s cn63xx; - struct cvmx_pow_ecc_err_s cn63xxp1; - struct cvmx_pow_ecc_err_s cn66xx; - struct cvmx_pow_ecc_err_s cnf71xx; }; union cvmx_pow_int_ctl { @@ -420,22 +381,6 @@ union cvmx_pow_int_ctl { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_pow_int_ctl_s cn30xx; - struct cvmx_pow_int_ctl_s cn31xx; - struct cvmx_pow_int_ctl_s cn38xx; - struct cvmx_pow_int_ctl_s cn38xxp2; - struct cvmx_pow_int_ctl_s cn50xx; - struct cvmx_pow_int_ctl_s cn52xx; - struct cvmx_pow_int_ctl_s cn52xxp1; - struct cvmx_pow_int_ctl_s cn56xx; - struct cvmx_pow_int_ctl_s cn56xxp1; - struct cvmx_pow_int_ctl_s 
cn58xx; - struct cvmx_pow_int_ctl_s cn58xxp1; - struct cvmx_pow_int_ctl_s cn61xx; - struct cvmx_pow_int_ctl_s cn63xx; - struct cvmx_pow_int_ctl_s cn63xxp1; - struct cvmx_pow_int_ctl_s cn66xx; - struct cvmx_pow_int_ctl_s cnf71xx; }; union cvmx_pow_iq_cntx { @@ -449,22 +394,6 @@ union cvmx_pow_iq_cntx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_iq_cntx_s cn30xx; - struct cvmx_pow_iq_cntx_s cn31xx; - struct cvmx_pow_iq_cntx_s cn38xx; - struct cvmx_pow_iq_cntx_s cn38xxp2; - struct cvmx_pow_iq_cntx_s cn50xx; - struct cvmx_pow_iq_cntx_s cn52xx; - struct cvmx_pow_iq_cntx_s cn52xxp1; - struct cvmx_pow_iq_cntx_s cn56xx; - struct cvmx_pow_iq_cntx_s cn56xxp1; - struct cvmx_pow_iq_cntx_s cn58xx; - struct cvmx_pow_iq_cntx_s cn58xxp1; - struct cvmx_pow_iq_cntx_s cn61xx; - struct cvmx_pow_iq_cntx_s cn63xx; - struct cvmx_pow_iq_cntx_s cn63xxp1; - struct cvmx_pow_iq_cntx_s cn66xx; - struct cvmx_pow_iq_cntx_s cnf71xx; }; union cvmx_pow_iq_com_cnt { @@ -478,22 +407,6 @@ union cvmx_pow_iq_com_cnt { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_iq_com_cnt_s cn30xx; - struct cvmx_pow_iq_com_cnt_s cn31xx; - struct cvmx_pow_iq_com_cnt_s cn38xx; - struct cvmx_pow_iq_com_cnt_s cn38xxp2; - struct cvmx_pow_iq_com_cnt_s cn50xx; - struct cvmx_pow_iq_com_cnt_s cn52xx; - struct cvmx_pow_iq_com_cnt_s cn52xxp1; - struct cvmx_pow_iq_com_cnt_s cn56xx; - struct cvmx_pow_iq_com_cnt_s cn56xxp1; - struct cvmx_pow_iq_com_cnt_s cn58xx; - struct cvmx_pow_iq_com_cnt_s cn58xxp1; - struct cvmx_pow_iq_com_cnt_s cn61xx; - struct cvmx_pow_iq_com_cnt_s cn63xx; - struct cvmx_pow_iq_com_cnt_s cn63xxp1; - struct cvmx_pow_iq_com_cnt_s cn66xx; - struct cvmx_pow_iq_com_cnt_s cnf71xx; }; union cvmx_pow_iq_int { @@ -507,15 +420,6 @@ union cvmx_pow_iq_int { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_pow_iq_int_s cn52xx; - struct cvmx_pow_iq_int_s cn52xxp1; - struct cvmx_pow_iq_int_s cn56xx; - struct cvmx_pow_iq_int_s cn56xxp1; - struct cvmx_pow_iq_int_s cn61xx; - struct cvmx_pow_iq_int_s cn63xx; - struct cvmx_pow_iq_int_s cn63xxp1; - struct cvmx_pow_iq_int_s cn66xx; - struct cvmx_pow_iq_int_s cnf71xx; }; union cvmx_pow_iq_int_en { @@ -529,15 +433,6 @@ union cvmx_pow_iq_int_en { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_pow_iq_int_en_s cn52xx; - struct cvmx_pow_iq_int_en_s cn52xxp1; - struct cvmx_pow_iq_int_en_s cn56xx; - struct cvmx_pow_iq_int_en_s cn56xxp1; - struct cvmx_pow_iq_int_en_s cn61xx; - struct cvmx_pow_iq_int_en_s cn63xx; - struct cvmx_pow_iq_int_en_s cn63xxp1; - struct cvmx_pow_iq_int_en_s cn66xx; - struct cvmx_pow_iq_int_en_s cnf71xx; }; union cvmx_pow_iq_thrx { @@ -551,15 +446,6 @@ union cvmx_pow_iq_thrx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_iq_thrx_s cn52xx; - struct cvmx_pow_iq_thrx_s cn52xxp1; - struct cvmx_pow_iq_thrx_s cn56xx; - struct cvmx_pow_iq_thrx_s cn56xxp1; - struct cvmx_pow_iq_thrx_s cn61xx; - struct cvmx_pow_iq_thrx_s cn63xx; - struct cvmx_pow_iq_thrx_s cn63xxp1; - struct cvmx_pow_iq_thrx_s cn66xx; - struct cvmx_pow_iq_thrx_s cnf71xx; }; union cvmx_pow_nos_cnt { @@ -591,9 +477,6 @@ union cvmx_pow_nos_cnt { uint64_t reserved_9_63:55; #endif } cn31xx; - struct cvmx_pow_nos_cnt_s cn38xx; - struct cvmx_pow_nos_cnt_s cn38xxp2; - struct cvmx_pow_nos_cnt_cn31xx cn50xx; struct cvmx_pow_nos_cnt_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; @@ -603,12 +486,6 @@ union cvmx_pow_nos_cnt { uint64_t reserved_10_63:54; #endif } cn52xx; - struct cvmx_pow_nos_cnt_cn52xx cn52xxp1; - struct cvmx_pow_nos_cnt_s cn56xx; - struct cvmx_pow_nos_cnt_s 
cn56xxp1; - struct cvmx_pow_nos_cnt_s cn58xx; - struct cvmx_pow_nos_cnt_s cn58xxp1; - struct cvmx_pow_nos_cnt_cn52xx cn61xx; struct cvmx_pow_nos_cnt_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -618,9 +495,6 @@ union cvmx_pow_nos_cnt { uint64_t reserved_11_63:53; #endif } cn63xx; - struct cvmx_pow_nos_cnt_cn63xx cn63xxp1; - struct cvmx_pow_nos_cnt_cn63xx cn66xx; - struct cvmx_pow_nos_cnt_cn52xx cnf71xx; }; union cvmx_pow_nw_tim { @@ -634,22 +508,6 @@ union cvmx_pow_nw_tim { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_pow_nw_tim_s cn30xx; - struct cvmx_pow_nw_tim_s cn31xx; - struct cvmx_pow_nw_tim_s cn38xx; - struct cvmx_pow_nw_tim_s cn38xxp2; - struct cvmx_pow_nw_tim_s cn50xx; - struct cvmx_pow_nw_tim_s cn52xx; - struct cvmx_pow_nw_tim_s cn52xxp1; - struct cvmx_pow_nw_tim_s cn56xx; - struct cvmx_pow_nw_tim_s cn56xxp1; - struct cvmx_pow_nw_tim_s cn58xx; - struct cvmx_pow_nw_tim_s cn58xxp1; - struct cvmx_pow_nw_tim_s cn61xx; - struct cvmx_pow_nw_tim_s cn63xx; - struct cvmx_pow_nw_tim_s cn63xxp1; - struct cvmx_pow_nw_tim_s cn66xx; - struct cvmx_pow_nw_tim_s cnf71xx; }; union cvmx_pow_pf_rst_msk { @@ -663,18 +521,6 @@ union cvmx_pow_pf_rst_msk { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_pow_pf_rst_msk_s cn50xx; - struct cvmx_pow_pf_rst_msk_s cn52xx; - struct cvmx_pow_pf_rst_msk_s cn52xxp1; - struct cvmx_pow_pf_rst_msk_s cn56xx; - struct cvmx_pow_pf_rst_msk_s cn56xxp1; - struct cvmx_pow_pf_rst_msk_s cn58xx; - struct cvmx_pow_pf_rst_msk_s cn58xxp1; - struct cvmx_pow_pf_rst_msk_s cn61xx; - struct cvmx_pow_pf_rst_msk_s cn63xx; - struct cvmx_pow_pf_rst_msk_s cn63xxp1; - struct cvmx_pow_pf_rst_msk_s cn66xx; - struct cvmx_pow_pf_rst_msk_s cnf71xx; }; union cvmx_pow_pp_grp_mskx { @@ -713,21 +559,6 @@ union cvmx_pow_pp_grp_mskx { uint64_t reserved_16_63:48; #endif } cn30xx; - struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx; - struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx; - struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2; - struct cvmx_pow_pp_grp_mskx_s cn50xx; - struct cvmx_pow_pp_grp_mskx_s cn52xx; - struct cvmx_pow_pp_grp_mskx_s cn52xxp1; - struct cvmx_pow_pp_grp_mskx_s cn56xx; - struct cvmx_pow_pp_grp_mskx_s cn56xxp1; - struct cvmx_pow_pp_grp_mskx_s cn58xx; - struct cvmx_pow_pp_grp_mskx_s cn58xxp1; - struct cvmx_pow_pp_grp_mskx_s cn61xx; - struct cvmx_pow_pp_grp_mskx_s cn63xx; - struct cvmx_pow_pp_grp_mskx_s cn63xxp1; - struct cvmx_pow_pp_grp_mskx_s cn66xx; - struct cvmx_pow_pp_grp_mskx_s cnf71xx; }; union cvmx_pow_qos_rndx { @@ -747,22 +578,6 @@ union cvmx_pow_qos_rndx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_qos_rndx_s cn30xx; - struct cvmx_pow_qos_rndx_s cn31xx; - struct cvmx_pow_qos_rndx_s cn38xx; - struct cvmx_pow_qos_rndx_s cn38xxp2; - struct cvmx_pow_qos_rndx_s cn50xx; - struct cvmx_pow_qos_rndx_s cn52xx; - struct cvmx_pow_qos_rndx_s cn52xxp1; - struct cvmx_pow_qos_rndx_s cn56xx; - struct cvmx_pow_qos_rndx_s cn56xxp1; - struct cvmx_pow_qos_rndx_s cn58xx; - struct cvmx_pow_qos_rndx_s cn58xxp1; - struct cvmx_pow_qos_rndx_s cn61xx; - struct cvmx_pow_qos_rndx_s cn63xx; - struct cvmx_pow_qos_rndx_s cn63xxp1; - struct cvmx_pow_qos_rndx_s cn66xx; - struct cvmx_pow_qos_rndx_s cnf71xx; }; union cvmx_pow_qos_thrx { @@ -838,9 +653,6 @@ union cvmx_pow_qos_thrx { uint64_t reserved_57_63:7; #endif } cn31xx; - struct cvmx_pow_qos_thrx_s cn38xx; - struct cvmx_pow_qos_thrx_s cn38xxp2; - struct cvmx_pow_qos_thrx_cn31xx cn50xx; struct cvmx_pow_qos_thrx_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_58_63:6; @@ -866,12 +678,6 @@ union cvmx_pow_qos_thrx 
{ uint64_t reserved_58_63:6; #endif } cn52xx; - struct cvmx_pow_qos_thrx_cn52xx cn52xxp1; - struct cvmx_pow_qos_thrx_s cn56xx; - struct cvmx_pow_qos_thrx_s cn56xxp1; - struct cvmx_pow_qos_thrx_s cn58xx; - struct cvmx_pow_qos_thrx_s cn58xxp1; - struct cvmx_pow_qos_thrx_cn52xx cn61xx; struct cvmx_pow_qos_thrx_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_59_63:5; @@ -897,9 +703,6 @@ union cvmx_pow_qos_thrx { uint64_t reserved_59_63:5; #endif } cn63xx; - struct cvmx_pow_qos_thrx_cn63xx cn63xxp1; - struct cvmx_pow_qos_thrx_cn63xx cn66xx; - struct cvmx_pow_qos_thrx_cn52xx cnf71xx; }; union cvmx_pow_ts_pc { @@ -913,22 +716,6 @@ union cvmx_pow_ts_pc { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_ts_pc_s cn30xx; - struct cvmx_pow_ts_pc_s cn31xx; - struct cvmx_pow_ts_pc_s cn38xx; - struct cvmx_pow_ts_pc_s cn38xxp2; - struct cvmx_pow_ts_pc_s cn50xx; - struct cvmx_pow_ts_pc_s cn52xx; - struct cvmx_pow_ts_pc_s cn52xxp1; - struct cvmx_pow_ts_pc_s cn56xx; - struct cvmx_pow_ts_pc_s cn56xxp1; - struct cvmx_pow_ts_pc_s cn58xx; - struct cvmx_pow_ts_pc_s cn58xxp1; - struct cvmx_pow_ts_pc_s cn61xx; - struct cvmx_pow_ts_pc_s cn63xx; - struct cvmx_pow_ts_pc_s cn63xxp1; - struct cvmx_pow_ts_pc_s cn66xx; - struct cvmx_pow_ts_pc_s cnf71xx; }; union cvmx_pow_wa_com_pc { @@ -942,22 +729,6 @@ union cvmx_pow_wa_com_pc { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_wa_com_pc_s cn30xx; - struct cvmx_pow_wa_com_pc_s cn31xx; - struct cvmx_pow_wa_com_pc_s cn38xx; - struct cvmx_pow_wa_com_pc_s cn38xxp2; - struct cvmx_pow_wa_com_pc_s cn50xx; - struct cvmx_pow_wa_com_pc_s cn52xx; - struct cvmx_pow_wa_com_pc_s cn52xxp1; - struct cvmx_pow_wa_com_pc_s cn56xx; - struct cvmx_pow_wa_com_pc_s cn56xxp1; - struct cvmx_pow_wa_com_pc_s cn58xx; - struct cvmx_pow_wa_com_pc_s cn58xxp1; - struct cvmx_pow_wa_com_pc_s cn61xx; - struct cvmx_pow_wa_com_pc_s cn63xx; - struct cvmx_pow_wa_com_pc_s cn63xxp1; - struct cvmx_pow_wa_com_pc_s cn66xx; - struct cvmx_pow_wa_com_pc_s cnf71xx; }; union cvmx_pow_wa_pcx { @@ -971,22 +742,6 @@ union cvmx_pow_wa_pcx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_wa_pcx_s cn30xx; - struct cvmx_pow_wa_pcx_s cn31xx; - struct cvmx_pow_wa_pcx_s cn38xx; - struct cvmx_pow_wa_pcx_s cn38xxp2; - struct cvmx_pow_wa_pcx_s cn50xx; - struct cvmx_pow_wa_pcx_s cn52xx; - struct cvmx_pow_wa_pcx_s cn52xxp1; - struct cvmx_pow_wa_pcx_s cn56xx; - struct cvmx_pow_wa_pcx_s cn56xxp1; - struct cvmx_pow_wa_pcx_s cn58xx; - struct cvmx_pow_wa_pcx_s cn58xxp1; - struct cvmx_pow_wa_pcx_s cn61xx; - struct cvmx_pow_wa_pcx_s cn63xx; - struct cvmx_pow_wa_pcx_s cn63xxp1; - struct cvmx_pow_wa_pcx_s cn66xx; - struct cvmx_pow_wa_pcx_s cnf71xx; }; union cvmx_pow_wq_int { @@ -1002,22 +757,6 @@ union cvmx_pow_wq_int { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_wq_int_s cn30xx; - struct cvmx_pow_wq_int_s cn31xx; - struct cvmx_pow_wq_int_s cn38xx; - struct cvmx_pow_wq_int_s cn38xxp2; - struct cvmx_pow_wq_int_s cn50xx; - struct cvmx_pow_wq_int_s cn52xx; - struct cvmx_pow_wq_int_s cn52xxp1; - struct cvmx_pow_wq_int_s cn56xx; - struct cvmx_pow_wq_int_s cn56xxp1; - struct cvmx_pow_wq_int_s cn58xx; - struct cvmx_pow_wq_int_s cn58xxp1; - struct cvmx_pow_wq_int_s cn61xx; - struct cvmx_pow_wq_int_s cn63xx; - struct cvmx_pow_wq_int_s cn63xxp1; - struct cvmx_pow_wq_int_s cn66xx; - struct cvmx_pow_wq_int_s cnf71xx; }; union cvmx_pow_wq_int_cntx { @@ -1069,9 +808,6 @@ union cvmx_pow_wq_int_cntx { uint64_t reserved_28_63:36; #endif } cn31xx; - struct cvmx_pow_wq_int_cntx_s cn38xx; - struct 
cvmx_pow_wq_int_cntx_s cn38xxp2; - struct cvmx_pow_wq_int_cntx_cn31xx cn50xx; struct cvmx_pow_wq_int_cntx_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -1089,12 +825,6 @@ union cvmx_pow_wq_int_cntx { uint64_t reserved_28_63:36; #endif } cn52xx; - struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1; - struct cvmx_pow_wq_int_cntx_s cn56xx; - struct cvmx_pow_wq_int_cntx_s cn56xxp1; - struct cvmx_pow_wq_int_cntx_s cn58xx; - struct cvmx_pow_wq_int_cntx_s cn58xxp1; - struct cvmx_pow_wq_int_cntx_cn52xx cn61xx; struct cvmx_pow_wq_int_cntx_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_28_63:36; @@ -1112,9 +842,6 @@ union cvmx_pow_wq_int_cntx { uint64_t reserved_28_63:36; #endif } cn63xx; - struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1; - struct cvmx_pow_wq_int_cntx_cn63xx cn66xx; - struct cvmx_pow_wq_int_cntx_cn52xx cnf71xx; }; union cvmx_pow_wq_int_pc { @@ -1134,22 +861,6 @@ union cvmx_pow_wq_int_pc { uint64_t reserved_60_63:4; #endif } s; - struct cvmx_pow_wq_int_pc_s cn30xx; - struct cvmx_pow_wq_int_pc_s cn31xx; - struct cvmx_pow_wq_int_pc_s cn38xx; - struct cvmx_pow_wq_int_pc_s cn38xxp2; - struct cvmx_pow_wq_int_pc_s cn50xx; - struct cvmx_pow_wq_int_pc_s cn52xx; - struct cvmx_pow_wq_int_pc_s cn52xxp1; - struct cvmx_pow_wq_int_pc_s cn56xx; - struct cvmx_pow_wq_int_pc_s cn56xxp1; - struct cvmx_pow_wq_int_pc_s cn58xx; - struct cvmx_pow_wq_int_pc_s cn58xxp1; - struct cvmx_pow_wq_int_pc_s cn61xx; - struct cvmx_pow_wq_int_pc_s cn63xx; - struct cvmx_pow_wq_int_pc_s cn63xxp1; - struct cvmx_pow_wq_int_pc_s cn66xx; - struct cvmx_pow_wq_int_pc_s cnf71xx; }; union cvmx_pow_wq_int_thrx { @@ -1211,9 +922,6 @@ union cvmx_pow_wq_int_thrx { uint64_t reserved_29_63:35; #endif } cn31xx; - struct cvmx_pow_wq_int_thrx_s cn38xx; - struct cvmx_pow_wq_int_thrx_s cn38xxp2; - struct cvmx_pow_wq_int_thrx_cn31xx cn50xx; struct cvmx_pow_wq_int_thrx_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -1233,12 +941,6 @@ union cvmx_pow_wq_int_thrx { uint64_t reserved_29_63:35; #endif } cn52xx; - struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1; - struct cvmx_pow_wq_int_thrx_s cn56xx; - struct cvmx_pow_wq_int_thrx_s cn56xxp1; - struct cvmx_pow_wq_int_thrx_s cn58xx; - struct cvmx_pow_wq_int_thrx_s cn58xxp1; - struct cvmx_pow_wq_int_thrx_cn52xx cn61xx; struct cvmx_pow_wq_int_thrx_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_29_63:35; @@ -1258,9 +960,6 @@ union cvmx_pow_wq_int_thrx { uint64_t reserved_29_63:35; #endif } cn63xx; - struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1; - struct cvmx_pow_wq_int_thrx_cn63xx cn66xx; - struct cvmx_pow_wq_int_thrx_cn52xx cnf71xx; }; union cvmx_pow_ws_pcx { @@ -1274,22 +973,6 @@ union cvmx_pow_ws_pcx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_pow_ws_pcx_s cn30xx; - struct cvmx_pow_ws_pcx_s cn31xx; - struct cvmx_pow_ws_pcx_s cn38xx; - struct cvmx_pow_ws_pcx_s cn38xxp2; - struct cvmx_pow_ws_pcx_s cn50xx; - struct cvmx_pow_ws_pcx_s cn52xx; - struct cvmx_pow_ws_pcx_s cn52xxp1; - struct cvmx_pow_ws_pcx_s cn56xx; - struct cvmx_pow_ws_pcx_s cn56xxp1; - struct cvmx_pow_ws_pcx_s cn58xx; - struct cvmx_pow_ws_pcx_s cn58xxp1; - struct cvmx_pow_ws_pcx_s cn61xx; - struct cvmx_pow_ws_pcx_s cn63xx; - struct cvmx_pow_ws_pcx_s cn63xxp1; - struct cvmx_pow_ws_pcx_s cn66xx; - struct cvmx_pow_ws_pcx_s cnf71xx; }; union cvmx_sso_wq_int_thrx { diff --git a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h index 87d6f92a548a..94295d2fe22e 100644 --- a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h +++ 
b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h @@ -47,24 +47,6 @@ union cvmx_rnm_bist_status { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_rnm_bist_status_s cn30xx; - struct cvmx_rnm_bist_status_s cn31xx; - struct cvmx_rnm_bist_status_s cn38xx; - struct cvmx_rnm_bist_status_s cn38xxp2; - struct cvmx_rnm_bist_status_s cn50xx; - struct cvmx_rnm_bist_status_s cn52xx; - struct cvmx_rnm_bist_status_s cn52xxp1; - struct cvmx_rnm_bist_status_s cn56xx; - struct cvmx_rnm_bist_status_s cn56xxp1; - struct cvmx_rnm_bist_status_s cn58xx; - struct cvmx_rnm_bist_status_s cn58xxp1; - struct cvmx_rnm_bist_status_s cn61xx; - struct cvmx_rnm_bist_status_s cn63xx; - struct cvmx_rnm_bist_status_s cn63xxp1; - struct cvmx_rnm_bist_status_s cn66xx; - struct cvmx_rnm_bist_status_s cn68xx; - struct cvmx_rnm_bist_status_s cn68xxp1; - struct cvmx_rnm_bist_status_s cnf71xx; }; union cvmx_rnm_ctl_status { @@ -109,9 +91,6 @@ union cvmx_rnm_ctl_status { uint64_t reserved_4_63:60; #endif } cn30xx; - struct cvmx_rnm_ctl_status_cn30xx cn31xx; - struct cvmx_rnm_ctl_status_cn30xx cn38xx; - struct cvmx_rnm_ctl_status_cn30xx cn38xxp2; struct cvmx_rnm_ctl_status_cn50xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; @@ -131,13 +110,6 @@ union cvmx_rnm_ctl_status { uint64_t reserved_9_63:55; #endif } cn50xx; - struct cvmx_rnm_ctl_status_cn50xx cn52xx; - struct cvmx_rnm_ctl_status_cn50xx cn52xxp1; - struct cvmx_rnm_ctl_status_cn50xx cn56xx; - struct cvmx_rnm_ctl_status_cn50xx cn56xxp1; - struct cvmx_rnm_ctl_status_cn50xx cn58xx; - struct cvmx_rnm_ctl_status_cn50xx cn58xxp1; - struct cvmx_rnm_ctl_status_s cn61xx; struct cvmx_rnm_ctl_status_cn63xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_11_63:53; @@ -161,11 +133,6 @@ union cvmx_rnm_ctl_status { uint64_t reserved_11_63:53; #endif } cn63xx; - struct cvmx_rnm_ctl_status_cn63xx cn63xxp1; - struct cvmx_rnm_ctl_status_s cn66xx; - struct cvmx_rnm_ctl_status_cn63xx cn68xx; - struct cvmx_rnm_ctl_status_cn63xx cn68xxp1; - struct cvmx_rnm_ctl_status_s cnf71xx; }; union cvmx_rnm_eer_dbg { @@ -177,13 +144,6 @@ union cvmx_rnm_eer_dbg { uint64_t dat:64; #endif } s; - struct cvmx_rnm_eer_dbg_s cn61xx; - struct cvmx_rnm_eer_dbg_s cn63xx; - struct cvmx_rnm_eer_dbg_s cn63xxp1; - struct cvmx_rnm_eer_dbg_s cn66xx; - struct cvmx_rnm_eer_dbg_s cn68xx; - struct cvmx_rnm_eer_dbg_s cn68xxp1; - struct cvmx_rnm_eer_dbg_s cnf71xx; }; union cvmx_rnm_eer_key { @@ -195,13 +155,6 @@ union cvmx_rnm_eer_key { uint64_t key:64; #endif } s; - struct cvmx_rnm_eer_key_s cn61xx; - struct cvmx_rnm_eer_key_s cn63xx; - struct cvmx_rnm_eer_key_s cn63xxp1; - struct cvmx_rnm_eer_key_s cn66xx; - struct cvmx_rnm_eer_key_s cn68xx; - struct cvmx_rnm_eer_key_s cn68xxp1; - struct cvmx_rnm_eer_key_s cnf71xx; }; union cvmx_rnm_serial_num { @@ -213,12 +166,6 @@ union cvmx_rnm_serial_num { uint64_t dat:64; #endif } s; - struct cvmx_rnm_serial_num_s cn61xx; - struct cvmx_rnm_serial_num_s cn63xx; - struct cvmx_rnm_serial_num_s cn66xx; - struct cvmx_rnm_serial_num_s cn68xx; - struct cvmx_rnm_serial_num_s cn68xxp1; - struct cvmx_rnm_serial_num_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h index 0c9c3e74d4ae..accc9977d9cd 100644 --- a/arch/mips/include/asm/octeon/cvmx-rst-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h @@ -80,9 +80,6 @@ union cvmx_rst_boot { uint64_t chipkill:1; #endif } s; - struct cvmx_rst_boot_s cn70xx; - struct cvmx_rst_boot_s cn70xxp1; - struct cvmx_rst_boot_s cn78xx; }; union cvmx_rst_cfg { @@ -102,9 
+99,6 @@ union cvmx_rst_cfg { uint64_t bist_delay:58; #endif } s; - struct cvmx_rst_cfg_s cn70xx; - struct cvmx_rst_cfg_s cn70xxp1; - struct cvmx_rst_cfg_s cn78xx; }; union cvmx_rst_ckill { @@ -118,9 +112,6 @@ union cvmx_rst_ckill { uint64_t reserved_47_63:17; #endif } s; - struct cvmx_rst_ckill_s cn70xx; - struct cvmx_rst_ckill_s cn70xxp1; - struct cvmx_rst_ckill_s cn78xx; }; union cvmx_rst_ctlx { @@ -150,9 +141,6 @@ union cvmx_rst_ctlx { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_rst_ctlx_s cn70xx; - struct cvmx_rst_ctlx_s cn70xxp1; - struct cvmx_rst_ctlx_s cn78xx; }; union cvmx_rst_delay { @@ -168,9 +156,6 @@ union cvmx_rst_delay { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_rst_delay_s cn70xx; - struct cvmx_rst_delay_s cn70xxp1; - struct cvmx_rst_delay_s cn78xx; }; union cvmx_rst_eco { @@ -184,7 +169,6 @@ union cvmx_rst_eco { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_rst_eco_s cn78xx; }; union cvmx_rst_int { @@ -215,8 +199,6 @@ union cvmx_rst_int { uint64_t reserved_11_63:53; #endif } cn70xx; - struct cvmx_rst_int_cn70xx cn70xxp1; - struct cvmx_rst_int_s cn78xx; }; union cvmx_rst_ocx { @@ -230,7 +212,6 @@ union cvmx_rst_ocx { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_rst_ocx_s cn78xx; }; union cvmx_rst_power_dbg { @@ -244,7 +225,6 @@ union cvmx_rst_power_dbg { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_rst_power_dbg_s cn78xx; }; union cvmx_rst_pp_power { @@ -267,8 +247,6 @@ union cvmx_rst_pp_power { uint64_t reserved_4_63:60; #endif } cn70xx; - struct cvmx_rst_pp_power_cn70xx cn70xxp1; - struct cvmx_rst_pp_power_s cn78xx; }; union cvmx_rst_soft_prstx { @@ -282,9 +260,6 @@ union cvmx_rst_soft_prstx { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_rst_soft_prstx_s cn70xx; - struct cvmx_rst_soft_prstx_s cn70xxp1; - struct cvmx_rst_soft_prstx_s cn78xx; }; union cvmx_rst_soft_rst { @@ -298,9 +273,6 @@ union cvmx_rst_soft_rst { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_rst_soft_rst_s cn70xx; - struct cvmx_rst_soft_rst_s cn70xxp1; - struct cvmx_rst_soft_rst_s cn78xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h index 8a278e6ddba9..7a928230b0c0 100644 --- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-smix-defs.h @@ -186,23 +186,6 @@ union cvmx_smix_clk { uint64_t reserved_21_63:43; #endif } cn30xx; - struct cvmx_smix_clk_cn30xx cn31xx; - struct cvmx_smix_clk_cn30xx cn38xx; - struct cvmx_smix_clk_cn30xx cn38xxp2; - struct cvmx_smix_clk_s cn50xx; - struct cvmx_smix_clk_s cn52xx; - struct cvmx_smix_clk_s cn52xxp1; - struct cvmx_smix_clk_s cn56xx; - struct cvmx_smix_clk_s cn56xxp1; - struct cvmx_smix_clk_cn30xx cn58xx; - struct cvmx_smix_clk_cn30xx cn58xxp1; - struct cvmx_smix_clk_s cn61xx; - struct cvmx_smix_clk_s cn63xx; - struct cvmx_smix_clk_s cn63xxp1; - struct cvmx_smix_clk_s cn66xx; - struct cvmx_smix_clk_s cn68xx; - struct cvmx_smix_clk_s cn68xxp1; - struct cvmx_smix_clk_s cnf71xx; }; union cvmx_smix_cmd { @@ -241,23 +224,6 @@ union cvmx_smix_cmd { uint64_t reserved_17_63:47; #endif } cn30xx; - struct cvmx_smix_cmd_cn30xx cn31xx; - struct cvmx_smix_cmd_cn30xx cn38xx; - struct cvmx_smix_cmd_cn30xx cn38xxp2; - struct cvmx_smix_cmd_s cn50xx; - struct cvmx_smix_cmd_s cn52xx; - struct cvmx_smix_cmd_s cn52xxp1; - struct cvmx_smix_cmd_s cn56xx; - struct cvmx_smix_cmd_s cn56xxp1; - struct cvmx_smix_cmd_cn30xx cn58xx; - struct cvmx_smix_cmd_cn30xx cn58xxp1; - struct cvmx_smix_cmd_s cn61xx; - struct cvmx_smix_cmd_s 
cn63xx; - struct cvmx_smix_cmd_s cn63xxp1; - struct cvmx_smix_cmd_s cn66xx; - struct cvmx_smix_cmd_s cn68xx; - struct cvmx_smix_cmd_s cn68xxp1; - struct cvmx_smix_cmd_s cnf71xx; }; union cvmx_smix_en { @@ -271,24 +237,6 @@ union cvmx_smix_en { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_smix_en_s cn30xx; - struct cvmx_smix_en_s cn31xx; - struct cvmx_smix_en_s cn38xx; - struct cvmx_smix_en_s cn38xxp2; - struct cvmx_smix_en_s cn50xx; - struct cvmx_smix_en_s cn52xx; - struct cvmx_smix_en_s cn52xxp1; - struct cvmx_smix_en_s cn56xx; - struct cvmx_smix_en_s cn56xxp1; - struct cvmx_smix_en_s cn58xx; - struct cvmx_smix_en_s cn58xxp1; - struct cvmx_smix_en_s cn61xx; - struct cvmx_smix_en_s cn63xx; - struct cvmx_smix_en_s cn63xxp1; - struct cvmx_smix_en_s cn66xx; - struct cvmx_smix_en_s cn68xx; - struct cvmx_smix_en_s cn68xxp1; - struct cvmx_smix_en_s cnf71xx; }; union cvmx_smix_rd_dat { @@ -306,24 +254,6 @@ union cvmx_smix_rd_dat { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_smix_rd_dat_s cn30xx; - struct cvmx_smix_rd_dat_s cn31xx; - struct cvmx_smix_rd_dat_s cn38xx; - struct cvmx_smix_rd_dat_s cn38xxp2; - struct cvmx_smix_rd_dat_s cn50xx; - struct cvmx_smix_rd_dat_s cn52xx; - struct cvmx_smix_rd_dat_s cn52xxp1; - struct cvmx_smix_rd_dat_s cn56xx; - struct cvmx_smix_rd_dat_s cn56xxp1; - struct cvmx_smix_rd_dat_s cn58xx; - struct cvmx_smix_rd_dat_s cn58xxp1; - struct cvmx_smix_rd_dat_s cn61xx; - struct cvmx_smix_rd_dat_s cn63xx; - struct cvmx_smix_rd_dat_s cn63xxp1; - struct cvmx_smix_rd_dat_s cn66xx; - struct cvmx_smix_rd_dat_s cn68xx; - struct cvmx_smix_rd_dat_s cn68xxp1; - struct cvmx_smix_rd_dat_s cnf71xx; }; union cvmx_smix_wr_dat { @@ -341,24 +271,6 @@ union cvmx_smix_wr_dat { uint64_t reserved_18_63:46; #endif } s; - struct cvmx_smix_wr_dat_s cn30xx; - struct cvmx_smix_wr_dat_s cn31xx; - struct cvmx_smix_wr_dat_s cn38xx; - struct cvmx_smix_wr_dat_s cn38xxp2; - struct cvmx_smix_wr_dat_s cn50xx; - struct cvmx_smix_wr_dat_s cn52xx; - struct cvmx_smix_wr_dat_s cn52xxp1; - struct cvmx_smix_wr_dat_s cn56xx; - struct cvmx_smix_wr_dat_s cn56xxp1; - struct cvmx_smix_wr_dat_s cn58xx; - struct cvmx_smix_wr_dat_s cn58xxp1; - struct cvmx_smix_wr_dat_s cn61xx; - struct cvmx_smix_wr_dat_s cn63xx; - struct cvmx_smix_wr_dat_s cn63xxp1; - struct cvmx_smix_wr_dat_s cn66xx; - struct cvmx_smix_wr_dat_s cn68xx; - struct cvmx_smix_wr_dat_s cn68xxp1; - struct cvmx_smix_wr_dat_s cnf71xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h index f4c4e8051160..8471ed2dea51 100644 --- a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h @@ -58,10 +58,6 @@ union cvmx_spxx_bckprs_cnt { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_spxx_bckprs_cnt_s cn38xx; - struct cvmx_spxx_bckprs_cnt_s cn38xxp2; - struct cvmx_spxx_bckprs_cnt_s cn58xx; - struct cvmx_spxx_bckprs_cnt_s cn58xxp1; }; union cvmx_spxx_bist_stat { @@ -79,10 +75,6 @@ union cvmx_spxx_bist_stat { uint64_t reserved_3_63:61; #endif } s; - struct cvmx_spxx_bist_stat_s cn38xx; - struct cvmx_spxx_bist_stat_s cn38xxp2; - struct cvmx_spxx_bist_stat_s cn58xx; - struct cvmx_spxx_bist_stat_s cn58xxp1; }; union cvmx_spxx_clk_ctl { @@ -114,10 +106,6 @@ union cvmx_spxx_clk_ctl { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_spxx_clk_ctl_s cn38xx; - struct cvmx_spxx_clk_ctl_s cn38xxp2; - struct cvmx_spxx_clk_ctl_s cn58xx; - struct cvmx_spxx_clk_ctl_s cn58xxp1; }; union cvmx_spxx_clk_stat { @@ -145,10 +133,6 @@ union 
cvmx_spxx_clk_stat { uint64_t reserved_11_63:53; #endif } s; - struct cvmx_spxx_clk_stat_s cn38xx; - struct cvmx_spxx_clk_stat_s cn38xxp2; - struct cvmx_spxx_clk_stat_s cn58xx; - struct cvmx_spxx_clk_stat_s cn58xxp1; }; union cvmx_spxx_dbg_deskew_ctl { @@ -190,10 +174,6 @@ union cvmx_spxx_dbg_deskew_ctl { uint64_t reserved_30_63:34; #endif } s; - struct cvmx_spxx_dbg_deskew_ctl_s cn38xx; - struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2; - struct cvmx_spxx_dbg_deskew_ctl_s cn58xx; - struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1; }; union cvmx_spxx_dbg_deskew_state { @@ -213,10 +193,6 @@ union cvmx_spxx_dbg_deskew_state { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_spxx_dbg_deskew_state_s cn38xx; - struct cvmx_spxx_dbg_deskew_state_s cn38xxp2; - struct cvmx_spxx_dbg_deskew_state_s cn58xx; - struct cvmx_spxx_dbg_deskew_state_s cn58xxp1; }; union cvmx_spxx_drv_ctl { @@ -241,7 +217,6 @@ union cvmx_spxx_drv_ctl { uint64_t reserved_16_63:48; #endif } cn38xx; - struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2; struct cvmx_spxx_drv_ctl_cn58xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_24_63:40; @@ -257,7 +232,6 @@ union cvmx_spxx_drv_ctl { uint64_t reserved_24_63:40; #endif } cn58xx; - struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1; }; union cvmx_spxx_err_ctl { @@ -279,10 +253,6 @@ union cvmx_spxx_err_ctl { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_spxx_err_ctl_s cn38xx; - struct cvmx_spxx_err_ctl_s cn38xxp2; - struct cvmx_spxx_err_ctl_s cn58xx; - struct cvmx_spxx_err_ctl_s cn58xxp1; }; union cvmx_spxx_int_dat { @@ -304,10 +274,6 @@ union cvmx_spxx_int_dat { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_spxx_int_dat_s cn38xx; - struct cvmx_spxx_int_dat_s cn38xxp2; - struct cvmx_spxx_int_dat_s cn58xx; - struct cvmx_spxx_int_dat_s cn58xxp1; }; union cvmx_spxx_int_msk { @@ -341,10 +307,6 @@ union cvmx_spxx_int_msk { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_spxx_int_msk_s cn38xx; - struct cvmx_spxx_int_msk_s cn38xxp2; - struct cvmx_spxx_int_msk_s cn58xx; - struct cvmx_spxx_int_msk_s cn58xxp1; }; union cvmx_spxx_int_reg { @@ -382,10 +344,6 @@ union cvmx_spxx_int_reg { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_spxx_int_reg_s cn38xx; - struct cvmx_spxx_int_reg_s cn38xxp2; - struct cvmx_spxx_int_reg_s cn58xx; - struct cvmx_spxx_int_reg_s cn58xxp1; }; union cvmx_spxx_int_sync { @@ -419,10 +377,6 @@ union cvmx_spxx_int_sync { uint64_t reserved_12_63:52; #endif } s; - struct cvmx_spxx_int_sync_s cn38xx; - struct cvmx_spxx_int_sync_s cn38xxp2; - struct cvmx_spxx_int_sync_s cn58xx; - struct cvmx_spxx_int_sync_s cn58xxp1; }; union cvmx_spxx_tpa_acc { @@ -436,10 +390,6 @@ union cvmx_spxx_tpa_acc { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_spxx_tpa_acc_s cn38xx; - struct cvmx_spxx_tpa_acc_s cn38xxp2; - struct cvmx_spxx_tpa_acc_s cn58xx; - struct cvmx_spxx_tpa_acc_s cn58xxp1; }; union cvmx_spxx_tpa_max { @@ -453,10 +403,6 @@ union cvmx_spxx_tpa_max { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_spxx_tpa_max_s cn38xx; - struct cvmx_spxx_tpa_max_s cn38xxp2; - struct cvmx_spxx_tpa_max_s cn58xx; - struct cvmx_spxx_tpa_max_s cn58xxp1; }; union cvmx_spxx_tpa_sel { @@ -470,10 +416,6 @@ union cvmx_spxx_tpa_sel { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_spxx_tpa_sel_s cn38xx; - struct cvmx_spxx_tpa_sel_s cn38xxp2; - struct cvmx_spxx_tpa_sel_s cn58xx; - struct cvmx_spxx_tpa_sel_s cn58xxp1; }; union cvmx_spxx_trn4_ctl { @@ -499,10 +441,6 @@ union cvmx_spxx_trn4_ctl { uint64_t reserved_13_63:51; #endif } s; - struct cvmx_spxx_trn4_ctl_s cn38xx; - struct 
cvmx_spxx_trn4_ctl_s cn38xxp2; - struct cvmx_spxx_trn4_ctl_s cn58xx; - struct cvmx_spxx_trn4_ctl_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-sriox-defs.h b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h index 5140f2d2ad1c..34d0fadb5eb3 100644 --- a/arch/mips/include/asm/octeon/cvmx-sriox-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h @@ -112,8 +112,6 @@ union cvmx_sriox_acc_ctrl { uint64_t reserved_3_63:61; #endif } cn63xx; - struct cvmx_sriox_acc_ctrl_cn63xx cn63xxp1; - struct cvmx_sriox_acc_ctrl_s cn66xx; }; union cvmx_sriox_asmbly_id { @@ -129,9 +127,6 @@ union cvmx_sriox_asmbly_id { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_asmbly_id_s cn63xx; - struct cvmx_sriox_asmbly_id_s cn63xxp1; - struct cvmx_sriox_asmbly_id_s cn66xx; }; union cvmx_sriox_asmbly_info { @@ -147,9 +142,6 @@ union cvmx_sriox_asmbly_info { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_asmbly_info_s cn63xx; - struct cvmx_sriox_asmbly_info_s cn63xxp1; - struct cvmx_sriox_asmbly_info_s cn66xx; }; union cvmx_sriox_bell_resp_ctrl { @@ -169,9 +161,6 @@ union cvmx_sriox_bell_resp_ctrl { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_sriox_bell_resp_ctrl_s cn63xx; - struct cvmx_sriox_bell_resp_ctrl_s cn63xxp1; - struct cvmx_sriox_bell_resp_ctrl_s cn66xx; }; union cvmx_sriox_bist_status { @@ -305,7 +294,6 @@ union cvmx_sriox_bist_status { uint64_t reserved_44_63:20; #endif } cn63xxp1; - struct cvmx_sriox_bist_status_s cn66xx; }; union cvmx_sriox_imsg_ctrl { @@ -343,9 +331,6 @@ union cvmx_sriox_imsg_ctrl { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_imsg_ctrl_s cn63xx; - struct cvmx_sriox_imsg_ctrl_s cn63xxp1; - struct cvmx_sriox_imsg_ctrl_s cn66xx; }; union cvmx_sriox_imsg_inst_hdrx { @@ -383,9 +368,6 @@ union cvmx_sriox_imsg_inst_hdrx { uint64_t r:1; #endif } s; - struct cvmx_sriox_imsg_inst_hdrx_s cn63xx; - struct cvmx_sriox_imsg_inst_hdrx_s cn63xxp1; - struct cvmx_sriox_imsg_inst_hdrx_s cn66xx; }; union cvmx_sriox_imsg_qos_grpx { @@ -443,9 +425,6 @@ union cvmx_sriox_imsg_qos_grpx { uint64_t reserved_63_63:1; #endif } s; - struct cvmx_sriox_imsg_qos_grpx_s cn63xx; - struct cvmx_sriox_imsg_qos_grpx_s cn63xxp1; - struct cvmx_sriox_imsg_qos_grpx_s cn66xx; }; union cvmx_sriox_imsg_statusx { @@ -503,9 +482,6 @@ union cvmx_sriox_imsg_statusx { uint64_t val1:1; #endif } s; - struct cvmx_sriox_imsg_statusx_s cn63xx; - struct cvmx_sriox_imsg_statusx_s cn63xxp1; - struct cvmx_sriox_imsg_statusx_s cn66xx; }; union cvmx_sriox_imsg_vport_thr { @@ -541,9 +517,6 @@ union cvmx_sriox_imsg_vport_thr { uint64_t reserved_54_63:10; #endif } s; - struct cvmx_sriox_imsg_vport_thr_s cn63xx; - struct cvmx_sriox_imsg_vport_thr_s cn63xxp1; - struct cvmx_sriox_imsg_vport_thr_s cn66xx; }; union cvmx_sriox_imsg_vport_thr2 { @@ -563,7 +536,6 @@ union cvmx_sriox_imsg_vport_thr2 { uint64_t reserved_46_63:18; #endif } s; - struct cvmx_sriox_imsg_vport_thr2_s cn66xx; }; union cvmx_sriox_int2_enable { @@ -577,8 +549,6 @@ union cvmx_sriox_int2_enable { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_sriox_int2_enable_s cn63xx; - struct cvmx_sriox_int2_enable_s cn66xx; }; union cvmx_sriox_int2_reg { @@ -596,8 +566,6 @@ union cvmx_sriox_int2_reg { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_int2_reg_s cn63xx; - struct cvmx_sriox_int2_reg_s cn66xx; }; union cvmx_sriox_int_enable { @@ -663,7 +631,6 @@ union cvmx_sriox_int_enable { uint64_t reserved_27_63:37; #endif } s; - struct cvmx_sriox_int_enable_s cn63xx; struct cvmx_sriox_int_enable_cn63xxp1 { 
#ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_22_63:42; @@ -715,7 +682,6 @@ union cvmx_sriox_int_enable { uint64_t reserved_22_63:42; #endif } cn63xxp1; - struct cvmx_sriox_int_enable_s cn66xx; }; union cvmx_sriox_int_info0 { @@ -743,9 +709,6 @@ union cvmx_sriox_int_info0 { uint64_t cmd:4; #endif } s; - struct cvmx_sriox_int_info0_s cn63xx; - struct cvmx_sriox_int_info0_s cn63xxp1; - struct cvmx_sriox_int_info0_s cn66xx; }; union cvmx_sriox_int_info1 { @@ -757,9 +720,6 @@ union cvmx_sriox_int_info1 { uint64_t info1:64; #endif } s; - struct cvmx_sriox_int_info1_s cn63xx; - struct cvmx_sriox_int_info1_s cn63xxp1; - struct cvmx_sriox_int_info1_s cn66xx; }; union cvmx_sriox_int_info2 { @@ -791,9 +751,6 @@ union cvmx_sriox_int_info2 { uint64_t prio:2; #endif } s; - struct cvmx_sriox_int_info2_s cn63xx; - struct cvmx_sriox_int_info2_s cn63xxp1; - struct cvmx_sriox_int_info2_s cn66xx; }; union cvmx_sriox_int_info3 { @@ -813,9 +770,6 @@ union cvmx_sriox_int_info3 { uint64_t prio:2; #endif } s; - struct cvmx_sriox_int_info3_s cn63xx; - struct cvmx_sriox_int_info3_s cn63xxp1; - struct cvmx_sriox_int_info3_s cn66xx; }; union cvmx_sriox_int_reg { @@ -885,7 +839,6 @@ union cvmx_sriox_int_reg { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_int_reg_s cn63xx; struct cvmx_sriox_int_reg_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_22_63:42; @@ -937,7 +890,6 @@ union cvmx_sriox_int_reg { uint64_t reserved_22_63:42; #endif } cn63xxp1; - struct cvmx_sriox_int_reg_s cn66xx; }; union cvmx_sriox_ip_feature { @@ -990,8 +942,6 @@ union cvmx_sriox_ip_feature { uint64_t ops:32; #endif } cn63xx; - struct cvmx_sriox_ip_feature_cn63xx cn63xxp1; - struct cvmx_sriox_ip_feature_s cn66xx; }; union cvmx_sriox_mac_buffers { @@ -1021,8 +971,6 @@ union cvmx_sriox_mac_buffers { uint64_t reserved_56_63:8; #endif } s; - struct cvmx_sriox_mac_buffers_s cn63xx; - struct cvmx_sriox_mac_buffers_s cn66xx; }; union cvmx_sriox_maint_op { @@ -1044,9 +992,6 @@ union cvmx_sriox_maint_op { uint64_t wr_data:32; #endif } s; - struct cvmx_sriox_maint_op_s cn63xx; - struct cvmx_sriox_maint_op_s cn63xxp1; - struct cvmx_sriox_maint_op_s cn66xx; }; union cvmx_sriox_maint_rd_data { @@ -1062,9 +1007,6 @@ union cvmx_sriox_maint_rd_data { uint64_t reserved_33_63:31; #endif } s; - struct cvmx_sriox_maint_rd_data_s cn63xx; - struct cvmx_sriox_maint_rd_data_s cn63xxp1; - struct cvmx_sriox_maint_rd_data_s cn66xx; }; union cvmx_sriox_mce_tx_ctl { @@ -1078,9 +1020,6 @@ union cvmx_sriox_mce_tx_ctl { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_sriox_mce_tx_ctl_s cn63xx; - struct cvmx_sriox_mce_tx_ctl_s cn63xxp1; - struct cvmx_sriox_mce_tx_ctl_s cn66xx; }; union cvmx_sriox_mem_op_ctrl { @@ -1106,9 +1045,6 @@ union cvmx_sriox_mem_op_ctrl { uint64_t reserved_10_63:54; #endif } s; - struct cvmx_sriox_mem_op_ctrl_s cn63xx; - struct cvmx_sriox_mem_op_ctrl_s cn63xxp1; - struct cvmx_sriox_mem_op_ctrl_s cn66xx; }; union cvmx_sriox_omsg_ctrlx { @@ -1140,7 +1076,6 @@ union cvmx_sriox_omsg_ctrlx { uint64_t testmode:1; #endif } s; - struct cvmx_sriox_omsg_ctrlx_s cn63xx; struct cvmx_sriox_omsg_ctrlx_cn63xxp1 { #ifdef __BIG_ENDIAN_BITFIELD uint64_t testmode:1; @@ -1166,7 +1101,6 @@ union cvmx_sriox_omsg_ctrlx { uint64_t testmode:1; #endif } cn63xxp1; - struct cvmx_sriox_omsg_ctrlx_s cn66xx; }; union cvmx_sriox_omsg_done_countsx { @@ -1182,8 +1116,6 @@ union cvmx_sriox_omsg_done_countsx { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_omsg_done_countsx_s cn63xx; - struct cvmx_sriox_omsg_done_countsx_s cn66xx; }; union 
cvmx_sriox_omsg_fmp_mrx { @@ -1225,9 +1157,6 @@ union cvmx_sriox_omsg_fmp_mrx { uint64_t reserved_15_63:49; #endif } s; - struct cvmx_sriox_omsg_fmp_mrx_s cn63xx; - struct cvmx_sriox_omsg_fmp_mrx_s cn63xxp1; - struct cvmx_sriox_omsg_fmp_mrx_s cn66xx; }; union cvmx_sriox_omsg_nmp_mrx { @@ -1269,9 +1198,6 @@ union cvmx_sriox_omsg_nmp_mrx { uint64_t reserved_15_63:49; #endif } s; - struct cvmx_sriox_omsg_nmp_mrx_s cn63xx; - struct cvmx_sriox_omsg_nmp_mrx_s cn63xxp1; - struct cvmx_sriox_omsg_nmp_mrx_s cn66xx; }; union cvmx_sriox_omsg_portx { @@ -1302,8 +1228,6 @@ union cvmx_sriox_omsg_portx { uint64_t reserved_32_63:32; #endif } cn63xx; - struct cvmx_sriox_omsg_portx_cn63xx cn63xxp1; - struct cvmx_sriox_omsg_portx_s cn66xx; }; union cvmx_sriox_omsg_silo_thr { @@ -1317,8 +1241,6 @@ union cvmx_sriox_omsg_silo_thr { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_sriox_omsg_silo_thr_s cn63xx; - struct cvmx_sriox_omsg_silo_thr_s cn66xx; }; union cvmx_sriox_omsg_sp_mrx { @@ -1362,9 +1284,6 @@ union cvmx_sriox_omsg_sp_mrx { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_sriox_omsg_sp_mrx_s cn63xx; - struct cvmx_sriox_omsg_sp_mrx_s cn63xxp1; - struct cvmx_sriox_omsg_sp_mrx_s cn66xx; }; union cvmx_sriox_priox_in_use { @@ -1380,8 +1299,6 @@ union cvmx_sriox_priox_in_use { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_priox_in_use_s cn63xx; - struct cvmx_sriox_priox_in_use_s cn66xx; }; union cvmx_sriox_rx_bell { @@ -1409,9 +1326,6 @@ union cvmx_sriox_rx_bell { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_sriox_rx_bell_s cn63xx; - struct cvmx_sriox_rx_bell_s cn63xxp1; - struct cvmx_sriox_rx_bell_s cn66xx; }; union cvmx_sriox_rx_bell_seq { @@ -1427,9 +1341,6 @@ union cvmx_sriox_rx_bell_seq { uint64_t reserved_40_63:24; #endif } s; - struct cvmx_sriox_rx_bell_seq_s cn63xx; - struct cvmx_sriox_rx_bell_seq_s cn63xxp1; - struct cvmx_sriox_rx_bell_seq_s cn66xx; }; union cvmx_sriox_rx_status { @@ -1457,9 +1368,6 @@ union cvmx_sriox_rx_status { uint64_t rtn_pr3:8; #endif } s; - struct cvmx_sriox_rx_status_s cn63xx; - struct cvmx_sriox_rx_status_s cn63xxp1; - struct cvmx_sriox_rx_status_s cn66xx; }; union cvmx_sriox_s2m_typex { @@ -1491,9 +1399,6 @@ union cvmx_sriox_s2m_typex { uint64_t reserved_19_63:45; #endif } s; - struct cvmx_sriox_s2m_typex_s cn63xx; - struct cvmx_sriox_s2m_typex_s cn63xxp1; - struct cvmx_sriox_s2m_typex_s cn66xx; }; union cvmx_sriox_seq { @@ -1507,9 +1412,6 @@ union cvmx_sriox_seq { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_seq_s cn63xx; - struct cvmx_sriox_seq_s cn63xxp1; - struct cvmx_sriox_seq_s cn66xx; }; union cvmx_sriox_status_reg { @@ -1525,9 +1427,6 @@ union cvmx_sriox_status_reg { uint64_t reserved_2_63:62; #endif } s; - struct cvmx_sriox_status_reg_s cn63xx; - struct cvmx_sriox_status_reg_s cn63xxp1; - struct cvmx_sriox_status_reg_s cn66xx; }; union cvmx_sriox_tag_ctrl { @@ -1549,9 +1448,6 @@ union cvmx_sriox_tag_ctrl { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_sriox_tag_ctrl_s cn63xx; - struct cvmx_sriox_tag_ctrl_s cn63xxp1; - struct cvmx_sriox_tag_ctrl_s cn66xx; }; union cvmx_sriox_tlp_credits { @@ -1573,9 +1469,6 @@ union cvmx_sriox_tlp_credits { uint64_t reserved_28_63:36; #endif } s; - struct cvmx_sriox_tlp_credits_s cn63xx; - struct cvmx_sriox_tlp_credits_s cn63xxp1; - struct cvmx_sriox_tlp_credits_s cn66xx; }; union cvmx_sriox_tx_bell { @@ -1605,9 +1498,6 @@ union cvmx_sriox_tx_bell { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_sriox_tx_bell_s cn63xx; - struct cvmx_sriox_tx_bell_s cn63xxp1; - struct 
cvmx_sriox_tx_bell_s cn66xx; }; union cvmx_sriox_tx_bell_info { @@ -1639,9 +1529,6 @@ union cvmx_sriox_tx_bell_info { uint64_t reserved_48_63:16; #endif } s; - struct cvmx_sriox_tx_bell_info_s cn63xx; - struct cvmx_sriox_tx_bell_info_s cn63xxp1; - struct cvmx_sriox_tx_bell_info_s cn66xx; }; union cvmx_sriox_tx_ctrl { @@ -1675,9 +1562,6 @@ union cvmx_sriox_tx_ctrl { uint64_t reserved_53_63:11; #endif } s; - struct cvmx_sriox_tx_ctrl_s cn63xx; - struct cvmx_sriox_tx_ctrl_s cn63xxp1; - struct cvmx_sriox_tx_ctrl_s cn66xx; }; union cvmx_sriox_tx_emphasis { @@ -1691,8 +1575,6 @@ union cvmx_sriox_tx_emphasis { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_sriox_tx_emphasis_s cn63xx; - struct cvmx_sriox_tx_emphasis_s cn66xx; }; union cvmx_sriox_tx_status { @@ -1712,9 +1594,6 @@ union cvmx_sriox_tx_status { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_tx_status_s cn63xx; - struct cvmx_sriox_tx_status_s cn63xxp1; - struct cvmx_sriox_tx_status_s cn66xx; }; union cvmx_sriox_wr_done_counts { @@ -1730,8 +1609,6 @@ union cvmx_sriox_wr_done_counts { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_sriox_wr_done_counts_s cn63xx; - struct cvmx_sriox_wr_done_counts_s cn66xx; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-srxx-defs.h b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h index c98e625cd4ed..76b2a42f53aa 100644 --- a/arch/mips/include/asm/octeon/cvmx-srxx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h @@ -52,10 +52,6 @@ union cvmx_srxx_com_ctl { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_srxx_com_ctl_s cn38xx; - struct cvmx_srxx_com_ctl_s cn38xxp2; - struct cvmx_srxx_com_ctl_s cn58xx; - struct cvmx_srxx_com_ctl_s cn58xxp1; }; union cvmx_srxx_ign_rx_full { @@ -69,10 +65,6 @@ union cvmx_srxx_ign_rx_full { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_srxx_ign_rx_full_s cn38xx; - struct cvmx_srxx_ign_rx_full_s cn38xxp2; - struct cvmx_srxx_ign_rx_full_s cn58xx; - struct cvmx_srxx_ign_rx_full_s cn58xxp1; }; union cvmx_srxx_spi4_calx { @@ -94,10 +86,6 @@ union cvmx_srxx_spi4_calx { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_srxx_spi4_calx_s cn38xx; - struct cvmx_srxx_spi4_calx_s cn38xxp2; - struct cvmx_srxx_spi4_calx_s cn58xx; - struct cvmx_srxx_spi4_calx_s cn58xxp1; }; union cvmx_srxx_spi4_stat { @@ -115,10 +103,6 @@ union cvmx_srxx_spi4_stat { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_srxx_spi4_stat_s cn38xx; - struct cvmx_srxx_spi4_stat_s cn38xxp2; - struct cvmx_srxx_spi4_stat_s cn58xx; - struct cvmx_srxx_spi4_stat_s cn58xxp1; }; union cvmx_srxx_sw_tick_ctl { @@ -140,9 +124,6 @@ union cvmx_srxx_sw_tick_ctl { uint64_t reserved_14_63:50; #endif } s; - struct cvmx_srxx_sw_tick_ctl_s cn38xx; - struct cvmx_srxx_sw_tick_ctl_s cn58xx; - struct cvmx_srxx_sw_tick_ctl_s cn58xxp1; }; union cvmx_srxx_sw_tick_dat { @@ -154,9 +135,6 @@ union cvmx_srxx_sw_tick_dat { uint64_t dat:64; #endif } s; - struct cvmx_srxx_sw_tick_dat_s cn38xx; - struct cvmx_srxx_sw_tick_dat_s cn58xx; - struct cvmx_srxx_sw_tick_dat_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h index 3c409a854d91..f49d82145c57 100644 --- a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h @@ -64,10 +64,6 @@ union cvmx_stxx_arb_ctl { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_stxx_arb_ctl_s cn38xx; - struct cvmx_stxx_arb_ctl_s cn38xxp2; - struct cvmx_stxx_arb_ctl_s cn58xx; - struct cvmx_stxx_arb_ctl_s cn58xxp1; }; union 
cvmx_stxx_bckprs_cnt { @@ -81,10 +77,6 @@ union cvmx_stxx_bckprs_cnt { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_stxx_bckprs_cnt_s cn38xx; - struct cvmx_stxx_bckprs_cnt_s cn38xxp2; - struct cvmx_stxx_bckprs_cnt_s cn58xx; - struct cvmx_stxx_bckprs_cnt_s cn58xxp1; }; union cvmx_stxx_com_ctl { @@ -102,10 +94,6 @@ union cvmx_stxx_com_ctl { uint64_t reserved_4_63:60; #endif } s; - struct cvmx_stxx_com_ctl_s cn38xx; - struct cvmx_stxx_com_ctl_s cn38xxp2; - struct cvmx_stxx_com_ctl_s cn58xx; - struct cvmx_stxx_com_ctl_s cn58xxp1; }; union cvmx_stxx_dip_cnt { @@ -121,10 +109,6 @@ union cvmx_stxx_dip_cnt { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_stxx_dip_cnt_s cn38xx; - struct cvmx_stxx_dip_cnt_s cn38xxp2; - struct cvmx_stxx_dip_cnt_s cn58xx; - struct cvmx_stxx_dip_cnt_s cn58xxp1; }; union cvmx_stxx_ign_cal { @@ -138,10 +122,6 @@ union cvmx_stxx_ign_cal { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_stxx_ign_cal_s cn38xx; - struct cvmx_stxx_ign_cal_s cn38xxp2; - struct cvmx_stxx_ign_cal_s cn58xx; - struct cvmx_stxx_ign_cal_s cn58xxp1; }; union cvmx_stxx_int_msk { @@ -169,10 +149,6 @@ union cvmx_stxx_int_msk { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_stxx_int_msk_s cn38xx; - struct cvmx_stxx_int_msk_s cn38xxp2; - struct cvmx_stxx_int_msk_s cn58xx; - struct cvmx_stxx_int_msk_s cn58xxp1; }; union cvmx_stxx_int_reg { @@ -202,10 +178,6 @@ union cvmx_stxx_int_reg { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_stxx_int_reg_s cn38xx; - struct cvmx_stxx_int_reg_s cn38xxp2; - struct cvmx_stxx_int_reg_s cn58xx; - struct cvmx_stxx_int_reg_s cn58xxp1; }; union cvmx_stxx_int_sync { @@ -233,10 +205,6 @@ union cvmx_stxx_int_sync { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_stxx_int_sync_s cn38xx; - struct cvmx_stxx_int_sync_s cn38xxp2; - struct cvmx_stxx_int_sync_s cn58xx; - struct cvmx_stxx_int_sync_s cn58xxp1; }; union cvmx_stxx_min_bst { @@ -250,10 +218,6 @@ union cvmx_stxx_min_bst { uint64_t reserved_9_63:55; #endif } s; - struct cvmx_stxx_min_bst_s cn38xx; - struct cvmx_stxx_min_bst_s cn38xxp2; - struct cvmx_stxx_min_bst_s cn58xx; - struct cvmx_stxx_min_bst_s cn58xxp1; }; union cvmx_stxx_spi4_calx { @@ -275,10 +239,6 @@ union cvmx_stxx_spi4_calx { uint64_t reserved_17_63:47; #endif } s; - struct cvmx_stxx_spi4_calx_s cn38xx; - struct cvmx_stxx_spi4_calx_s cn38xxp2; - struct cvmx_stxx_spi4_calx_s cn58xx; - struct cvmx_stxx_spi4_calx_s cn58xxp1; }; union cvmx_stxx_spi4_dat { @@ -294,10 +254,6 @@ union cvmx_stxx_spi4_dat { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_stxx_spi4_dat_s cn38xx; - struct cvmx_stxx_spi4_dat_s cn38xxp2; - struct cvmx_stxx_spi4_dat_s cn58xx; - struct cvmx_stxx_spi4_dat_s cn58xxp1; }; union cvmx_stxx_spi4_stat { @@ -315,10 +271,6 @@ union cvmx_stxx_spi4_stat { uint64_t reserved_16_63:48; #endif } s; - struct cvmx_stxx_spi4_stat_s cn38xx; - struct cvmx_stxx_spi4_stat_s cn38xxp2; - struct cvmx_stxx_spi4_stat_s cn58xx; - struct cvmx_stxx_spi4_stat_s cn58xxp1; }; union cvmx_stxx_stat_bytes_hi { @@ -332,10 +284,6 @@ union cvmx_stxx_stat_bytes_hi { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_stxx_stat_bytes_hi_s cn38xx; - struct cvmx_stxx_stat_bytes_hi_s cn38xxp2; - struct cvmx_stxx_stat_bytes_hi_s cn58xx; - struct cvmx_stxx_stat_bytes_hi_s cn58xxp1; }; union cvmx_stxx_stat_bytes_lo { @@ -349,10 +297,6 @@ union cvmx_stxx_stat_bytes_lo { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_stxx_stat_bytes_lo_s cn38xx; - struct cvmx_stxx_stat_bytes_lo_s cn38xxp2; - struct cvmx_stxx_stat_bytes_lo_s cn58xx; - 
struct cvmx_stxx_stat_bytes_lo_s cn58xxp1; }; union cvmx_stxx_stat_ctl { @@ -368,10 +312,6 @@ union cvmx_stxx_stat_ctl { uint64_t reserved_5_63:59; #endif } s; - struct cvmx_stxx_stat_ctl_s cn38xx; - struct cvmx_stxx_stat_ctl_s cn38xxp2; - struct cvmx_stxx_stat_ctl_s cn58xx; - struct cvmx_stxx_stat_ctl_s cn58xxp1; }; union cvmx_stxx_stat_pkt_xmt { @@ -385,10 +325,6 @@ union cvmx_stxx_stat_pkt_xmt { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_stxx_stat_pkt_xmt_s cn38xx; - struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2; - struct cvmx_stxx_stat_pkt_xmt_s cn58xx; - struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1; }; #endif diff --git a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h index bc5b80c6bbe2..6cf2280166dd 100644 --- a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h @@ -63,13 +63,6 @@ union cvmx_uctlx_bist_status { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_uctlx_bist_status_s cn61xx; - struct cvmx_uctlx_bist_status_s cn63xx; - struct cvmx_uctlx_bist_status_s cn63xxp1; - struct cvmx_uctlx_bist_status_s cn66xx; - struct cvmx_uctlx_bist_status_s cn68xx; - struct cvmx_uctlx_bist_status_s cn68xxp1; - struct cvmx_uctlx_bist_status_s cnf71xx; }; union cvmx_uctlx_clk_rst_ctl { @@ -121,13 +114,6 @@ union cvmx_uctlx_clk_rst_ctl { uint64_t reserved_25_63:39; #endif } s; - struct cvmx_uctlx_clk_rst_ctl_s cn61xx; - struct cvmx_uctlx_clk_rst_ctl_s cn63xx; - struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1; - struct cvmx_uctlx_clk_rst_ctl_s cn66xx; - struct cvmx_uctlx_clk_rst_ctl_s cn68xx; - struct cvmx_uctlx_clk_rst_ctl_s cn68xxp1; - struct cvmx_uctlx_clk_rst_ctl_s cnf71xx; }; union cvmx_uctlx_ehci_ctl { @@ -161,13 +147,6 @@ union cvmx_uctlx_ehci_ctl { uint64_t reserved_20_63:44; #endif } s; - struct cvmx_uctlx_ehci_ctl_s cn61xx; - struct cvmx_uctlx_ehci_ctl_s cn63xx; - struct cvmx_uctlx_ehci_ctl_s cn63xxp1; - struct cvmx_uctlx_ehci_ctl_s cn66xx; - struct cvmx_uctlx_ehci_ctl_s cn68xx; - struct cvmx_uctlx_ehci_ctl_s cn68xxp1; - struct cvmx_uctlx_ehci_ctl_s cnf71xx; }; union cvmx_uctlx_ehci_fla { @@ -181,13 +160,6 @@ union cvmx_uctlx_ehci_fla { uint64_t reserved_6_63:58; #endif } s; - struct cvmx_uctlx_ehci_fla_s cn61xx; - struct cvmx_uctlx_ehci_fla_s cn63xx; - struct cvmx_uctlx_ehci_fla_s cn63xxp1; - struct cvmx_uctlx_ehci_fla_s cn66xx; - struct cvmx_uctlx_ehci_fla_s cn68xx; - struct cvmx_uctlx_ehci_fla_s cn68xxp1; - struct cvmx_uctlx_ehci_fla_s cnf71xx; }; union cvmx_uctlx_erto_ctl { @@ -203,13 +175,6 @@ union cvmx_uctlx_erto_ctl { uint64_t reserved_32_63:32; #endif } s; - struct cvmx_uctlx_erto_ctl_s cn61xx; - struct cvmx_uctlx_erto_ctl_s cn63xx; - struct cvmx_uctlx_erto_ctl_s cn63xxp1; - struct cvmx_uctlx_erto_ctl_s cn66xx; - struct cvmx_uctlx_erto_ctl_s cn68xx; - struct cvmx_uctlx_erto_ctl_s cn68xxp1; - struct cvmx_uctlx_erto_ctl_s cnf71xx; }; union cvmx_uctlx_if_ena { @@ -223,13 +188,6 @@ union cvmx_uctlx_if_ena { uint64_t reserved_1_63:63; #endif } s; - struct cvmx_uctlx_if_ena_s cn61xx; - struct cvmx_uctlx_if_ena_s cn63xx; - struct cvmx_uctlx_if_ena_s cn63xxp1; - struct cvmx_uctlx_if_ena_s cn66xx; - struct cvmx_uctlx_if_ena_s cn68xx; - struct cvmx_uctlx_if_ena_s cn68xxp1; - struct cvmx_uctlx_if_ena_s cnf71xx; }; union cvmx_uctlx_int_ena { @@ -257,13 +215,6 @@ union cvmx_uctlx_int_ena { uint64_t reserved_8_63:56; #endif } s; - struct cvmx_uctlx_int_ena_s cn61xx; - struct cvmx_uctlx_int_ena_s cn63xx; - struct cvmx_uctlx_int_ena_s cn63xxp1; - struct cvmx_uctlx_int_ena_s cn66xx; - struct 
cvmx_uctlx_int_ena_s cn68xx;
-	struct cvmx_uctlx_int_ena_s cn68xxp1;
-	struct cvmx_uctlx_int_ena_s cnf71xx;
 };
 
 union cvmx_uctlx_int_reg {
@@ -291,13 +242,6 @@ union cvmx_uctlx_int_reg {
 		uint64_t reserved_8_63:56;
 #endif
 	} s;
-	struct cvmx_uctlx_int_reg_s cn61xx;
-	struct cvmx_uctlx_int_reg_s cn63xx;
-	struct cvmx_uctlx_int_reg_s cn63xxp1;
-	struct cvmx_uctlx_int_reg_s cn66xx;
-	struct cvmx_uctlx_int_reg_s cn68xx;
-	struct cvmx_uctlx_int_reg_s cn68xxp1;
-	struct cvmx_uctlx_int_reg_s cnf71xx;
 };
 
 union cvmx_uctlx_ohci_ctl {
@@ -329,13 +273,6 @@ union cvmx_uctlx_ohci_ctl {
 		uint64_t reserved_19_63:45;
 #endif
 	} s;
-	struct cvmx_uctlx_ohci_ctl_s cn61xx;
-	struct cvmx_uctlx_ohci_ctl_s cn63xx;
-	struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
-	struct cvmx_uctlx_ohci_ctl_s cn66xx;
-	struct cvmx_uctlx_ohci_ctl_s cn68xx;
-	struct cvmx_uctlx_ohci_ctl_s cn68xxp1;
-	struct cvmx_uctlx_ohci_ctl_s cnf71xx;
 };
 
 union cvmx_uctlx_orto_ctl {
@@ -351,13 +288,6 @@ union cvmx_uctlx_orto_ctl {
 		uint64_t reserved_32_63:32;
 #endif
 	} s;
-	struct cvmx_uctlx_orto_ctl_s cn61xx;
-	struct cvmx_uctlx_orto_ctl_s cn63xx;
-	struct cvmx_uctlx_orto_ctl_s cn63xxp1;
-	struct cvmx_uctlx_orto_ctl_s cn66xx;
-	struct cvmx_uctlx_orto_ctl_s cn68xx;
-	struct cvmx_uctlx_orto_ctl_s cn68xxp1;
-	struct cvmx_uctlx_orto_ctl_s cnf71xx;
 };
 
 union cvmx_uctlx_ppaf_wm {
@@ -371,11 +301,6 @@ union cvmx_uctlx_ppaf_wm {
 		uint64_t reserved_5_63:59;
 #endif
 	} s;
-	struct cvmx_uctlx_ppaf_wm_s cn61xx;
-	struct cvmx_uctlx_ppaf_wm_s cn63xx;
-	struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
-	struct cvmx_uctlx_ppaf_wm_s cn66xx;
-	struct cvmx_uctlx_ppaf_wm_s cnf71xx;
 };
 
 union cvmx_uctlx_uphy_ctl_status {
@@ -407,13 +332,6 @@ union cvmx_uctlx_uphy_ctl_status {
 		uint64_t reserved_10_63:54;
 #endif
 	} s;
-	struct cvmx_uctlx_uphy_ctl_status_s cn61xx;
-	struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
-	struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
-	struct cvmx_uctlx_uphy_ctl_status_s cn66xx;
-	struct cvmx_uctlx_uphy_ctl_status_s cn68xx;
-	struct cvmx_uctlx_uphy_ctl_status_s cn68xxp1;
-	struct cvmx_uctlx_uphy_ctl_status_s cnf71xx;
 };
 
 union cvmx_uctlx_uphy_portx_ctl_status {
@@ -463,13 +381,6 @@ union cvmx_uctlx_uphy_portx_ctl_status {
 		uint64_t reserved_43_63:21;
 #endif
 	} s;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn61xx;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn66xx;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xx;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xxp1;
-	struct cvmx_uctlx_uphy_portx_ctl_status_s cnf71xx;
 };
 
 #endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index e8cc328fce2d..6b31c93b5eaa 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -154,6 +154,7 @@ typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
+#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)
 
 /*
  * On R4000-style MMUs where a TLB entry is mapping a adjacent even / odd
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 0036ea0c7173..93a9dce31f25 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
 
 static inline int pmd_present(pmd_t pmd)
 {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
 	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
 }
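A note on the pmd_present() hunk above, since the reasoning is easy to miss: with CONFIG_MIPS_HUGE_TLB_SUPPORT a PMD can be a leaf entry that maps a huge page directly instead of pointing at a PTE table, and such an entry never equals invalid_pte_table even when the page is no longer present (for example while it is being split or migrated). The user-space sketch below mirrors that logic; the bit values and the sentinel are invented for illustration and are not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real _PAGE_* layout is arch-specific. */
#define _PAGE_PRESENT	(1UL << 0)
#define _PAGE_HUGE	(1UL << 4)

static uintptr_t invalid_pte_table = 0xdeadb000;	/* stand-in sentinel */

static int pmd_present(uintptr_t pmd_val)
{
	/* A huge-page PMD is a leaf: test its present bit directly. */
	if (pmd_val & _PAGE_HUGE)
		return pmd_val & _PAGE_PRESENT;

	/* A table PMD is present iff it points at a real PTE table. */
	return pmd_val != invalid_pte_table;
}

int main(void)
{
	uintptr_t huge_not_present = _PAGE_HUGE;

	/* Without the huge-page check this would wrongly report 1. */
	printf("%d\n", pmd_present(huge_not_present));
	return 0;
}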
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 129e0328367f..57933fc8fd98 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -214,8 +214,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set	arch=r4000			\n"
 		"	.set	push				\n"
+		"	.set	arch=r4000			\n"
 		"	.set	noreorder			\n"
 		"1:"	__LL	"%[tmp], %[buddy]		\n"
 		"	bnez	%[tmp], 2f			\n"
@@ -225,13 +225,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		"	nop					\n"
 		"2:						\n"
 		"	.set	pop				\n"
-		"	.set	mips0				\n"
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
 	} else if (kernel_uses_llsc) {
 		__asm__ __volatile__ (
-		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 		"	.set	push				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 		"	.set	noreorder			\n"
 		"1:"	__LL	"%[tmp], %[buddy]		\n"
 		"	bnez	%[tmp], 2f			\n"
@@ -241,7 +240,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		"	nop					\n"
 		"2:						\n"
 		"	.set	pop				\n"
-		"	.set	mips0				\n"
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
 	}
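Both asm blocks in the set_pte() hunk above get the same treatment that several later hunks (for example the eret sequence in stackframe.h) apply: the ISA override moves inside a .set push / .set pop pair, and the hard-coded .set mips0 at the end is dropped, so the assembler's option state is restored to whatever it was before the block instead of being forced back to the mips0 baseline. A stand-alone sketch of the pattern, MIPS-specific and offered as a compile-time illustration only:

static inline unsigned long ll_probe(volatile unsigned long *p)
{
	unsigned long v;

	__asm__ __volatile__(
	"	.set	push			\n"	/* save current .set state */
	"	.set	arch=r4000		\n"	/* override only inside the pair */
	"	.set	noreorder		\n"
	"	ll	%[v], %[m]		\n"
	"	.set	pop			\n"	/* restore whatever was active */
	: [v] "=&r" (v), [m] "+m" (*p));

	return v;
}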
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ce3ed4d17813..aca909bd7841 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -255,8 +255,10 @@ struct thread_struct {
 	/* Saved cp0 stuff. */
 	unsigned long cp0_status;
 
+#ifdef CONFIG_MIPS_FP_SUPPORT
 	/* Saved fpu/fpu emulator stuff. */
 	struct mips_fpu_struct fpu FPU_ALIGN;
+#endif
 	/* Assigned branch delay slot 'emulation' frame */
 	atomic_t bd_emu_frame;
 	/* PC of the branch from a branch delay slot 'emulation' */
@@ -299,6 +301,17 @@ struct thread_struct {
 #define FPAFF_INIT
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define FPU_INIT						\
+	.fpu			= {				\
+		.fpr		= {{{0,},},},			\
+		.fcr31		= 0,				\
+		.msacsr		= 0,				\
+	},
+#else
+# define FPU_INIT
+#endif
+
 #define INIT_THREAD  {						\
 	/*							\
 	 * Saved main processor registers			\
@@ -321,11 +334,7 @@ struct thread_struct {
 	/*							\
 	 * Saved FPU/FPU emulator stuff				\
 	 */							\
-	.fpu			= {				\
-		.fpr		= {{{0,},},},			\
-		.fcr31		= 0,				\
-		.msacsr		= 0,				\
-	},							\
+	FPU_INIT						\
 	/*							\
 	 * FPU affinity state (null if not FPAFF)		\
 	 */							\
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index d19b2d65336b..7f4a32d3345a 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
 #include <linux/uaccess.h> /* for uaccess_kernel() */
 
 extern void (*r4k_blast_dcache)(void);
@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
+static inline void blast_##pfx##cache##lsize##_node(long node)		\
+{									\
+	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
+	unsigned long end = start + current_cpu_data.desc.waysize;	\
+	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
+	unsigned long ws_end = current_cpu_data.desc.ways <<		\
+			       current_cpu_data.desc.waybit;		\
+	unsigned long ws, addr;						\
+									\
+	for (ws = 0; ws < ws_end; ws += ws_inc)				\
+		for (addr = start; addr < end; addr += lsize * 32)	\
+			cache##lsize##_unroll32(addr|ws, indexop);	\
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
 #endif /* _ASM_R4KCACHE_H */
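The __BUILD_BLAST_CACHE_NODE() macro above stamps out blast_scache16_node() through blast_scache128_node(), one per supported line size; each walks every way and set of one node's L2 alias (CAC_BASE | nid_to_addrbase(node)) with index-writeback-invalidate operations. A caller is expected to pick the variant that matches the probed line size. The dispatch helper below is a hypothetical sketch of that selection, modeled on the r4k cache code's usual pattern rather than taken from this patch:

static void blast_scache_node(long node)
{
	/* current_cpu_data.scache.linesz holds the probed L2 line size. */
	switch (current_cpu_data.scache.linesz) {
	case 16:
		blast_scache16_node(node);
		break;
	case 32:
		blast_scache32_node(node);
		break;
	case 64:
		blast_scache64_node(node);
		break;
	case 128:
		blast_scache128_node(node);
		break;
	}
}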
- */ -#ifndef __linux__ -#error Use a Linux compiler or give up. -#endif - -/* * Definitions for the ISA levels * * With the introduction of MIPS32 / MIPS64 instruction sets definitions diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index f25dd1d83fb7..4abe387549ad 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -17,1085 +17,23 @@ #if _MIPS_SIM == _MIPS_SIM_ABI32 -/* - * Linux o32 style syscalls are in the range from 4000 to 4999. - */ -#define __NR_Linux 4000 -#define __NR_syscall (__NR_Linux + 0) -#define __NR_exit (__NR_Linux + 1) -#define __NR_fork (__NR_Linux + 2) -#define __NR_read (__NR_Linux + 3) -#define __NR_write (__NR_Linux + 4) -#define __NR_open (__NR_Linux + 5) -#define __NR_close (__NR_Linux + 6) -#define __NR_waitpid (__NR_Linux + 7) -#define __NR_creat (__NR_Linux + 8) -#define __NR_link (__NR_Linux + 9) -#define __NR_unlink (__NR_Linux + 10) -#define __NR_execve (__NR_Linux + 11) -#define __NR_chdir (__NR_Linux + 12) -#define __NR_time (__NR_Linux + 13) -#define __NR_mknod (__NR_Linux + 14) -#define __NR_chmod (__NR_Linux + 15) -#define __NR_lchown (__NR_Linux + 16) -#define __NR_break (__NR_Linux + 17) -#define __NR_unused18 (__NR_Linux + 18) -#define __NR_lseek (__NR_Linux + 19) -#define __NR_getpid (__NR_Linux + 20) -#define __NR_mount (__NR_Linux + 21) -#define __NR_umount (__NR_Linux + 22) -#define __NR_setuid (__NR_Linux + 23) -#define __NR_getuid (__NR_Linux + 24) -#define __NR_stime (__NR_Linux + 25) -#define __NR_ptrace (__NR_Linux + 26) -#define __NR_alarm (__NR_Linux + 27) -#define __NR_unused28 (__NR_Linux + 28) -#define __NR_pause (__NR_Linux + 29) -#define __NR_utime (__NR_Linux + 30) -#define __NR_stty (__NR_Linux + 31) -#define __NR_gtty (__NR_Linux + 32) -#define __NR_access (__NR_Linux + 33) -#define __NR_nice (__NR_Linux + 34) -#define __NR_ftime (__NR_Linux + 35) -#define __NR_sync (__NR_Linux + 36) -#define __NR_kill (__NR_Linux + 37) -#define __NR_rename (__NR_Linux + 38) -#define __NR_mkdir (__NR_Linux + 39) -#define __NR_rmdir (__NR_Linux + 40) -#define __NR_dup (__NR_Linux + 41) -#define __NR_pipe (__NR_Linux + 42) -#define __NR_times (__NR_Linux + 43) -#define __NR_prof (__NR_Linux + 44) -#define __NR_brk (__NR_Linux + 45) -#define __NR_setgid (__NR_Linux + 46) -#define __NR_getgid (__NR_Linux + 47) -#define __NR_signal (__NR_Linux + 48) -#define __NR_geteuid (__NR_Linux + 49) -#define __NR_getegid (__NR_Linux + 50) -#define __NR_acct (__NR_Linux + 51) -#define __NR_umount2 (__NR_Linux + 52) -#define __NR_lock (__NR_Linux + 53) -#define __NR_ioctl (__NR_Linux + 54) -#define __NR_fcntl (__NR_Linux + 55) -#define __NR_mpx (__NR_Linux + 56) -#define __NR_setpgid (__NR_Linux + 57) -#define __NR_ulimit (__NR_Linux + 58) -#define __NR_unused59 (__NR_Linux + 59) -#define __NR_umask (__NR_Linux + 60) -#define __NR_chroot (__NR_Linux + 61) -#define __NR_ustat (__NR_Linux + 62) -#define __NR_dup2 (__NR_Linux + 63) -#define __NR_getppid (__NR_Linux + 64) -#define __NR_getpgrp (__NR_Linux + 65) -#define __NR_setsid (__NR_Linux + 66) -#define __NR_sigaction (__NR_Linux + 67) -#define __NR_sgetmask (__NR_Linux + 68) -#define __NR_ssetmask (__NR_Linux + 69) -#define __NR_setreuid (__NR_Linux + 70) -#define __NR_setregid (__NR_Linux + 71) -#define __NR_sigsuspend (__NR_Linux + 72) -#define __NR_sigpending (__NR_Linux + 73) -#define __NR_sethostname (__NR_Linux + 74) -#define __NR_setrlimit (__NR_Linux + 75) -#define __NR_getrlimit (__NR_Linux + 76) -#define __NR_getrusage 
(__NR_Linux + 77) -#define __NR_gettimeofday (__NR_Linux + 78) -#define __NR_settimeofday (__NR_Linux + 79) -#define __NR_getgroups (__NR_Linux + 80) -#define __NR_setgroups (__NR_Linux + 81) -#define __NR_reserved82 (__NR_Linux + 82) -#define __NR_symlink (__NR_Linux + 83) -#define __NR_unused84 (__NR_Linux + 84) -#define __NR_readlink (__NR_Linux + 85) -#define __NR_uselib (__NR_Linux + 86) -#define __NR_swapon (__NR_Linux + 87) -#define __NR_reboot (__NR_Linux + 88) -#define __NR_readdir (__NR_Linux + 89) -#define __NR_mmap (__NR_Linux + 90) -#define __NR_munmap (__NR_Linux + 91) -#define __NR_truncate (__NR_Linux + 92) -#define __NR_ftruncate (__NR_Linux + 93) -#define __NR_fchmod (__NR_Linux + 94) -#define __NR_fchown (__NR_Linux + 95) -#define __NR_getpriority (__NR_Linux + 96) -#define __NR_setpriority (__NR_Linux + 97) -#define __NR_profil (__NR_Linux + 98) -#define __NR_statfs (__NR_Linux + 99) -#define __NR_fstatfs (__NR_Linux + 100) -#define __NR_ioperm (__NR_Linux + 101) -#define __NR_socketcall (__NR_Linux + 102) -#define __NR_syslog (__NR_Linux + 103) -#define __NR_setitimer (__NR_Linux + 104) -#define __NR_getitimer (__NR_Linux + 105) -#define __NR_stat (__NR_Linux + 106) -#define __NR_lstat (__NR_Linux + 107) -#define __NR_fstat (__NR_Linux + 108) -#define __NR_unused109 (__NR_Linux + 109) -#define __NR_iopl (__NR_Linux + 110) -#define __NR_vhangup (__NR_Linux + 111) -#define __NR_idle (__NR_Linux + 112) -#define __NR_vm86 (__NR_Linux + 113) -#define __NR_wait4 (__NR_Linux + 114) -#define __NR_swapoff (__NR_Linux + 115) -#define __NR_sysinfo (__NR_Linux + 116) -#define __NR_ipc (__NR_Linux + 117) -#define __NR_fsync (__NR_Linux + 118) -#define __NR_sigreturn (__NR_Linux + 119) -#define __NR_clone (__NR_Linux + 120) -#define __NR_setdomainname (__NR_Linux + 121) -#define __NR_uname (__NR_Linux + 122) -#define __NR_modify_ldt (__NR_Linux + 123) -#define __NR_adjtimex (__NR_Linux + 124) -#define __NR_mprotect (__NR_Linux + 125) -#define __NR_sigprocmask (__NR_Linux + 126) -#define __NR_create_module (__NR_Linux + 127) -#define __NR_init_module (__NR_Linux + 128) -#define __NR_delete_module (__NR_Linux + 129) -#define __NR_get_kernel_syms (__NR_Linux + 130) -#define __NR_quotactl (__NR_Linux + 131) -#define __NR_getpgid (__NR_Linux + 132) -#define __NR_fchdir (__NR_Linux + 133) -#define __NR_bdflush (__NR_Linux + 134) -#define __NR_sysfs (__NR_Linux + 135) -#define __NR_personality (__NR_Linux + 136) -#define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */ -#define __NR_setfsuid (__NR_Linux + 138) -#define __NR_setfsgid (__NR_Linux + 139) -#define __NR__llseek (__NR_Linux + 140) -#define __NR_getdents (__NR_Linux + 141) -#define __NR__newselect (__NR_Linux + 142) -#define __NR_flock (__NR_Linux + 143) -#define __NR_msync (__NR_Linux + 144) -#define __NR_readv (__NR_Linux + 145) -#define __NR_writev (__NR_Linux + 146) -#define __NR_cacheflush (__NR_Linux + 147) -#define __NR_cachectl (__NR_Linux + 148) -#define __NR_sysmips (__NR_Linux + 149) -#define __NR_unused150 (__NR_Linux + 150) -#define __NR_getsid (__NR_Linux + 151) -#define __NR_fdatasync (__NR_Linux + 152) -#define __NR__sysctl (__NR_Linux + 153) -#define __NR_mlock (__NR_Linux + 154) -#define __NR_munlock (__NR_Linux + 155) -#define __NR_mlockall (__NR_Linux + 156) -#define __NR_munlockall (__NR_Linux + 157) -#define __NR_sched_setparam (__NR_Linux + 158) -#define __NR_sched_getparam (__NR_Linux + 159) -#define __NR_sched_setscheduler (__NR_Linux + 160) -#define __NR_sched_getscheduler 
(__NR_Linux + 161) -#define __NR_sched_yield (__NR_Linux + 162) -#define __NR_sched_get_priority_max (__NR_Linux + 163) -#define __NR_sched_get_priority_min (__NR_Linux + 164) -#define __NR_sched_rr_get_interval (__NR_Linux + 165) -#define __NR_nanosleep (__NR_Linux + 166) -#define __NR_mremap (__NR_Linux + 167) -#define __NR_accept (__NR_Linux + 168) -#define __NR_bind (__NR_Linux + 169) -#define __NR_connect (__NR_Linux + 170) -#define __NR_getpeername (__NR_Linux + 171) -#define __NR_getsockname (__NR_Linux + 172) -#define __NR_getsockopt (__NR_Linux + 173) -#define __NR_listen (__NR_Linux + 174) -#define __NR_recv (__NR_Linux + 175) -#define __NR_recvfrom (__NR_Linux + 176) -#define __NR_recvmsg (__NR_Linux + 177) -#define __NR_send (__NR_Linux + 178) -#define __NR_sendmsg (__NR_Linux + 179) -#define __NR_sendto (__NR_Linux + 180) -#define __NR_setsockopt (__NR_Linux + 181) -#define __NR_shutdown (__NR_Linux + 182) -#define __NR_socket (__NR_Linux + 183) -#define __NR_socketpair (__NR_Linux + 184) -#define __NR_setresuid (__NR_Linux + 185) -#define __NR_getresuid (__NR_Linux + 186) -#define __NR_query_module (__NR_Linux + 187) -#define __NR_poll (__NR_Linux + 188) -#define __NR_nfsservctl (__NR_Linux + 189) -#define __NR_setresgid (__NR_Linux + 190) -#define __NR_getresgid (__NR_Linux + 191) -#define __NR_prctl (__NR_Linux + 192) -#define __NR_rt_sigreturn (__NR_Linux + 193) -#define __NR_rt_sigaction (__NR_Linux + 194) -#define __NR_rt_sigprocmask (__NR_Linux + 195) -#define __NR_rt_sigpending (__NR_Linux + 196) -#define __NR_rt_sigtimedwait (__NR_Linux + 197) -#define __NR_rt_sigqueueinfo (__NR_Linux + 198) -#define __NR_rt_sigsuspend (__NR_Linux + 199) -#define __NR_pread64 (__NR_Linux + 200) -#define __NR_pwrite64 (__NR_Linux + 201) -#define __NR_chown (__NR_Linux + 202) -#define __NR_getcwd (__NR_Linux + 203) -#define __NR_capget (__NR_Linux + 204) -#define __NR_capset (__NR_Linux + 205) -#define __NR_sigaltstack (__NR_Linux + 206) -#define __NR_sendfile (__NR_Linux + 207) -#define __NR_getpmsg (__NR_Linux + 208) -#define __NR_putpmsg (__NR_Linux + 209) -#define __NR_mmap2 (__NR_Linux + 210) -#define __NR_truncate64 (__NR_Linux + 211) -#define __NR_ftruncate64 (__NR_Linux + 212) -#define __NR_stat64 (__NR_Linux + 213) -#define __NR_lstat64 (__NR_Linux + 214) -#define __NR_fstat64 (__NR_Linux + 215) -#define __NR_pivot_root (__NR_Linux + 216) -#define __NR_mincore (__NR_Linux + 217) -#define __NR_madvise (__NR_Linux + 218) -#define __NR_getdents64 (__NR_Linux + 219) -#define __NR_fcntl64 (__NR_Linux + 220) -#define __NR_reserved221 (__NR_Linux + 221) -#define __NR_gettid (__NR_Linux + 222) -#define __NR_readahead (__NR_Linux + 223) -#define __NR_setxattr (__NR_Linux + 224) -#define __NR_lsetxattr (__NR_Linux + 225) -#define __NR_fsetxattr (__NR_Linux + 226) -#define __NR_getxattr (__NR_Linux + 227) -#define __NR_lgetxattr (__NR_Linux + 228) -#define __NR_fgetxattr (__NR_Linux + 229) -#define __NR_listxattr (__NR_Linux + 230) -#define __NR_llistxattr (__NR_Linux + 231) -#define __NR_flistxattr (__NR_Linux + 232) -#define __NR_removexattr (__NR_Linux + 233) -#define __NR_lremovexattr (__NR_Linux + 234) -#define __NR_fremovexattr (__NR_Linux + 235) -#define __NR_tkill (__NR_Linux + 236) -#define __NR_sendfile64 (__NR_Linux + 237) -#define __NR_futex (__NR_Linux + 238) -#define __NR_sched_setaffinity (__NR_Linux + 239) -#define __NR_sched_getaffinity (__NR_Linux + 240) -#define __NR_io_setup (__NR_Linux + 241) -#define __NR_io_destroy (__NR_Linux + 242) -#define __NR_io_getevents 
(__NR_Linux + 243) -#define __NR_io_submit (__NR_Linux + 244) -#define __NR_io_cancel (__NR_Linux + 245) -#define __NR_exit_group (__NR_Linux + 246) -#define __NR_lookup_dcookie (__NR_Linux + 247) -#define __NR_epoll_create (__NR_Linux + 248) -#define __NR_epoll_ctl (__NR_Linux + 249) -#define __NR_epoll_wait (__NR_Linux + 250) -#define __NR_remap_file_pages (__NR_Linux + 251) -#define __NR_set_tid_address (__NR_Linux + 252) -#define __NR_restart_syscall (__NR_Linux + 253) -#define __NR_fadvise64 (__NR_Linux + 254) -#define __NR_statfs64 (__NR_Linux + 255) -#define __NR_fstatfs64 (__NR_Linux + 256) -#define __NR_timer_create (__NR_Linux + 257) -#define __NR_timer_settime (__NR_Linux + 258) -#define __NR_timer_gettime (__NR_Linux + 259) -#define __NR_timer_getoverrun (__NR_Linux + 260) -#define __NR_timer_delete (__NR_Linux + 261) -#define __NR_clock_settime (__NR_Linux + 262) -#define __NR_clock_gettime (__NR_Linux + 263) -#define __NR_clock_getres (__NR_Linux + 264) -#define __NR_clock_nanosleep (__NR_Linux + 265) -#define __NR_tgkill (__NR_Linux + 266) -#define __NR_utimes (__NR_Linux + 267) -#define __NR_mbind (__NR_Linux + 268) -#define __NR_get_mempolicy (__NR_Linux + 269) -#define __NR_set_mempolicy (__NR_Linux + 270) -#define __NR_mq_open (__NR_Linux + 271) -#define __NR_mq_unlink (__NR_Linux + 272) -#define __NR_mq_timedsend (__NR_Linux + 273) -#define __NR_mq_timedreceive (__NR_Linux + 274) -#define __NR_mq_notify (__NR_Linux + 275) -#define __NR_mq_getsetattr (__NR_Linux + 276) -#define __NR_vserver (__NR_Linux + 277) -#define __NR_waitid (__NR_Linux + 278) -/* #define __NR_sys_setaltroot (__NR_Linux + 279) */ -#define __NR_add_key (__NR_Linux + 280) -#define __NR_request_key (__NR_Linux + 281) -#define __NR_keyctl (__NR_Linux + 282) -#define __NR_set_thread_area (__NR_Linux + 283) -#define __NR_inotify_init (__NR_Linux + 284) -#define __NR_inotify_add_watch (__NR_Linux + 285) -#define __NR_inotify_rm_watch (__NR_Linux + 286) -#define __NR_migrate_pages (__NR_Linux + 287) -#define __NR_openat (__NR_Linux + 288) -#define __NR_mkdirat (__NR_Linux + 289) -#define __NR_mknodat (__NR_Linux + 290) -#define __NR_fchownat (__NR_Linux + 291) -#define __NR_futimesat (__NR_Linux + 292) -#define __NR_fstatat64 (__NR_Linux + 293) -#define __NR_unlinkat (__NR_Linux + 294) -#define __NR_renameat (__NR_Linux + 295) -#define __NR_linkat (__NR_Linux + 296) -#define __NR_symlinkat (__NR_Linux + 297) -#define __NR_readlinkat (__NR_Linux + 298) -#define __NR_fchmodat (__NR_Linux + 299) -#define __NR_faccessat (__NR_Linux + 300) -#define __NR_pselect6 (__NR_Linux + 301) -#define __NR_ppoll (__NR_Linux + 302) -#define __NR_unshare (__NR_Linux + 303) -#define __NR_splice (__NR_Linux + 304) -#define __NR_sync_file_range (__NR_Linux + 305) -#define __NR_tee (__NR_Linux + 306) -#define __NR_vmsplice (__NR_Linux + 307) -#define __NR_move_pages (__NR_Linux + 308) -#define __NR_set_robust_list (__NR_Linux + 309) -#define __NR_get_robust_list (__NR_Linux + 310) -#define __NR_kexec_load (__NR_Linux + 311) -#define __NR_getcpu (__NR_Linux + 312) -#define __NR_epoll_pwait (__NR_Linux + 313) -#define __NR_ioprio_set (__NR_Linux + 314) -#define __NR_ioprio_get (__NR_Linux + 315) -#define __NR_utimensat (__NR_Linux + 316) -#define __NR_signalfd (__NR_Linux + 317) -#define __NR_timerfd (__NR_Linux + 318) -#define __NR_eventfd (__NR_Linux + 319) -#define __NR_fallocate (__NR_Linux + 320) -#define __NR_timerfd_create (__NR_Linux + 321) -#define __NR_timerfd_gettime (__NR_Linux + 322) -#define __NR_timerfd_settime 
(__NR_Linux + 323) -#define __NR_signalfd4 (__NR_Linux + 324) -#define __NR_eventfd2 (__NR_Linux + 325) -#define __NR_epoll_create1 (__NR_Linux + 326) -#define __NR_dup3 (__NR_Linux + 327) -#define __NR_pipe2 (__NR_Linux + 328) -#define __NR_inotify_init1 (__NR_Linux + 329) -#define __NR_preadv (__NR_Linux + 330) -#define __NR_pwritev (__NR_Linux + 331) -#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) -#define __NR_perf_event_open (__NR_Linux + 333) -#define __NR_accept4 (__NR_Linux + 334) -#define __NR_recvmmsg (__NR_Linux + 335) -#define __NR_fanotify_init (__NR_Linux + 336) -#define __NR_fanotify_mark (__NR_Linux + 337) -#define __NR_prlimit64 (__NR_Linux + 338) -#define __NR_name_to_handle_at (__NR_Linux + 339) -#define __NR_open_by_handle_at (__NR_Linux + 340) -#define __NR_clock_adjtime (__NR_Linux + 341) -#define __NR_syncfs (__NR_Linux + 342) -#define __NR_sendmmsg (__NR_Linux + 343) -#define __NR_setns (__NR_Linux + 344) -#define __NR_process_vm_readv (__NR_Linux + 345) -#define __NR_process_vm_writev (__NR_Linux + 346) -#define __NR_kcmp (__NR_Linux + 347) -#define __NR_finit_module (__NR_Linux + 348) -#define __NR_sched_setattr (__NR_Linux + 349) -#define __NR_sched_getattr (__NR_Linux + 350) -#define __NR_renameat2 (__NR_Linux + 351) -#define __NR_seccomp (__NR_Linux + 352) -#define __NR_getrandom (__NR_Linux + 353) -#define __NR_memfd_create (__NR_Linux + 354) -#define __NR_bpf (__NR_Linux + 355) -#define __NR_execveat (__NR_Linux + 356) -#define __NR_userfaultfd (__NR_Linux + 357) -#define __NR_membarrier (__NR_Linux + 358) -#define __NR_mlock2 (__NR_Linux + 359) -#define __NR_copy_file_range (__NR_Linux + 360) -#define __NR_preadv2 (__NR_Linux + 361) -#define __NR_pwritev2 (__NR_Linux + 362) -#define __NR_pkey_mprotect (__NR_Linux + 363) -#define __NR_pkey_alloc (__NR_Linux + 364) -#define __NR_pkey_free (__NR_Linux + 365) -#define __NR_statx (__NR_Linux + 366) -#define __NR_rseq (__NR_Linux + 367) -#define __NR_io_pgetevents (__NR_Linux + 368) - - -/* - * Offset of the last Linux o32 flavoured syscall - */ -#define __NR_Linux_syscalls 368 +#define __NR_Linux 4000 +#include <asm/unistd_o32.h> #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ -#define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 368 - #if _MIPS_SIM == _MIPS_SIM_ABI64 -/* - * Linux 64-bit syscalls are in the range from 5000 to 5999. 
- */ -#define __NR_Linux 5000 -#define __NR_read (__NR_Linux + 0) -#define __NR_write (__NR_Linux + 1) -#define __NR_open (__NR_Linux + 2) -#define __NR_close (__NR_Linux + 3) -#define __NR_stat (__NR_Linux + 4) -#define __NR_fstat (__NR_Linux + 5) -#define __NR_lstat (__NR_Linux + 6) -#define __NR_poll (__NR_Linux + 7) -#define __NR_lseek (__NR_Linux + 8) -#define __NR_mmap (__NR_Linux + 9) -#define __NR_mprotect (__NR_Linux + 10) -#define __NR_munmap (__NR_Linux + 11) -#define __NR_brk (__NR_Linux + 12) -#define __NR_rt_sigaction (__NR_Linux + 13) -#define __NR_rt_sigprocmask (__NR_Linux + 14) -#define __NR_ioctl (__NR_Linux + 15) -#define __NR_pread64 (__NR_Linux + 16) -#define __NR_pwrite64 (__NR_Linux + 17) -#define __NR_readv (__NR_Linux + 18) -#define __NR_writev (__NR_Linux + 19) -#define __NR_access (__NR_Linux + 20) -#define __NR_pipe (__NR_Linux + 21) -#define __NR__newselect (__NR_Linux + 22) -#define __NR_sched_yield (__NR_Linux + 23) -#define __NR_mremap (__NR_Linux + 24) -#define __NR_msync (__NR_Linux + 25) -#define __NR_mincore (__NR_Linux + 26) -#define __NR_madvise (__NR_Linux + 27) -#define __NR_shmget (__NR_Linux + 28) -#define __NR_shmat (__NR_Linux + 29) -#define __NR_shmctl (__NR_Linux + 30) -#define __NR_dup (__NR_Linux + 31) -#define __NR_dup2 (__NR_Linux + 32) -#define __NR_pause (__NR_Linux + 33) -#define __NR_nanosleep (__NR_Linux + 34) -#define __NR_getitimer (__NR_Linux + 35) -#define __NR_setitimer (__NR_Linux + 36) -#define __NR_alarm (__NR_Linux + 37) -#define __NR_getpid (__NR_Linux + 38) -#define __NR_sendfile (__NR_Linux + 39) -#define __NR_socket (__NR_Linux + 40) -#define __NR_connect (__NR_Linux + 41) -#define __NR_accept (__NR_Linux + 42) -#define __NR_sendto (__NR_Linux + 43) -#define __NR_recvfrom (__NR_Linux + 44) -#define __NR_sendmsg (__NR_Linux + 45) -#define __NR_recvmsg (__NR_Linux + 46) -#define __NR_shutdown (__NR_Linux + 47) -#define __NR_bind (__NR_Linux + 48) -#define __NR_listen (__NR_Linux + 49) -#define __NR_getsockname (__NR_Linux + 50) -#define __NR_getpeername (__NR_Linux + 51) -#define __NR_socketpair (__NR_Linux + 52) -#define __NR_setsockopt (__NR_Linux + 53) -#define __NR_getsockopt (__NR_Linux + 54) -#define __NR_clone (__NR_Linux + 55) -#define __NR_fork (__NR_Linux + 56) -#define __NR_execve (__NR_Linux + 57) -#define __NR_exit (__NR_Linux + 58) -#define __NR_wait4 (__NR_Linux + 59) -#define __NR_kill (__NR_Linux + 60) -#define __NR_uname (__NR_Linux + 61) -#define __NR_semget (__NR_Linux + 62) -#define __NR_semop (__NR_Linux + 63) -#define __NR_semctl (__NR_Linux + 64) -#define __NR_shmdt (__NR_Linux + 65) -#define __NR_msgget (__NR_Linux + 66) -#define __NR_msgsnd (__NR_Linux + 67) -#define __NR_msgrcv (__NR_Linux + 68) -#define __NR_msgctl (__NR_Linux + 69) -#define __NR_fcntl (__NR_Linux + 70) -#define __NR_flock (__NR_Linux + 71) -#define __NR_fsync (__NR_Linux + 72) -#define __NR_fdatasync (__NR_Linux + 73) -#define __NR_truncate (__NR_Linux + 74) -#define __NR_ftruncate (__NR_Linux + 75) -#define __NR_getdents (__NR_Linux + 76) -#define __NR_getcwd (__NR_Linux + 77) -#define __NR_chdir (__NR_Linux + 78) -#define __NR_fchdir (__NR_Linux + 79) -#define __NR_rename (__NR_Linux + 80) -#define __NR_mkdir (__NR_Linux + 81) -#define __NR_rmdir (__NR_Linux + 82) -#define __NR_creat (__NR_Linux + 83) -#define __NR_link (__NR_Linux + 84) -#define __NR_unlink (__NR_Linux + 85) -#define __NR_symlink (__NR_Linux + 86) -#define __NR_readlink (__NR_Linux + 87) -#define __NR_chmod (__NR_Linux + 88) -#define __NR_fchmod (__NR_Linux + 
89) -#define __NR_chown (__NR_Linux + 90) -#define __NR_fchown (__NR_Linux + 91) -#define __NR_lchown (__NR_Linux + 92) -#define __NR_umask (__NR_Linux + 93) -#define __NR_gettimeofday (__NR_Linux + 94) -#define __NR_getrlimit (__NR_Linux + 95) -#define __NR_getrusage (__NR_Linux + 96) -#define __NR_sysinfo (__NR_Linux + 97) -#define __NR_times (__NR_Linux + 98) -#define __NR_ptrace (__NR_Linux + 99) -#define __NR_getuid (__NR_Linux + 100) -#define __NR_syslog (__NR_Linux + 101) -#define __NR_getgid (__NR_Linux + 102) -#define __NR_setuid (__NR_Linux + 103) -#define __NR_setgid (__NR_Linux + 104) -#define __NR_geteuid (__NR_Linux + 105) -#define __NR_getegid (__NR_Linux + 106) -#define __NR_setpgid (__NR_Linux + 107) -#define __NR_getppid (__NR_Linux + 108) -#define __NR_getpgrp (__NR_Linux + 109) -#define __NR_setsid (__NR_Linux + 110) -#define __NR_setreuid (__NR_Linux + 111) -#define __NR_setregid (__NR_Linux + 112) -#define __NR_getgroups (__NR_Linux + 113) -#define __NR_setgroups (__NR_Linux + 114) -#define __NR_setresuid (__NR_Linux + 115) -#define __NR_getresuid (__NR_Linux + 116) -#define __NR_setresgid (__NR_Linux + 117) -#define __NR_getresgid (__NR_Linux + 118) -#define __NR_getpgid (__NR_Linux + 119) -#define __NR_setfsuid (__NR_Linux + 120) -#define __NR_setfsgid (__NR_Linux + 121) -#define __NR_getsid (__NR_Linux + 122) -#define __NR_capget (__NR_Linux + 123) -#define __NR_capset (__NR_Linux + 124) -#define __NR_rt_sigpending (__NR_Linux + 125) -#define __NR_rt_sigtimedwait (__NR_Linux + 126) -#define __NR_rt_sigqueueinfo (__NR_Linux + 127) -#define __NR_rt_sigsuspend (__NR_Linux + 128) -#define __NR_sigaltstack (__NR_Linux + 129) -#define __NR_utime (__NR_Linux + 130) -#define __NR_mknod (__NR_Linux + 131) -#define __NR_personality (__NR_Linux + 132) -#define __NR_ustat (__NR_Linux + 133) -#define __NR_statfs (__NR_Linux + 134) -#define __NR_fstatfs (__NR_Linux + 135) -#define __NR_sysfs (__NR_Linux + 136) -#define __NR_getpriority (__NR_Linux + 137) -#define __NR_setpriority (__NR_Linux + 138) -#define __NR_sched_setparam (__NR_Linux + 139) -#define __NR_sched_getparam (__NR_Linux + 140) -#define __NR_sched_setscheduler (__NR_Linux + 141) -#define __NR_sched_getscheduler (__NR_Linux + 142) -#define __NR_sched_get_priority_max (__NR_Linux + 143) -#define __NR_sched_get_priority_min (__NR_Linux + 144) -#define __NR_sched_rr_get_interval (__NR_Linux + 145) -#define __NR_mlock (__NR_Linux + 146) -#define __NR_munlock (__NR_Linux + 147) -#define __NR_mlockall (__NR_Linux + 148) -#define __NR_munlockall (__NR_Linux + 149) -#define __NR_vhangup (__NR_Linux + 150) -#define __NR_pivot_root (__NR_Linux + 151) -#define __NR__sysctl (__NR_Linux + 152) -#define __NR_prctl (__NR_Linux + 153) -#define __NR_adjtimex (__NR_Linux + 154) -#define __NR_setrlimit (__NR_Linux + 155) -#define __NR_chroot (__NR_Linux + 156) -#define __NR_sync (__NR_Linux + 157) -#define __NR_acct (__NR_Linux + 158) -#define __NR_settimeofday (__NR_Linux + 159) -#define __NR_mount (__NR_Linux + 160) -#define __NR_umount2 (__NR_Linux + 161) -#define __NR_swapon (__NR_Linux + 162) -#define __NR_swapoff (__NR_Linux + 163) -#define __NR_reboot (__NR_Linux + 164) -#define __NR_sethostname (__NR_Linux + 165) -#define __NR_setdomainname (__NR_Linux + 166) -#define __NR_create_module (__NR_Linux + 167) -#define __NR_init_module (__NR_Linux + 168) -#define __NR_delete_module (__NR_Linux + 169) -#define __NR_get_kernel_syms (__NR_Linux + 170) -#define __NR_query_module (__NR_Linux + 171) -#define __NR_quotactl (__NR_Linux + 
172) -#define __NR_nfsservctl (__NR_Linux + 173) -#define __NR_getpmsg (__NR_Linux + 174) -#define __NR_putpmsg (__NR_Linux + 175) -#define __NR_afs_syscall (__NR_Linux + 176) -#define __NR_reserved177 (__NR_Linux + 177) -#define __NR_gettid (__NR_Linux + 178) -#define __NR_readahead (__NR_Linux + 179) -#define __NR_setxattr (__NR_Linux + 180) -#define __NR_lsetxattr (__NR_Linux + 181) -#define __NR_fsetxattr (__NR_Linux + 182) -#define __NR_getxattr (__NR_Linux + 183) -#define __NR_lgetxattr (__NR_Linux + 184) -#define __NR_fgetxattr (__NR_Linux + 185) -#define __NR_listxattr (__NR_Linux + 186) -#define __NR_llistxattr (__NR_Linux + 187) -#define __NR_flistxattr (__NR_Linux + 188) -#define __NR_removexattr (__NR_Linux + 189) -#define __NR_lremovexattr (__NR_Linux + 190) -#define __NR_fremovexattr (__NR_Linux + 191) -#define __NR_tkill (__NR_Linux + 192) -#define __NR_reserved193 (__NR_Linux + 193) -#define __NR_futex (__NR_Linux + 194) -#define __NR_sched_setaffinity (__NR_Linux + 195) -#define __NR_sched_getaffinity (__NR_Linux + 196) -#define __NR_cacheflush (__NR_Linux + 197) -#define __NR_cachectl (__NR_Linux + 198) -#define __NR_sysmips (__NR_Linux + 199) -#define __NR_io_setup (__NR_Linux + 200) -#define __NR_io_destroy (__NR_Linux + 201) -#define __NR_io_getevents (__NR_Linux + 202) -#define __NR_io_submit (__NR_Linux + 203) -#define __NR_io_cancel (__NR_Linux + 204) -#define __NR_exit_group (__NR_Linux + 205) -#define __NR_lookup_dcookie (__NR_Linux + 206) -#define __NR_epoll_create (__NR_Linux + 207) -#define __NR_epoll_ctl (__NR_Linux + 208) -#define __NR_epoll_wait (__NR_Linux + 209) -#define __NR_remap_file_pages (__NR_Linux + 210) -#define __NR_rt_sigreturn (__NR_Linux + 211) -#define __NR_set_tid_address (__NR_Linux + 212) -#define __NR_restart_syscall (__NR_Linux + 213) -#define __NR_semtimedop (__NR_Linux + 214) -#define __NR_fadvise64 (__NR_Linux + 215) -#define __NR_timer_create (__NR_Linux + 216) -#define __NR_timer_settime (__NR_Linux + 217) -#define __NR_timer_gettime (__NR_Linux + 218) -#define __NR_timer_getoverrun (__NR_Linux + 219) -#define __NR_timer_delete (__NR_Linux + 220) -#define __NR_clock_settime (__NR_Linux + 221) -#define __NR_clock_gettime (__NR_Linux + 222) -#define __NR_clock_getres (__NR_Linux + 223) -#define __NR_clock_nanosleep (__NR_Linux + 224) -#define __NR_tgkill (__NR_Linux + 225) -#define __NR_utimes (__NR_Linux + 226) -#define __NR_mbind (__NR_Linux + 227) -#define __NR_get_mempolicy (__NR_Linux + 228) -#define __NR_set_mempolicy (__NR_Linux + 229) -#define __NR_mq_open (__NR_Linux + 230) -#define __NR_mq_unlink (__NR_Linux + 231) -#define __NR_mq_timedsend (__NR_Linux + 232) -#define __NR_mq_timedreceive (__NR_Linux + 233) -#define __NR_mq_notify (__NR_Linux + 234) -#define __NR_mq_getsetattr (__NR_Linux + 235) -#define __NR_vserver (__NR_Linux + 236) -#define __NR_waitid (__NR_Linux + 237) -/* #define __NR_sys_setaltroot (__NR_Linux + 238) */ -#define __NR_add_key (__NR_Linux + 239) -#define __NR_request_key (__NR_Linux + 240) -#define __NR_keyctl (__NR_Linux + 241) -#define __NR_set_thread_area (__NR_Linux + 242) -#define __NR_inotify_init (__NR_Linux + 243) -#define __NR_inotify_add_watch (__NR_Linux + 244) -#define __NR_inotify_rm_watch (__NR_Linux + 245) -#define __NR_migrate_pages (__NR_Linux + 246) -#define __NR_openat (__NR_Linux + 247) -#define __NR_mkdirat (__NR_Linux + 248) -#define __NR_mknodat (__NR_Linux + 249) -#define __NR_fchownat (__NR_Linux + 250) -#define __NR_futimesat (__NR_Linux + 251) -#define __NR_newfstatat 
(__NR_Linux + 252) -#define __NR_unlinkat (__NR_Linux + 253) -#define __NR_renameat (__NR_Linux + 254) -#define __NR_linkat (__NR_Linux + 255) -#define __NR_symlinkat (__NR_Linux + 256) -#define __NR_readlinkat (__NR_Linux + 257) -#define __NR_fchmodat (__NR_Linux + 258) -#define __NR_faccessat (__NR_Linux + 259) -#define __NR_pselect6 (__NR_Linux + 260) -#define __NR_ppoll (__NR_Linux + 261) -#define __NR_unshare (__NR_Linux + 262) -#define __NR_splice (__NR_Linux + 263) -#define __NR_sync_file_range (__NR_Linux + 264) -#define __NR_tee (__NR_Linux + 265) -#define __NR_vmsplice (__NR_Linux + 266) -#define __NR_move_pages (__NR_Linux + 267) -#define __NR_set_robust_list (__NR_Linux + 268) -#define __NR_get_robust_list (__NR_Linux + 269) -#define __NR_kexec_load (__NR_Linux + 270) -#define __NR_getcpu (__NR_Linux + 271) -#define __NR_epoll_pwait (__NR_Linux + 272) -#define __NR_ioprio_set (__NR_Linux + 273) -#define __NR_ioprio_get (__NR_Linux + 274) -#define __NR_utimensat (__NR_Linux + 275) -#define __NR_signalfd (__NR_Linux + 276) -#define __NR_timerfd (__NR_Linux + 277) -#define __NR_eventfd (__NR_Linux + 278) -#define __NR_fallocate (__NR_Linux + 279) -#define __NR_timerfd_create (__NR_Linux + 280) -#define __NR_timerfd_gettime (__NR_Linux + 281) -#define __NR_timerfd_settime (__NR_Linux + 282) -#define __NR_signalfd4 (__NR_Linux + 283) -#define __NR_eventfd2 (__NR_Linux + 284) -#define __NR_epoll_create1 (__NR_Linux + 285) -#define __NR_dup3 (__NR_Linux + 286) -#define __NR_pipe2 (__NR_Linux + 287) -#define __NR_inotify_init1 (__NR_Linux + 288) -#define __NR_preadv (__NR_Linux + 289) -#define __NR_pwritev (__NR_Linux + 290) -#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) -#define __NR_perf_event_open (__NR_Linux + 292) -#define __NR_accept4 (__NR_Linux + 293) -#define __NR_recvmmsg (__NR_Linux + 294) -#define __NR_fanotify_init (__NR_Linux + 295) -#define __NR_fanotify_mark (__NR_Linux + 296) -#define __NR_prlimit64 (__NR_Linux + 297) -#define __NR_name_to_handle_at (__NR_Linux + 298) -#define __NR_open_by_handle_at (__NR_Linux + 299) -#define __NR_clock_adjtime (__NR_Linux + 300) -#define __NR_syncfs (__NR_Linux + 301) -#define __NR_sendmmsg (__NR_Linux + 302) -#define __NR_setns (__NR_Linux + 303) -#define __NR_process_vm_readv (__NR_Linux + 304) -#define __NR_process_vm_writev (__NR_Linux + 305) -#define __NR_kcmp (__NR_Linux + 306) -#define __NR_finit_module (__NR_Linux + 307) -#define __NR_getdents64 (__NR_Linux + 308) -#define __NR_sched_setattr (__NR_Linux + 309) -#define __NR_sched_getattr (__NR_Linux + 310) -#define __NR_renameat2 (__NR_Linux + 311) -#define __NR_seccomp (__NR_Linux + 312) -#define __NR_getrandom (__NR_Linux + 313) -#define __NR_memfd_create (__NR_Linux + 314) -#define __NR_bpf (__NR_Linux + 315) -#define __NR_execveat (__NR_Linux + 316) -#define __NR_userfaultfd (__NR_Linux + 317) -#define __NR_membarrier (__NR_Linux + 318) -#define __NR_mlock2 (__NR_Linux + 319) -#define __NR_copy_file_range (__NR_Linux + 320) -#define __NR_preadv2 (__NR_Linux + 321) -#define __NR_pwritev2 (__NR_Linux + 322) -#define __NR_pkey_mprotect (__NR_Linux + 323) -#define __NR_pkey_alloc (__NR_Linux + 324) -#define __NR_pkey_free (__NR_Linux + 325) -#define __NR_statx (__NR_Linux + 326) -#define __NR_rseq (__NR_Linux + 327) -#define __NR_io_pgetevents (__NR_Linux + 328) - -/* - * Offset of the last Linux 64-bit flavoured syscall - */ -#define __NR_Linux_syscalls 328 +#define __NR_Linux 5000 +#include <asm/unistd_n64.h> #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ -#define 
__NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 328 - #if _MIPS_SIM == _MIPS_SIM_NABI32 -/* - * Linux N32 syscalls are in the range from 6000 to 6999. - */ -#define __NR_Linux 6000 -#define __NR_read (__NR_Linux + 0) -#define __NR_write (__NR_Linux + 1) -#define __NR_open (__NR_Linux + 2) -#define __NR_close (__NR_Linux + 3) -#define __NR_stat (__NR_Linux + 4) -#define __NR_fstat (__NR_Linux + 5) -#define __NR_lstat (__NR_Linux + 6) -#define __NR_poll (__NR_Linux + 7) -#define __NR_lseek (__NR_Linux + 8) -#define __NR_mmap (__NR_Linux + 9) -#define __NR_mprotect (__NR_Linux + 10) -#define __NR_munmap (__NR_Linux + 11) -#define __NR_brk (__NR_Linux + 12) -#define __NR_rt_sigaction (__NR_Linux + 13) -#define __NR_rt_sigprocmask (__NR_Linux + 14) -#define __NR_ioctl (__NR_Linux + 15) -#define __NR_pread64 (__NR_Linux + 16) -#define __NR_pwrite64 (__NR_Linux + 17) -#define __NR_readv (__NR_Linux + 18) -#define __NR_writev (__NR_Linux + 19) -#define __NR_access (__NR_Linux + 20) -#define __NR_pipe (__NR_Linux + 21) -#define __NR__newselect (__NR_Linux + 22) -#define __NR_sched_yield (__NR_Linux + 23) -#define __NR_mremap (__NR_Linux + 24) -#define __NR_msync (__NR_Linux + 25) -#define __NR_mincore (__NR_Linux + 26) -#define __NR_madvise (__NR_Linux + 27) -#define __NR_shmget (__NR_Linux + 28) -#define __NR_shmat (__NR_Linux + 29) -#define __NR_shmctl (__NR_Linux + 30) -#define __NR_dup (__NR_Linux + 31) -#define __NR_dup2 (__NR_Linux + 32) -#define __NR_pause (__NR_Linux + 33) -#define __NR_nanosleep (__NR_Linux + 34) -#define __NR_getitimer (__NR_Linux + 35) -#define __NR_setitimer (__NR_Linux + 36) -#define __NR_alarm (__NR_Linux + 37) -#define __NR_getpid (__NR_Linux + 38) -#define __NR_sendfile (__NR_Linux + 39) -#define __NR_socket (__NR_Linux + 40) -#define __NR_connect (__NR_Linux + 41) -#define __NR_accept (__NR_Linux + 42) -#define __NR_sendto (__NR_Linux + 43) -#define __NR_recvfrom (__NR_Linux + 44) -#define __NR_sendmsg (__NR_Linux + 45) -#define __NR_recvmsg (__NR_Linux + 46) -#define __NR_shutdown (__NR_Linux + 47) -#define __NR_bind (__NR_Linux + 48) -#define __NR_listen (__NR_Linux + 49) -#define __NR_getsockname (__NR_Linux + 50) -#define __NR_getpeername (__NR_Linux + 51) -#define __NR_socketpair (__NR_Linux + 52) -#define __NR_setsockopt (__NR_Linux + 53) -#define __NR_getsockopt (__NR_Linux + 54) -#define __NR_clone (__NR_Linux + 55) -#define __NR_fork (__NR_Linux + 56) -#define __NR_execve (__NR_Linux + 57) -#define __NR_exit (__NR_Linux + 58) -#define __NR_wait4 (__NR_Linux + 59) -#define __NR_kill (__NR_Linux + 60) -#define __NR_uname (__NR_Linux + 61) -#define __NR_semget (__NR_Linux + 62) -#define __NR_semop (__NR_Linux + 63) -#define __NR_semctl (__NR_Linux + 64) -#define __NR_shmdt (__NR_Linux + 65) -#define __NR_msgget (__NR_Linux + 66) -#define __NR_msgsnd (__NR_Linux + 67) -#define __NR_msgrcv (__NR_Linux + 68) -#define __NR_msgctl (__NR_Linux + 69) -#define __NR_fcntl (__NR_Linux + 70) -#define __NR_flock (__NR_Linux + 71) -#define __NR_fsync (__NR_Linux + 72) -#define __NR_fdatasync (__NR_Linux + 73) -#define __NR_truncate (__NR_Linux + 74) -#define __NR_ftruncate (__NR_Linux + 75) -#define __NR_getdents (__NR_Linux + 76) -#define __NR_getcwd (__NR_Linux + 77) -#define __NR_chdir (__NR_Linux + 78) -#define __NR_fchdir (__NR_Linux + 79) -#define __NR_rename (__NR_Linux + 80) -#define __NR_mkdir (__NR_Linux + 81) -#define __NR_rmdir (__NR_Linux + 82) -#define __NR_creat (__NR_Linux + 83) -#define __NR_link (__NR_Linux + 84) -#define __NR_unlink (__NR_Linux + 85) 
-#define __NR_symlink (__NR_Linux + 86) -#define __NR_readlink (__NR_Linux + 87) -#define __NR_chmod (__NR_Linux + 88) -#define __NR_fchmod (__NR_Linux + 89) -#define __NR_chown (__NR_Linux + 90) -#define __NR_fchown (__NR_Linux + 91) -#define __NR_lchown (__NR_Linux + 92) -#define __NR_umask (__NR_Linux + 93) -#define __NR_gettimeofday (__NR_Linux + 94) -#define __NR_getrlimit (__NR_Linux + 95) -#define __NR_getrusage (__NR_Linux + 96) -#define __NR_sysinfo (__NR_Linux + 97) -#define __NR_times (__NR_Linux + 98) -#define __NR_ptrace (__NR_Linux + 99) -#define __NR_getuid (__NR_Linux + 100) -#define __NR_syslog (__NR_Linux + 101) -#define __NR_getgid (__NR_Linux + 102) -#define __NR_setuid (__NR_Linux + 103) -#define __NR_setgid (__NR_Linux + 104) -#define __NR_geteuid (__NR_Linux + 105) -#define __NR_getegid (__NR_Linux + 106) -#define __NR_setpgid (__NR_Linux + 107) -#define __NR_getppid (__NR_Linux + 108) -#define __NR_getpgrp (__NR_Linux + 109) -#define __NR_setsid (__NR_Linux + 110) -#define __NR_setreuid (__NR_Linux + 111) -#define __NR_setregid (__NR_Linux + 112) -#define __NR_getgroups (__NR_Linux + 113) -#define __NR_setgroups (__NR_Linux + 114) -#define __NR_setresuid (__NR_Linux + 115) -#define __NR_getresuid (__NR_Linux + 116) -#define __NR_setresgid (__NR_Linux + 117) -#define __NR_getresgid (__NR_Linux + 118) -#define __NR_getpgid (__NR_Linux + 119) -#define __NR_setfsuid (__NR_Linux + 120) -#define __NR_setfsgid (__NR_Linux + 121) -#define __NR_getsid (__NR_Linux + 122) -#define __NR_capget (__NR_Linux + 123) -#define __NR_capset (__NR_Linux + 124) -#define __NR_rt_sigpending (__NR_Linux + 125) -#define __NR_rt_sigtimedwait (__NR_Linux + 126) -#define __NR_rt_sigqueueinfo (__NR_Linux + 127) -#define __NR_rt_sigsuspend (__NR_Linux + 128) -#define __NR_sigaltstack (__NR_Linux + 129) -#define __NR_utime (__NR_Linux + 130) -#define __NR_mknod (__NR_Linux + 131) -#define __NR_personality (__NR_Linux + 132) -#define __NR_ustat (__NR_Linux + 133) -#define __NR_statfs (__NR_Linux + 134) -#define __NR_fstatfs (__NR_Linux + 135) -#define __NR_sysfs (__NR_Linux + 136) -#define __NR_getpriority (__NR_Linux + 137) -#define __NR_setpriority (__NR_Linux + 138) -#define __NR_sched_setparam (__NR_Linux + 139) -#define __NR_sched_getparam (__NR_Linux + 140) -#define __NR_sched_setscheduler (__NR_Linux + 141) -#define __NR_sched_getscheduler (__NR_Linux + 142) -#define __NR_sched_get_priority_max (__NR_Linux + 143) -#define __NR_sched_get_priority_min (__NR_Linux + 144) -#define __NR_sched_rr_get_interval (__NR_Linux + 145) -#define __NR_mlock (__NR_Linux + 146) -#define __NR_munlock (__NR_Linux + 147) -#define __NR_mlockall (__NR_Linux + 148) -#define __NR_munlockall (__NR_Linux + 149) -#define __NR_vhangup (__NR_Linux + 150) -#define __NR_pivot_root (__NR_Linux + 151) -#define __NR__sysctl (__NR_Linux + 152) -#define __NR_prctl (__NR_Linux + 153) -#define __NR_adjtimex (__NR_Linux + 154) -#define __NR_setrlimit (__NR_Linux + 155) -#define __NR_chroot (__NR_Linux + 156) -#define __NR_sync (__NR_Linux + 157) -#define __NR_acct (__NR_Linux + 158) -#define __NR_settimeofday (__NR_Linux + 159) -#define __NR_mount (__NR_Linux + 160) -#define __NR_umount2 (__NR_Linux + 161) -#define __NR_swapon (__NR_Linux + 162) -#define __NR_swapoff (__NR_Linux + 163) -#define __NR_reboot (__NR_Linux + 164) -#define __NR_sethostname (__NR_Linux + 165) -#define __NR_setdomainname (__NR_Linux + 166) -#define __NR_create_module (__NR_Linux + 167) -#define __NR_init_module (__NR_Linux + 168) -#define 
__NR_delete_module (__NR_Linux + 169) -#define __NR_get_kernel_syms (__NR_Linux + 170) -#define __NR_query_module (__NR_Linux + 171) -#define __NR_quotactl (__NR_Linux + 172) -#define __NR_nfsservctl (__NR_Linux + 173) -#define __NR_getpmsg (__NR_Linux + 174) -#define __NR_putpmsg (__NR_Linux + 175) -#define __NR_afs_syscall (__NR_Linux + 176) -#define __NR_reserved177 (__NR_Linux + 177) -#define __NR_gettid (__NR_Linux + 178) -#define __NR_readahead (__NR_Linux + 179) -#define __NR_setxattr (__NR_Linux + 180) -#define __NR_lsetxattr (__NR_Linux + 181) -#define __NR_fsetxattr (__NR_Linux + 182) -#define __NR_getxattr (__NR_Linux + 183) -#define __NR_lgetxattr (__NR_Linux + 184) -#define __NR_fgetxattr (__NR_Linux + 185) -#define __NR_listxattr (__NR_Linux + 186) -#define __NR_llistxattr (__NR_Linux + 187) -#define __NR_flistxattr (__NR_Linux + 188) -#define __NR_removexattr (__NR_Linux + 189) -#define __NR_lremovexattr (__NR_Linux + 190) -#define __NR_fremovexattr (__NR_Linux + 191) -#define __NR_tkill (__NR_Linux + 192) -#define __NR_reserved193 (__NR_Linux + 193) -#define __NR_futex (__NR_Linux + 194) -#define __NR_sched_setaffinity (__NR_Linux + 195) -#define __NR_sched_getaffinity (__NR_Linux + 196) -#define __NR_cacheflush (__NR_Linux + 197) -#define __NR_cachectl (__NR_Linux + 198) -#define __NR_sysmips (__NR_Linux + 199) -#define __NR_io_setup (__NR_Linux + 200) -#define __NR_io_destroy (__NR_Linux + 201) -#define __NR_io_getevents (__NR_Linux + 202) -#define __NR_io_submit (__NR_Linux + 203) -#define __NR_io_cancel (__NR_Linux + 204) -#define __NR_exit_group (__NR_Linux + 205) -#define __NR_lookup_dcookie (__NR_Linux + 206) -#define __NR_epoll_create (__NR_Linux + 207) -#define __NR_epoll_ctl (__NR_Linux + 208) -#define __NR_epoll_wait (__NR_Linux + 209) -#define __NR_remap_file_pages (__NR_Linux + 210) -#define __NR_rt_sigreturn (__NR_Linux + 211) -#define __NR_fcntl64 (__NR_Linux + 212) -#define __NR_set_tid_address (__NR_Linux + 213) -#define __NR_restart_syscall (__NR_Linux + 214) -#define __NR_semtimedop (__NR_Linux + 215) -#define __NR_fadvise64 (__NR_Linux + 216) -#define __NR_statfs64 (__NR_Linux + 217) -#define __NR_fstatfs64 (__NR_Linux + 218) -#define __NR_sendfile64 (__NR_Linux + 219) -#define __NR_timer_create (__NR_Linux + 220) -#define __NR_timer_settime (__NR_Linux + 221) -#define __NR_timer_gettime (__NR_Linux + 222) -#define __NR_timer_getoverrun (__NR_Linux + 223) -#define __NR_timer_delete (__NR_Linux + 224) -#define __NR_clock_settime (__NR_Linux + 225) -#define __NR_clock_gettime (__NR_Linux + 226) -#define __NR_clock_getres (__NR_Linux + 227) -#define __NR_clock_nanosleep (__NR_Linux + 228) -#define __NR_tgkill (__NR_Linux + 229) -#define __NR_utimes (__NR_Linux + 230) -#define __NR_mbind (__NR_Linux + 231) -#define __NR_get_mempolicy (__NR_Linux + 232) -#define __NR_set_mempolicy (__NR_Linux + 233) -#define __NR_mq_open (__NR_Linux + 234) -#define __NR_mq_unlink (__NR_Linux + 235) -#define __NR_mq_timedsend (__NR_Linux + 236) -#define __NR_mq_timedreceive (__NR_Linux + 237) -#define __NR_mq_notify (__NR_Linux + 238) -#define __NR_mq_getsetattr (__NR_Linux + 239) -#define __NR_vserver (__NR_Linux + 240) -#define __NR_waitid (__NR_Linux + 241) -/* #define __NR_sys_setaltroot (__NR_Linux + 242) */ -#define __NR_add_key (__NR_Linux + 243) -#define __NR_request_key (__NR_Linux + 244) -#define __NR_keyctl (__NR_Linux + 245) -#define __NR_set_thread_area (__NR_Linux + 246) -#define __NR_inotify_init (__NR_Linux + 247) -#define __NR_inotify_add_watch (__NR_Linux + 
248) -#define __NR_inotify_rm_watch (__NR_Linux + 249) -#define __NR_migrate_pages (__NR_Linux + 250) -#define __NR_openat (__NR_Linux + 251) -#define __NR_mkdirat (__NR_Linux + 252) -#define __NR_mknodat (__NR_Linux + 253) -#define __NR_fchownat (__NR_Linux + 254) -#define __NR_futimesat (__NR_Linux + 255) -#define __NR_newfstatat (__NR_Linux + 256) -#define __NR_unlinkat (__NR_Linux + 257) -#define __NR_renameat (__NR_Linux + 258) -#define __NR_linkat (__NR_Linux + 259) -#define __NR_symlinkat (__NR_Linux + 260) -#define __NR_readlinkat (__NR_Linux + 261) -#define __NR_fchmodat (__NR_Linux + 262) -#define __NR_faccessat (__NR_Linux + 263) -#define __NR_pselect6 (__NR_Linux + 264) -#define __NR_ppoll (__NR_Linux + 265) -#define __NR_unshare (__NR_Linux + 266) -#define __NR_splice (__NR_Linux + 267) -#define __NR_sync_file_range (__NR_Linux + 268) -#define __NR_tee (__NR_Linux + 269) -#define __NR_vmsplice (__NR_Linux + 270) -#define __NR_move_pages (__NR_Linux + 271) -#define __NR_set_robust_list (__NR_Linux + 272) -#define __NR_get_robust_list (__NR_Linux + 273) -#define __NR_kexec_load (__NR_Linux + 274) -#define __NR_getcpu (__NR_Linux + 275) -#define __NR_epoll_pwait (__NR_Linux + 276) -#define __NR_ioprio_set (__NR_Linux + 277) -#define __NR_ioprio_get (__NR_Linux + 278) -#define __NR_utimensat (__NR_Linux + 279) -#define __NR_signalfd (__NR_Linux + 280) -#define __NR_timerfd (__NR_Linux + 281) -#define __NR_eventfd (__NR_Linux + 282) -#define __NR_fallocate (__NR_Linux + 283) -#define __NR_timerfd_create (__NR_Linux + 284) -#define __NR_timerfd_gettime (__NR_Linux + 285) -#define __NR_timerfd_settime (__NR_Linux + 286) -#define __NR_signalfd4 (__NR_Linux + 287) -#define __NR_eventfd2 (__NR_Linux + 288) -#define __NR_epoll_create1 (__NR_Linux + 289) -#define __NR_dup3 (__NR_Linux + 290) -#define __NR_pipe2 (__NR_Linux + 291) -#define __NR_inotify_init1 (__NR_Linux + 292) -#define __NR_preadv (__NR_Linux + 293) -#define __NR_pwritev (__NR_Linux + 294) -#define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) -#define __NR_perf_event_open (__NR_Linux + 296) -#define __NR_accept4 (__NR_Linux + 297) -#define __NR_recvmmsg (__NR_Linux + 298) -#define __NR_getdents64 (__NR_Linux + 299) -#define __NR_fanotify_init (__NR_Linux + 300) -#define __NR_fanotify_mark (__NR_Linux + 301) -#define __NR_prlimit64 (__NR_Linux + 302) -#define __NR_name_to_handle_at (__NR_Linux + 303) -#define __NR_open_by_handle_at (__NR_Linux + 304) -#define __NR_clock_adjtime (__NR_Linux + 305) -#define __NR_syncfs (__NR_Linux + 306) -#define __NR_sendmmsg (__NR_Linux + 307) -#define __NR_setns (__NR_Linux + 308) -#define __NR_process_vm_readv (__NR_Linux + 309) -#define __NR_process_vm_writev (__NR_Linux + 310) -#define __NR_kcmp (__NR_Linux + 311) -#define __NR_finit_module (__NR_Linux + 312) -#define __NR_sched_setattr (__NR_Linux + 313) -#define __NR_sched_getattr (__NR_Linux + 314) -#define __NR_renameat2 (__NR_Linux + 315) -#define __NR_seccomp (__NR_Linux + 316) -#define __NR_getrandom (__NR_Linux + 317) -#define __NR_memfd_create (__NR_Linux + 318) -#define __NR_bpf (__NR_Linux + 319) -#define __NR_execveat (__NR_Linux + 320) -#define __NR_userfaultfd (__NR_Linux + 321) -#define __NR_membarrier (__NR_Linux + 322) -#define __NR_mlock2 (__NR_Linux + 323) -#define __NR_copy_file_range (__NR_Linux + 324) -#define __NR_preadv2 (__NR_Linux + 325) -#define __NR_pwritev2 (__NR_Linux + 326) -#define __NR_pkey_mprotect (__NR_Linux + 327) -#define __NR_pkey_alloc (__NR_Linux + 328) -#define __NR_pkey_free (__NR_Linux + 329) 
-#define __NR_statx (__NR_Linux + 330) -#define __NR_rseq (__NR_Linux + 331) -#define __NR_io_pgetevents (__NR_Linux + 332) - -/* - * Offset of the last N32 flavoured syscall - */ -#define __NR_Linux_syscalls 332 +#define __NR_Linux 6000 +#include <asm/unistd_n32.h> #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ -#define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 332 - #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 4c41ed0a637e..6256d35dbf4d 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -104,12 +104,12 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size) if (vdma_debug) printk("vdma_alloc: Invalid physical address: %08lx\n", paddr); - return VDMA_ERROR; /* invalid physical address */ + return DMA_MAPPING_ERROR; /* invalid physical address */ } if (size > 0x400000 || size == 0) { if (vdma_debug) printk("vdma_alloc: Invalid size: %08lx\n", size); - return VDMA_ERROR; /* invalid physical address */ + return DMA_MAPPING_ERROR; /* invalid physical address */ } spin_lock_irqsave(&vdma_lock, flags); @@ -123,7 +123,7 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size) first < VDMA_PGTBL_ENTRIES) first++; if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */ spin_unlock_irqrestore(&vdma_lock, flags); - return VDMA_ERROR; + return DMA_MAPPING_ERROR; } last = first + 1; @@ -569,7 +569,7 @@ static void *jazz_dma_alloc(struct device *dev, size_t size, return NULL; *dma_handle = vdma_alloc(virt_to_phys(ret), size); - if (*dma_handle == VDMA_ERROR) { + if (*dma_handle == DMA_MAPPING_ERROR) { dma_direct_free_pages(dev, size, ret, *dma_handle, attrs); return NULL; } @@ -620,7 +620,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist, arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); sg->dma_address = vdma_alloc(sg_phys(sg), sg->length); - if (sg->dma_address == VDMA_ERROR) + if (sg->dma_address == DMA_MAPPING_ERROR) return 0; sg_dma_len(sg) = sg->length; } @@ -674,11 +674,6 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); } -static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == VDMA_ERROR; -} - const struct dma_map_ops jazz_dma_ops = { .alloc = jazz_dma_alloc, .free = jazz_dma_free, @@ -692,6 +687,5 @@ const struct dma_map_ops jazz_dma_ops = { .sync_sg_for_device = jazz_dma_sync_sg_for_device, .dma_supported = dma_direct_supported, .cache_sync = arch_dma_cache_sync, - .mapping_error = jazz_dma_mapping_error, }; EXPORT_SYMBOL(jazz_dma_ops); diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 210c2802cf4d..89b07ea8d249 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -42,9 +42,8 @@ sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o obj-y += $(sw-y) +obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o -obj-$(CONFIG_CPU_R3000) += r2300_fpu.o -obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o @@ -72,7 +71,7 @@ obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_32BIT) += scall32-o32.o -obj-$(CONFIG_64BIT) += scall64-64.o +obj-$(CONFIG_64BIT) += scall64-n64.o obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o 
scall64-o32.o signal_o32.o diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index cbe4742d2fff..aebfda81120a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -123,7 +123,6 @@ void output_thread_defines(void) OFFSET(THREAD_REG31, task_struct, thread.reg31); OFFSET(THREAD_STATUS, task_struct, thread.cp0_status); - OFFSET(THREAD_FPU, task_struct, thread.fpu); OFFSET(THREAD_BVADDR, task_struct, \ thread.cp0_badvaddr); @@ -135,8 +134,11 @@ void output_thread_defines(void) BLANK(); } +#ifdef CONFIG_MIPS_FP_SUPPORT void output_thread_fpu_defines(void) { + OFFSET(THREAD_FPU, task_struct, thread.fpu); + OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); @@ -174,6 +176,7 @@ void output_thread_fpu_defines(void) OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); BLANK(); } +#endif void output_mm_defines(void) { @@ -341,6 +344,7 @@ void output_pm_defines(void) } #endif +#ifdef CONFIG_MIPS_FP_SUPPORT void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specific offsets. "); @@ -382,6 +386,7 @@ void output_kvm_defines(void) OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr); BLANK(); } +#endif #ifdef CONFIG_MIPS_CPS void output_cps_defines(void) diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S index adaa82e00f2b..9e422d186a17 100644 --- a/arch/mips/kernel/bmips_5xxx_init.S +++ b/arch/mips/kernel/bmips_5xxx_init.S @@ -632,12 +632,6 @@ core_init: bal set_zephyr nop -#if ENABLE_FPU==1 - /* initialize the Floating point unit (both TPs) */ - bal init_fpu - nop -#endif - /* set low latency memory bus */ li a0, 1 bal set_llmb diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index e48f6c0a9e4a..180ad081afcf 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -58,9 +58,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, unsigned long *contpc) { union mips_instruction insn = (union mips_instruction)dec_insn.insn; - int bc_false = 0; - unsigned int fcr31; - unsigned int bit; if (!cpu_has_mmips) return 0; @@ -139,8 +136,13 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; +#ifdef CONFIG_MIPS_FP_SUPPORT case mm_bc2f_op: - case mm_bc1f_op: + case mm_bc1f_op: { + int bc_false = 0; + unsigned int fcr31; + unsigned int bit; + bc_false = 1; /* Fall through */ case mm_bc2t_op: @@ -167,6 +169,8 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; } +#endif /* CONFIG_MIPS_FP_SUPPORT */ + } break; case mm_pool16c_op: switch (insn.mm_i_format.rt) { @@ -416,8 +420,8 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) int __compute_return_epc_for_insn(struct pt_regs *regs, union mips_instruction insn) { - unsigned int bit, fcr31, dspcontrol, reg; long epc = regs->cp0_epc; + unsigned int dspcontrol; int ret = 0; switch (insn.i_format.opcode) { @@ -447,6 +451,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzl_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -460,6 +465,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezl_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { 
epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -555,6 +561,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case jalx_op: case jal_op: regs->regs[31] = regs->cp0_epc + 8; + /* fall through */ case j_op: epc += 4; epc >>= 28; @@ -571,6 +578,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case beql_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { @@ -585,6 +593,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bnel_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { @@ -599,6 +608,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case blezl_op: /* not really i_format */ if (!insn.i_format.rt && NO_R6EMU) goto sigill_r2r6; + /* fall through */ case blez_op: /* * Compact branches for R6 for the @@ -634,6 +644,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgtzl_op: if (!insn.i_format.rt && NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bgtz_op: /* * Compact branches for R6 for the @@ -667,23 +678,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; +#ifdef CONFIG_MIPS_FP_SUPPORT /* * And now the FPA/cp1 branch instructions. */ - case cop1_op: + case cop1_op: { + unsigned int bit, fcr31, reg; + if (cpu_has_mips_r6 && ((insn.i_format.rs == bc1eqz_op) || (insn.i_format.rs == bc1nez_op))) { - if (!used_math()) { /* First time FPU user */ - ret = init_fpu(); - if (ret && NO_R6EMU) { - ret = -ret; - break; - } - ret = 0; - set_used_math(); - } - lose_fpu(1); /* Save FPU state for the emulator. */ + if (!init_fp_ctx(current)) + lose_fpu(1); reg = insn.i_format.rt; bit = get_fpr32(¤t->thread.fpu.fpr[reg], 0) & 0x1; if (insn.i_format.rs == bc1eqz_op) @@ -736,6 +742,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, } break; } + } +#endif /* CONFIG_MIPS_FP_SUPPORT */ + #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index c9e8622b5a16..bada74af7641 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -39,7 +39,7 @@ static inline void align_mod(const int align, const int mod) ".endr\n\t" ".set pop" : - : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod)); + : "n"(align), "n"(mod)); } static inline void mult_sh_align_mod(long *v1, long *v2, long *w, @@ -92,7 +92,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w, ".set pop" : "=&r" (lv1), "=r" (lw) : "r" (m1), "r" (m2), "r" (s), "I" (0) - : "hi", "lo", GCC_REG_ACCUM); + : "hi", "lo", "$0"); /* We have to use single integers for m1 and m2 and a double * one for p to be sure the mulsidi3 gcc's RTL multiplication * instruction has the workaround applied. Older versions of diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d535fc706a8b..95b18a194f53 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -36,6 +36,8 @@ unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); +#ifdef CONFIG_MIPS_FP_SUPPORT + /* * Get the FPU Implementation/Revision. 
*/ @@ -58,19 +60,6 @@ static inline int __cpu_has_fpu(void) return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; } -static inline unsigned long cpu_get_msa_id(void) -{ - unsigned long status, msa_id; - - status = read_c0_status(); - __enable_fpu(FPU_64BIT); - enable_msa(); - msa_id = read_msa_ir(); - disable_msa(); - write_c0_status(status); - return msa_id; -} - /* * Determine the FCSR mask for FPU hardware. */ @@ -326,6 +315,45 @@ static int __init fpu_disable(char *s) __setup("nofpu", fpu_disable); +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +#define mips_fpu_disabled 1 + +static inline unsigned long cpu_get_fpu_id(void) +{ + return FPIR_IMP_NONE; +} + +static inline int __cpu_has_fpu(void) +{ + return 0; +} + +static void cpu_set_fpu_opts(struct cpuinfo_mips *c) +{ + /* no-op */ +} + +static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) +{ + /* no-op */ +} + +#endif /* CONFIG_MIPS_FP_SUPPORT */ + +static inline unsigned long cpu_get_msa_id(void) +{ + unsigned long status, msa_id; + + status = read_c0_status(); + __enable_fpu(FPU_64BIT); + enable_msa(); + msa_id = read_msa_ir(); + disable_msa(); + write_c0_status(status); + return msa_id; +} + static int mips_dsp_disabled; static int __init dsp_disable(char *s) @@ -489,12 +517,16 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) switch (isa) { case MIPS_CPU_ISA_M64R2: c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2; + /* fall through */ case MIPS_CPU_ISA_M64R1: c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1; + /* fall through */ case MIPS_CPU_ISA_V: c->isa_level |= MIPS_CPU_ISA_V; + /* fall through */ case MIPS_CPU_ISA_IV: c->isa_level |= MIPS_CPU_ISA_IV; + /* fall through */ case MIPS_CPU_ISA_III: c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; @@ -502,14 +534,17 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) /* R6 incompatible with everything else */ case MIPS_CPU_ISA_M64R6: c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; + /* fall through */ case MIPS_CPU_ISA_M32R6: c->isa_level |= MIPS_CPU_ISA_M32R6; /* Break here so we don't add incompatible ISAs */ break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; + /* fall through */ case MIPS_CPU_ISA_M32R1: c->isa_level |= MIPS_CPU_ISA_M32R1; + /* fall through */ case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; break; @@ -1843,7 +1878,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */ switch (c->processor_id & PRID_REV_MASK) { - case PRID_REV_LOONGSON3A_R2: + case PRID_REV_LOONGSON3A_R2_0: + case PRID_REV_LOONGSON3A_R2_1: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 731325a61a78..72056d54a2b8 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -16,6 +16,8 @@ #include <asm/cpu-features.h> #include <asm/cpu-info.h> +#ifdef CONFIG_MIPS_FP_SUPPORT + /* Whether to accept legacy-NaN and 2008-NaN user binaries. 
*/ bool mips_use_nan_legacy; bool mips_use_nan_2008; @@ -326,6 +328,8 @@ void mips_set_personality_nan(struct arch_elf_state *state) } } +#endif /* CONFIG_MIPS_FP_SUPPORT */ + int mips_elf_read_implies_exec(void *elf_ex, int exstack) { if (exstack != EXSTACK_DISABLE_X) { diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index b122cbb4aad1..2ea0ec95efe9 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -400,13 +400,13 @@ unsigned long __init arch_syscall_addr(int nr) unsigned long __init arch_syscall_addr(int nr) { #ifdef CONFIG_MIPS32_N32 - if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls) + if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls) return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux]; #endif - if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls) + if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls) return (unsigned long)sys_call_table[nr - __NR_64_Linux]; #ifdef CONFIG_MIPS32_O32 - if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls) + if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls) return (unsigned long)sys32_call_table[nr - __NR_O32_Linux]; #endif diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 6c257b52f57f..398b905b027d 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -553,7 +553,9 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ +#ifdef CONFIG_MIPS_FP_SUPPORT BUILD_HANDLER fpe fpe fpe silent /* #15 */ +#endif BUILD_HANDLER ftlb ftlb none silent /* #16 */ BUILD_HANDLER msa msa sti silent /* #21 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ @@ -650,9 +652,10 @@ isrdhwr: ori k1, _THREAD_MASK xori k1, _THREAD_MASK LONG_L v1, TI_TP_VALUE(k1) + .set push .set arch=r4000 eret - .set mips0 + .set pop #endif .set pop END(handle_ri_rdhwr) diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 046846999efd..695f55477503 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -101,7 +101,8 @@ static void __cpuidle au1k_wait(void) unsigned long c0status = read_c0_status() | 1; /* irqs on */ __asm__( - " .set arch=r4000 \n" + " .set push \n" + " .set arch=r4000 \n" " cache 0x14, 0(%0) \n" " cache 0x14, 32(%0) \n" " sync \n" @@ -111,7 +112,7 @@ static void __cpuidle au1k_wait(void) " nop \n" " nop \n" " nop \n" - " .set mips0 \n" + " .set pop \n" : : "r" (au1k_wait), "r" (c0status)); } @@ -183,7 +184,7 @@ void __init check_wait(void) cpu_wait = r4k_wait; break; case CPU_LOONGSON3: - if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2) + if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0) cpu_wait = r4k_wait; break; diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 9f85b98d24ac..d5f7362e8c24 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -119,19 +119,11 @@ void mips_mt_regdump(unsigned long mvpctl) local_irq_restore(flags); } -static int mt_opt_norps; static int mt_opt_rpsctl = -1; static int mt_opt_nblsu = -1; static int mt_opt_forceconfig7; static int mt_opt_config7 = -1; -static int __init rps_disable(char *s) -{ - mt_opt_norps = 1; - return 1; -} -__setup("norps", rps_disable); - static int __init rpsctl_set(char *str) { get_option(&str, &mt_opt_rpsctl); @@ -169,9 +161,6 @@ void mips_mt_set_cpuoptions(void) unsigned int 
oconfig7 = read_c0_config7(); unsigned int nconfig7 = oconfig7; - if (mt_opt_norps) { - printk("\"norps\" option deprecated: use \"rpsctl=\"\n"); - } if (mt_opt_rpsctl >= 0) { printk("34K return prediction stack override set to %d.\n", mt_opt_rpsctl); diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index eb18b186e858..cb22a558431e 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -1174,13 -1174,6 @@ repeat: fpu_emul: regs->regs[31] = r31; regs->cp0_epc = epc; - if (!used_math()) { /* First time FPU user. */ - preempt_disable(); - err = init_fpu(); - preempt_enable(); - set_used_math(); - } - lose_fpu(1); /* Save FPU state for the emulator. */ err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, &fault_addr); @@ -2242,7 +2235,7 @@ fpu_emul: #ifdef CONFIG_DEBUG_FS -static int mipsr2_stats_show(struct seq_file *s, void *unused) +static int mipsr2_emul_show(struct seq_file *s, void *unused) { seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); @@ -2308,9 +2301,9 @@ static int mipsr2_stats_show(struct seq_file *s, void *unused) return 0; } -static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) +static int mipsr2_clear_show(struct seq_file *s, void *unused) { - mipsr2_stats_show(s, unused); + mipsr2_emul_show(s, unused); __this_cpu_write((mipsr2emustats).movs, 0); __this_cpu_write((mipsr2bdemustats).movs, 0); @@ -2353,30 +2346,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) return 0; } -static int mipsr2_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, mipsr2_stats_show, inode->i_private); -} - -static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) -{ - return single_open(file, mipsr2_stats_clear_show, inode->i_private); -} - -static const struct file_operations mipsr2_emul_fops = { - .open = mipsr2_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static const struct file_operations mipsr2_clear_fops = { - .open = mipsr2_stats_clear_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - +DEFINE_SHOW_ATTRIBUTE(mipsr2_emul); +DEFINE_SHOW_ATTRIBUTE(mipsr2_clear); static int __init mipsr2_init_debugfs(void) { diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d4f7fd4550e1..6829a064aac8 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -737,10 +737,9 @@ static long prepare_for_fp_mode_switch(void *unused) /* * This is icky, but we use this to simply ensure that all CPUs have * context switched, regardless of whether they were previously running - * kernel or user code. This ensures that no CPU currently has its FPU - * enabled, or is about to attempt to enable it through any path other - * than enable_restore_fp_context() which will wait appropriately for - * fp_mode_switching to be zero. + * kernel or user code. This ensures that no CPU that a mode-switching + * program may execute on keeps its FPU enabled (& in the old mode) + * throughout the mode switch.
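 */

/*
 * The comment above describes a "run a no-op on every CPU" barrier. A
 * minimal sketch of the pattern, using only the stock cpu-hotplug and
 * workqueue APIs (sync_context_switch_all_cpus() is an illustrative
 * name, not part of the patch):
 */
#include <linux/cpu.h>
#include <linux/workqueue.h>

static long nop_on_cpu(void *unused)
{
	/* Merely running here proves this CPU context switched. */
	return 0;
}

static void sync_context_switch_all_cpus(void)
{
	int cpu;

	get_online_cpus();		/* keep the online mask stable */
	for_each_online_cpu(cpu)
		work_on_cpu(cpu, nop_on_cpu, NULL);
	put_online_cpus();
}

/*
 * mips_set_process_fp_mode() below does exactly this with
 * prepare_for_fp_mode_switch(), which is why the wake_up_var()
 * handshake removed in the next hunk is no longer needed.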
*/ return 0; } @@ -829,8 +828,6 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL); put_online_cpus(); - wake_up_var(&task->mm->context.fp_mode_switching); - return 0; } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index e5ba56c01ee0..ea54575255ea 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -50,25 +50,6 @@ #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> -static void init_fp_ctx(struct task_struct *target) -{ - /* If FP has been used then the target already has context */ - if (tsk_used_math(target)) - return; - - /* Begin with data registers set to all 1s... */ - memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); - - /* FCSR has been preset by `mips_set_personality_nan'. */ - - /* - * Record that the target has "used" math, such that the context - * just initialised, and any modifications made by the caller, - * aren't discarded. - */ - set_stopped_child_used_math(target); -} - /* * Called by kernel/ptrace.c when detaching.. * @@ -81,21 +62,6 @@ void ptrace_disable(struct task_struct *child) } /* - * Poke at FCSR according to its mask. Set the Cause bits even - * if a corresponding Enable bit is set. This will be noticed at - * the time the thread is switched to and SIGFPE thrown accordingly. - */ -static void ptrace_setfcr31(struct task_struct *child, u32 value) -{ - u32 fcr31; - u32 mask; - - fcr31 = child->thread.fpu.fcr31; - mask = boot_cpu_data.fpu_msk31; - child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); -} - -/* * Read a general register set. We always use the 64-bit format, even * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. * Registers are sign extended to fill the available space. @@ -151,55 +117,6 @@ int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data) return 0; } -int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) -{ - int i; - - if (!access_ok(VERIFY_WRITE, data, 33 * 8)) - return -EIO; - - if (tsk_used_math(child)) { - union fpureg *fregs = get_fpu_regs(child); - for (i = 0; i < 32; i++) - __put_user(get_fpr64(&fregs[i], 0), - i + (__u64 __user *)data); - } else { - for (i = 0; i < 32; i++) - __put_user((__u64) -1, i + (__u64 __user *) data); - } - - __put_user(child->thread.fpu.fcr31, data + 64); - __put_user(boot_cpu_data.fpu_id, data + 65); - - return 0; -} - -int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) -{ - union fpureg *fregs; - u64 fpr_val; - u32 value; - int i; - - if (!access_ok(VERIFY_READ, data, 33 * 8)) - return -EIO; - - init_fp_ctx(child); - fregs = get_fpu_regs(child); - - for (i = 0; i < 32; i++) { - __get_user(fpr_val, i + (__u64 __user *)data); - set_fpr64(&fregs[i], 0, fpr_val); - } - - __get_user(value, data + 64); - ptrace_setfcr31(child, value); - - /* FIR may not be written. */ - - return 0; -} - int ptrace_get_watch_regs(struct task_struct *child, struct pt_watch_regs __user *addr) { @@ -420,6 +337,73 @@ static int gpr64_set(struct task_struct *target, #endif /* CONFIG_64BIT */ + +#ifdef CONFIG_MIPS_FP_SUPPORT + +/* + * Poke at FCSR according to its mask. Set the Cause bits even + * if a corresponding Enable bit is set. This will be noticed at + * the time the thread is switched to and SIGFPE thrown accordingly. 
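 */

/*
 * Restated as a pure function for clarity, this is the merge rule
 * ptrace_setfcr31() below implements, with boot_cpu_data.fpu_msk31
 * acting as the read-only mask (fcr31_merge() is an illustrative
 * name):
 */
#include <linux/types.h>

static inline u32 fcr31_merge(u32 old, u32 value, u32 ro_mask)
{
	/* Writable bits are taken from the new value... */
	u32 fcr31 = value & ~ro_mask;

	/* ...while masked (read-only) bits keep their old contents. */
	return fcr31 | (old & ro_mask);
}

/*
 * Note that Cause bits are deliberately writable here: a Cause bit set
 * alongside its Enable bit raises SIGFPE when the thread is next
 * switched in, as the comment above explains.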
+ */ +static void ptrace_setfcr31(struct task_struct *child, u32 value) +{ + u32 fcr31; + u32 mask; + + fcr31 = child->thread.fpu.fcr31; + mask = boot_cpu_data.fpu_msk31; + child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); +} + +int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) +{ + int i; + + if (!access_ok(VERIFY_WRITE, data, 33 * 8)) + return -EIO; + + if (tsk_used_math(child)) { + union fpureg *fregs = get_fpu_regs(child); + for (i = 0; i < 32; i++) + __put_user(get_fpr64(&fregs[i], 0), + i + (__u64 __user *)data); + } else { + for (i = 0; i < 32; i++) + __put_user((__u64) -1, i + (__u64 __user *) data); + } + + __put_user(child->thread.fpu.fcr31, data + 64); + __put_user(boot_cpu_data.fpu_id, data + 65); + + return 0; +} + +int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) +{ + union fpureg *fregs; + u64 fpr_val; + u32 value; + int i; + + if (!access_ok(VERIFY_READ, data, 33 * 8)) + return -EIO; + + init_fp_ctx(child); + fregs = get_fpu_regs(child); + + for (i = 0; i < 32; i++) { + __get_user(fpr_val, i + (__u64 __user *)data); + set_fpr64(&fregs[i], 0, fpr_val); + } + + __get_user(value, data + 64); + ptrace_setfcr31(child, value); + + /* FIR may not be written. */ + + return 0; +} + /* * Copy the floating-point context to the supplied NT_PRFPREG buffer, * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots @@ -590,6 +574,178 @@ static int fpr_set(struct task_struct *target, return err; } +/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */ +static int fp_mode_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int fp_mode; + + fp_mode = mips_get_process_fp_mode(target); + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0, + sizeof(fp_mode)); +} + +/* + * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting. + * + * We optimize for the case where `count % sizeof(int) == 0', which + * is supposed to have been guaranteed by the kernel before calling + * us, e.g. in `ptrace_regset'. We enforce that requirement, so + * that we can safely avoid preinitializing temporaries for partial + * mode writes. 
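 */

/*
 * From user space the regset below is reached with PTRACE_SETREGSET.
 * A hedged debugger-side example (the fallback #define covers older
 * UAPI headers; set_fp_mode() is a hypothetical helper):
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_MIPS_FP_MODE
#define NT_MIPS_FP_MODE	0x801
#endif

/* Set a stopped tracee's FP mode; the length is int-sized by design. */
static long set_fp_mode(pid_t pid, int mode)
{
	struct iovec iov = {
		.iov_base = &mode,
		.iov_len  = sizeof(mode),	/* count % sizeof(int) == 0 */
	};

	return ptrace(PTRACE_SETREGSET, pid, NT_MIPS_FP_MODE, &iov);
}

/*
 * The kernel-side writer, fp_mode_set(), follows.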
+ */ +static int fp_mode_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int fp_mode; + int err; + + BUG_ON(count % sizeof(int)); + + if (pos + count > sizeof(fp_mode)) + return -EIO; + + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0, + sizeof(fp_mode)); + if (err) + return err; + + if (count > 0) + err = mips_set_process_fp_mode(target, fp_mode); + + return err; +} + +#endif /* CONFIG_MIPS_FP_SUPPORT */ + +#ifdef CONFIG_CPU_HAS_MSA + +struct msa_control_regs { + unsigned int fir; + unsigned int fcsr; + unsigned int msair; + unsigned int msacsr; +}; + +static int copy_pad_fprs(struct task_struct *target, + const struct user_regset *regset, + unsigned int *ppos, unsigned int *pcount, + void **pkbuf, void __user **pubuf, + unsigned int live_sz) +{ + int i, j, start, start_pad, err; + unsigned long long fill = ~0ull; + unsigned int cp_sz, pad_sz; + + cp_sz = min(regset->size, live_sz); + pad_sz = regset->size - cp_sz; + WARN_ON(pad_sz % sizeof(fill)); + + i = start = err = 0; + for (; i < NUM_FPU_REGS; i++, start += regset->size) { + err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf, + &target->thread.fpu.fpr[i], + start, start + cp_sz); + + start_pad = start + cp_sz; + for (j = 0; j < (pad_sz / sizeof(fill)); j++) { + err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf, + &fill, start_pad, + start_pad + sizeof(fill)); + start_pad += sizeof(fill); + } + } + + return err; +} + +static int msa_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const unsigned int wr_size = NUM_FPU_REGS * regset->size; + const struct msa_control_regs ctrl_regs = { + .fir = boot_cpu_data.fpu_id, + .fcsr = target->thread.fpu.fcr31, + .msair = boot_cpu_data.msa_id, + .msacsr = target->thread.fpu.msacsr, + }; + int err; + + if (!tsk_used_math(target)) { + /* The task hasn't used FP or MSA, fill with 0xff */ + err = copy_pad_fprs(target, regset, &pos, &count, + &kbuf, &ubuf, 0); + } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) { + /* Copy scalar FP context, fill the rest with 0xff */ + err = copy_pad_fprs(target, regset, &pos, &count, + &kbuf, &ubuf, 8); + } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { + /* Trivially copy the vector registers */ + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fpr, + 0, wr_size); + } else { + /* Copy as much context as possible, fill the rest with 0xff */ + err = copy_pad_fprs(target, regset, &pos, &count, + &kbuf, &ubuf, + sizeof(target->thread.fpu.fpr[0])); + } + + err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &ctrl_regs, wr_size, + wr_size + sizeof(ctrl_regs)); + return err; +} + +static int msa_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + const unsigned int wr_size = NUM_FPU_REGS * regset->size; + struct msa_control_regs ctrl_regs; + unsigned int cp_sz; + int i, err, start; + + init_fp_ctx(target); + + if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { + /* Trivially copy the vector registers */ + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fpr, + 0, wr_size); + } else { + /* Copy as much context as possible */ + cp_sz = min_t(unsigned int, regset->size, + sizeof(target->thread.fpu.fpr[0])); + + i = start = err = 0; + for (; i < 
NUM_FPU_REGS; i++, start += regset->size) { + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fpr[i], + start, start + cp_sz); + } + } + + if (!err) + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs, + wr_size, wr_size + sizeof(ctrl_regs)); + if (!err) { + target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X; + target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF; + } + + return err; +} + +#endif /* CONFIG_CPU_HAS_MSA */ + #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) /* @@ -759,57 +915,16 @@ static int dsp_active(struct task_struct *target, return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV; } -/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */ -static int fp_mode_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) -{ - int fp_mode; - - fp_mode = mips_get_process_fp_mode(target); - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0, - sizeof(fp_mode)); -} - -/* - * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting. - * - * We optimize for the case where `count % sizeof(int) == 0', which - * is supposed to have been guaranteed by the kernel before calling - * us, e.g. in `ptrace_regset'. We enforce that requirement, so - * that we can safely avoid preinitializing temporaries for partial - * mode writes. - */ -static int fp_mode_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - int fp_mode; - int err; - - BUG_ON(count % sizeof(int)); - - if (pos + count > sizeof(fp_mode)) - return -EIO; - - err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0, - sizeof(fp_mode)); - if (err) - return err; - - if (count > 0) - err = mips_set_process_fp_mode(target, fp_mode); - - return err; -} - enum mips_regset { REGSET_GPR, - REGSET_FPR, REGSET_DSP, +#ifdef CONFIG_MIPS_FP_SUPPORT + REGSET_FPR, REGSET_FP_MODE, +#endif +#ifdef CONFIG_CPU_HAS_MSA + REGSET_MSA, +#endif }; struct pt_regs_offset { @@ -907,14 +1022,6 @@ static const struct user_regset mips_regsets[] = { .get = gpr32_get, .set = gpr32_set, }, - [REGSET_FPR] = { - .core_note_type = NT_PRFPREG, - .n = ELF_NFPREG, - .size = sizeof(elf_fpreg_t), - .align = sizeof(elf_fpreg_t), - .get = fpr_get, - .set = fpr_set, - }, [REGSET_DSP] = { .core_note_type = NT_MIPS_DSP, .n = NUM_DSP_REGS + 1, @@ -924,6 +1031,15 @@ static const struct user_regset mips_regsets[] = { .set = dsp32_set, .active = dsp_active, }, +#ifdef CONFIG_MIPS_FP_SUPPORT + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = ELF_NFPREG, + .size = sizeof(elf_fpreg_t), + .align = sizeof(elf_fpreg_t), + .get = fpr_get, + .set = fpr_set, + }, [REGSET_FP_MODE] = { .core_note_type = NT_MIPS_FP_MODE, .n = 1, @@ -932,6 +1048,17 @@ static const struct user_regset mips_regsets[] = { .get = fp_mode_get, .set = fp_mode_set, }, +#endif +#ifdef CONFIG_CPU_HAS_MSA + [REGSET_MSA] = { + .core_note_type = NT_MIPS_MSA, + .n = NUM_FPU_REGS + 1, + .size = 16, + .align = 16, + .get = msa_get, + .set = msa_set, + }, +#endif }; static const struct user_regset_view user_mips_view = { @@ -955,14 +1082,6 @@ static const struct user_regset mips64_regsets[] = { .get = gpr64_get, .set = gpr64_set, }, - [REGSET_FPR] = { - .core_note_type = NT_PRFPREG, - .n = ELF_NFPREG, - .size = sizeof(elf_fpreg_t), - .align = sizeof(elf_fpreg_t), - .get = fpr_get, - .set = fpr_set, - }, [REGSET_DSP] = { 
.core_note_type = NT_MIPS_DSP, .n = NUM_DSP_REGS + 1, @@ -972,6 +1091,7 @@ static const struct user_regset mips64_regsets[] = { .set = dsp64_set, .active = dsp_active, }, +#ifdef CONFIG_MIPS_FP_SUPPORT [REGSET_FP_MODE] = { .core_note_type = NT_MIPS_FP_MODE, .n = 1, @@ -980,6 +1100,25 @@ static const struct user_regset mips64_regsets[] = { .get = fp_mode_get, .set = fp_mode_set, }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = ELF_NFPREG, + .size = sizeof(elf_fpreg_t), + .align = sizeof(elf_fpreg_t), + .get = fpr_get, + .set = fpr_set, + }, +#endif +#ifdef CONFIG_CPU_HAS_MSA + [REGSET_MSA] = { + .core_note_type = NT_MIPS_MSA, + .n = NUM_FPU_REGS + 1, + .size = 16, + .align = 16, + .get = msa_get, + .set = msa_set, + }, +#endif }; static const struct user_regset_view user_mips64_view = { @@ -1040,7 +1179,6 @@ long arch_ptrace(struct task_struct *child, long request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; - union fpureg *fregs; unsigned long tmp = 0; regs = task_pt_regs(child); @@ -1050,7 +1188,10 @@ long arch_ptrace(struct task_struct *child, long request, case 0 ... 31: tmp = regs->regs[addr]; break; - case FPR_BASE ... FPR_BASE + 31: +#ifdef CONFIG_MIPS_FP_SUPPORT + case FPR_BASE ... FPR_BASE + 31: { + union fpureg *fregs; + if (!tsk_used_math(child)) { /* FP not yet used */ tmp = -1; @@ -1072,6 +1213,15 @@ long arch_ptrace(struct task_struct *child, long request, #endif tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; + } + case FPC_CSR: + tmp = child->thread.fpu.fcr31; + break; + case FPC_EIR: + /* implementation / version register */ + tmp = boot_cpu_data.fpu_id; + break; +#endif case PC: tmp = regs->cp0_epc; break; @@ -1092,13 +1242,6 @@ long arch_ptrace(struct task_struct *child, long request, tmp = regs->acx; break; #endif - case FPC_CSR: - tmp = child->thread.fpu.fcr31; - break; - case FPC_EIR: - /* implementation / version register */ - tmp = boot_cpu_data.fpu_id; - break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -1149,6 +1292,7 @@ long arch_ptrace(struct task_struct *child, long request, mips_syscall_is_indirect(child, regs)) mips_syscall_update_nr(child, regs); break; +#ifdef CONFIG_MIPS_FP_SUPPORT case FPR_BASE ... FPR_BASE + 31: { union fpureg *fregs = get_fpu_regs(child); @@ -1168,6 +1312,11 @@ long arch_ptrace(struct task_struct *child, long request, set_fpr64(&fregs[addr - FPR_BASE], 0, data); break; } + case FPC_CSR: + init_fp_ctx(child); + ptrace_setfcr31(child, data); + break; +#endif case PC: regs->cp0_epc = data; break; @@ -1182,10 +1331,6 @@ long arch_ptrace(struct task_struct *child, long request, regs->acx = data; break; #endif - case FPC_CSR: - init_fp_ctx(child); - ptrace_setfcr31(child, data); - break; case DSP_BASE ... 
DSP_BASE + 5: { dspreg_t *dregs; @@ -1221,6 +1366,7 @@ long arch_ptrace(struct task_struct *child, long request, ret = ptrace_setregs(child, datavp); break; +#ifdef CONFIG_MIPS_FP_SUPPORT case PTRACE_GETFPREGS: ret = ptrace_getfpregs(child, datavp); break; @@ -1228,7 +1374,7 @@ long arch_ptrace(struct task_struct *child, long request, case PTRACE_SETFPREGS: ret = ptrace_setfpregs(child, datavp); break; - +#endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value, datalp); break; diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index bc348d44d151..2525eca9c962 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -82,7 +82,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; - union fpureg *fregs; unsigned int tmp; regs = task_pt_regs(child); @@ -92,7 +91,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case 0 ... 31: tmp = regs->regs[addr]; break; - case FPR_BASE ... FPR_BASE + 31: +#ifdef CONFIG_MIPS_FP_SUPPORT + case FPR_BASE ... FPR_BASE + 31: { + union fpureg *fregs; + if (!tsk_used_math(child)) { /* FP not yet used */ tmp = -1; @@ -111,6 +113,15 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; + } + case FPC_CSR: + tmp = child->thread.fpu.fcr31; + break; + case FPC_EIR: + /* implementation / version register */ + tmp = boot_cpu_data.fpu_id; + break; +#endif /* CONFIG_MIPS_FP_SUPPORT */ case PC: tmp = regs->cp0_epc; break; @@ -126,13 +137,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case MMLO: tmp = regs->lo; break; - case FPC_CSR: - tmp = child->thread.fpu.fcr31; - break; - case FPC_EIR: - /* implementation / version register */ - tmp = boot_cpu_data.fpu_id; - break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -203,6 +207,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, mips_syscall_is_indirect(child, regs)) mips_syscall_update_nr(child, regs); break; +#ifdef CONFIG_MIPS_FP_SUPPORT case FPR_BASE ... FPR_BASE + 31: { union fpureg *fregs = get_fpu_regs(child); @@ -225,6 +230,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, set_fpr64(&fregs[addr - FPR_BASE], 0, data); break; } + case FPC_CSR: + child->thread.fpu.fcr31 = data; + break; +#endif /* CONFIG_MIPS_FP_SUPPORT */ case PC: regs->cp0_epc = data; break; @@ -234,9 +243,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case MMLO: regs->lo = data; break; - case FPC_CSR: - child->thread.fpu.fcr31 = data; - break; case DSP_BASE ... 
DSP_BASE + 5: { dspreg_t *dregs; @@ -274,6 +280,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, (struct user_pt_regs __user *) (__u64) data); break; +#ifdef CONFIG_MIPS_FP_SUPPORT case PTRACE_GETFPREGS: ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data); break; @@ -281,7 +288,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case PTRACE_SETFPREGS: ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data); break; - +#endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value, (unsigned int __user *) (unsigned long) data); diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 3062ba66c563..12e58053544f 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -52,64 +52,6 @@ LEAF(_restore_fp) jr ra END(_restore_fp) -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. - */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - - ctc1 a0, fcr31 - - li t0, -1 - - mtc1 t0, $f0 - mtc1 t0, $f1 - mtc1 t0, $f2 - mtc1 t0, $f3 - mtc1 t0, $f4 - mtc1 t0, $f5 - mtc1 t0, $f6 - mtc1 t0, $f7 - mtc1 t0, $f8 - mtc1 t0, $f9 - mtc1 t0, $f10 - mtc1 t0, $f11 - mtc1 t0, $f12 - mtc1 t0, $f13 - mtc1 t0, $f14 - mtc1 t0, $f15 - mtc1 t0, $f16 - mtc1 t0, $f17 - mtc1 t0, $f18 - mtc1 t0, $f19 - mtc1 t0, $f20 - mtc1 t0, $f21 - mtc1 t0, $f22 - mtc1 t0, $f23 - mtc1 t0, $f24 - mtc1 t0, $f25 - mtc1 t0, $f26 - mtc1 t0, $f27 - mtc1 t0, $f28 - mtc1 t0, $f29 - mtc1 t0, $f30 - mtc1 t0, $f31 - jr ra - END(_init_fpu) - - .set pop - .set noreorder /** diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 8e3a6020c613..59be5c812aa2 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -86,150 +86,6 @@ LEAF(_init_msa_upper) #endif -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. - */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - enable_fpu_hazard - - ctc1 a0, fcr31 - - li t1, -1 # SNaN - -#ifdef CONFIG_64BIT - sll t0, t0, 5 - bgez t0, 1f # 16 / 32 register mode? - - dmtc1 t1, $f1 - dmtc1 t1, $f3 - dmtc1 t1, $f5 - dmtc1 t1, $f7 - dmtc1 t1, $f9 - dmtc1 t1, $f11 - dmtc1 t1, $f13 - dmtc1 t1, $f15 - dmtc1 t1, $f17 - dmtc1 t1, $f19 - dmtc1 t1, $f21 - dmtc1 t1, $f23 - dmtc1 t1, $f25 - dmtc1 t1, $f27 - dmtc1 t1, $f29 - dmtc1 t1, $f31 -1: -#endif - -#ifdef CONFIG_CPU_MIPS32 - mtc1 t1, $f0 - mtc1 t1, $f1 - mtc1 t1, $f2 - mtc1 t1, $f3 - mtc1 t1, $f4 - mtc1 t1, $f5 - mtc1 t1, $f6 - mtc1 t1, $f7 - mtc1 t1, $f8 - mtc1 t1, $f9 - mtc1 t1, $f10 - mtc1 t1, $f11 - mtc1 t1, $f12 - mtc1 t1, $f13 - mtc1 t1, $f14 - mtc1 t1, $f15 - mtc1 t1, $f16 - mtc1 t1, $f17 - mtc1 t1, $f18 - mtc1 t1, $f19 - mtc1 t1, $f20 - mtc1 t1, $f21 - mtc1 t1, $f22 - mtc1 t1, $f23 - mtc1 t1, $f24 - mtc1 t1, $f25 - mtc1 t1, $f26 - mtc1 t1, $f27 - mtc1 t1, $f28 - mtc1 t1, $f29 - mtc1 t1, $f30 - mtc1 t1, $f31 - -#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) - .set push - .set MIPS_ISA_LEVEL_RAW - .set fp=64 - sll t0, t0, 5 # is Status.FR set? 
- bgez t0, 1f # no: skip setting upper 32b - - mthc1 t1, $f0 - mthc1 t1, $f1 - mthc1 t1, $f2 - mthc1 t1, $f3 - mthc1 t1, $f4 - mthc1 t1, $f5 - mthc1 t1, $f6 - mthc1 t1, $f7 - mthc1 t1, $f8 - mthc1 t1, $f9 - mthc1 t1, $f10 - mthc1 t1, $f11 - mthc1 t1, $f12 - mthc1 t1, $f13 - mthc1 t1, $f14 - mthc1 t1, $f15 - mthc1 t1, $f16 - mthc1 t1, $f17 - mthc1 t1, $f18 - mthc1 t1, $f19 - mthc1 t1, $f20 - mthc1 t1, $f21 - mthc1 t1, $f22 - mthc1 t1, $f23 - mthc1 t1, $f24 - mthc1 t1, $f25 - mthc1 t1, $f26 - mthc1 t1, $f27 - mthc1 t1, $f28 - mthc1 t1, $f29 - mthc1 t1, $f30 - mthc1 t1, $f31 -1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ -#else - .set MIPS_ISA_ARCH_LEVEL_RAW - dmtc1 t1, $f0 - dmtc1 t1, $f2 - dmtc1 t1, $f4 - dmtc1 t1, $f6 - dmtc1 t1, $f8 - dmtc1 t1, $f10 - dmtc1 t1, $f12 - dmtc1 t1, $f14 - dmtc1 t1, $f16 - dmtc1 t1, $f18 - dmtc1 t1, $f20 - dmtc1 t1, $f22 - dmtc1 t1, $f24 - dmtc1 t1, $f26 - dmtc1 t1, $f28 - dmtc1 t1, $f30 -#endif - jr ra - END(_init_fpu) - - .set pop /* SET_HARDFLOAT */ - .set noreorder /** diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 91d3c8c46097..d9434cd0f568 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -22,9 +22,6 @@ #include <asm/war.h> #include <asm/asm-offsets.h> -/* Highest syscall used of any syscall flavour */ -#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls - .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat @@ -89,7 +86,7 @@ loads_done: bnez t0, syscall_trace_entry # -> yes syscall_common: subu v0, v0, __NR_O32_Linux # check syscall number - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + sltiu t0, v0, __NR_O32_Linux_syscalls beqz t0, illegal_syscall sll t0, v0, 2 @@ -185,7 +182,7 @@ illegal_syscall: LEAF(sys_syscall) subu t0, a0, __NR_O32_Linux # check syscall number - sltiu v0, t0, __NR_O32_Linux_syscalls + 1 + sltiu v0, t0, __NR_O32_Linux_syscalls beqz t0, einval # do not recurse sll t1, t0, 2 beqz v0, einval @@ -208,248 +205,6 @@ einval: li v0, -ENOSYS jr ra END(sys_syscall) - .align 2 - .type sys_call_table, @object -EXPORT(sys_call_table) - PTR sys_syscall /* 4000 */ - PTR sys_exit - PTR __sys_fork - PTR sys_read - PTR sys_write - PTR sys_open /* 4005 */ - PTR sys_close - PTR sys_waitpid - PTR sys_creat - PTR sys_link - PTR sys_unlink /* 4010 */ - PTR sys_execve - PTR sys_chdir - PTR sys_time - PTR sys_mknod - PTR sys_chmod /* 4015 */ - PTR sys_lchown - PTR sys_ni_syscall - PTR sys_ni_syscall /* was sys_stat */ - PTR sys_lseek - PTR sys_getpid /* 4020 */ - PTR sys_mount - PTR sys_oldumount - PTR sys_setuid - PTR sys_getuid - PTR sys_stime /* 4025 */ - PTR sys_ptrace - PTR sys_alarm - PTR sys_ni_syscall /* was sys_fstat */ - PTR sys_pause - PTR sys_utime /* 4030 */ - PTR sys_ni_syscall - PTR sys_ni_syscall - PTR sys_access - PTR sys_nice - PTR sys_ni_syscall /* 4035 */ - PTR sys_sync - PTR sys_kill - PTR sys_rename - PTR sys_mkdir - PTR sys_rmdir /* 4040 */ - PTR sys_dup - PTR sysm_pipe - PTR sys_times - PTR sys_ni_syscall - PTR sys_brk /* 4045 */ - PTR sys_setgid - PTR sys_getgid - PTR sys_ni_syscall /* was signal(2) */ - PTR sys_geteuid - PTR sys_getegid /* 4050 */ - PTR sys_acct - PTR sys_umount - PTR sys_ni_syscall - PTR sys_ioctl - PTR sys_fcntl /* 4055 */ - PTR sys_ni_syscall - PTR sys_setpgid - PTR sys_ni_syscall - PTR sys_olduname - PTR sys_umask /* 4060 */ - PTR sys_chroot - PTR sys_ustat - PTR sys_dup2 - PTR sys_getppid - PTR sys_getpgrp /* 4065 */ - PTR sys_setsid - PTR sys_sigaction - PTR sys_sgetmask - PTR sys_ssetmask - PTR 
sys_setreuid /* 4070 */ - PTR sys_setregid - PTR sys_sigsuspend - PTR sys_sigpending - PTR sys_sethostname - PTR sys_setrlimit /* 4075 */ - PTR sys_getrlimit - PTR sys_getrusage - PTR sys_gettimeofday - PTR sys_settimeofday - PTR sys_getgroups /* 4080 */ - PTR sys_setgroups - PTR sys_ni_syscall /* old_select */ - PTR sys_symlink - PTR sys_ni_syscall /* was sys_lstat */ - PTR sys_readlink /* 4085 */ - PTR sys_uselib - PTR sys_swapon - PTR sys_reboot - PTR sys_old_readdir - PTR sys_mips_mmap /* 4090 */ - PTR sys_munmap - PTR sys_truncate - PTR sys_ftruncate - PTR sys_fchmod - PTR sys_fchown /* 4095 */ - PTR sys_getpriority - PTR sys_setpriority - PTR sys_ni_syscall - PTR sys_statfs - PTR sys_fstatfs /* 4100 */ - PTR sys_ni_syscall /* was ioperm(2) */ - PTR sys_socketcall - PTR sys_syslog - PTR sys_setitimer - PTR sys_getitimer /* 4105 */ - PTR sys_newstat - PTR sys_newlstat - PTR sys_newfstat - PTR sys_uname - PTR sys_ni_syscall /* 4110 was iopl(2) */ - PTR sys_vhangup - PTR sys_ni_syscall /* was sys_idle() */ - PTR sys_ni_syscall /* was sys_vm86 */ - PTR sys_wait4 - PTR sys_swapoff /* 4115 */ - PTR sys_sysinfo - PTR sys_ipc - PTR sys_fsync - PTR sys_sigreturn - PTR __sys_clone /* 4120 */ - PTR sys_setdomainname - PTR sys_newuname - PTR sys_ni_syscall /* sys_modify_ldt */ - PTR sys_adjtimex - PTR sys_mprotect /* 4125 */ - PTR sys_sigprocmask - PTR sys_ni_syscall /* was create_module */ - PTR sys_init_module - PTR sys_delete_module - PTR sys_ni_syscall /* 4130 was get_kernel_syms */ - PTR sys_quotactl - PTR sys_getpgid - PTR sys_fchdir - PTR sys_bdflush - PTR sys_sysfs /* 4135 */ - PTR sys_personality - PTR sys_ni_syscall /* for afs_syscall */ - PTR sys_setfsuid - PTR sys_setfsgid - PTR sys_llseek /* 4140 */ - PTR sys_getdents - PTR sys_select - PTR sys_flock - PTR sys_msync - PTR sys_readv /* 4145 */ - PTR sys_writev - PTR sys_cacheflush - PTR sys_cachectl - PTR __sys_sysmips - PTR sys_ni_syscall /* 4150 */ - PTR sys_getsid - PTR sys_fdatasync - PTR sys_sysctl - PTR sys_mlock - PTR sys_munlock /* 4155 */ - PTR sys_mlockall - PTR sys_munlockall - PTR sys_sched_setparam - PTR sys_sched_getparam - PTR sys_sched_setscheduler /* 4160 */ - PTR sys_sched_getscheduler - PTR sys_sched_yield - PTR sys_sched_get_priority_max - PTR sys_sched_get_priority_min - PTR sys_sched_rr_get_interval /* 4165 */ - PTR sys_nanosleep - PTR sys_mremap - PTR sys_accept - PTR sys_bind - PTR sys_connect /* 4170 */ - PTR sys_getpeername - PTR sys_getsockname - PTR sys_getsockopt - PTR sys_listen - PTR sys_recv /* 4175 */ - PTR sys_recvfrom - PTR sys_recvmsg - PTR sys_send - PTR sys_sendmsg - PTR sys_sendto /* 4180 */ - PTR sys_setsockopt - PTR sys_shutdown - PTR sys_socket - PTR sys_socketpair - PTR sys_setresuid /* 4185 */ - PTR sys_getresuid - PTR sys_ni_syscall /* was sys_query_module */ - PTR sys_poll - PTR sys_ni_syscall /* was nfsservctl */ - PTR sys_setresgid /* 4190 */ - PTR sys_getresgid - PTR sys_prctl - PTR sys_rt_sigreturn - PTR sys_rt_sigaction - PTR sys_rt_sigprocmask /* 4195 */ - PTR sys_rt_sigpending - PTR sys_rt_sigtimedwait - PTR sys_rt_sigqueueinfo - PTR sys_rt_sigsuspend - PTR sys_pread64 /* 4200 */ - PTR sys_pwrite64 - PTR sys_chown - PTR sys_getcwd - PTR sys_capget - PTR sys_capset /* 4205 */ - PTR sys_sigaltstack - PTR sys_sendfile - PTR sys_ni_syscall - PTR sys_ni_syscall - PTR sys_mips_mmap2 /* 4210 */ - PTR sys_truncate64 - PTR sys_ftruncate64 - PTR sys_stat64 - PTR sys_lstat64 - PTR sys_fstat64 /* 4215 */ - PTR sys_pivot_root - PTR sys_mincore - PTR sys_madvise - PTR sys_getdents64 - PTR 
sys_fcntl64 /* 4220 */ - PTR sys_ni_syscall - PTR sys_gettid - PTR sys_readahead - PTR sys_setxattr - PTR sys_lsetxattr /* 4225 */ - PTR sys_fsetxattr - PTR sys_getxattr - PTR sys_lgetxattr - PTR sys_fgetxattr - PTR sys_listxattr /* 4230 */ - PTR sys_llistxattr - PTR sys_flistxattr - PTR sys_removexattr - PTR sys_lremovexattr - PTR sys_fremovexattr /* 4235 */ - PTR sys_tkill - PTR sys_sendfile64 - PTR sys_futex #ifdef CONFIG_MIPS_MT_FPAFF /* * For FPU affinity scheduling on MIPS MT processors, we need to @@ -458,137 +213,13 @@ EXPORT(sys_call_table) * these hooks for the 32-bit kernel - there is no MIPS64 MT processor * atm. */ - PTR mipsmt_sys_sched_setaffinity - PTR mipsmt_sys_sched_getaffinity -#else - PTR sys_sched_setaffinity - PTR sys_sched_getaffinity /* 4240 */ +#define sys_sched_setaffinity mipsmt_sys_sched_setaffinity +#define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ - PTR sys_io_setup - PTR sys_io_destroy - PTR sys_io_getevents - PTR sys_io_submit - PTR sys_io_cancel /* 4245 */ - PTR sys_exit_group - PTR sys_lookup_dcookie - PTR sys_epoll_create - PTR sys_epoll_ctl - PTR sys_epoll_wait /* 4250 */ - PTR sys_remap_file_pages - PTR sys_set_tid_address - PTR sys_restart_syscall - PTR sys_fadvise64_64 - PTR sys_statfs64 /* 4255 */ - PTR sys_fstatfs64 - PTR sys_timer_create - PTR sys_timer_settime - PTR sys_timer_gettime - PTR sys_timer_getoverrun /* 4260 */ - PTR sys_timer_delete - PTR sys_clock_settime - PTR sys_clock_gettime - PTR sys_clock_getres - PTR sys_clock_nanosleep /* 4265 */ - PTR sys_tgkill - PTR sys_utimes - PTR sys_mbind - PTR sys_get_mempolicy - PTR sys_set_mempolicy /* 4270 */ - PTR sys_mq_open - PTR sys_mq_unlink - PTR sys_mq_timedsend - PTR sys_mq_timedreceive - PTR sys_mq_notify /* 4275 */ - PTR sys_mq_getsetattr - PTR sys_ni_syscall /* sys_vserver */ - PTR sys_waitid - PTR sys_ni_syscall /* available, was setaltroot */ - PTR sys_add_key /* 4280 */ - PTR sys_request_key - PTR sys_keyctl - PTR sys_set_thread_area - PTR sys_inotify_init - PTR sys_inotify_add_watch /* 4285 */ - PTR sys_inotify_rm_watch - PTR sys_migrate_pages - PTR sys_openat - PTR sys_mkdirat - PTR sys_mknodat /* 4290 */ - PTR sys_fchownat - PTR sys_futimesat - PTR sys_fstatat64 - PTR sys_unlinkat - PTR sys_renameat /* 4295 */ - PTR sys_linkat - PTR sys_symlinkat - PTR sys_readlinkat - PTR sys_fchmodat - PTR sys_faccessat /* 4300 */ - PTR sys_pselect6 - PTR sys_ppoll - PTR sys_unshare - PTR sys_splice - PTR sys_sync_file_range /* 4305 */ - PTR sys_tee - PTR sys_vmsplice - PTR sys_move_pages - PTR sys_set_robust_list - PTR sys_get_robust_list /* 4310 */ - PTR sys_kexec_load - PTR sys_getcpu - PTR sys_epoll_pwait - PTR sys_ioprio_set - PTR sys_ioprio_get /* 4315 */ - PTR sys_utimensat - PTR sys_signalfd - PTR sys_ni_syscall /* was timerfd */ - PTR sys_eventfd - PTR sys_fallocate /* 4320 */ - PTR sys_timerfd_create - PTR sys_timerfd_gettime - PTR sys_timerfd_settime - PTR sys_signalfd4 - PTR sys_eventfd2 /* 4325 */ - PTR sys_epoll_create1 - PTR sys_dup3 - PTR sys_pipe2 - PTR sys_inotify_init1 - PTR sys_preadv /* 4330 */ - PTR sys_pwritev - PTR sys_rt_tgsigqueueinfo - PTR sys_perf_event_open - PTR sys_accept4 - PTR sys_recvmmsg /* 4335 */ - PTR sys_fanotify_init - PTR sys_fanotify_mark - PTR sys_prlimit64 - PTR sys_name_to_handle_at - PTR sys_open_by_handle_at /* 4340 */ - PTR sys_clock_adjtime - PTR sys_syncfs - PTR sys_sendmmsg - PTR sys_setns - PTR sys_process_vm_readv /* 4345 */ - PTR sys_process_vm_writev - PTR sys_kcmp - PTR sys_finit_module - PTR 
sys_sched_setattr - PTR sys_sched_getattr /* 4350 */ - PTR sys_renameat2 - PTR sys_seccomp - PTR sys_getrandom - PTR sys_memfd_create - PTR sys_bpf /* 4355 */ - PTR sys_execveat - PTR sys_userfaultfd - PTR sys_membarrier - PTR sys_mlock2 - PTR sys_copy_file_range /* 4360 */ - PTR sys_preadv2 - PTR sys_pwritev2 - PTR sys_pkey_mprotect - PTR sys_pkey_alloc - PTR sys_pkey_free /* 4365 */ - PTR sys_statx - PTR sys_rseq - PTR sys_io_pgetevents + +#define __SYSCALL(nr, entry, nargs) PTR entry + .align 2 + .type sys_call_table, @object +EXPORT(sys_call_table) +#include <asm/syscall_table_32_o32.h> +#undef __SYSCALL diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S deleted file mode 100644 index 358d9599983d..000000000000 --- a/arch/mips/kernel/scall64-64.S +++ /dev/null @@ -1,444 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle - * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2001 MIPS Technologies, Inc. - */ -#include <linux/errno.h> -#include <asm/asm.h> -#include <asm/asmmacro.h> -#include <asm/irqflags.h> -#include <asm/mipsregs.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> -#include <asm/asm-offsets.h> -#include <asm/sysmips.h> -#include <asm/thread_info.h> -#include <asm/unistd.h> -#include <asm/war.h> - -#ifndef CONFIG_BINFMT_ELF32 -/* Neither O32 nor N32, so define handle_sys here */ -#define handle_sys64 handle_sys -#endif - - .align 5 -NESTED(handle_sys64, PT_SIZE, sp) -#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) - /* - * When 32-bit compatibility is configured scall_o32.S - * already did this. - */ - .set noat - SAVE_SOME - TRACE_IRQS_ON_RELOAD - STI - .set at -#endif - -#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) - ld t1, PT_EPC(sp) # skip syscall on return - daddiu t1, 4 # skip to next instruction - sd t1, PT_EPC(sp) -#endif - - sd a3, PT_R26(sp) # save a3 for syscall restarting - - li t1, _TIF_WORK_SYSCALL_ENTRY - LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? - and t0, t1, t0 - bnez t0, syscall_trace_entry - -syscall_common: - dsubu t2, v0, __NR_64_Linux - sltiu t0, t2, __NR_64_Linux_syscalls + 1 - beqz t0, illegal_syscall - - dsll t0, t2, 3 # offset into table - dla t2, sys_call_table - daddu t0, t2, t0 - ld t2, (t0) # syscall routine - beqz t2, illegal_syscall - - jalr t2 # Do The Real Thing (TM) - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -n64_syscall_exit: - j syscall_exit_partial - -/* ------------------------------------------------------------------------ */ - -syscall_trace_entry: - SAVE_STATIC - move a0, sp - move a1, v0 - jal syscall_trace_enter - - bltz v0, 1f # seccomp failed? Skip syscall - - RESTORE_STATIC - ld v0, PT_R2(sp) # Restore syscall (maybe modified) - ld a0, PT_R4(sp) # Restore argument registers - ld a1, PT_R5(sp) - ld a2, PT_R6(sp) - ld a3, PT_R7(sp) - ld a4, PT_R8(sp) - ld a5, PT_R9(sp) - j syscall_common - -1: j syscall_exit - -illegal_syscall: - /* This also isn't a 64-bit syscall, throw an error. 
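 */

/*
 * Everything removed below is the hand-maintained n64 pointer table.
 * It is superseded by a header generated from a syscall .tbl file: the
 * generated header is nothing but __SYSCALL() invocations, so each
 * including site picks its own expansion. A rough, illustrative
 * excerpt (entry names taken from the table being deleted):
 */
#define __SYSCALL(nr, entry, nargs) PTR entry

/* <asm/syscall_table_64_n64.h>, approximately: */
__SYSCALL(5000, sys_read, 3)	/* expands to:	PTR sys_read	*/
__SYSCALL(5001, sys_write, 3)	/*		PTR sys_write	*/
__SYSCALL(5002, sys_open, 3)	/*		PTR sys_open	*/

#undef __SYSCALL

/*
 * The deleted handler tail and the full pointer table follow.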
*/ - li v0, ENOSYS # error - sd v0, PT_R2(sp) - li t0, 1 # set error flag - sd t0, PT_R7(sp) - j n64_syscall_exit - END(handle_sys64) - - .align 3 - .type sys_call_table, @object -EXPORT(sys_call_table) - PTR sys_read /* 5000 */ - PTR sys_write - PTR sys_open - PTR sys_close - PTR sys_newstat - PTR sys_newfstat /* 5005 */ - PTR sys_newlstat - PTR sys_poll - PTR sys_lseek - PTR sys_mips_mmap - PTR sys_mprotect /* 5010 */ - PTR sys_munmap - PTR sys_brk - PTR sys_rt_sigaction - PTR sys_rt_sigprocmask - PTR sys_ioctl /* 5015 */ - PTR sys_pread64 - PTR sys_pwrite64 - PTR sys_readv - PTR sys_writev - PTR sys_access /* 5020 */ - PTR sysm_pipe - PTR sys_select - PTR sys_sched_yield - PTR sys_mremap - PTR sys_msync /* 5025 */ - PTR sys_mincore - PTR sys_madvise - PTR sys_shmget - PTR sys_shmat - PTR sys_shmctl /* 5030 */ - PTR sys_dup - PTR sys_dup2 - PTR sys_pause - PTR sys_nanosleep - PTR sys_getitimer /* 5035 */ - PTR sys_setitimer - PTR sys_alarm - PTR sys_getpid - PTR sys_sendfile64 - PTR sys_socket /* 5040 */ - PTR sys_connect - PTR sys_accept - PTR sys_sendto - PTR sys_recvfrom - PTR sys_sendmsg /* 5045 */ - PTR sys_recvmsg - PTR sys_shutdown - PTR sys_bind - PTR sys_listen - PTR sys_getsockname /* 5050 */ - PTR sys_getpeername - PTR sys_socketpair - PTR sys_setsockopt - PTR sys_getsockopt - PTR __sys_clone /* 5055 */ - PTR __sys_fork - PTR sys_execve - PTR sys_exit - PTR sys_wait4 - PTR sys_kill /* 5060 */ - PTR sys_newuname - PTR sys_semget - PTR sys_semop - PTR sys_semctl - PTR sys_shmdt /* 5065 */ - PTR sys_msgget - PTR sys_msgsnd - PTR sys_msgrcv - PTR sys_msgctl - PTR sys_fcntl /* 5070 */ - PTR sys_flock - PTR sys_fsync - PTR sys_fdatasync - PTR sys_truncate - PTR sys_ftruncate /* 5075 */ - PTR sys_getdents - PTR sys_getcwd - PTR sys_chdir - PTR sys_fchdir - PTR sys_rename /* 5080 */ - PTR sys_mkdir - PTR sys_rmdir - PTR sys_creat - PTR sys_link - PTR sys_unlink /* 5085 */ - PTR sys_symlink - PTR sys_readlink - PTR sys_chmod - PTR sys_fchmod - PTR sys_chown /* 5090 */ - PTR sys_fchown - PTR sys_lchown - PTR sys_umask - PTR sys_gettimeofday - PTR sys_getrlimit /* 5095 */ - PTR sys_getrusage - PTR sys_sysinfo - PTR sys_times - PTR sys_ptrace - PTR sys_getuid /* 5100 */ - PTR sys_syslog - PTR sys_getgid - PTR sys_setuid - PTR sys_setgid - PTR sys_geteuid /* 5105 */ - PTR sys_getegid - PTR sys_setpgid - PTR sys_getppid - PTR sys_getpgrp - PTR sys_setsid /* 5110 */ - PTR sys_setreuid - PTR sys_setregid - PTR sys_getgroups - PTR sys_setgroups - PTR sys_setresuid /* 5115 */ - PTR sys_getresuid - PTR sys_setresgid - PTR sys_getresgid - PTR sys_getpgid - PTR sys_setfsuid /* 5120 */ - PTR sys_setfsgid - PTR sys_getsid - PTR sys_capget - PTR sys_capset - PTR sys_rt_sigpending /* 5125 */ - PTR sys_rt_sigtimedwait - PTR sys_rt_sigqueueinfo - PTR sys_rt_sigsuspend - PTR sys_sigaltstack - PTR sys_utime /* 5130 */ - PTR sys_mknod - PTR sys_personality - PTR sys_ustat - PTR sys_statfs - PTR sys_fstatfs /* 5135 */ - PTR sys_sysfs - PTR sys_getpriority - PTR sys_setpriority - PTR sys_sched_setparam - PTR sys_sched_getparam /* 5140 */ - PTR sys_sched_setscheduler - PTR sys_sched_getscheduler - PTR sys_sched_get_priority_max - PTR sys_sched_get_priority_min - PTR sys_sched_rr_get_interval /* 5145 */ - PTR sys_mlock - PTR sys_munlock - PTR sys_mlockall - PTR sys_munlockall - PTR sys_vhangup /* 5150 */ - PTR sys_pivot_root - PTR sys_sysctl - PTR sys_prctl - PTR sys_adjtimex - PTR sys_setrlimit /* 5155 */ - PTR sys_chroot - PTR sys_sync - PTR sys_acct - PTR sys_settimeofday - PTR sys_mount /* 5160 */ - PTR 
sys_umount - PTR sys_swapon - PTR sys_swapoff - PTR sys_reboot - PTR sys_sethostname /* 5165 */ - PTR sys_setdomainname - PTR sys_ni_syscall /* was create_module */ - PTR sys_init_module - PTR sys_delete_module - PTR sys_ni_syscall /* 5170, was get_kernel_syms */ - PTR sys_ni_syscall /* was query_module */ - PTR sys_quotactl - PTR sys_ni_syscall /* was nfsservctl */ - PTR sys_ni_syscall /* res. for getpmsg */ - PTR sys_ni_syscall /* 5175 for putpmsg */ - PTR sys_ni_syscall /* res. for afs_syscall */ - PTR sys_ni_syscall /* res. for security */ - PTR sys_gettid - PTR sys_readahead - PTR sys_setxattr /* 5180 */ - PTR sys_lsetxattr - PTR sys_fsetxattr - PTR sys_getxattr - PTR sys_lgetxattr - PTR sys_fgetxattr /* 5185 */ - PTR sys_listxattr - PTR sys_llistxattr - PTR sys_flistxattr - PTR sys_removexattr - PTR sys_lremovexattr /* 5190 */ - PTR sys_fremovexattr - PTR sys_tkill - PTR sys_ni_syscall - PTR sys_futex - PTR sys_sched_setaffinity /* 5195 */ - PTR sys_sched_getaffinity - PTR sys_cacheflush - PTR sys_cachectl - PTR __sys_sysmips - PTR sys_io_setup /* 5200 */ - PTR sys_io_destroy - PTR sys_io_getevents - PTR sys_io_submit - PTR sys_io_cancel - PTR sys_exit_group /* 5205 */ - PTR sys_lookup_dcookie - PTR sys_epoll_create - PTR sys_epoll_ctl - PTR sys_epoll_wait - PTR sys_remap_file_pages /* 5210 */ - PTR sys_rt_sigreturn - PTR sys_set_tid_address - PTR sys_restart_syscall - PTR sys_semtimedop - PTR sys_fadvise64_64 /* 5215 */ - PTR sys_timer_create - PTR sys_timer_settime - PTR sys_timer_gettime - PTR sys_timer_getoverrun - PTR sys_timer_delete /* 5220 */ - PTR sys_clock_settime - PTR sys_clock_gettime - PTR sys_clock_getres - PTR sys_clock_nanosleep - PTR sys_tgkill /* 5225 */ - PTR sys_utimes - PTR sys_mbind - PTR sys_get_mempolicy - PTR sys_set_mempolicy - PTR sys_mq_open /* 5230 */ - PTR sys_mq_unlink - PTR sys_mq_timedsend - PTR sys_mq_timedreceive - PTR sys_mq_notify - PTR sys_mq_getsetattr /* 5235 */ - PTR sys_ni_syscall /* sys_vserver */ - PTR sys_waitid - PTR sys_ni_syscall /* available, was setaltroot */ - PTR sys_add_key - PTR sys_request_key /* 5240 */ - PTR sys_keyctl - PTR sys_set_thread_area - PTR sys_inotify_init - PTR sys_inotify_add_watch - PTR sys_inotify_rm_watch /* 5245 */ - PTR sys_migrate_pages - PTR sys_openat - PTR sys_mkdirat - PTR sys_mknodat - PTR sys_fchownat /* 5250 */ - PTR sys_futimesat - PTR sys_newfstatat - PTR sys_unlinkat - PTR sys_renameat - PTR sys_linkat /* 5255 */ - PTR sys_symlinkat - PTR sys_readlinkat - PTR sys_fchmodat - PTR sys_faccessat - PTR sys_pselect6 /* 5260 */ - PTR sys_ppoll - PTR sys_unshare - PTR sys_splice - PTR sys_sync_file_range - PTR sys_tee /* 5265 */ - PTR sys_vmsplice - PTR sys_move_pages - PTR sys_set_robust_list - PTR sys_get_robust_list - PTR sys_kexec_load /* 5270 */ - PTR sys_getcpu - PTR sys_epoll_pwait - PTR sys_ioprio_set - PTR sys_ioprio_get - PTR sys_utimensat /* 5275 */ - PTR sys_signalfd - PTR sys_ni_syscall /* was timerfd */ - PTR sys_eventfd - PTR sys_fallocate - PTR sys_timerfd_create /* 5280 */ - PTR sys_timerfd_gettime - PTR sys_timerfd_settime - PTR sys_signalfd4 - PTR sys_eventfd2 - PTR sys_epoll_create1 /* 5285 */ - PTR sys_dup3 - PTR sys_pipe2 - PTR sys_inotify_init1 - PTR sys_preadv - PTR sys_pwritev /* 5290 */ - PTR sys_rt_tgsigqueueinfo - PTR sys_perf_event_open - PTR sys_accept4 - PTR sys_recvmmsg - PTR sys_fanotify_init /* 5295 */ - PTR sys_fanotify_mark - PTR sys_prlimit64 - PTR sys_name_to_handle_at - PTR sys_open_by_handle_at - PTR sys_clock_adjtime /* 5300 */ - PTR sys_syncfs - PTR sys_sendmmsg - 
PTR sys_setns - PTR sys_process_vm_readv - PTR sys_process_vm_writev /* 5305 */ - PTR sys_kcmp - PTR sys_finit_module - PTR sys_getdents64 - PTR sys_sched_setattr - PTR sys_sched_getattr /* 5310 */ - PTR sys_renameat2 - PTR sys_seccomp - PTR sys_getrandom - PTR sys_memfd_create - PTR sys_bpf /* 5315 */ - PTR sys_execveat - PTR sys_userfaultfd - PTR sys_membarrier - PTR sys_mlock2 - PTR sys_copy_file_range /* 5320 */ - PTR sys_preadv2 - PTR sys_pwritev2 - PTR sys_pkey_mprotect - PTR sys_pkey_alloc - PTR sys_pkey_free /* 5325 */ - PTR sys_statx - PTR sys_rseq - PTR sys_io_pgetevents - .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c65eaacc1abf..c761ddfed9e6 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) #endif dsubu t0, v0, __NR_N32_Linux # check syscall number - sltiu t0, t0, __NR_N32_Linux_syscalls + 1 + sltiu t0, t0, __NR_N32_Linux_syscalls #ifndef CONFIG_MIPS32_O32 ld t1, PT_EPC(sp) # skip syscall on return @@ -87,7 +87,7 @@ n32_syscall_trace_entry: ld a5, PT_R9(sp) dsubu t2, v0, __NR_N32_Linux # check (new) syscall number - sltiu t0, t2, __NR_N32_Linux_syscalls + 1 + sltiu t0, t2, __NR_N32_Linux_syscalls beqz t0, not_n32_scall j syscall_common @@ -101,339 +101,8 @@ not_n32_scall: END(handle_sysn32) +#define __SYSCALL(nr, entry, nargs) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) - PTR sys_read /* 6000 */ - PTR sys_write - PTR sys_open - PTR sys_close - PTR sys_newstat - PTR sys_newfstat /* 6005 */ - PTR sys_newlstat - PTR sys_poll - PTR sys_lseek - PTR sys_mips_mmap - PTR sys_mprotect /* 6010 */ - PTR sys_munmap - PTR sys_brk - PTR compat_sys_rt_sigaction - PTR compat_sys_rt_sigprocmask - PTR compat_sys_ioctl /* 6015 */ - PTR sys_pread64 - PTR sys_pwrite64 - PTR compat_sys_readv - PTR compat_sys_writev - PTR sys_access /* 6020 */ - PTR sysm_pipe - PTR compat_sys_select - PTR sys_sched_yield - PTR sys_mremap - PTR sys_msync /* 6025 */ - PTR sys_mincore - PTR sys_madvise - PTR sys_shmget - PTR sys_shmat - PTR compat_sys_shmctl /* 6030 */ - PTR sys_dup - PTR sys_dup2 - PTR sys_pause - PTR compat_sys_nanosleep - PTR compat_sys_getitimer /* 6035 */ - PTR compat_sys_setitimer - PTR sys_alarm - PTR sys_getpid - PTR compat_sys_sendfile - PTR sys_socket /* 6040 */ - PTR sys_connect - PTR sys_accept - PTR sys_sendto - PTR compat_sys_recvfrom - PTR compat_sys_sendmsg /* 6045 */ - PTR compat_sys_recvmsg - PTR sys_shutdown - PTR sys_bind - PTR sys_listen - PTR sys_getsockname /* 6050 */ - PTR sys_getpeername - PTR sys_socketpair - PTR compat_sys_setsockopt - PTR compat_sys_getsockopt - PTR __sys_clone /* 6055 */ - PTR __sys_fork - PTR compat_sys_execve - PTR sys_exit - PTR compat_sys_wait4 - PTR sys_kill /* 6060 */ - PTR sys_newuname - PTR sys_semget - PTR sys_semop - PTR compat_sys_semctl - PTR sys_shmdt /* 6065 */ - PTR sys_msgget - PTR compat_sys_msgsnd - PTR compat_sys_msgrcv - PTR compat_sys_msgctl - PTR compat_sys_fcntl /* 6070 */ - PTR sys_flock - PTR sys_fsync - PTR sys_fdatasync - PTR sys_truncate - PTR sys_ftruncate /* 6075 */ - PTR compat_sys_getdents - PTR sys_getcwd - PTR sys_chdir - PTR sys_fchdir - PTR sys_rename /* 6080 */ - PTR sys_mkdir - PTR sys_rmdir - PTR sys_creat - PTR sys_link - PTR sys_unlink /* 6085 */ - PTR sys_symlink - PTR sys_readlink - PTR sys_chmod - PTR sys_fchmod - PTR sys_chown /* 6090 */ - PTR sys_fchown - PTR sys_lchown - PTR sys_umask - PTR compat_sys_gettimeofday - PTR 
compat_sys_getrlimit /* 6095 */ - PTR compat_sys_getrusage - PTR compat_sys_sysinfo - PTR compat_sys_times - PTR compat_sys_ptrace - PTR sys_getuid /* 6100 */ - PTR sys_syslog - PTR sys_getgid - PTR sys_setuid - PTR sys_setgid - PTR sys_geteuid /* 6105 */ - PTR sys_getegid - PTR sys_setpgid - PTR sys_getppid - PTR sys_getpgrp - PTR sys_setsid /* 6110 */ - PTR sys_setreuid - PTR sys_setregid - PTR sys_getgroups - PTR sys_setgroups - PTR sys_setresuid /* 6115 */ - PTR sys_getresuid - PTR sys_setresgid - PTR sys_getresgid - PTR sys_getpgid - PTR sys_setfsuid /* 6120 */ - PTR sys_setfsgid - PTR sys_getsid - PTR sys_capget - PTR sys_capset - PTR compat_sys_rt_sigpending /* 6125 */ - PTR compat_sys_rt_sigtimedwait - PTR compat_sys_rt_sigqueueinfo - PTR compat_sys_rt_sigsuspend - PTR compat_sys_sigaltstack - PTR compat_sys_utime /* 6130 */ - PTR sys_mknod - PTR sys_32_personality - PTR compat_sys_ustat - PTR compat_sys_statfs - PTR compat_sys_fstatfs /* 6135 */ - PTR sys_sysfs - PTR sys_getpriority - PTR sys_setpriority - PTR sys_sched_setparam - PTR sys_sched_getparam /* 6140 */ - PTR sys_sched_setscheduler - PTR sys_sched_getscheduler - PTR sys_sched_get_priority_max - PTR sys_sched_get_priority_min - PTR compat_sys_sched_rr_get_interval /* 6145 */ - PTR sys_mlock - PTR sys_munlock - PTR sys_mlockall - PTR sys_munlockall - PTR sys_vhangup /* 6150 */ - PTR sys_pivot_root - PTR compat_sys_sysctl - PTR sys_prctl - PTR compat_sys_adjtimex - PTR compat_sys_setrlimit /* 6155 */ - PTR sys_chroot - PTR sys_sync - PTR sys_acct - PTR compat_sys_settimeofday - PTR compat_sys_mount /* 6160 */ - PTR sys_umount - PTR sys_swapon - PTR sys_swapoff - PTR sys_reboot - PTR sys_sethostname /* 6165 */ - PTR sys_setdomainname - PTR sys_ni_syscall /* was create_module */ - PTR sys_init_module - PTR sys_delete_module - PTR sys_ni_syscall /* 6170, was get_kernel_syms */ - PTR sys_ni_syscall /* was query_module */ - PTR sys_quotactl - PTR sys_ni_syscall /* was nfsservctl */ - PTR sys_ni_syscall /* res. for getpmsg */ - PTR sys_ni_syscall /* 6175 for putpmsg */ - PTR sys_ni_syscall /* res. for afs_syscall */ - PTR sys_ni_syscall /* res. 
for security */ - PTR sys_gettid - PTR sys_readahead - PTR sys_setxattr /* 6180 */ - PTR sys_lsetxattr - PTR sys_fsetxattr - PTR sys_getxattr - PTR sys_lgetxattr - PTR sys_fgetxattr /* 6185 */ - PTR sys_listxattr - PTR sys_llistxattr - PTR sys_flistxattr - PTR sys_removexattr - PTR sys_lremovexattr /* 6190 */ - PTR sys_fremovexattr - PTR sys_tkill - PTR sys_ni_syscall - PTR compat_sys_futex - PTR compat_sys_sched_setaffinity /* 6195 */ - PTR compat_sys_sched_getaffinity - PTR sys_cacheflush - PTR sys_cachectl - PTR __sys_sysmips - PTR compat_sys_io_setup /* 6200 */ - PTR sys_io_destroy - PTR compat_sys_io_getevents - PTR compat_sys_io_submit - PTR sys_io_cancel - PTR sys_exit_group /* 6205 */ - PTR sys_lookup_dcookie - PTR sys_epoll_create - PTR sys_epoll_ctl - PTR sys_epoll_wait - PTR sys_remap_file_pages /* 6210 */ - PTR sysn32_rt_sigreturn - PTR compat_sys_fcntl64 - PTR sys_set_tid_address - PTR sys_restart_syscall - PTR compat_sys_semtimedop /* 6215 */ - PTR sys_fadvise64_64 - PTR compat_sys_statfs64 - PTR compat_sys_fstatfs64 - PTR sys_sendfile64 - PTR compat_sys_timer_create /* 6220 */ - PTR compat_sys_timer_settime - PTR compat_sys_timer_gettime - PTR sys_timer_getoverrun - PTR sys_timer_delete - PTR compat_sys_clock_settime /* 6225 */ - PTR compat_sys_clock_gettime - PTR compat_sys_clock_getres - PTR compat_sys_clock_nanosleep - PTR sys_tgkill - PTR compat_sys_utimes /* 6230 */ - PTR compat_sys_mbind - PTR compat_sys_get_mempolicy - PTR compat_sys_set_mempolicy - PTR compat_sys_mq_open - PTR sys_mq_unlink /* 6235 */ - PTR compat_sys_mq_timedsend - PTR compat_sys_mq_timedreceive - PTR compat_sys_mq_notify - PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* 6240, sys_vserver */ - PTR compat_sys_waitid - PTR sys_ni_syscall /* available, was setaltroot */ - PTR sys_add_key - PTR sys_request_key - PTR compat_sys_keyctl /* 6245 */ - PTR sys_set_thread_area - PTR sys_inotify_init - PTR sys_inotify_add_watch - PTR sys_inotify_rm_watch - PTR compat_sys_migrate_pages /* 6250 */ - PTR sys_openat - PTR sys_mkdirat - PTR sys_mknodat - PTR sys_fchownat - PTR compat_sys_futimesat /* 6255 */ - PTR sys_newfstatat - PTR sys_unlinkat - PTR sys_renameat - PTR sys_linkat - PTR sys_symlinkat /* 6260 */ - PTR sys_readlinkat - PTR sys_fchmodat - PTR sys_faccessat - PTR compat_sys_pselect6 - PTR compat_sys_ppoll /* 6265 */ - PTR sys_unshare - PTR sys_splice - PTR sys_sync_file_range - PTR sys_tee - PTR compat_sys_vmsplice /* 6270 */ - PTR compat_sys_move_pages - PTR compat_sys_set_robust_list - PTR compat_sys_get_robust_list - PTR compat_sys_kexec_load - PTR sys_getcpu /* 6275 */ - PTR compat_sys_epoll_pwait - PTR sys_ioprio_set - PTR sys_ioprio_get - PTR compat_sys_utimensat - PTR compat_sys_signalfd /* 6280 */ - PTR sys_ni_syscall /* was timerfd */ - PTR sys_eventfd - PTR sys_fallocate - PTR sys_timerfd_create - PTR compat_sys_timerfd_gettime /* 6285 */ - PTR compat_sys_timerfd_settime - PTR compat_sys_signalfd4 - PTR sys_eventfd2 - PTR sys_epoll_create1 - PTR sys_dup3 /* 6290 */ - PTR sys_pipe2 - PTR sys_inotify_init1 - PTR compat_sys_preadv - PTR compat_sys_pwritev - PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ - PTR sys_perf_event_open - PTR sys_accept4 - PTR compat_sys_recvmmsg - PTR sys_getdents64 - PTR sys_fanotify_init /* 6300 */ - PTR sys_fanotify_mark - PTR sys_prlimit64 - PTR sys_name_to_handle_at - PTR sys_open_by_handle_at - PTR compat_sys_clock_adjtime /* 6305 */ - PTR sys_syncfs - PTR compat_sys_sendmmsg - PTR sys_setns - PTR compat_sys_process_vm_readv - PTR compat_sys_process_vm_writev 
/* 6310 */ - PTR sys_kcmp - PTR sys_finit_module - PTR sys_sched_setattr - PTR sys_sched_getattr - PTR sys_renameat2 /* 6315 */ - PTR sys_seccomp - PTR sys_getrandom - PTR sys_memfd_create - PTR sys_bpf - PTR compat_sys_execveat /* 6320 */ - PTR sys_userfaultfd - PTR sys_membarrier - PTR sys_mlock2 - PTR sys_copy_file_range - PTR compat_sys_preadv2 /* 6325 */ - PTR compat_sys_pwritev2 - PTR sys_pkey_mprotect - PTR sys_pkey_alloc - PTR sys_pkey_free - PTR sys_statx /* 6330 */ - PTR sys_rseq - PTR compat_sys_io_pgetevents - .size sysn32_call_table,.-sysn32_call_table +#include <asm/syscall_table_64_n32.h> +#undef __SYSCALL diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S new file mode 100644 index 000000000000..727fb8a1b0eb --- /dev/null +++ b/arch/mips/kernel/scall64-n64.S @@ -0,0 +1,117 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/asm-offsets.h> +#include <asm/sysmips.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> +#include <asm/war.h> + +#ifndef CONFIG_BINFMT_ELF32 +/* Neither O32 nor N32, so define handle_sys here */ +#define handle_sys64 handle_sys +#endif + + .align 5 +NESTED(handle_sys64, PT_SIZE, sp) +#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) + /* + * When 32-bit compatibility is configured scall_o32.S + * already did this. + */ + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at +#endif + +#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) + ld t1, PT_EPC(sp) # skip syscall on return + daddiu t1, 4 # skip to next instruction + sd t1, PT_EPC(sp) +#endif + + sd a3, PT_R26(sp) # save a3 for syscall restarting + + li t1, _TIF_WORK_SYSCALL_ENTRY + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, syscall_trace_entry + +syscall_common: + dsubu t2, v0, __NR_64_Linux + sltiu t0, t2, __NR_64_Linux_syscalls + beqz t0, illegal_syscall + + dsll t0, t2, 3 # offset into table + dla t2, sys_call_table + daddu t0, t2, t0 + ld t2, (t0) # syscall routine + beqz t2, illegal_syscall + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + +n64_syscall_exit: + j syscall_exit_partial + +/* ------------------------------------------------------------------------ */ + +syscall_trace_entry: + SAVE_STATIC + move a0, sp + move a1, v0 + jal syscall_trace_enter + + bltz v0, 1f # seccomp failed? Skip syscall + + RESTORE_STATIC + ld v0, PT_R2(sp) # Restore syscall (maybe modified) + ld a0, PT_R4(sp) # Restore argument registers + ld a1, PT_R5(sp) + ld a2, PT_R6(sp) + ld a3, PT_R7(sp) + ld a4, PT_R8(sp) + ld a5, PT_R9(sp) + j syscall_common + +1: j syscall_exit + +illegal_syscall: + /* This also isn't a 64-bit syscall, throw an error. 
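(The syscall number fell outside the 64-bit table's range; ENOSYS is stored as the result in PT_R2 and the error flag is set in PT_R7, the same error convention the syscall_common path above uses.)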
*/ + li v0, ENOSYS # error + sd v0, PT_R2(sp) + li t0, 1 # set error flag + sd t0, PT_R7(sp) + j n64_syscall_exit + END(handle_sys64) + +#define __SYSCALL(nr, entry, nargs) PTR entry + .align 3 + .type sys_call_table, @object +EXPORT(sys_call_table) +#include <asm/syscall_table_64_n64.h> +#undef __SYSCALL diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 73913f072e39..f158c5894a9a 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -34,7 +34,7 @@ NESTED(handle_sys, PT_SIZE, sp) ld t1, PT_EPC(sp) # skip syscall on return dsubu t0, v0, __NR_O32_Linux # check syscall number - sltiu t0, t0, __NR_O32_Linux_syscalls + 1 + sltiu t0, t0, __NR_O32_Linux_syscalls daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) beqz t0, not_o32_scall @@ -144,7 +144,7 @@ trace_a_syscall: ld a7, PT_R11(sp) # For indirect syscalls dsubu t0, v0, __NR_O32_Linux # check (new) syscall number - sltiu t0, t0, __NR_O32_Linux_syscalls + 1 + sltiu t0, t0, __NR_O32_Linux_syscalls beqz t0, not_o32_scall j syscall_common @@ -193,7 +193,7 @@ not_o32_scall: LEAF(sys32_syscall) subu t0, a0, __NR_O32_Linux # check syscall number - sltiu v0, t0, __NR_O32_Linux_syscalls + 1 + sltiu v0, t0, __NR_O32_Linux_syscalls beqz t0, einval # do not recurse dsll t1, t0, 3 beqz v0, einval @@ -213,376 +213,9 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) +#define __SYSCALL(nr, entry, nargs) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) - PTR sys32_syscall /* 4000 */ - PTR sys_exit - PTR __sys_fork - PTR sys_read - PTR sys_write - PTR compat_sys_open /* 4005 */ - PTR sys_close - PTR sys_waitpid - PTR sys_creat - PTR sys_link - PTR sys_unlink /* 4010 */ - PTR compat_sys_execve - PTR sys_chdir - PTR compat_sys_time - PTR sys_mknod - PTR sys_chmod /* 4015 */ - PTR sys_lchown - PTR sys_ni_syscall - PTR sys_ni_syscall /* was sys_stat */ - PTR sys_lseek - PTR sys_getpid /* 4020 */ - PTR compat_sys_mount - PTR sys_oldumount - PTR sys_setuid - PTR sys_getuid - PTR compat_sys_stime /* 4025 */ - PTR compat_sys_ptrace - PTR sys_alarm - PTR sys_ni_syscall /* was sys_fstat */ - PTR sys_pause - PTR compat_sys_utime /* 4030 */ - PTR sys_ni_syscall - PTR sys_ni_syscall - PTR sys_access - PTR sys_nice - PTR sys_ni_syscall /* 4035 */ - PTR sys_sync - PTR sys_kill - PTR sys_rename - PTR sys_mkdir - PTR sys_rmdir /* 4040 */ - PTR sys_dup - PTR sysm_pipe - PTR compat_sys_times - PTR sys_ni_syscall - PTR sys_brk /* 4045 */ - PTR sys_setgid - PTR sys_getgid - PTR sys_ni_syscall /* was signal 2 */ - PTR sys_geteuid - PTR sys_getegid /* 4050 */ - PTR sys_acct - PTR sys_umount - PTR sys_ni_syscall - PTR compat_sys_ioctl - PTR compat_sys_fcntl /* 4055 */ - PTR sys_ni_syscall - PTR sys_setpgid - PTR sys_ni_syscall - PTR sys_olduname - PTR sys_umask /* 4060 */ - PTR sys_chroot - PTR compat_sys_ustat - PTR sys_dup2 - PTR sys_getppid - PTR sys_getpgrp /* 4065 */ - PTR sys_setsid - PTR sys_32_sigaction - PTR sys_sgetmask - PTR sys_ssetmask - PTR sys_setreuid /* 4070 */ - PTR sys_setregid - PTR sys32_sigsuspend - PTR compat_sys_sigpending - PTR sys_sethostname - PTR compat_sys_setrlimit /* 4075 */ - PTR compat_sys_getrlimit - PTR compat_sys_getrusage - PTR compat_sys_gettimeofday - PTR compat_sys_settimeofday - PTR sys_getgroups /* 4080 */ - PTR sys_setgroups - PTR sys_ni_syscall /* old_select */ - PTR sys_symlink - PTR sys_ni_syscall /* was sys_lstat */ - PTR sys_readlink /* 4085 */ - PTR sys_uselib - PTR sys_swapon - PTR sys_reboot - PTR compat_sys_old_readdir - PTR 
sys_mips_mmap /* 4090 */ - PTR sys_munmap - PTR compat_sys_truncate - PTR compat_sys_ftruncate - PTR sys_fchmod - PTR sys_fchown /* 4095 */ - PTR sys_getpriority - PTR sys_setpriority - PTR sys_ni_syscall - PTR compat_sys_statfs - PTR compat_sys_fstatfs /* 4100 */ - PTR sys_ni_syscall /* sys_ioperm */ - PTR compat_sys_socketcall - PTR sys_syslog - PTR compat_sys_setitimer - PTR compat_sys_getitimer /* 4105 */ - PTR compat_sys_newstat - PTR compat_sys_newlstat - PTR compat_sys_newfstat - PTR sys_uname - PTR sys_ni_syscall /* sys_ioperm *//* 4110 */ - PTR sys_vhangup - PTR sys_ni_syscall /* was sys_idle */ - PTR sys_ni_syscall /* sys_vm86 */ - PTR compat_sys_wait4 - PTR sys_swapoff /* 4115 */ - PTR compat_sys_sysinfo - PTR compat_sys_ipc - PTR sys_fsync - PTR sys32_sigreturn - PTR __sys_clone /* 4120 */ - PTR sys_setdomainname - PTR sys_newuname - PTR sys_ni_syscall /* sys_modify_ldt */ - PTR compat_sys_adjtimex - PTR sys_mprotect /* 4125 */ - PTR compat_sys_sigprocmask - PTR sys_ni_syscall /* was creat_module */ - PTR sys_init_module - PTR sys_delete_module - PTR sys_ni_syscall /* 4130, get_kernel_syms */ - PTR sys_quotactl - PTR sys_getpgid - PTR sys_fchdir - PTR sys_bdflush - PTR sys_sysfs /* 4135 */ - PTR sys_32_personality - PTR sys_ni_syscall /* for afs_syscall */ - PTR sys_setfsuid - PTR sys_setfsgid - PTR sys_32_llseek /* 4140 */ - PTR compat_sys_getdents - PTR compat_sys_select - PTR sys_flock - PTR sys_msync - PTR compat_sys_readv /* 4145 */ - PTR compat_sys_writev - PTR sys_cacheflush - PTR sys_cachectl - PTR __sys_sysmips - PTR sys_ni_syscall /* 4150 */ - PTR sys_getsid - PTR sys_fdatasync - PTR compat_sys_sysctl - PTR sys_mlock - PTR sys_munlock /* 4155 */ - PTR sys_mlockall - PTR sys_munlockall - PTR sys_sched_setparam - PTR sys_sched_getparam - PTR sys_sched_setscheduler /* 4160 */ - PTR sys_sched_getscheduler - PTR sys_sched_yield - PTR sys_sched_get_priority_max - PTR sys_sched_get_priority_min - PTR compat_sys_sched_rr_get_interval /* 4165 */ - PTR compat_sys_nanosleep - PTR sys_mremap - PTR sys_accept - PTR sys_bind - PTR sys_connect /* 4170 */ - PTR sys_getpeername - PTR sys_getsockname - PTR compat_sys_getsockopt - PTR sys_listen - PTR compat_sys_recv /* 4175 */ - PTR compat_sys_recvfrom - PTR compat_sys_recvmsg - PTR sys_send - PTR compat_sys_sendmsg - PTR sys_sendto /* 4180 */ - PTR compat_sys_setsockopt - PTR sys_shutdown - PTR sys_socket - PTR sys_socketpair - PTR sys_setresuid /* 4185 */ - PTR sys_getresuid - PTR sys_ni_syscall /* was query_module */ - PTR sys_poll - PTR sys_ni_syscall /* was nfsservctl */ - PTR sys_setresgid /* 4190 */ - PTR sys_getresgid - PTR sys_prctl - PTR sys32_rt_sigreturn - PTR compat_sys_rt_sigaction - PTR compat_sys_rt_sigprocmask /* 4195 */ - PTR compat_sys_rt_sigpending - PTR compat_sys_rt_sigtimedwait - PTR compat_sys_rt_sigqueueinfo - PTR compat_sys_rt_sigsuspend - PTR sys_32_pread /* 4200 */ - PTR sys_32_pwrite - PTR sys_chown - PTR sys_getcwd - PTR sys_capget - PTR sys_capset /* 4205 */ - PTR compat_sys_sigaltstack - PTR compat_sys_sendfile - PTR sys_ni_syscall - PTR sys_ni_syscall - PTR sys_mips_mmap2 /* 4210 */ - PTR sys_32_truncate64 - PTR sys_32_ftruncate64 - PTR sys_newstat - PTR sys_newlstat - PTR sys_newfstat /* 4215 */ - PTR sys_pivot_root - PTR sys_mincore - PTR sys_madvise - PTR sys_getdents64 - PTR compat_sys_fcntl64 /* 4220 */ - PTR sys_ni_syscall - PTR sys_gettid - PTR sys32_readahead - PTR sys_setxattr - PTR sys_lsetxattr /* 4225 */ - PTR sys_fsetxattr - PTR sys_getxattr - PTR sys_lgetxattr - PTR sys_fgetxattr - PTR 
sys_listxattr /* 4230 */ - PTR sys_llistxattr - PTR sys_flistxattr - PTR sys_removexattr - PTR sys_lremovexattr - PTR sys_fremovexattr /* 4235 */ - PTR sys_tkill - PTR sys_sendfile64 - PTR compat_sys_futex - PTR compat_sys_sched_setaffinity - PTR compat_sys_sched_getaffinity /* 4240 */ - PTR compat_sys_io_setup - PTR sys_io_destroy - PTR compat_sys_io_getevents - PTR compat_sys_io_submit - PTR sys_io_cancel /* 4245 */ - PTR sys_exit_group - PTR compat_sys_lookup_dcookie - PTR sys_epoll_create - PTR sys_epoll_ctl - PTR sys_epoll_wait /* 4250 */ - PTR sys_remap_file_pages - PTR sys_set_tid_address - PTR sys_restart_syscall - PTR sys32_fadvise64_64 - PTR compat_sys_statfs64 /* 4255 */ - PTR compat_sys_fstatfs64 - PTR compat_sys_timer_create - PTR compat_sys_timer_settime - PTR compat_sys_timer_gettime - PTR sys_timer_getoverrun /* 4260 */ - PTR sys_timer_delete - PTR compat_sys_clock_settime - PTR compat_sys_clock_gettime - PTR compat_sys_clock_getres - PTR compat_sys_clock_nanosleep /* 4265 */ - PTR sys_tgkill - PTR compat_sys_utimes - PTR compat_sys_mbind - PTR compat_sys_get_mempolicy - PTR compat_sys_set_mempolicy /* 4270 */ - PTR compat_sys_mq_open - PTR sys_mq_unlink - PTR compat_sys_mq_timedsend - PTR compat_sys_mq_timedreceive - PTR compat_sys_mq_notify /* 4275 */ - PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* sys_vserver */ - PTR compat_sys_waitid - PTR sys_ni_syscall /* available, was setaltroot */ - PTR sys_add_key /* 4280 */ - PTR sys_request_key - PTR compat_sys_keyctl - PTR sys_set_thread_area - PTR sys_inotify_init - PTR sys_inotify_add_watch /* 4285 */ - PTR sys_inotify_rm_watch - PTR compat_sys_migrate_pages - PTR compat_sys_openat - PTR sys_mkdirat - PTR sys_mknodat /* 4290 */ - PTR sys_fchownat - PTR compat_sys_futimesat - PTR sys_newfstatat - PTR sys_unlinkat - PTR sys_renameat /* 4295 */ - PTR sys_linkat - PTR sys_symlinkat - PTR sys_readlinkat - PTR sys_fchmodat - PTR sys_faccessat /* 4300 */ - PTR compat_sys_pselect6 - PTR compat_sys_ppoll - PTR sys_unshare - PTR sys_splice - PTR sys32_sync_file_range /* 4305 */ - PTR sys_tee - PTR compat_sys_vmsplice - PTR compat_sys_move_pages - PTR compat_sys_set_robust_list - PTR compat_sys_get_robust_list /* 4310 */ - PTR compat_sys_kexec_load - PTR sys_getcpu - PTR compat_sys_epoll_pwait - PTR sys_ioprio_set - PTR sys_ioprio_get /* 4315 */ - PTR compat_sys_utimensat - PTR compat_sys_signalfd - PTR sys_ni_syscall /* was timerfd */ - PTR sys_eventfd - PTR sys32_fallocate /* 4320 */ - PTR sys_timerfd_create - PTR compat_sys_timerfd_gettime - PTR compat_sys_timerfd_settime - PTR compat_sys_signalfd4 - PTR sys_eventfd2 /* 4325 */ - PTR sys_epoll_create1 - PTR sys_dup3 - PTR sys_pipe2 - PTR sys_inotify_init1 - PTR compat_sys_preadv /* 4330 */ - PTR compat_sys_pwritev - PTR compat_sys_rt_tgsigqueueinfo - PTR sys_perf_event_open - PTR sys_accept4 - PTR compat_sys_recvmmsg /* 4335 */ - PTR sys_fanotify_init - PTR compat_sys_fanotify_mark - PTR sys_prlimit64 - PTR sys_name_to_handle_at - PTR compat_sys_open_by_handle_at /* 4340 */ - PTR compat_sys_clock_adjtime - PTR sys_syncfs - PTR compat_sys_sendmmsg - PTR sys_setns - PTR compat_sys_process_vm_readv /* 4345 */ - PTR compat_sys_process_vm_writev - PTR sys_kcmp - PTR sys_finit_module - PTR sys_sched_setattr - PTR sys_sched_getattr /* 4350 */ - PTR sys_renameat2 - PTR sys_seccomp - PTR sys_getrandom - PTR sys_memfd_create - PTR sys_bpf /* 4355 */ - PTR compat_sys_execveat - PTR sys_userfaultfd - PTR sys_membarrier - PTR sys_mlock2 - PTR sys_copy_file_range /* 4360 */ - PTR 
compat_sys_preadv2 - PTR compat_sys_pwritev2 - PTR sys_pkey_mprotect - PTR sys_pkey_alloc - PTR sys_pkey_free /* 4365 */ - PTR sys_statx - PTR sys_rseq - PTR compat_sys_io_pgetevents - .size sys32_call_table,.-sys32_call_table +#include <asm/syscall_table_64_o32.h> +#undef __SYSCALL diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 109ed163a6a6..d3a23758592c 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -62,6 +62,8 @@ struct rt_sigframe { struct ucontext rs_uc; }; +#ifdef CONFIG_MIPS_FP_SUPPORT + /* * Thread saved context copy to/from a signal context presumed to be on the * user stack, and therefore accessed with appropriate macros from uaccess.h. @@ -104,6 +106,20 @@ static int copy_fp_from_sigcontext(void __user *sc) return err; } +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +static int copy_fp_to_sigcontext(void __user *sc) +{ + return 0; +} + +static int copy_fp_from_sigcontext(void __user *sc) +{ + return 0; +} + +#endif /* !CONFIG_MIPS_FP_SUPPORT */ + /* * Wrappers for the assembly _{save,restore}_fp_context functions. */ @@ -142,6 +158,8 @@ static inline void __user *sc_to_extcontext(void __user *sc) return &uc->uc_extcontext; } +#ifdef CONFIG_CPU_HAS_MSA + static int save_msa_extcontext(void __user *buf) { struct msa_extcontext __user *msa = buf; @@ -195,9 +213,6 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) unsigned int csr; int i, err; - if (!IS_ENABLED(CONFIG_CPU_HAS_MSA)) - return SIGSYS; - if (size != sizeof(*msa)) return -EINVAL; @@ -234,6 +249,20 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) return err; } +#else /* !CONFIG_CPU_HAS_MSA */ + +static int save_msa_extcontext(void __user *buf) +{ + return 0; +} + +static int restore_msa_extcontext(void __user *buf, unsigned int size) +{ + return SIGSYS; +} + +#endif /* !CONFIG_CPU_HAS_MSA */ + static int save_extcontext(void __user *buf) { int sz; @@ -880,7 +909,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, user_enter(); } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT) static int smp_save_fp_context(void __user *sc) { return raw_cpu_has_fpu @@ -908,7 +937,7 @@ static int signal_setup(void) (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) - offsetof(struct rt_sigframe, rs_uc.uc_mcontext))); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT) /* For now just do the cpu_has_fpu check when the functions are invoked */ save_fp_context = smp_save_fp_context; restore_fp_context = smp_restore_fp_context; diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 69c17b549fd3..41a0db08cd37 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -106,6 +106,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (cpu_has_llsc && R10000_LLSC_WAR) { __asm__ __volatile__ ( + " .set push \n" " .set arch=r4000 \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" @@ -122,7 +123,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) " "STR(PTR)" 1b, 4b \n" " "STR(PTR)" 2b, 4b \n" " .previous \n" - " .set mips0 \n" + " .set pop \n" : [old] "=&r" (old), [err] "=&r" (err), [tmp] "=&r" (tmp) @@ -132,6 +133,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__ ( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" "1: \n" @@ -150,7 +152,7 @@ static inline int 
mips_atomic_set(unsigned long addr, unsigned long new) " "STR(PTR)" 1b, 5b \n" " "STR(PTR)" 2b, 5b \n" " .previous \n" - " .set mips0 \n" + " .set pop \n" : [old] "=&r" (old), [err] "=&r" (err), [tmp] "=&r" (tmp) diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile new file mode 100644 index 000000000000..a3d4bec695c6 --- /dev/null +++ b/arch/mips/kernel/syscalls/Makefile @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscalln32 := $(srctree)/$(src)/syscall_n32.tbl +syscalln64 := $(srctree)/$(src)/syscall_n64.tbl +syscallo32 := $(srctree)/$(src)/syscall_o32.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +sysnr := $(srctree)/$(src)/syscallnr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_sysnr = SYSNR $@ + cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ + '$(sysnr_abis_$(basetarget))' \ + '$(sysnr_pfx_$(basetarget))' \ + '$(sysnr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +syshdr_offset_unistd_n32 := __NR_Linux +$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) + $(call if_changed,syshdr) + +syshdr_offset_unistd_n64 := __NR_Linux +$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) + $(call if_changed,syshdr) + +syshdr_offset_unistd_o32 := __NR_Linux +$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) + $(call if_changed,syshdr) + +sysnr_pfx_unistd_nr_n32 := N32 +sysnr_offset_unistd_nr_n32 := 6000 +$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) + $(call if_changed,sysnr) + +sysnr_pfx_unistd_nr_n64 := 64 +sysnr_offset_unistd_nr_n64 := 5000 +$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) + $(call if_changed,sysnr) + +sysnr_pfx_unistd_nr_o32 := O32 +sysnr_offset_unistd_nr_o32 := 4000 +$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) + $(call if_changed,sysnr) + +systbl_abi_syscall_table_32_o32 := 32_o32 +systbl_offset_syscall_table_32_o32 := 4000 +$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) + $(call if_changed,systbl) + +systbl_abi_syscall_table_64_n32 := 64_n32 +systbl_offset_syscall_table_64_n32 := 6000 +$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) + $(call if_changed,systbl) + +systbl_abi_syscall_table_64_n64 := 64_n64 +systbl_offset_syscall_table_64_n64 := 5000 +$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) + $(call if_changed,systbl) + +systbl_abi_syscall_table_64_o32 := 64_o32 +systbl_offset_syscall_table_64_o32 := 4000 +$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_n32.h \ + unistd_n64.h \ + unistd_o32.h \ + unistd_nr_n32.h \ + unistd_nr_n64.h \ + unistd_nr_o32.h +kapisyshdr-y += syscall_table_32_o32.h \ + syscall_table_64_n32.h \ + syscall_table_64_n64.h \ + syscall_table_64_o32.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl new file mode 100644 index 000000000000..53d5862649ae 
--- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -0,0 +1,343 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for mips +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> is always "n32" for this file. +# +0 n32 read sys_read +1 n32 write sys_write +2 n32 open sys_open +3 n32 close sys_close +4 n32 stat sys_newstat +5 n32 fstat sys_newfstat +6 n32 lstat sys_newlstat +7 n32 poll sys_poll +8 n32 lseek sys_lseek +9 n32 mmap sys_mips_mmap +10 n32 mprotect sys_mprotect +11 n32 munmap sys_munmap +12 n32 brk sys_brk +13 n32 rt_sigaction compat_sys_rt_sigaction +14 n32 rt_sigprocmask compat_sys_rt_sigprocmask +15 n32 ioctl compat_sys_ioctl +16 n32 pread64 sys_pread64 +17 n32 pwrite64 sys_pwrite64 +18 n32 readv compat_sys_readv +19 n32 writev compat_sys_writev +20 n32 access sys_access +21 n32 pipe sysm_pipe +22 n32 _newselect compat_sys_select +23 n32 sched_yield sys_sched_yield +24 n32 mremap sys_mremap +25 n32 msync sys_msync +26 n32 mincore sys_mincore +27 n32 madvise sys_madvise +28 n32 shmget sys_shmget +29 n32 shmat sys_shmat +30 n32 shmctl compat_sys_shmctl +31 n32 dup sys_dup +32 n32 dup2 sys_dup2 +33 n32 pause sys_pause +34 n32 nanosleep compat_sys_nanosleep +35 n32 getitimer compat_sys_getitimer +36 n32 setitimer compat_sys_setitimer +37 n32 alarm sys_alarm +38 n32 getpid sys_getpid +39 n32 sendfile compat_sys_sendfile +40 n32 socket sys_socket +41 n32 connect sys_connect +42 n32 accept sys_accept +43 n32 sendto sys_sendto +44 n32 recvfrom compat_sys_recvfrom +45 n32 sendmsg compat_sys_sendmsg +46 n32 recvmsg compat_sys_recvmsg +47 n32 shutdown sys_shutdown +48 n32 bind sys_bind +49 n32 listen sys_listen +50 n32 getsockname sys_getsockname +51 n32 getpeername sys_getpeername +52 n32 socketpair sys_socketpair +53 n32 setsockopt compat_sys_setsockopt +54 n32 getsockopt compat_sys_getsockopt +55 n32 clone __sys_clone +56 n32 fork __sys_fork +57 n32 execve compat_sys_execve +58 n32 exit sys_exit +59 n32 wait4 compat_sys_wait4 +60 n32 kill sys_kill +61 n32 uname sys_newuname +62 n32 semget sys_semget +63 n32 semop sys_semop +64 n32 semctl compat_sys_semctl +65 n32 shmdt sys_shmdt +66 n32 msgget sys_msgget +67 n32 msgsnd compat_sys_msgsnd +68 n32 msgrcv compat_sys_msgrcv +69 n32 msgctl compat_sys_msgctl +70 n32 fcntl compat_sys_fcntl +71 n32 flock sys_flock +72 n32 fsync sys_fsync +73 n32 fdatasync sys_fdatasync +74 n32 truncate sys_truncate +75 n32 ftruncate sys_ftruncate +76 n32 getdents compat_sys_getdents +77 n32 getcwd sys_getcwd +78 n32 chdir sys_chdir +79 n32 fchdir sys_fchdir +80 n32 rename sys_rename +81 n32 mkdir sys_mkdir +82 n32 rmdir sys_rmdir +83 n32 creat sys_creat +84 n32 link sys_link +85 n32 unlink sys_unlink +86 n32 symlink sys_symlink +87 n32 readlink sys_readlink +88 n32 chmod sys_chmod +89 n32 fchmod sys_fchmod +90 n32 chown sys_chown +91 n32 fchown sys_fchown +92 n32 lchown sys_lchown +93 n32 umask sys_umask +94 n32 gettimeofday compat_sys_gettimeofday +95 n32 getrlimit compat_sys_getrlimit +96 n32 getrusage compat_sys_getrusage +97 n32 sysinfo compat_sys_sysinfo +98 n32 times compat_sys_times +99 n32 ptrace compat_sys_ptrace +100 n32 getuid sys_getuid +101 n32 syslog sys_syslog +102 n32 getgid sys_getgid +103 n32 setuid sys_setuid +104 n32 setgid sys_setgid +105 n32 geteuid sys_geteuid +106 n32 getegid sys_getegid +107 n32 setpgid sys_setpgid +108 n32 getppid sys_getppid +109 n32 getpgrp sys_getpgrp +110 n32 setsid sys_setsid +111 n32 setreuid 
sys_setreuid +112 n32 setregid sys_setregid +113 n32 getgroups sys_getgroups +114 n32 setgroups sys_setgroups +115 n32 setresuid sys_setresuid +116 n32 getresuid sys_getresuid +117 n32 setresgid sys_setresgid +118 n32 getresgid sys_getresgid +119 n32 getpgid sys_getpgid +120 n32 setfsuid sys_setfsuid +121 n32 setfsgid sys_setfsgid +122 n32 getsid sys_getsid +123 n32 capget sys_capget +124 n32 capset sys_capset +125 n32 rt_sigpending compat_sys_rt_sigpending +126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait +127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo +128 n32 rt_sigsuspend compat_sys_rt_sigsuspend +129 n32 sigaltstack compat_sys_sigaltstack +130 n32 utime compat_sys_utime +131 n32 mknod sys_mknod +132 n32 personality sys_32_personality +133 n32 ustat compat_sys_ustat +134 n32 statfs compat_sys_statfs +135 n32 fstatfs compat_sys_fstatfs +136 n32 sysfs sys_sysfs +137 n32 getpriority sys_getpriority +138 n32 setpriority sys_setpriority +139 n32 sched_setparam sys_sched_setparam +140 n32 sched_getparam sys_sched_getparam +141 n32 sched_setscheduler sys_sched_setscheduler +142 n32 sched_getscheduler sys_sched_getscheduler +143 n32 sched_get_priority_max sys_sched_get_priority_max +144 n32 sched_get_priority_min sys_sched_get_priority_min +145 n32 sched_rr_get_interval compat_sys_sched_rr_get_interval +146 n32 mlock sys_mlock +147 n32 munlock sys_munlock +148 n32 mlockall sys_mlockall +149 n32 munlockall sys_munlockall +150 n32 vhangup sys_vhangup +151 n32 pivot_root sys_pivot_root +152 n32 _sysctl compat_sys_sysctl +153 n32 prctl sys_prctl +154 n32 adjtimex compat_sys_adjtimex +155 n32 setrlimit compat_sys_setrlimit +156 n32 chroot sys_chroot +157 n32 sync sys_sync +158 n32 acct sys_acct +159 n32 settimeofday compat_sys_settimeofday +160 n32 mount compat_sys_mount +161 n32 umount2 sys_umount +162 n32 swapon sys_swapon +163 n32 swapoff sys_swapoff +164 n32 reboot sys_reboot +165 n32 sethostname sys_sethostname +166 n32 setdomainname sys_setdomainname +167 n32 create_module sys_ni_syscall +168 n32 init_module sys_init_module +169 n32 delete_module sys_delete_module +170 n32 get_kernel_syms sys_ni_syscall +171 n32 query_module sys_ni_syscall +172 n32 quotactl sys_quotactl +173 n32 nfsservctl sys_ni_syscall +174 n32 getpmsg sys_ni_syscall +175 n32 putpmsg sys_ni_syscall +176 n32 afs_syscall sys_ni_syscall +# 177 reserved for security +177 n32 reserved177 sys_ni_syscall +178 n32 gettid sys_gettid +179 n32 readahead sys_readahead +180 n32 setxattr sys_setxattr +181 n32 lsetxattr sys_lsetxattr +182 n32 fsetxattr sys_fsetxattr +183 n32 getxattr sys_getxattr +184 n32 lgetxattr sys_lgetxattr +185 n32 fgetxattr sys_fgetxattr +186 n32 listxattr sys_listxattr +187 n32 llistxattr sys_llistxattr +188 n32 flistxattr sys_flistxattr +189 n32 removexattr sys_removexattr +190 n32 lremovexattr sys_lremovexattr +191 n32 fremovexattr sys_fremovexattr +192 n32 tkill sys_tkill +193 n32 reserved193 sys_ni_syscall +194 n32 futex compat_sys_futex +195 n32 sched_setaffinity compat_sys_sched_setaffinity +196 n32 sched_getaffinity compat_sys_sched_getaffinity +197 n32 cacheflush sys_cacheflush +198 n32 cachectl sys_cachectl +199 n32 sysmips __sys_sysmips +200 n32 io_setup compat_sys_io_setup +201 n32 io_destroy sys_io_destroy +202 n32 io_getevents compat_sys_io_getevents +203 n32 io_submit compat_sys_io_submit +204 n32 io_cancel sys_io_cancel +205 n32 exit_group sys_exit_group +206 n32 lookup_dcookie sys_lookup_dcookie +207 n32 epoll_create sys_epoll_create +208 n32 epoll_ctl sys_epoll_ctl +209 n32 epoll_wait 
sys_epoll_wait +210 n32 remap_file_pages sys_remap_file_pages +211 n32 rt_sigreturn sysn32_rt_sigreturn +212 n32 fcntl64 compat_sys_fcntl64 +213 n32 set_tid_address sys_set_tid_address +214 n32 restart_syscall sys_restart_syscall +215 n32 semtimedop compat_sys_semtimedop +216 n32 fadvise64 sys_fadvise64_64 +217 n32 statfs64 compat_sys_statfs64 +218 n32 fstatfs64 compat_sys_fstatfs64 +219 n32 sendfile64 sys_sendfile64 +220 n32 timer_create compat_sys_timer_create +221 n32 timer_settime compat_sys_timer_settime +222 n32 timer_gettime compat_sys_timer_gettime +223 n32 timer_getoverrun sys_timer_getoverrun +224 n32 timer_delete sys_timer_delete +225 n32 clock_settime compat_sys_clock_settime +226 n32 clock_gettime compat_sys_clock_gettime +227 n32 clock_getres compat_sys_clock_getres +228 n32 clock_nanosleep compat_sys_clock_nanosleep +229 n32 tgkill sys_tgkill +230 n32 utimes compat_sys_utimes +231 n32 mbind compat_sys_mbind +232 n32 get_mempolicy compat_sys_get_mempolicy +233 n32 set_mempolicy compat_sys_set_mempolicy +234 n32 mq_open compat_sys_mq_open +235 n32 mq_unlink sys_mq_unlink +236 n32 mq_timedsend compat_sys_mq_timedsend +237 n32 mq_timedreceive compat_sys_mq_timedreceive +238 n32 mq_notify compat_sys_mq_notify +239 n32 mq_getsetattr compat_sys_mq_getsetattr +240 n32 vserver sys_ni_syscall +241 n32 waitid compat_sys_waitid +# 242 was sys_setaltroot +243 n32 add_key sys_add_key +244 n32 request_key sys_request_key +245 n32 keyctl compat_sys_keyctl +246 n32 set_thread_area sys_set_thread_area +247 n32 inotify_init sys_inotify_init +248 n32 inotify_add_watch sys_inotify_add_watch +249 n32 inotify_rm_watch sys_inotify_rm_watch +250 n32 migrate_pages compat_sys_migrate_pages +251 n32 openat sys_openat +252 n32 mkdirat sys_mkdirat +253 n32 mknodat sys_mknodat +254 n32 fchownat sys_fchownat +255 n32 futimesat compat_sys_futimesat +256 n32 newfstatat sys_newfstatat +257 n32 unlinkat sys_unlinkat +258 n32 renameat sys_renameat +259 n32 linkat sys_linkat +260 n32 symlinkat sys_symlinkat +261 n32 readlinkat sys_readlinkat +262 n32 fchmodat sys_fchmodat +263 n32 faccessat sys_faccessat +264 n32 pselect6 compat_sys_pselect6 +265 n32 ppoll compat_sys_ppoll +266 n32 unshare sys_unshare +267 n32 splice sys_splice +268 n32 sync_file_range sys_sync_file_range +269 n32 tee sys_tee +270 n32 vmsplice compat_sys_vmsplice +271 n32 move_pages compat_sys_move_pages +272 n32 set_robust_list compat_sys_set_robust_list +273 n32 get_robust_list compat_sys_get_robust_list +274 n32 kexec_load compat_sys_kexec_load +275 n32 getcpu sys_getcpu +276 n32 epoll_pwait compat_sys_epoll_pwait +277 n32 ioprio_set sys_ioprio_set +278 n32 ioprio_get sys_ioprio_get +279 n32 utimensat compat_sys_utimensat +280 n32 signalfd compat_sys_signalfd +281 n32 timerfd sys_ni_syscall +282 n32 eventfd sys_eventfd +283 n32 fallocate sys_fallocate +284 n32 timerfd_create sys_timerfd_create +285 n32 timerfd_gettime compat_sys_timerfd_gettime +286 n32 timerfd_settime compat_sys_timerfd_settime +287 n32 signalfd4 compat_sys_signalfd4 +288 n32 eventfd2 sys_eventfd2 +289 n32 epoll_create1 sys_epoll_create1 +290 n32 dup3 sys_dup3 +291 n32 pipe2 sys_pipe2 +292 n32 inotify_init1 sys_inotify_init1 +293 n32 preadv compat_sys_preadv +294 n32 pwritev compat_sys_pwritev +295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +296 n32 perf_event_open sys_perf_event_open +297 n32 accept4 sys_accept4 +298 n32 recvmmsg compat_sys_recvmmsg +299 n32 getdents64 sys_getdents64 +300 n32 fanotify_init sys_fanotify_init +301 n32 fanotify_mark sys_fanotify_mark 
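+# Illustrative note, not part of the original table: n32 rows carry a
+# single entry-point column, so compat wrappers such as
+# compat_sys_recvmmsg above are listed directly as the entry point. The
+# n32 ABI exists only on 64-bit kernels, so no separate <compat entry
+# point> column is needed the way it is in syscall_o32.tbl.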
+302 n32 prlimit64 sys_prlimit64 +303 n32 name_to_handle_at sys_name_to_handle_at +304 n32 open_by_handle_at sys_open_by_handle_at +305 n32 clock_adjtime compat_sys_clock_adjtime +306 n32 syncfs sys_syncfs +307 n32 sendmmsg compat_sys_sendmmsg +308 n32 setns sys_setns +309 n32 process_vm_readv compat_sys_process_vm_readv +310 n32 process_vm_writev compat_sys_process_vm_writev +311 n32 kcmp sys_kcmp +312 n32 finit_module sys_finit_module +313 n32 sched_setattr sys_sched_setattr +314 n32 sched_getattr sys_sched_getattr +315 n32 renameat2 sys_renameat2 +316 n32 seccomp sys_seccomp +317 n32 getrandom sys_getrandom +318 n32 memfd_create sys_memfd_create +319 n32 bpf sys_bpf +320 n32 execveat compat_sys_execveat +321 n32 userfaultfd sys_userfaultfd +322 n32 membarrier sys_membarrier +323 n32 mlock2 sys_mlock2 +324 n32 copy_file_range sys_copy_file_range +325 n32 preadv2 compat_sys_preadv2 +326 n32 pwritev2 compat_sys_pwritev2 +327 n32 pkey_mprotect sys_pkey_mprotect +328 n32 pkey_alloc sys_pkey_alloc +329 n32 pkey_free sys_pkey_free +330 n32 statx sys_statx +331 n32 rseq sys_rseq +332 n32 io_pgetevents compat_sys_io_pgetevents diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl new file mode 100644 index 000000000000..a8286ccbb66c --- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -0,0 +1,339 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for mips +# +# The format is: +# <number> <abi> <name> <entry point> +# +# The <abi> is always "n64" for this file. +# +0 n64 read sys_read +1 n64 write sys_write +2 n64 open sys_open +3 n64 close sys_close +4 n64 stat sys_newstat +5 n64 fstat sys_newfstat +6 n64 lstat sys_newlstat +7 n64 poll sys_poll +8 n64 lseek sys_lseek +9 n64 mmap sys_mips_mmap +10 n64 mprotect sys_mprotect +11 n64 munmap sys_munmap +12 n64 brk sys_brk +13 n64 rt_sigaction sys_rt_sigaction +14 n64 rt_sigprocmask sys_rt_sigprocmask +15 n64 ioctl sys_ioctl +16 n64 pread64 sys_pread64 +17 n64 pwrite64 sys_pwrite64 +18 n64 readv sys_readv +19 n64 writev sys_writev +20 n64 access sys_access +21 n64 pipe sysm_pipe +22 n64 _newselect sys_select +23 n64 sched_yield sys_sched_yield +24 n64 mremap sys_mremap +25 n64 msync sys_msync +26 n64 mincore sys_mincore +27 n64 madvise sys_madvise +28 n64 shmget sys_shmget +29 n64 shmat sys_shmat +30 n64 shmctl sys_shmctl +31 n64 dup sys_dup +32 n64 dup2 sys_dup2 +33 n64 pause sys_pause +34 n64 nanosleep sys_nanosleep +35 n64 getitimer sys_getitimer +36 n64 setitimer sys_setitimer +37 n64 alarm sys_alarm +38 n64 getpid sys_getpid +39 n64 sendfile sys_sendfile64 +40 n64 socket sys_socket +41 n64 connect sys_connect +42 n64 accept sys_accept +43 n64 sendto sys_sendto +44 n64 recvfrom sys_recvfrom +45 n64 sendmsg sys_sendmsg +46 n64 recvmsg sys_recvmsg +47 n64 shutdown sys_shutdown +48 n64 bind sys_bind +49 n64 listen sys_listen +50 n64 getsockname sys_getsockname +51 n64 getpeername sys_getpeername +52 n64 socketpair sys_socketpair +53 n64 setsockopt sys_setsockopt +54 n64 getsockopt sys_getsockopt +55 n64 clone __sys_clone +56 n64 fork __sys_fork +57 n64 execve sys_execve +58 n64 exit sys_exit +59 n64 wait4 sys_wait4 +60 n64 kill sys_kill +61 n64 uname sys_newuname +62 n64 semget sys_semget +63 n64 semop sys_semop +64 n64 semctl sys_semctl +65 n64 shmdt sys_shmdt +66 n64 msgget sys_msgget +67 n64 msgsnd sys_msgsnd +68 n64 msgrcv sys_msgrcv +69 n64 msgctl sys_msgctl +70 n64 fcntl sys_fcntl +71 n64 flock sys_flock +72 n64 fsync sys_fsync +73 
n64 fdatasync sys_fdatasync +74 n64 truncate sys_truncate +75 n64 ftruncate sys_ftruncate +76 n64 getdents sys_getdents +77 n64 getcwd sys_getcwd +78 n64 chdir sys_chdir +79 n64 fchdir sys_fchdir +80 n64 rename sys_rename +81 n64 mkdir sys_mkdir +82 n64 rmdir sys_rmdir +83 n64 creat sys_creat +84 n64 link sys_link +85 n64 unlink sys_unlink +86 n64 symlink sys_symlink +87 n64 readlink sys_readlink +88 n64 chmod sys_chmod +89 n64 fchmod sys_fchmod +90 n64 chown sys_chown +91 n64 fchown sys_fchown +92 n64 lchown sys_lchown +93 n64 umask sys_umask +94 n64 gettimeofday sys_gettimeofday +95 n64 getrlimit sys_getrlimit +96 n64 getrusage sys_getrusage +97 n64 sysinfo sys_sysinfo +98 n64 times sys_times +99 n64 ptrace sys_ptrace +100 n64 getuid sys_getuid +101 n64 syslog sys_syslog +102 n64 getgid sys_getgid +103 n64 setuid sys_setuid +104 n64 setgid sys_setgid +105 n64 geteuid sys_geteuid +106 n64 getegid sys_getegid +107 n64 setpgid sys_setpgid +108 n64 getppid sys_getppid +109 n64 getpgrp sys_getpgrp +110 n64 setsid sys_setsid +111 n64 setreuid sys_setreuid +112 n64 setregid sys_setregid +113 n64 getgroups sys_getgroups +114 n64 setgroups sys_setgroups +115 n64 setresuid sys_setresuid +116 n64 getresuid sys_getresuid +117 n64 setresgid sys_setresgid +118 n64 getresgid sys_getresgid +119 n64 getpgid sys_getpgid +120 n64 setfsuid sys_setfsuid +121 n64 setfsgid sys_setfsgid +122 n64 getsid sys_getsid +123 n64 capget sys_capget +124 n64 capset sys_capset +125 n64 rt_sigpending sys_rt_sigpending +126 n64 rt_sigtimedwait sys_rt_sigtimedwait +127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo +128 n64 rt_sigsuspend sys_rt_sigsuspend +129 n64 sigaltstack sys_sigaltstack +130 n64 utime sys_utime +131 n64 mknod sys_mknod +132 n64 personality sys_personality +133 n64 ustat sys_ustat +134 n64 statfs sys_statfs +135 n64 fstatfs sys_fstatfs +136 n64 sysfs sys_sysfs +137 n64 getpriority sys_getpriority +138 n64 setpriority sys_setpriority +139 n64 sched_setparam sys_sched_setparam +140 n64 sched_getparam sys_sched_getparam +141 n64 sched_setscheduler sys_sched_setscheduler +142 n64 sched_getscheduler sys_sched_getscheduler +143 n64 sched_get_priority_max sys_sched_get_priority_max +144 n64 sched_get_priority_min sys_sched_get_priority_min +145 n64 sched_rr_get_interval sys_sched_rr_get_interval +146 n64 mlock sys_mlock +147 n64 munlock sys_munlock +148 n64 mlockall sys_mlockall +149 n64 munlockall sys_munlockall +150 n64 vhangup sys_vhangup +151 n64 pivot_root sys_pivot_root +152 n64 _sysctl sys_sysctl +153 n64 prctl sys_prctl +154 n64 adjtimex sys_adjtimex +155 n64 setrlimit sys_setrlimit +156 n64 chroot sys_chroot +157 n64 sync sys_sync +158 n64 acct sys_acct +159 n64 settimeofday sys_settimeofday +160 n64 mount sys_mount +161 n64 umount2 sys_umount +162 n64 swapon sys_swapon +163 n64 swapoff sys_swapoff +164 n64 reboot sys_reboot +165 n64 sethostname sys_sethostname +166 n64 setdomainname sys_setdomainname +167 n64 create_module sys_ni_syscall +168 n64 init_module sys_init_module +169 n64 delete_module sys_delete_module +170 n64 get_kernel_syms sys_ni_syscall +171 n64 query_module sys_ni_syscall +172 n64 quotactl sys_quotactl +173 n64 nfsservctl sys_ni_syscall +174 n64 getpmsg sys_ni_syscall +175 n64 putpmsg sys_ni_syscall +176 n64 afs_syscall sys_ni_syscall +# 177 reserved for security +177 n64 reserved177 sys_ni_syscall +178 n64 gettid sys_gettid +179 n64 readahead sys_readahead +180 n64 setxattr sys_setxattr +181 n64 lsetxattr sys_lsetxattr +182 n64 fsetxattr sys_fsetxattr +183 n64 getxattr sys_getxattr +184 
n64 lgetxattr sys_lgetxattr +185 n64 fgetxattr sys_fgetxattr +186 n64 listxattr sys_listxattr +187 n64 llistxattr sys_llistxattr +188 n64 flistxattr sys_flistxattr +189 n64 removexattr sys_removexattr +190 n64 lremovexattr sys_lremovexattr +191 n64 fremovexattr sys_fremovexattr +192 n64 tkill sys_tkill +193 n64 reserved193 sys_ni_syscall +194 n64 futex sys_futex +195 n64 sched_setaffinity sys_sched_setaffinity +196 n64 sched_getaffinity sys_sched_getaffinity +197 n64 cacheflush sys_cacheflush +198 n64 cachectl sys_cachectl +199 n64 sysmips __sys_sysmips +200 n64 io_setup sys_io_setup +201 n64 io_destroy sys_io_destroy +202 n64 io_getevents sys_io_getevents +203 n64 io_submit sys_io_submit +204 n64 io_cancel sys_io_cancel +205 n64 exit_group sys_exit_group +206 n64 lookup_dcookie sys_lookup_dcookie +207 n64 epoll_create sys_epoll_create +208 n64 epoll_ctl sys_epoll_ctl +209 n64 epoll_wait sys_epoll_wait +210 n64 remap_file_pages sys_remap_file_pages +211 n64 rt_sigreturn sys_rt_sigreturn +212 n64 set_tid_address sys_set_tid_address +213 n64 restart_syscall sys_restart_syscall +214 n64 semtimedop sys_semtimedop +215 n64 fadvise64 sys_fadvise64_64 +216 n64 timer_create sys_timer_create +217 n64 timer_settime sys_timer_settime +218 n64 timer_gettime sys_timer_gettime +219 n64 timer_getoverrun sys_timer_getoverrun +220 n64 timer_delete sys_timer_delete +221 n64 clock_settime sys_clock_settime +222 n64 clock_gettime sys_clock_gettime +223 n64 clock_getres sys_clock_getres +224 n64 clock_nanosleep sys_clock_nanosleep +225 n64 tgkill sys_tgkill +226 n64 utimes sys_utimes +227 n64 mbind sys_mbind +228 n64 get_mempolicy sys_get_mempolicy +229 n64 set_mempolicy sys_set_mempolicy +230 n64 mq_open sys_mq_open +231 n64 mq_unlink sys_mq_unlink +232 n64 mq_timedsend sys_mq_timedsend +233 n64 mq_timedreceive sys_mq_timedreceive +234 n64 mq_notify sys_mq_notify +235 n64 mq_getsetattr sys_mq_getsetattr +236 n64 vserver sys_ni_syscall +237 n64 waitid sys_waitid +# 238 was sys_setaltroot +239 n64 add_key sys_add_key +240 n64 request_key sys_request_key +241 n64 keyctl sys_keyctl +242 n64 set_thread_area sys_set_thread_area +243 n64 inotify_init sys_inotify_init +244 n64 inotify_add_watch sys_inotify_add_watch +245 n64 inotify_rm_watch sys_inotify_rm_watch +246 n64 migrate_pages sys_migrate_pages +247 n64 openat sys_openat +248 n64 mkdirat sys_mkdirat +249 n64 mknodat sys_mknodat +250 n64 fchownat sys_fchownat +251 n64 futimesat sys_futimesat +252 n64 newfstatat sys_newfstatat +253 n64 unlinkat sys_unlinkat +254 n64 renameat sys_renameat +255 n64 linkat sys_linkat +256 n64 symlinkat sys_symlinkat +257 n64 readlinkat sys_readlinkat +258 n64 fchmodat sys_fchmodat +259 n64 faccessat sys_faccessat +260 n64 pselect6 sys_pselect6 +261 n64 ppoll sys_ppoll +262 n64 unshare sys_unshare +263 n64 splice sys_splice +264 n64 sync_file_range sys_sync_file_range +265 n64 tee sys_tee +266 n64 vmsplice sys_vmsplice +267 n64 move_pages sys_move_pages +268 n64 set_robust_list sys_set_robust_list +269 n64 get_robust_list sys_get_robust_list +270 n64 kexec_load sys_kexec_load +271 n64 getcpu sys_getcpu +272 n64 epoll_pwait sys_epoll_pwait +273 n64 ioprio_set sys_ioprio_set +274 n64 ioprio_get sys_ioprio_get +275 n64 utimensat sys_utimensat +276 n64 signalfd sys_signalfd +277 n64 timerfd sys_ni_syscall +278 n64 eventfd sys_eventfd +279 n64 fallocate sys_fallocate +280 n64 timerfd_create sys_timerfd_create +281 n64 timerfd_gettime sys_timerfd_gettime +282 n64 timerfd_settime sys_timerfd_settime +283 n64 signalfd4 sys_signalfd4 +284 
n64 eventfd2 sys_eventfd2 +285 n64 epoll_create1 sys_epoll_create1 +286 n64 dup3 sys_dup3 +287 n64 pipe2 sys_pipe2 +288 n64 inotify_init1 sys_inotify_init1 +289 n64 preadv sys_preadv +290 n64 pwritev sys_pwritev +291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +292 n64 perf_event_open sys_perf_event_open +293 n64 accept4 sys_accept4 +294 n64 recvmmsg sys_recvmmsg +295 n64 fanotify_init sys_fanotify_init +296 n64 fanotify_mark sys_fanotify_mark +297 n64 prlimit64 sys_prlimit64 +298 n64 name_to_handle_at sys_name_to_handle_at +299 n64 open_by_handle_at sys_open_by_handle_at +300 n64 clock_adjtime sys_clock_adjtime +301 n64 syncfs sys_syncfs +302 n64 sendmmsg sys_sendmmsg +303 n64 setns sys_setns +304 n64 process_vm_readv sys_process_vm_readv +305 n64 process_vm_writev sys_process_vm_writev +306 n64 kcmp sys_kcmp +307 n64 finit_module sys_finit_module +308 n64 getdents64 sys_getdents64 +309 n64 sched_setattr sys_sched_setattr +310 n64 sched_getattr sys_sched_getattr +311 n64 renameat2 sys_renameat2 +312 n64 seccomp sys_seccomp +313 n64 getrandom sys_getrandom +314 n64 memfd_create sys_memfd_create +315 n64 bpf sys_bpf +316 n64 execveat sys_execveat +317 n64 userfaultfd sys_userfaultfd +318 n64 membarrier sys_membarrier +319 n64 mlock2 sys_mlock2 +320 n64 copy_file_range sys_copy_file_range +321 n64 preadv2 sys_preadv2 +322 n64 pwritev2 sys_pwritev2 +323 n64 pkey_mprotect sys_pkey_mprotect +324 n64 pkey_alloc sys_pkey_alloc +325 n64 pkey_free sys_pkey_free +326 n64 statx sys_statx +327 n64 rseq sys_rseq +328 n64 io_pgetevents sys_io_pgetevents diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl new file mode 100644 index 000000000000..3d5a47b80d2b --- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -0,0 +1,382 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for mips +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> is always "o32" for this file. 
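+# For illustration (a hypothetical rendering of the generated output,
+# following the Makefile and syscalltbl.sh above): a row such as
+# "5 o32 open sys_open compat_sys_open" is emitted as
+# "__SYSCALL(4005, sys_open, )" into syscall_table_32_o32.h and as
+# "__SYSCALL(4005, compat_sys_open, )" into syscall_table_64_o32.h
+# (offset 4000 plus the row number; the compat entry point is
+# substituted only for the 64_o32 ABI, and gaps in the numbering are
+# filled with sys_ni_syscall). The
+# "#define __SYSCALL(nr, entry, nargs) PTR entry" placed before each
+# table include then expands every such line into a PTR slot of the
+# final jump table.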
+# +0 o32 syscall sys_syscall sys32_syscall +1 o32 exit sys_exit +2 o32 fork __sys_fork +3 o32 read sys_read +4 o32 write sys_write +5 o32 open sys_open compat_sys_open +6 o32 close sys_close +7 o32 waitpid sys_waitpid +8 o32 creat sys_creat +9 o32 link sys_link +10 o32 unlink sys_unlink +11 o32 execve sys_execve compat_sys_execve +12 o32 chdir sys_chdir +13 o32 time sys_time compat_sys_time +14 o32 mknod sys_mknod +15 o32 chmod sys_chmod +16 o32 lchown sys_lchown +17 o32 break sys_ni_syscall +# 18 was sys_stat +18 o32 unused18 sys_ni_syscall +19 o32 lseek sys_lseek +20 o32 getpid sys_getpid +21 o32 mount sys_mount compat_sys_mount +22 o32 umount sys_oldumount +23 o32 setuid sys_setuid +24 o32 getuid sys_getuid +25 o32 stime sys_stime compat_sys_stime +26 o32 ptrace sys_ptrace compat_sys_ptrace +27 o32 alarm sys_alarm +# 28 was sys_fstat +28 o32 unused28 sys_ni_syscall +29 o32 pause sys_pause +30 o32 utime sys_utime compat_sys_utime +31 o32 stty sys_ni_syscall +32 o32 gtty sys_ni_syscall +33 o32 access sys_access +34 o32 nice sys_nice +35 o32 ftime sys_ni_syscall +36 o32 sync sys_sync +37 o32 kill sys_kill +38 o32 rename sys_rename +39 o32 mkdir sys_mkdir +40 o32 rmdir sys_rmdir +41 o32 dup sys_dup +42 o32 pipe sysm_pipe +43 o32 times sys_times compat_sys_times +44 o32 prof sys_ni_syscall +45 o32 brk sys_brk +46 o32 setgid sys_setgid +47 o32 getgid sys_getgid +48 o32 signal sys_ni_syscall +49 o32 geteuid sys_geteuid +50 o32 getegid sys_getegid +51 o32 acct sys_acct +52 o32 umount2 sys_umount +53 o32 lock sys_ni_syscall +54 o32 ioctl sys_ioctl compat_sys_ioctl +55 o32 fcntl sys_fcntl compat_sys_fcntl +56 o32 mpx sys_ni_syscall +57 o32 setpgid sys_setpgid +58 o32 ulimit sys_ni_syscall +59 o32 unused59 sys_olduname +60 o32 umask sys_umask +61 o32 chroot sys_chroot +62 o32 ustat sys_ustat compat_sys_ustat +63 o32 dup2 sys_dup2 +64 o32 getppid sys_getppid +65 o32 getpgrp sys_getpgrp +66 o32 setsid sys_setsid +67 o32 sigaction sys_sigaction sys_32_sigaction +68 o32 sgetmask sys_sgetmask +69 o32 ssetmask sys_ssetmask +70 o32 setreuid sys_setreuid +71 o32 setregid sys_setregid +72 o32 sigsuspend sys_sigsuspend sys32_sigsuspend +73 o32 sigpending sys_sigpending compat_sys_sigpending +74 o32 sethostname sys_sethostname +75 o32 setrlimit sys_setrlimit compat_sys_setrlimit +76 o32 getrlimit sys_getrlimit compat_sys_getrlimit +77 o32 getrusage sys_getrusage compat_sys_getrusage +78 o32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +79 o32 settimeofday sys_settimeofday compat_sys_settimeofday +80 o32 getgroups sys_getgroups +81 o32 setgroups sys_setgroups +# 82 was old_select +82 o32 reserved82 sys_ni_syscall +83 o32 symlink sys_symlink +# 84 was sys_lstat +84 o32 unused84 sys_ni_syscall +85 o32 readlink sys_readlink +86 o32 uselib sys_uselib +87 o32 swapon sys_swapon +88 o32 reboot sys_reboot +89 o32 readdir sys_old_readdir compat_sys_old_readdir +90 o32 mmap sys_mips_mmap +91 o32 munmap sys_munmap +92 o32 truncate sys_truncate compat_sys_truncate +93 o32 ftruncate sys_ftruncate compat_sys_ftruncate +94 o32 fchmod sys_fchmod +95 o32 fchown sys_fchown +96 o32 getpriority sys_getpriority +97 o32 setpriority sys_setpriority +98 o32 profil sys_ni_syscall +99 o32 statfs sys_statfs compat_sys_statfs +100 o32 fstatfs sys_fstatfs compat_sys_fstatfs +101 o32 ioperm sys_ni_syscall +102 o32 socketcall sys_socketcall compat_sys_socketcall +103 o32 syslog sys_syslog +104 o32 setitimer sys_setitimer compat_sys_setitimer +105 o32 getitimer sys_getitimer compat_sys_getitimer +106 o32 stat sys_newstat 
compat_sys_newstat +107 o32 lstat sys_newlstat compat_sys_newlstat +108 o32 fstat sys_newfstat compat_sys_newfstat +109 o32 unused109 sys_uname +110 o32 iopl sys_ni_syscall +111 o32 vhangup sys_vhangup +112 o32 idle sys_ni_syscall +113 o32 vm86 sys_ni_syscall +114 o32 wait4 sys_wait4 compat_sys_wait4 +115 o32 swapoff sys_swapoff +116 o32 sysinfo sys_sysinfo compat_sys_sysinfo +117 o32 ipc sys_ipc compat_sys_ipc +118 o32 fsync sys_fsync +119 o32 sigreturn sys_sigreturn sys32_sigreturn +120 o32 clone __sys_clone +121 o32 setdomainname sys_setdomainname +122 o32 uname sys_newuname +123 o32 modify_ldt sys_ni_syscall +124 o32 adjtimex sys_adjtimex compat_sys_adjtimex +125 o32 mprotect sys_mprotect +126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask +127 o32 create_module sys_ni_syscall +128 o32 init_module sys_init_module +129 o32 delete_module sys_delete_module +130 o32 get_kernel_syms sys_ni_syscall +131 o32 quotactl sys_quotactl +132 o32 getpgid sys_getpgid +133 o32 fchdir sys_fchdir +134 o32 bdflush sys_bdflush +135 o32 sysfs sys_sysfs +136 o32 personality sys_personality sys_32_personality +137 o32 afs_syscall sys_ni_syscall +138 o32 setfsuid sys_setfsuid +139 o32 setfsgid sys_setfsgid +140 o32 _llseek sys_llseek sys_32_llseek +141 o32 getdents sys_getdents compat_sys_getdents +142 o32 _newselect sys_select compat_sys_select +143 o32 flock sys_flock +144 o32 msync sys_msync +145 o32 readv sys_readv compat_sys_readv +146 o32 writev sys_writev compat_sys_writev +147 o32 cacheflush sys_cacheflush +148 o32 cachectl sys_cachectl +149 o32 sysmips __sys_sysmips +150 o32 unused150 sys_ni_syscall +151 o32 getsid sys_getsid +152 o32 fdatasync sys_fdatasync +153 o32 _sysctl sys_sysctl compat_sys_sysctl +154 o32 mlock sys_mlock +155 o32 munlock sys_munlock +156 o32 mlockall sys_mlockall +157 o32 munlockall sys_munlockall +158 o32 sched_setparam sys_sched_setparam +159 o32 sched_getparam sys_sched_getparam +160 o32 sched_setscheduler sys_sched_setscheduler +161 o32 sched_getscheduler sys_sched_getscheduler +162 o32 sched_yield sys_sched_yield +163 o32 sched_get_priority_max sys_sched_get_priority_max +164 o32 sched_get_priority_min sys_sched_get_priority_min +165 o32 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval +166 o32 nanosleep sys_nanosleep compat_sys_nanosleep +167 o32 mremap sys_mremap +168 o32 accept sys_accept +169 o32 bind sys_bind +170 o32 connect sys_connect +171 o32 getpeername sys_getpeername +172 o32 getsockname sys_getsockname +173 o32 getsockopt sys_getsockopt compat_sys_getsockopt +174 o32 listen sys_listen +175 o32 recv sys_recv compat_sys_recv +176 o32 recvfrom sys_recvfrom compat_sys_recvfrom +177 o32 recvmsg sys_recvmsg compat_sys_recvmsg +178 o32 send sys_send +179 o32 sendmsg sys_sendmsg compat_sys_sendmsg +180 o32 sendto sys_sendto +181 o32 setsockopt sys_setsockopt compat_sys_setsockopt +182 o32 shutdown sys_shutdown +183 o32 socket sys_socket +184 o32 socketpair sys_socketpair +185 o32 setresuid sys_setresuid +186 o32 getresuid sys_getresuid +187 o32 query_module sys_ni_syscall +188 o32 poll sys_poll +189 o32 nfsservctl sys_ni_syscall +190 o32 setresgid sys_setresgid +191 o32 getresgid sys_getresgid +192 o32 prctl sys_prctl +193 o32 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn +194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +197 o32 rt_sigtimedwait sys_rt_sigtimedwait 
compat_sys_rt_sigtimedwait +198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +200 o32 pread64 sys_pread64 sys_32_pread +201 o32 pwrite64 sys_pwrite64 sys_32_pwrite +202 o32 chown sys_chown +203 o32 getcwd sys_getcwd +204 o32 capget sys_capget +205 o32 capset sys_capset +206 o32 sigaltstack sys_sigaltstack compat_sys_sigaltstack +207 o32 sendfile sys_sendfile compat_sys_sendfile +208 o32 getpmsg sys_ni_syscall +209 o32 putpmsg sys_ni_syscall +210 o32 mmap2 sys_mips_mmap2 +211 o32 truncate64 sys_truncate64 sys_32_truncate64 +212 o32 ftruncate64 sys_ftruncate64 sys_32_ftruncate64 +213 o32 stat64 sys_stat64 sys_newstat +214 o32 lstat64 sys_lstat64 sys_newlstat +215 o32 fstat64 sys_fstat64 sys_newfstat +216 o32 pivot_root sys_pivot_root +217 o32 mincore sys_mincore +218 o32 madvise sys_madvise +219 o32 getdents64 sys_getdents64 +220 o32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +221 o32 reserved221 sys_ni_syscall +222 o32 gettid sys_gettid +223 o32 readahead sys_readahead sys32_readahead +224 o32 setxattr sys_setxattr +225 o32 lsetxattr sys_lsetxattr +226 o32 fsetxattr sys_fsetxattr +227 o32 getxattr sys_getxattr +228 o32 lgetxattr sys_lgetxattr +229 o32 fgetxattr sys_fgetxattr +230 o32 listxattr sys_listxattr +231 o32 llistxattr sys_llistxattr +232 o32 flistxattr sys_flistxattr +233 o32 removexattr sys_removexattr +234 o32 lremovexattr sys_lremovexattr +235 o32 fremovexattr sys_fremovexattr +236 o32 tkill sys_tkill +237 o32 sendfile64 sys_sendfile64 +238 o32 futex sys_futex compat_sys_futex +239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +241 o32 io_setup sys_io_setup compat_sys_io_setup +242 o32 io_destroy sys_io_destroy +243 o32 io_getevents sys_io_getevents compat_sys_io_getevents +244 o32 io_submit sys_io_submit compat_sys_io_submit +245 o32 io_cancel sys_io_cancel +246 o32 exit_group sys_exit_group +247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +248 o32 epoll_create sys_epoll_create +249 o32 epoll_ctl sys_epoll_ctl +250 o32 epoll_wait sys_epoll_wait +251 o32 remap_file_pages sys_remap_file_pages +252 o32 set_tid_address sys_set_tid_address +253 o32 restart_syscall sys_restart_syscall +254 o32 fadvise64 sys_fadvise64_64 sys32_fadvise64_64 +255 o32 statfs64 sys_statfs64 compat_sys_statfs64 +256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +257 o32 timer_create sys_timer_create compat_sys_timer_create +258 o32 timer_settime sys_timer_settime compat_sys_timer_settime +259 o32 timer_gettime sys_timer_gettime compat_sys_timer_gettime +260 o32 timer_getoverrun sys_timer_getoverrun +261 o32 timer_delete sys_timer_delete +262 o32 clock_settime sys_clock_settime compat_sys_clock_settime +263 o32 clock_gettime sys_clock_gettime compat_sys_clock_gettime +264 o32 clock_getres sys_clock_getres compat_sys_clock_getres +265 o32 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +266 o32 tgkill sys_tgkill +267 o32 utimes sys_utimes compat_sys_utimes +268 o32 mbind sys_mbind compat_sys_mbind +269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +271 o32 mq_open sys_mq_open compat_sys_mq_open +272 o32 mq_unlink sys_mq_unlink +273 o32 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend +274 o32 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +275 o32 mq_notify sys_mq_notify 
compat_sys_mq_notify +276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +277 o32 vserver sys_ni_syscall +278 o32 waitid sys_waitid compat_sys_waitid +# 279 was sys_setaltroot +280 o32 add_key sys_add_key +281 o32 request_key sys_request_key +282 o32 keyctl sys_keyctl compat_sys_keyctl +283 o32 set_thread_area sys_set_thread_area +284 o32 inotify_init sys_inotify_init +285 o32 inotify_add_watch sys_inotify_add_watch +286 o32 inotify_rm_watch sys_inotify_rm_watch +287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages +288 o32 openat sys_openat compat_sys_openat +289 o32 mkdirat sys_mkdirat +290 o32 mknodat sys_mknodat +291 o32 fchownat sys_fchownat +292 o32 futimesat sys_futimesat compat_sys_futimesat +293 o32 fstatat64 sys_fstatat64 sys_newfstatat +294 o32 unlinkat sys_unlinkat +295 o32 renameat sys_renameat +296 o32 linkat sys_linkat +297 o32 symlinkat sys_symlinkat +298 o32 readlinkat sys_readlinkat +299 o32 fchmodat sys_fchmodat +300 o32 faccessat sys_faccessat +301 o32 pselect6 sys_pselect6 compat_sys_pselect6 +302 o32 ppoll sys_ppoll compat_sys_ppoll +303 o32 unshare sys_unshare +304 o32 splice sys_splice +305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range +306 o32 tee sys_tee +307 o32 vmsplice sys_vmsplice compat_sys_vmsplice +308 o32 move_pages sys_move_pages compat_sys_move_pages +309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list +310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list +311 o32 kexec_load sys_kexec_load compat_sys_kexec_load +312 o32 getcpu sys_getcpu +313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +314 o32 ioprio_set sys_ioprio_set +315 o32 ioprio_get sys_ioprio_get +316 o32 utimensat sys_utimensat compat_sys_utimensat +317 o32 signalfd sys_signalfd compat_sys_signalfd +318 o32 timerfd sys_ni_syscall +319 o32 eventfd sys_eventfd +320 o32 fallocate sys_fallocate sys32_fallocate +321 o32 timerfd_create sys_timerfd_create +322 o32 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +323 o32 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4 +325 o32 eventfd2 sys_eventfd2 +326 o32 epoll_create1 sys_epoll_create1 +327 o32 dup3 sys_dup3 +328 o32 pipe2 sys_pipe2 +329 o32 inotify_init1 sys_inotify_init1 +330 o32 preadv sys_preadv compat_sys_preadv +331 o32 pwritev sys_pwritev compat_sys_pwritev +332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +333 o32 perf_event_open sys_perf_event_open +334 o32 accept4 sys_accept4 +335 o32 recvmmsg sys_recvmmsg compat_sys_recvmmsg +336 o32 fanotify_init sys_fanotify_init +337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +338 o32 prlimit64 sys_prlimit64 +339 o32 name_to_handle_at sys_name_to_handle_at +340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +341 o32 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +342 o32 syncfs sys_syncfs +343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg +344 o32 setns sys_setns +345 o32 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv +346 o32 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +347 o32 kcmp sys_kcmp +348 o32 finit_module sys_finit_module +349 o32 sched_setattr sys_sched_setattr +350 o32 sched_getattr sys_sched_getattr +351 o32 renameat2 sys_renameat2 +352 o32 seccomp sys_seccomp +353 o32 getrandom sys_getrandom +354 o32 memfd_create sys_memfd_create +355 o32 bpf sys_bpf +356 o32 execveat sys_execveat 
compat_sys_execveat +357 o32 userfaultfd sys_userfaultfd +358 o32 membarrier sys_membarrier +359 o32 mlock2 sys_mlock2 +360 o32 copy_file_range sys_copy_file_range +361 o32 preadv2 sys_preadv2 compat_sys_preadv2 +362 o32 pwritev2 sys_pwritev2 compat_sys_pwritev2 +363 o32 pkey_mprotect sys_pkey_mprotect +364 o32 pkey_alloc sys_pkey_alloc +365 o32 pkey_free sys_pkey_free +366 o32 statx sys_statx +367 o32 rseq sys_rseq +368 o32 io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..d2bcfa8f4d1a --- /dev/null +++ b/arch/mips/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,37 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" + printf "\n" +) > "$out" diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh new file mode 100644 index 000000000000..60bbdb3fe03a --- /dev/null +++ b/arch/mips/kernel/syscalls/syscallnr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry compat ; do + nxt=$((nr+1)) + done + + printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}" + printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}" + printf "\n" + printf "#endif /* %s */" "${fileguard}" + printf "\n" +) > "$out" diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..acd338d33bbe --- /dev/null +++ b/arch/mips/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry compat ; do + if [ "$my_abi" = "64_o32" ] && [ ! 
-z "$compat" ]; then + emit $((nxt+offset)) $((nr+offset)) $compat + else + emit $((nxt+offset)) $((nr+offset)) $entry + fi + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 15e103c6d799..c91097f7b32f 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -50,6 +50,7 @@ #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/idle.h> +#include <asm/isa-rev.h> #include <asm/mips-cps.h> #include <asm/mips-r2-to-r6-emul.h> #include <asm/mipsregs.h> @@ -277,8 +278,10 @@ static void __show_regs(const struct pt_regs *regs) #ifdef CONFIG_CPU_HAS_SMARTMIPS printk("Acx : %0*lx\n", field, regs->acx); #endif - printk("Hi : %0*lx\n", field, regs->hi); - printk("Lo : %0*lx\n", field, regs->lo); + if (MIPS_ISA_REV < 6) { + printk("Hi : %0*lx\n", field, regs->hi); + printk("Lo : %0*lx\n", field, regs->lo); + } /* * Saved cp0 registers @@ -706,6 +709,8 @@ asmlinkage void do_ov(struct pt_regs *regs) exception_exit(prev_state); } +#ifdef CONFIG_MIPS_FP_SUPPORT + /* * Send SIGFPE according to FCSR Cause bits, which must have already * been masked against Enable bits. This is impotant as Inexact can @@ -794,9 +799,6 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode, regs->cp0_epc = old_epc; regs->regs[31] = old_ra; - /* Save the FP context to struct thread_struct */ - lose_fpu(1); - /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, &fault_addr); @@ -848,8 +850,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) * register operands before invoking the emulator, which seems * a bit extreme for what should be an infrequent event. */ - /* Ensure 'resume' not overwrite saved fp context again. */ - lose_fpu(1); /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, @@ -876,6 +876,45 @@ out: exception_exit(prev_state); } +/* + * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've + * emulated more than some threshold number of instructions, force migration to + * a "CPU" that has FP support. + */ +static void mt_ase_fp_affinity(void) +{ +#ifdef CONFIG_MIPS_MT_FPAFF + if (mt_fpemul_threshold > 0 && + ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { + /* + * If there's no FPU present, or if the application has already + * restricted the allowed set to exclude any CPUs with FPUs, + * we'll skip the procedure. + */ + if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { + cpumask_t tmask; + + current->thread.user_cpus_allowed + = current->cpus_allowed; + cpumask_and(&tmask, ¤t->cpus_allowed, + &mt_fpu_cpumask); + set_cpus_allowed_ptr(current, &tmask); + set_thread_flag(TIF_FPUBOUND); + } + } +#endif /* CONFIG_MIPS_MT_FPAFF */ +} + +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +static int simulate_fp(struct pt_regs *regs, unsigned int opcode, + unsigned long old_epc, unsigned long old_ra) +{ + return -1; +} + +#endif /* !CONFIG_MIPS_FP_SUPPORT */ + void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, const char *str) { @@ -1160,35 +1199,6 @@ out: } /* - * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've - * emulated more than some threshold number of instructions, force migration to - * a "CPU" that has FP support. 
- */ -static void mt_ase_fp_affinity(void) -{ -#ifdef CONFIG_MIPS_MT_FPAFF - if (mt_fpemul_threshold > 0 && - ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { - /* - * If there's no FPU present, or if the application has already - * restricted the allowed set to exclude any CPUs with FPUs, - * we'll skip the procedure. - */ - if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) { - cpumask_t tmask; - - current->thread.user_cpus_allowed - = current->cpus_allowed; - cpumask_and(&tmask, &current->cpus_allowed, - &mt_fpu_cpumask); - set_cpus_allowed_ptr(current, &tmask); - set_thread_flag(TIF_FPUBOUND); - } - } -#endif /* CONFIG_MIPS_MT_FPAFF */ -} - -/* * No lock; only written during early bootup by CPU 0. */ static RAW_NOTIFIER_HEAD(cu2_chain); @@ -1215,23 +1225,25 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } +#ifdef CONFIG_MIPS_FP_SUPPORT + static int enable_restore_fp_context(int msa) { int err, was_fpu_owner, prior_msa; + bool first_fp; + + /* Initialize context if it hasn't been used already */ + first_fp = init_fp_ctx(current); - if (!used_math()) { - /* First time FP context user. */ + if (first_fp) { preempt_disable(); - err = init_fpu(); + err = own_fpu_inatomic(1); if (msa && !err) { enable_msa(); - init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } preempt_enable(); - if (!err) - set_used_math(); return err; } @@ -1322,17 +1334,23 @@ out: return 0; } +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +static int enable_restore_fp_context(int msa) +{ + return SIGILL; +} + +#endif /* CONFIG_MIPS_FP_SUPPORT */ + asmlinkage void do_cpu(struct pt_regs *regs) { enum ctx_state prev_state; unsigned int __user *epc; unsigned long old_epc, old31; - void __user *fault_addr; unsigned int opcode; - unsigned long fcr31; unsigned int cpid; - int status, err; - int sig; + int status; prev_state = exception_enter(); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; @@ -1370,6 +1388,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) break; +#ifdef CONFIG_MIPS_FP_SUPPORT case 3: /* * The COP3 opcode space and consequently the CP0.Status.CU3 @@ -1389,7 +1408,11 @@ asmlinkage void do_cpu(struct pt_regs *regs) } /* Fall through. 
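The enable_restore_fp_context() rework above leans on init_fp_ctx() telling its caller whether the context was only just initialized, which is what lets the old used_math()/set_used_math() bookkeeping go away. A minimal sketch of the assumed contract (the real helper lives elsewhere in the tree; reading its return value as "was this the first use" is inferred from the first_fp logic above):

/* Sketch only: set up the FP context on first use and report
 * whether initialization actually happened. */
static bool init_fp_ctx_sketch(void)
{
	if (used_math())	/* context already live, nothing to do */
		return false;

	/* ... set FCSR and the FP registers to their defaults ... */
	set_used_math();
	return true;		/* caller takes the first-use path */
}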
*/ - case 1: + case 1: { + void __user *fault_addr; + unsigned long fcr31; + int err, sig; + err = enable_restore_fp_context(0); if (raw_cpu_has_fpu && !err) @@ -1410,6 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs) mt_ase_fp_affinity(); break; + } +#else /* CONFIG_MIPS_FP_SUPPORT */ + case 1: + case 3: + force_sig(SIGILL, current); + break; +#endif /* CONFIG_MIPS_FP_SUPPORT */ case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index ce446eed62d2..c60e7719ef77 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -882,18 +882,12 @@ do { \ static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { + unsigned long origpc, orig31, value; union mips_instruction insn; - unsigned long value; - unsigned int res, preempted; - unsigned long origpc; - unsigned long orig31; - void __user *fault_addr = NULL; + unsigned int res; #ifdef CONFIG_EVA mm_segment_t seg; #endif - union fpureg *fpr; - enum msa_2b_fmt df; - unsigned int wd; origpc = (unsigned long)pc; orig31 = regs->regs[31]; @@ -1212,15 +1206,18 @@ static void emulate_load_store_insn(struct pt_regs *regs, /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; +#ifdef CONFIG_MIPS_FP_SUPPORT + case lwc1_op: case ldc1_op: case swc1_op: case sdc1_op: - case cop1x_op: + case cop1x_op: { + void __user *fault_addr = NULL; + die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); - lose_fpu(1); /* Save FPU state for the emulator. */ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, &fault_addr); own_fpu(1); /* Restore FPU state. */ @@ -1231,8 +1228,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (res == 0) break; return; + } +#endif /* CONFIG_MIPS_FP_SUPPORT */ + +#ifdef CONFIG_CPU_HAS_MSA + + case msa_op: { + unsigned int wd, preempted; + enum msa_2b_fmt df; + union fpureg *fpr; - case msa_op: if (!cpu_has_msa) goto sigill; @@ -1309,6 +1314,8 @@ static void emulate_load_store_insn(struct pt_regs *regs, compute_return_epc(regs); break; + } +#endif /* CONFIG_CPU_HAS_MSA */ #ifndef CONFIG_CPU_MIPSR6 /* @@ -1393,7 +1400,6 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, unsigned long origpc, contpc; union mips_instruction insn; struct mm_decoded_insn mminsn; - void __user *fault_addr = NULL; origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -1709,6 +1715,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, /* LL,SC,LLD,SCD are not serviced */ goto sigbus; +#ifdef CONFIG_MIPS_FP_SUPPORT case mm_pool32f_op: switch (insn.mm_x_format.func) { case mm_lwxc1_func: @@ -1723,7 +1730,9 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, case mm_ldc132_op: case mm_sdc132_op: case mm_lwc132_op: - case mm_swc132_op: + case mm_swc132_op: { + void __user *fault_addr = NULL; + fpu_emul: /* roll back jump/branch */ regs->cp0_epc = origpc; @@ -1733,7 +1742,6 @@ fpu_emul: BUG_ON(!used_math()); BUG_ON(!is_fpu_owner()); - lose_fpu(1); /* save the FPU state for the emulator */ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, &fault_addr); own_fpu(1); /* restore FPU state */ @@ -1744,6 +1752,8 @@ fpu_emul: if (res == 0) goto success; return; + } +#endif /* CONFIG_MIPS_FP_SUPPORT */ case mm_lh32_op: reg = insn.mm_i_format.rt; @@ -2338,7 +2348,7 @@ asmlinkage void do_ade(struct pt_regs *regs) set_fs(seg); return; - } + } goto sigbus; } diff --git a/arch/mips/kernel/vdso.c 
b/arch/mips/kernel/vdso.c index 48a9c6b90e07..9df3ebdc7b0f 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map delay slot emulation page */ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ|VM_WRITE|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, 0, NULL); if (IS_ERR_VALUE(base)) { ret = base; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 971a504001c2..cb7e9ed7a453 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -72,7 +72,7 @@ SECTIONS /* Exception table for data bus errors */ __dbe_table : { __start___dbe_table = .; - *(__dbe_table) + KEEP(*(__dbe_table)) __stop___dbe_table = .; } @@ -123,7 +123,7 @@ SECTIONS . = ALIGN(4); .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) { __mips_machines_start = .; - *(.mips.machines.init) + KEEP(*(.mips.machines.init)) __mips_machines_end = .; } diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index 0e61a5b7647f..ba73b4077668 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -27,12 +27,15 @@ void mips_install_watch_registers(struct task_struct *t) case 4: write_c0_watchlo3(watches->watchlo[3]); write_c0_watchhi3(watchhi | watches->watchhi[3]); + /* fall through */ case 3: write_c0_watchlo2(watches->watchlo[2]); write_c0_watchhi2(watchhi | watches->watchhi[2]); + /* fall through */ case 2: write_c0_watchlo1(watches->watchlo[1]); write_c0_watchhi1(watchhi | watches->watchhi[1]); + /* fall through */ case 1: write_c0_watchlo0(watches->watchlo[0]); write_c0_watchhi0(watchhi | watches->watchhi[0]); @@ -55,10 +58,13 @@ void mips_read_watch_registers(void) BUG(); case 4: watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask); + /* fall through */ case 3: watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask); + /* fall through */ case 2: watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask); + /* fall through */ case 1: watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask); } @@ -85,18 +91,25 @@ void mips_clear_watch_registers(void) BUG(); case 8: write_c0_watchlo7(0); + /* fall through */ case 7: write_c0_watchlo6(0); + /* fall through */ case 6: write_c0_watchlo5(0); + /* fall through */ case 5: write_c0_watchlo4(0); + /* fall through */ case 4: write_c0_watchlo3(0); + /* fall through */ case 3: write_c0_watchlo2(0); + /* fall through */ case 2: write_c0_watchlo1(0); + /* fall through */ case 1: write_c0_watchlo0(0); } diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 76b93a9c8c9b..760aec70dce5 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -18,6 +18,7 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM + depends on MIPS_FP_SUPPORT select EXPORT_UASM select PREEMPT_NOTIFIERS select ANON_INODES diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 1fcc4d149054..3734cd58895e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1004,14 +1004,37 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - bool is_dirty = false; + bool flush = false; int r; mutex_lock(&kvm->slots_lock); - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); + r = kvm_get_dirty_log_protect(kvm, log, &flush); - if (is_dirty) { + if (flush) { + slots = kvm_memslots(kvm); + 
memslot = id_to_memslot(slots, log->slot); + + /* Let implementation handle TLB/GVA invalidation */ + kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); + } + + mutex_unlock(&kvm->slots_lock); + return r; +} + +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + bool flush = false; + int r; + + mutex_lock(&kvm->slots_lock); + + r = kvm_clear_dirty_log_protect(kvm, log, &flush); + + if (flush) { slots = kvm_memslots(kvm); memslot = id_to_memslot(slots, log->slot); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index d8dcdb350405..97e538a8c1be 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -551,7 +551,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, (pte_dirty(old_pte) && !pte_dirty(hva_pte)); } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { unsigned long end = hva + PAGE_SIZE; int ret; @@ -559,6 +559,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte); if (ret) kvm_mips_callbacks->flush_shadow_all(kvm); + return 0; } static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index 8f68ee02a8c2..72e5f8fb2b35 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -197,7 +197,8 @@ void __init prom_init_env(void) cpu_clock_freq = 797000000; break; case PRID_REV_LOONGSON3A_R1: - case PRID_REV_LOONGSON3A_R2: + case PRID_REV_LOONGSON3A_R2_0: + case PRID_REV_LOONGSON3A_R2_1: case PRID_REV_LOONGSON3A_R3_0: case PRID_REV_LOONGSON3A_R3_1: cpu_clock_freq = 900000000; diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c index 621d6af5f6eb..9efdfe430ff0 100644 --- a/arch/mips/loongson64/loongson-3/cop2-ex.c +++ b/arch/mips/loongson64/loongson-3/cop2-ex.c @@ -43,11 +43,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, /* If FPU is owned, we needn't init or restore fp */ if (!fpu_owned) { set_thread_flag(TIF_USEDFPU); - if (!used_math()) { - _init_fpu(current->thread.fpu.fcr31); - set_used_math(); - } else - _restore_fp(current); + init_fp_ctx(current); + _restore_fp(current); } preempt_enable(); diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index b5c1e0aa955e..8fba0aa48bf4 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -682,7 +682,8 @@ void play_dead(void) play_dead_at_ckseg1 = (void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead); break; - case PRID_REV_LOONGSON3A_R2: + case PRID_REV_LOONGSON3A_R2_0: + case PRID_REV_LOONGSON3A_R2_1: case PRID_REV_LOONGSON3A_R3_0: case PRID_REV_LOONGSON3A_R3_1: play_dead_at_ckseg1 = diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 62deb025970b..82e2993c1a2c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -2831,6 +2831,13 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, u16 *instr_ptr; int sig = 0; + /* + * Initialize context if it hasn't been used already, otherwise ensure + * it has been saved to struct thread_struct. 
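kvm_vm_ioctl_clear_dirty_log() above is the MIPS hook for KVM's manual dirty-log clearing: userspace fetches the dirty bitmap, copies the pages out, and only afterwards asks KVM to re-write-protect them, at which point the shadow memslot is flushed. A hedged userspace sketch of that last step (the struct and ioctl come from the common KVM UAPI and need the manual-protect capability enabled; the helper name is made up):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Re-protect pages whose contents have already been synced;
 * @bitmap holds the bits returned by KVM_GET_DIRTY_LOG. */
static int clear_synced_pages(int vm_fd, __u32 slot, void *bitmap,
			      __u32 num_pages)
{
	struct kvm_clear_dirty_log log = {
		.slot = slot,
		.num_pages = num_pages,	/* UAPI wants multiples of 64 */
		.first_page = 0,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}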
+ */ + if (!init_fp_ctx(current)) + lose_fpu(1); + oldepc = xcp->cp0_epc; do { prevepc = xcp->cp0_epc; diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 5450f4d1c920..e2d46cb93ca9 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, { int isa16 = get_isa16_mode(regs->cp0_epc); mips_instruction break_math; - struct emuframe __user *fr; - int err, fr_idx; + unsigned long fr_uaddr; + struct emuframe fr; + int fr_idx, ret; /* NOP is easy */ if (ir == 0) @@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, fr_idx = alloc_emuframe(); if (fr_idx == BD_EMUFRAME_NONE) return SIGBUS; - fr = &dsemul_page()[fr_idx]; /* Retrieve the appropriately encoded break instruction */ break_math = BREAK_MATH(isa16); /* Write the instructions to the frame */ if (isa16) { - err = __put_user(ir >> 16, - (u16 __user *)(&fr->emul)); - err |= __put_user(ir & 0xffff, - (u16 __user *)((long)(&fr->emul) + 2)); - err |= __put_user(break_math >> 16, - (u16 __user *)(&fr->badinst)); - err |= __put_user(break_math & 0xffff, - (u16 __user *)((long)(&fr->badinst) + 2)); + union mips_instruction _emul = { + .halfword = { ir >> 16, ir } + }; + union mips_instruction _badinst = { + .halfword = { break_math >> 16, break_math } + }; + + fr.emul = _emul.word; + fr.badinst = _badinst.word; } else { - err = __put_user(ir, &fr->emul); - err |= __put_user(break_math, &fr->badinst); + fr.emul = ir; + fr.badinst = break_math; } - if (unlikely(err)) { + /* Write the frame to user memory */ + fr_uaddr = (unsigned long)&dsemul_page()[fr_idx]; + ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), + FOLL_FORCE | FOLL_WRITE); + if (unlikely(ret != sizeof(fr))) { MIPS_FPU_EMU_INC_STATS(errors); free_emuframe(fr_idx, current->mm); return SIGBUS; @@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, atomic_set(&current->thread.bd_emu_frame, fr_idx); /* Change user register context to execute the frame */ - regs->cp0_epc = (unsigned long)&fr->emul | isa16; - - /* Ensure the icache observes our newly written frame */ - flush_cache_sigtramp((unsigned long)&fr->emul); + regs->cp0_epc = fr_uaddr | isa16; return 0; } diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c index 62566385ce0e..58798f527356 100644 --- a/arch/mips/math-emu/me-debugfs.c +++ b/arch/mips/math-emu/me-debugfs.c @@ -183,17 +183,7 @@ static int fpuemustats_clear_show(struct seq_file *s, void *unused) return 0; } -static int fpuemustats_clear_open(struct inode *inode, struct file *file) -{ - return single_open(file, fpuemustats_clear_show, inode->i_private); -} - -static const struct file_operations fpuemustats_clear_fops = { - .open = fpuemustats_clear_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fpuemustats_clear); static int __init debugfs_fpuemu(void) { diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 3466fcdae0ca..01848cdf2074 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, pmd_t *pmdp; pte_t *ptep; - pr_debug("cpage[%08lx,%08lx]\n", + pr_debug("cpage[%08llx,%08lx]\n", cpu_context(smp_processor_id(), mm), addr); /* No ASID => no such page in the cache. 
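The dsemul.c rework above pairs with the earlier vdso.c hunk that dropped VM_WRITE from the delay-slot emulation page: userspace can no longer scribble on the frames, so the kernel now assembles each frame on its own stack and writes it out through access_process_vm(), where FOLL_FORCE overrides the missing write permission exactly as a ptrace poke would (the explicit flush_cache_sigtramp() also disappears, presumably because the kernel-side write path performs the needed cache maintenance). A minimal sketch of that write, with a made-up helper name:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Poke @len bytes into current's address space at @uaddr even though
 * the backing VMA is mapped read-only for userspace. */
static int poke_ro_user_mapping(unsigned long uaddr, void *buf, int len)
{
	int ret = access_process_vm(current, uaddr, buf, len,
				    FOLL_FORCE | FOLL_WRITE);

	return ret == len ? 0 : -EFAULT;
}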
*/ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 05bd77727fb9..d0b64df51eb2 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void) r4k_blast_scache = blast_scache128; } +static void (*r4k_blast_scache_node)(long node); + +static void r4k_blast_scache_node_setup(void) +{ + unsigned long sc_lsize = cpu_scache_line_size(); + + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache_node = (void *)cache_noop; + else if (sc_lsize == 16) + r4k_blast_scache_node = blast_scache16_node; + else if (sc_lsize == 32) + r4k_blast_scache_node = blast_scache32_node; + else if (sc_lsize == 64) + r4k_blast_scache_node = blast_scache64_node; + else if (sc_lsize == 128) + r4k_blast_scache_node = blast_scache128_node; +} + static inline void local_r4k___flush_cache_all(void * args) { switch (current_cpu_type()) { case CPU_LOONGSON2: - case CPU_LOONGSON3: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_scache(); break; + case CPU_LOONGSON3: + /* Use get_ebase_cpunum() for both NUMA=y/n */ + r4k_blast_scache_node(get_ebase_cpunum() >> 2); + break; + case CPU_BMIPS5000: r4k_blast_scache(); __sync(); @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { blast_scache_range(addr, addr + size); + } preempt_enable(); __sync(); return; @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else { + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { /* * There is no clearly documented alignment requirement * for the cache instruction on MIPS processors and @@ -1251,6 +1280,7 @@ static void probe_pcache(void) case CPU_VR4133: write_c0_config(config & ~VR41_CONF_P4K); + /* fall through */ case CPU_VR4131: /* Workaround for cache instruction bug of VR4131 */ if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || @@ -1352,7 +1382,7 @@ static void probe_pcache(void) c->dcache.ways * c->dcache.linesz; c->dcache.waybit = 0; - if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2) + if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0) c->options |= MIPS_CPU_PREFETCH; break; @@ -1498,6 +1528,7 @@ static void probe_pcache(void) c->dcache.flags |= MIPS_CACHE_PINDEX; break; } + /* fall through */ default: if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE) c->dcache.flags |= MIPS_CACHE_ALIASES; @@ -1918,6 +1949,7 @@ void r4k_cache_init(void) r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); + r4k_blast_scache_node_setup(); #ifdef CONFIG_EVA r4k_blast_dcache_user_page_setup(); r4k_blast_icache_user_page_setup(); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 067714291643..37b1cb246332 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -576,6 +576,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_R5500: if (m4kc_tlbp_war()) uasm_i_nop(p); + /* fall through */ case CPU_ALCHEMY: tlbw(p); break; diff --git 
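The c-r4k.c changes above exist because Loongson-3's inclusive last-level cache is per-NUMA-node: an indexed blast only scrubs the node the executing CPU sits on, so the new r4k_blast_scache_node() variants take an explicit node id, derived from EBase.CPUNum for whole-cache flushes and from pa_to_nid() for address-ranged ones. The node arithmetic, assuming the four-cores-per-node layout implied by the '>> 2':

/* With four cores per Loongson-3 node, EBase.CPUNum 0-3 map to
 * node 0, 4-7 to node 1, and so on. */
static inline long loongson3_node_of(unsigned int ebase_cpunum)
{
	return ebase_cpunum >> 2;	/* divide by cores per node */
}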
a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 24e5b0d06899..75ef90486fe6 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -104,6 +104,7 @@ static const struct insn insn_table_MM[insn_invalid] = { [insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD}, [insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD}, + [insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD}, [insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD}, [insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD}, [insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD}, diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 60ceb93c71a0..6abe40fc413d 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -171,6 +171,7 @@ static const struct insn insn_table[insn_invalid] = { [insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, [insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD}, [insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE}, + [insn_srav] = {M(spec_op, 0, 0, 0, 0, srav_op), RS | RT | RD}, [insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE}, [insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD}, [insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD}, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 57570c0649b4..45b6264ff308 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -61,10 +61,10 @@ enum opcode { insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv, - insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srl, - insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp, - insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor, - insn_xori, insn_yield, + insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav, + insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, + insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, + insn_xor, insn_xori, insn_yield, insn_invalid /* insn_invalid must be last */ }; @@ -353,6 +353,7 @@ I_u2u1s3(_slti) I_u2u1s3(_sltiu) I_u3u1u2(_sltu) I_u2u1u3(_sra) +I_u3u2u1(_srav) I_u2u1u3(_srl) I_u3u2u1(_srlv) I_u2u1u3(_rotr) diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 17c7fd471a27..94c11f5eac74 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -6,7 +6,6 @@ # Copyright (C) 2008 Wind River Systems, Inc. # written by Ralf Baechle <ralf@linux-mips.org> # -obj-y += malta-display.o obj-y += malta-dt.o obj-y += malta-dtshim.o obj-y += malta-init.o diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c deleted file mode 100644 index ee0bd50f754b..000000000000 --- a/arch/mips/mti-malta/malta-display.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Display routines for display messages in MIPS boards ascii display. - * - * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. - * All rights reserved. - * Authors: Carsten Langgaard <carstenl@mips.com> - * Steven J. 
Hill <sjhill@mips.com> - */ -#include <linux/compiler.h> -#include <linux/timer.h> -#include <linux/io.h> - -#include <asm/mips-boards/generic.h> - -extern const char display_string[]; -static unsigned int display_count; -static unsigned int max_display_count; - -void mips_display_message(const char *str) -{ - static unsigned int __iomem *display = NULL; - int i; - - if (unlikely(display == NULL)) - display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int)); - - for (i = 0; i <= 14; i += 2) { - if (*str) - __raw_writel(*str++, display + i); - else - __raw_writel(' ', display + i); - } -} - -static void scroll_display_message(struct timer_list *unused); -static DEFINE_TIMER(mips_scroll_timer, scroll_display_message); - -static void scroll_display_message(struct timer_list *unused) -{ - mips_display_message(&display_string[display_count++]); - if (display_count == max_display_count) - display_count = 0; - - mod_timer(&mips_scroll_timer, jiffies + HZ); -} - -void mips_scroll_message(void) -{ - del_timer_sync(&mips_scroll_timer); - max_display_count = strlen(display_string) + 1 - 8; - mod_timer(&mips_scroll_timer, jiffies + 1); -} diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 009f2918b320..ff2c1d809538 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -118,8 +118,6 @@ phys_addr_t mips_cpc_default_phys_base(void) void __init prom_init(void) { - mips_display_message("LINUX"); - /* * early setup of _pcictrl_bonito so that we can determine * the system controller on a CORE_EMUL board @@ -277,7 +275,6 @@ mips_pci_controller: default: /* Unknown system controller */ - mips_display_message("SC Error"); while (1); /* We die here... */ } board_nmi_handler_setup = mips_nmi_setup; diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index 5d4c5e5fbd69..85c6c11ebcea 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -81,8 +81,6 @@ const char *get_system_type(void) return "MIPS Malta"; } -const char display_string[] = " LINUX ON MALTA "; - #ifdef CONFIG_BLK_DEV_FD static void __init fd_activate(void) { diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index d22b7edc3886..f403574a1e6f 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -251,8 +251,6 @@ void __init plat_time_init(void) printk("CPU frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); - mips_scroll_message(); - #ifdef CONFIG_I8253 /* Only Malta has a PIT. 
*/ setup_pit_timer(); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 4d8cb9bb8365..3a0e34f4e615 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1159,19 +1159,19 @@ jmp_cmp: emit_load(r_A, r_skb, off, ctx); break; case BPF_ANC | SKF_AD_VLAN_TAG: - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: ctx->flags |= SEEN_SKB | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); off = offsetof(struct sk_buff, vlan_tci); - emit_half_load_unsigned(r_s0, r_skb, off, ctx); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { - emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx); - } else { - emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); - /* return 1 if present */ - emit_sltu(r_A, r_zero, r_A, ctx); - } + emit_half_load_unsigned(r_A, r_skb, off, ctx); + break; + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: + ctx->flags |= SEEN_SKB | SEEN_A; + emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx); + if (PKT_VLAN_PRESENT_BIT) + emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx); + if (PKT_VLAN_PRESENT_BIT < 7) + emit_andi(r_A, r_A, 1, ctx); break; case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index aeb7b1b0f202..b16710a8a9e7 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -854,6 +854,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); dst = ebpf_to_mips_reg(ctx, insn, dst_reg); if (src < 0 || dst < 0) @@ -913,6 +914,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, case BPF_RSH: emit_instr(ctx, srlv, dst, dst, src); break; + case BPF_ARSH: + emit_instr(ctx, srav, dst, dst, src); + break; default: pr_err("ALU_REG NOT HANDLED\n"); return -EINVAL; diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c index 8feae9154baf..45266406b585 100644 --- a/arch/mips/pci/fixup-sb1250.c +++ b/arch/mips/pci/fixup-sb1250.c @@ -1,6 +1,7 @@ /* * Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved. * Author: Maciej W. Rozycki <macro@mips.com> + * Copyright (C) 2018 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -8,6 +9,7 @@ * 2 of the License, or (at your option) any later version. */ +#include <linux/dma-mapping.h> #include <linux/pci.h> /* @@ -22,6 +24,57 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, quirk_sb1250_pci); /* + * The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit + * bus, so we set the bus's DMA mask accordingly. However the HT link + * down the artificial PCI-HT bridge supports 40-bit addressing and the + * SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus + * width, so we record the PCI-HT bridge's secondary and subordinate bus + * numbers and do not set the mask for devices present in the inclusive + * range of those. 
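The ebpf_jit.c hunk above is the consumer of the insn_srav plumbing added in the uasm files earlier: BPF_ALU|BPF_ARSH|BPF_X is an arithmetic right shift, and lowering it to srlv instead of srav would corrupt negative values. A small stand-alone illustration of the difference (note that '>>' on signed operands is formally implementation-defined in C, though arithmetic on the usual two's-complement compilers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t  s = -8;
	uint32_t u = (uint32_t)s;

	printf("%d\n", s >> 1);	/* -4: srav-style, sign bit copied down */
	printf("%u\n", u >> 1);	/* 2147483644: srlv-style, zero-filled */
	return 0;
}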
+ */ +struct sb1250_bus_dma_mask_exclude { + bool set; + unsigned char start; + unsigned char end; +}; + +static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data) +{ + struct sb1250_bus_dma_mask_exclude *exclude = data; + bool exclude_this; + bool ht_bridge; + + exclude_this = exclude->set && (dev->bus->number >= exclude->start && + dev->bus->number <= exclude->end); + ht_bridge = !exclude->set && (dev->vendor == PCI_VENDOR_ID_SIBYTE && + dev->device == PCI_DEVICE_ID_BCM1250_HT); + + if (exclude_this) { + dev_dbg(&dev->dev, "not disabling DAC for device"); + } else if (ht_bridge) { + exclude->start = dev->subordinate->number; + exclude->end = pci_bus_max_busnr(dev->subordinate); + exclude->set = true; + dev_dbg(&dev->dev, "not disabling DAC for [bus %02x-%02x]", + exclude->start, exclude->end); + } else { + dev_dbg(&dev->dev, "disabling DAC for device"); + dev->dev.bus_dma_mask = DMA_BIT_MASK(32); + } + + return 0; +} + +static void quirk_sb1250_pci_dac(struct pci_dev *dev) +{ + struct sb1250_bus_dma_mask_exclude exclude = { .set = false }; + + pci_walk_bus(dev->bus, sb1250_bus_dma_mask, &exclude); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, + quirk_sb1250_pci_dac); + +/* * The BCM1250, etc. PCI/HT bridge reports as a host bridge. */ static void quirk_sb1250_ht(struct pci_dev *dev) diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 958899ffe99c..bafbf69e7dc4 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -445,8 +445,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) /* find the PCI host bridge child node */ for_each_child_of_node(np, child) { - if (child->type && - of_node_cmp(child->type, "pci") == 0) { + if (of_node_is_type(child, "pci")) { rpc->pci_controller.of_node = child; break; } @@ -464,8 +463,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) for_each_available_child_of_node(rpc->pci_controller.of_node, child) { int devfn; - if (!child->type || - of_node_cmp(child->type, "pci") != 0) + if (!of_node_is_type(child, "pci")) continue; devfn = of_pci_get_devfn(child); diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 2b23ad640f39..828d8cc3a5df 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -23,6 +23,7 @@ #include <linux/mtd/platnand.h> #include <linux/mtd/mtd.h> #include <linux/gpio.h> +#include <linux/gpio/machine.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/serial_8250.h> @@ -127,14 +128,18 @@ static struct resource cf_slot0_res[] = { } }; -static struct cf_device cf_slot0_data = { - .gpio_pin = CF_GPIO_NUM +static struct gpiod_lookup_table cf_slot0_gpio_table = { + .dev_id = "pata-rb532-cf", + .table = { + GPIO_LOOKUP("gpio0", CF_GPIO_NUM, + NULL, GPIO_ACTIVE_HIGH), + { }, + }, }; static struct platform_device cf_slot0 = { .id = -1, .name = "pata-rb532-cf", - .dev.platform_data = &cf_slot0_data, .resource = cf_slot0_res, .num_resources = ARRAY_SIZE(cf_slot0_res), }; @@ -305,6 +310,7 @@ static int __init plat_setup_devices(void) dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data); + gpiod_add_lookup_table(&cf_slot0_gpio_table); return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); } diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index b3d6bf23a662..3ef3fb658136 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,4 +1,5 @@ obj-y := cfe.o +obj-$(CONFIG_SWIOTLB) += dma.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += 
bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c new file mode 100644 index 000000000000..eb47a94f3583 --- /dev/null +++ b/arch/mips/sibyte/common/dma.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DMA support for Broadcom SiByte platforms. + * + * Copyright (c) 2018 Maciej W. Rozycki + */ + +#include <linux/swiotlb.h> +#include <asm/bootinfo.h> + +void __init plat_swiotlb_setup(void) +{ + swiotlb_init(1); +} diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 58a0315ad743..f6fd340e39c2 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -50,6 +50,7 @@ VDSO_LDFLAGS := \ $(call cc-ldoption, -Wl$(comma)--build-id) GCOV_PROFILE := n +UBSAN_SANITIZE := n # # Shared build commands. diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 7a04adacb2f0..1af6bbae7220 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -11,7 +11,6 @@ config NDS32 select CLKSRC_MMIO select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 7e95506e957a..f6c4b0f49997 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -4,7 +4,6 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP - select DMA_DIRECT_OPS select TIMER_OF select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 285f7d05c8ed..d0feebad5a8f 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -7,7 +7,6 @@ config OPENRISC def_bool y select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_DIRECT_OPS select OF select OF_EARLY_FLATTREE select IRQ_DOMAIN diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 159336adfa2f..f79457cb3741 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -89,7 +89,7 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, .mm = &init_mm }; - page = alloc_pages_exact(size, gfp); + page = alloc_pages_exact(size, gfp | __GFP_ZERO); if (!page) return NULL; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 92a339ee28b3..6e1b71da0e71 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -11,6 +11,7 @@ config PARISC select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_MEMORY_FAILURE select RTC_CLASS select RTC_DRV_GENERIC @@ -184,7 +185,6 @@ config PA11 depends on PA7000 || PA7100LC || PA7200 || PA7300LC select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_DIRECT_OPS select DMA_NONCOHERENT_CACHE_SYNC config PREFETCH diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 1085385e1f06..c19af26febe6 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -163,3 +163,6 @@ define archhelp @echo ' copy to $$(INSTALL_PATH)' @echo ' zinstall - Install compressed vmlinuz kernel' endef + +archheaders: + $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 2013d639e735..0b1e354c8c24 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,3 +1,6 @@ +generated-y += syscall_table_32.h +generated-y += syscall_table_64.h +generated-y += syscall_table_c32.h generic-y += 
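The new sibyte/common/dma.c above ties in with the sb1250 DAC quirk a couple of files back: once a device's bus_dma_mask is capped at 32 bits, streaming DMA to memory above 4 GB has to bounce, and calling swiotlb_init(1) from plat_swiotlb_setup() provides the bounce pool. Drivers stay oblivious; a hedged sketch of what the DMA core then does on their behalf:

#include <linux/dma-mapping.h>

/* An ordinary streaming mapping; if @buf sits above the device's
 * 32-bit bus_dma_mask, the core transparently substitutes a bounce
 * buffer drawn from the swiotlb pool set up above. */
static int send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand @handle to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}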
barrier.h generic-y += current.h generic-y += device.h diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h index bf485a94d0b4..793d8baa3a10 100644 --- a/arch/parisc/include/asm/alternative.h +++ b/arch/parisc/include/asm/alternative.h @@ -2,6 +2,7 @@ #ifndef __ASM_PARISC_ALTERNATIVE_H #define __ASM_PARISC_ALTERNATIVE_H +#define ALT_COND_ALWAYS 0x80 /* always replace instruction */ #define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */ #define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */ #define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */ @@ -26,6 +27,9 @@ struct alt_instr { }; void set_kernel_text_rw(int enable_read_write); +void apply_alternatives_all(void); +void apply_alternatives(struct alt_instr *start, struct alt_instr *end, + const char *module_name); /* Alternative SMP implementation. */ #define ALTERNATIVE(cond, replacement) "!0:" \ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index bc37a4953eaa..c2c2afb28941 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -4,10 +4,18 @@ #include <uapi/asm/unistd.h> +#define __NR_Linux_syscalls __NR_syscalls + #ifndef __ASSEMBLY__ #define SYS_ify(syscall_name) __NR_##syscall_name +#define __IGNORE_select /* newselect */ +#define __IGNORE_fadvise64 /* fadvise64_64 */ +#define __IGNORE_pkey_mprotect +#define __IGNORE_pkey_alloc +#define __IGNORE_pkey_free + #ifndef ASM_LINE_SEP # define ASM_LINE_SEP ; #endif diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index adb5c64831c7..d31b4261cafc 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -1,6 +1,8 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h +generated-y += unistd_64.h generic-y += auxvec.h generic-y += bpf_perf_event.h generic-y += kvm_para.h diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index dc77c5a51db7..98dc953656af 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -2,379 +2,12 @@ #ifndef _UAPI_ASM_PARISC_UNISTD_H_ #define _UAPI_ASM_PARISC_UNISTD_H_ -/* - * Linux system call numbers. - * - * Cary Coutant says that we should just use another syscall gateway - * page to avoid clashing with the HPUX space, and I think he's right: - * it will would keep a branch out of our syscall entry path, at the - * very least. If we decide to change it later, we can ``just'' tweak - * the LINUX_GATEWAY_ADDR define at the bottom and make __NR_Linux be - * 1024 or something. Oh, and recompile libc. 
=) - */ +#ifdef __LP64__ +#include <asm/unistd_64.h> +#else +#include <asm/unistd_32.h> +#endif -#define __NR_Linux 0 -#define __NR_restart_syscall (__NR_Linux + 0) -#define __NR_exit (__NR_Linux + 1) -#define __NR_fork (__NR_Linux + 2) -#define __NR_read (__NR_Linux + 3) -#define __NR_write (__NR_Linux + 4) -#define __NR_open (__NR_Linux + 5) -#define __NR_close (__NR_Linux + 6) -#define __NR_waitpid (__NR_Linux + 7) -#define __NR_creat (__NR_Linux + 8) -#define __NR_link (__NR_Linux + 9) -#define __NR_unlink (__NR_Linux + 10) -#define __NR_execve (__NR_Linux + 11) -#define __NR_chdir (__NR_Linux + 12) -#define __NR_time (__NR_Linux + 13) -#define __NR_mknod (__NR_Linux + 14) -#define __NR_chmod (__NR_Linux + 15) -#define __NR_lchown (__NR_Linux + 16) -#define __NR_socket (__NR_Linux + 17) -#define __NR_stat (__NR_Linux + 18) -#define __NR_lseek (__NR_Linux + 19) -#define __NR_getpid (__NR_Linux + 20) -#define __NR_mount (__NR_Linux + 21) -#define __NR_bind (__NR_Linux + 22) -#define __NR_setuid (__NR_Linux + 23) -#define __NR_getuid (__NR_Linux + 24) -#define __NR_stime (__NR_Linux + 25) -#define __NR_ptrace (__NR_Linux + 26) -#define __NR_alarm (__NR_Linux + 27) -#define __NR_fstat (__NR_Linux + 28) -#define __NR_pause (__NR_Linux + 29) -#define __NR_utime (__NR_Linux + 30) -#define __NR_connect (__NR_Linux + 31) -#define __NR_listen (__NR_Linux + 32) -#define __NR_access (__NR_Linux + 33) -#define __NR_nice (__NR_Linux + 34) -#define __NR_accept (__NR_Linux + 35) -#define __NR_sync (__NR_Linux + 36) -#define __NR_kill (__NR_Linux + 37) -#define __NR_rename (__NR_Linux + 38) -#define __NR_mkdir (__NR_Linux + 39) -#define __NR_rmdir (__NR_Linux + 40) -#define __NR_dup (__NR_Linux + 41) -#define __NR_pipe (__NR_Linux + 42) -#define __NR_times (__NR_Linux + 43) -#define __NR_getsockname (__NR_Linux + 44) -#define __NR_brk (__NR_Linux + 45) -#define __NR_setgid (__NR_Linux + 46) -#define __NR_getgid (__NR_Linux + 47) -#define __NR_signal (__NR_Linux + 48) -#define __NR_geteuid (__NR_Linux + 49) -#define __NR_getegid (__NR_Linux + 50) -#define __NR_acct (__NR_Linux + 51) -#define __NR_umount2 (__NR_Linux + 52) -#define __NR_getpeername (__NR_Linux + 53) -#define __NR_ioctl (__NR_Linux + 54) -#define __NR_fcntl (__NR_Linux + 55) -#define __NR_socketpair (__NR_Linux + 56) -#define __NR_setpgid (__NR_Linux + 57) -#define __NR_send (__NR_Linux + 58) -#define __NR_uname (__NR_Linux + 59) -#define __NR_umask (__NR_Linux + 60) -#define __NR_chroot (__NR_Linux + 61) -#define __NR_ustat (__NR_Linux + 62) -#define __NR_dup2 (__NR_Linux + 63) -#define __NR_getppid (__NR_Linux + 64) -#define __NR_getpgrp (__NR_Linux + 65) -#define __NR_setsid (__NR_Linux + 66) -#define __NR_pivot_root (__NR_Linux + 67) -#define __NR_sgetmask (__NR_Linux + 68) -#define __NR_ssetmask (__NR_Linux + 69) -#define __NR_setreuid (__NR_Linux + 70) -#define __NR_setregid (__NR_Linux + 71) -#define __NR_mincore (__NR_Linux + 72) -#define __NR_sigpending (__NR_Linux + 73) -#define __NR_sethostname (__NR_Linux + 74) -#define __NR_setrlimit (__NR_Linux + 75) -#define __NR_getrlimit (__NR_Linux + 76) -#define __NR_getrusage (__NR_Linux + 77) -#define __NR_gettimeofday (__NR_Linux + 78) -#define __NR_settimeofday (__NR_Linux + 79) -#define __NR_getgroups (__NR_Linux + 80) -#define __NR_setgroups (__NR_Linux + 81) -#define __NR_sendto (__NR_Linux + 82) -#define __NR_symlink (__NR_Linux + 83) -#define __NR_lstat (__NR_Linux + 84) -#define __NR_readlink (__NR_Linux + 85) -#define __NR_uselib (__NR_Linux + 86) -#define __NR_swapon 
(__NR_Linux + 87) -#define __NR_reboot (__NR_Linux + 88) -#define __NR_mmap2 (__NR_Linux + 89) -#define __NR_mmap (__NR_Linux + 90) -#define __NR_munmap (__NR_Linux + 91) -#define __NR_truncate (__NR_Linux + 92) -#define __NR_ftruncate (__NR_Linux + 93) -#define __NR_fchmod (__NR_Linux + 94) -#define __NR_fchown (__NR_Linux + 95) -#define __NR_getpriority (__NR_Linux + 96) -#define __NR_setpriority (__NR_Linux + 97) -#define __NR_recv (__NR_Linux + 98) -#define __NR_statfs (__NR_Linux + 99) -#define __NR_fstatfs (__NR_Linux + 100) -#define __NR_stat64 (__NR_Linux + 101) -/* #define __NR_socketcall (__NR_Linux + 102) */ -#define __NR_syslog (__NR_Linux + 103) -#define __NR_setitimer (__NR_Linux + 104) -#define __NR_getitimer (__NR_Linux + 105) -#define __NR_capget (__NR_Linux + 106) -#define __NR_capset (__NR_Linux + 107) -#define __NR_pread64 (__NR_Linux + 108) -#define __NR_pwrite64 (__NR_Linux + 109) -#define __NR_getcwd (__NR_Linux + 110) -#define __NR_vhangup (__NR_Linux + 111) -#define __NR_fstat64 (__NR_Linux + 112) -#define __NR_vfork (__NR_Linux + 113) -#define __NR_wait4 (__NR_Linux + 114) -#define __NR_swapoff (__NR_Linux + 115) -#define __NR_sysinfo (__NR_Linux + 116) -#define __NR_shutdown (__NR_Linux + 117) -#define __NR_fsync (__NR_Linux + 118) -#define __NR_madvise (__NR_Linux + 119) -#define __NR_clone (__NR_Linux + 120) -#define __NR_setdomainname (__NR_Linux + 121) -#define __NR_sendfile (__NR_Linux + 122) -#define __NR_recvfrom (__NR_Linux + 123) -#define __NR_adjtimex (__NR_Linux + 124) -#define __NR_mprotect (__NR_Linux + 125) -#define __NR_sigprocmask (__NR_Linux + 126) -#define __NR_create_module (__NR_Linux + 127) /* not used */ -#define __NR_init_module (__NR_Linux + 128) -#define __NR_delete_module (__NR_Linux + 129) -#define __NR_get_kernel_syms (__NR_Linux + 130) /* not used */ -#define __NR_quotactl (__NR_Linux + 131) -#define __NR_getpgid (__NR_Linux + 132) -#define __NR_fchdir (__NR_Linux + 133) -#define __NR_bdflush (__NR_Linux + 134) -#define __NR_sysfs (__NR_Linux + 135) -#define __NR_personality (__NR_Linux + 136) -#define __NR_afs_syscall (__NR_Linux + 137) /* not used */ -#define __NR_setfsuid (__NR_Linux + 138) -#define __NR_setfsgid (__NR_Linux + 139) -#define __NR__llseek (__NR_Linux + 140) -#define __NR_getdents (__NR_Linux + 141) -#define __NR__newselect (__NR_Linux + 142) -#define __NR_flock (__NR_Linux + 143) -#define __NR_msync (__NR_Linux + 144) -#define __NR_readv (__NR_Linux + 145) -#define __NR_writev (__NR_Linux + 146) -#define __NR_getsid (__NR_Linux + 147) -#define __NR_fdatasync (__NR_Linux + 148) -#define __NR__sysctl (__NR_Linux + 149) -#define __NR_mlock (__NR_Linux + 150) -#define __NR_munlock (__NR_Linux + 151) -#define __NR_mlockall (__NR_Linux + 152) -#define __NR_munlockall (__NR_Linux + 153) -#define __NR_sched_setparam (__NR_Linux + 154) -#define __NR_sched_getparam (__NR_Linux + 155) -#define __NR_sched_setscheduler (__NR_Linux + 156) -#define __NR_sched_getscheduler (__NR_Linux + 157) -#define __NR_sched_yield (__NR_Linux + 158) -#define __NR_sched_get_priority_max (__NR_Linux + 159) -#define __NR_sched_get_priority_min (__NR_Linux + 160) -#define __NR_sched_rr_get_interval (__NR_Linux + 161) -#define __NR_nanosleep (__NR_Linux + 162) -#define __NR_mremap (__NR_Linux + 163) -#define __NR_setresuid (__NR_Linux + 164) -#define __NR_getresuid (__NR_Linux + 165) -#define __NR_sigaltstack (__NR_Linux + 166) -#define __NR_query_module (__NR_Linux + 167) /* not used */ -#define __NR_poll (__NR_Linux + 168) -#define __NR_nfsservctl 
(__NR_Linux + 169) /* not used */ -#define __NR_setresgid (__NR_Linux + 170) -#define __NR_getresgid (__NR_Linux + 171) -#define __NR_prctl (__NR_Linux + 172) -#define __NR_rt_sigreturn (__NR_Linux + 173) -#define __NR_rt_sigaction (__NR_Linux + 174) -#define __NR_rt_sigprocmask (__NR_Linux + 175) -#define __NR_rt_sigpending (__NR_Linux + 176) -#define __NR_rt_sigtimedwait (__NR_Linux + 177) -#define __NR_rt_sigqueueinfo (__NR_Linux + 178) -#define __NR_rt_sigsuspend (__NR_Linux + 179) -#define __NR_chown (__NR_Linux + 180) -#define __NR_setsockopt (__NR_Linux + 181) -#define __NR_getsockopt (__NR_Linux + 182) -#define __NR_sendmsg (__NR_Linux + 183) -#define __NR_recvmsg (__NR_Linux + 184) -#define __NR_semop (__NR_Linux + 185) -#define __NR_semget (__NR_Linux + 186) -#define __NR_semctl (__NR_Linux + 187) -#define __NR_msgsnd (__NR_Linux + 188) -#define __NR_msgrcv (__NR_Linux + 189) -#define __NR_msgget (__NR_Linux + 190) -#define __NR_msgctl (__NR_Linux + 191) -#define __NR_shmat (__NR_Linux + 192) -#define __NR_shmdt (__NR_Linux + 193) -#define __NR_shmget (__NR_Linux + 194) -#define __NR_shmctl (__NR_Linux + 195) -#define __NR_getpmsg (__NR_Linux + 196) /* not used */ -#define __NR_putpmsg (__NR_Linux + 197) /* not used */ -#define __NR_lstat64 (__NR_Linux + 198) -#define __NR_truncate64 (__NR_Linux + 199) -#define __NR_ftruncate64 (__NR_Linux + 200) -#define __NR_getdents64 (__NR_Linux + 201) -#define __NR_fcntl64 (__NR_Linux + 202) -#define __NR_attrctl (__NR_Linux + 203) /* not used */ -#define __NR_acl_get (__NR_Linux + 204) /* not used */ -#define __NR_acl_set (__NR_Linux + 205) /* not used */ -#define __NR_gettid (__NR_Linux + 206) -#define __NR_readahead (__NR_Linux + 207) -#define __NR_tkill (__NR_Linux + 208) -#define __NR_sendfile64 (__NR_Linux + 209) -#define __NR_futex (__NR_Linux + 210) -#define __NR_sched_setaffinity (__NR_Linux + 211) -#define __NR_sched_getaffinity (__NR_Linux + 212) -#define __NR_set_thread_area (__NR_Linux + 213) /* not used */ -#define __NR_get_thread_area (__NR_Linux + 214) /* not used */ -#define __NR_io_setup (__NR_Linux + 215) -#define __NR_io_destroy (__NR_Linux + 216) -#define __NR_io_getevents (__NR_Linux + 217) -#define __NR_io_submit (__NR_Linux + 218) -#define __NR_io_cancel (__NR_Linux + 219) -#define __NR_alloc_hugepages (__NR_Linux + 220) /* not used */ -#define __NR_free_hugepages (__NR_Linux + 221) /* not used */ -#define __NR_exit_group (__NR_Linux + 222) -#define __NR_lookup_dcookie (__NR_Linux + 223) -#define __NR_epoll_create (__NR_Linux + 224) -#define __NR_epoll_ctl (__NR_Linux + 225) -#define __NR_epoll_wait (__NR_Linux + 226) -#define __NR_remap_file_pages (__NR_Linux + 227) -#define __NR_semtimedop (__NR_Linux + 228) -#define __NR_mq_open (__NR_Linux + 229) -#define __NR_mq_unlink (__NR_Linux + 230) -#define __NR_mq_timedsend (__NR_Linux + 231) -#define __NR_mq_timedreceive (__NR_Linux + 232) -#define __NR_mq_notify (__NR_Linux + 233) -#define __NR_mq_getsetattr (__NR_Linux + 234) -#define __NR_waitid (__NR_Linux + 235) -#define __NR_fadvise64_64 (__NR_Linux + 236) -#define __NR_set_tid_address (__NR_Linux + 237) -#define __NR_setxattr (__NR_Linux + 238) -#define __NR_lsetxattr (__NR_Linux + 239) -#define __NR_fsetxattr (__NR_Linux + 240) -#define __NR_getxattr (__NR_Linux + 241) -#define __NR_lgetxattr (__NR_Linux + 242) -#define __NR_fgetxattr (__NR_Linux + 243) -#define __NR_listxattr (__NR_Linux + 244) -#define __NR_llistxattr (__NR_Linux + 245) -#define __NR_flistxattr (__NR_Linux + 246) -#define __NR_removexattr 
(__NR_Linux + 247) -#define __NR_lremovexattr (__NR_Linux + 248) -#define __NR_fremovexattr (__NR_Linux + 249) -#define __NR_timer_create (__NR_Linux + 250) -#define __NR_timer_settime (__NR_Linux + 251) -#define __NR_timer_gettime (__NR_Linux + 252) -#define __NR_timer_getoverrun (__NR_Linux + 253) -#define __NR_timer_delete (__NR_Linux + 254) -#define __NR_clock_settime (__NR_Linux + 255) -#define __NR_clock_gettime (__NR_Linux + 256) -#define __NR_clock_getres (__NR_Linux + 257) -#define __NR_clock_nanosleep (__NR_Linux + 258) -#define __NR_tgkill (__NR_Linux + 259) -#define __NR_mbind (__NR_Linux + 260) -#define __NR_get_mempolicy (__NR_Linux + 261) -#define __NR_set_mempolicy (__NR_Linux + 262) -#define __NR_vserver (__NR_Linux + 263) /* not used */ -#define __NR_add_key (__NR_Linux + 264) -#define __NR_request_key (__NR_Linux + 265) -#define __NR_keyctl (__NR_Linux + 266) -#define __NR_ioprio_set (__NR_Linux + 267) -#define __NR_ioprio_get (__NR_Linux + 268) -#define __NR_inotify_init (__NR_Linux + 269) -#define __NR_inotify_add_watch (__NR_Linux + 270) -#define __NR_inotify_rm_watch (__NR_Linux + 271) -#define __NR_migrate_pages (__NR_Linux + 272) -#define __NR_pselect6 (__NR_Linux + 273) -#define __NR_ppoll (__NR_Linux + 274) -#define __NR_openat (__NR_Linux + 275) -#define __NR_mkdirat (__NR_Linux + 276) -#define __NR_mknodat (__NR_Linux + 277) -#define __NR_fchownat (__NR_Linux + 278) -#define __NR_futimesat (__NR_Linux + 279) -#define __NR_fstatat64 (__NR_Linux + 280) -#define __NR_unlinkat (__NR_Linux + 281) -#define __NR_renameat (__NR_Linux + 282) -#define __NR_linkat (__NR_Linux + 283) -#define __NR_symlinkat (__NR_Linux + 284) -#define __NR_readlinkat (__NR_Linux + 285) -#define __NR_fchmodat (__NR_Linux + 286) -#define __NR_faccessat (__NR_Linux + 287) -#define __NR_unshare (__NR_Linux + 288) -#define __NR_set_robust_list (__NR_Linux + 289) -#define __NR_get_robust_list (__NR_Linux + 290) -#define __NR_splice (__NR_Linux + 291) -#define __NR_sync_file_range (__NR_Linux + 292) -#define __NR_tee (__NR_Linux + 293) -#define __NR_vmsplice (__NR_Linux + 294) -#define __NR_move_pages (__NR_Linux + 295) -#define __NR_getcpu (__NR_Linux + 296) -#define __NR_epoll_pwait (__NR_Linux + 297) -#define __NR_statfs64 (__NR_Linux + 298) -#define __NR_fstatfs64 (__NR_Linux + 299) -#define __NR_kexec_load (__NR_Linux + 300) -#define __NR_utimensat (__NR_Linux + 301) -#define __NR_signalfd (__NR_Linux + 302) -#define __NR_timerfd (__NR_Linux + 303) /* not used */ -#define __NR_eventfd (__NR_Linux + 304) -#define __NR_fallocate (__NR_Linux + 305) -#define __NR_timerfd_create (__NR_Linux + 306) -#define __NR_timerfd_settime (__NR_Linux + 307) -#define __NR_timerfd_gettime (__NR_Linux + 308) -#define __NR_signalfd4 (__NR_Linux + 309) -#define __NR_eventfd2 (__NR_Linux + 310) -#define __NR_epoll_create1 (__NR_Linux + 311) -#define __NR_dup3 (__NR_Linux + 312) -#define __NR_pipe2 (__NR_Linux + 313) -#define __NR_inotify_init1 (__NR_Linux + 314) -#define __NR_preadv (__NR_Linux + 315) -#define __NR_pwritev (__NR_Linux + 316) -#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) -#define __NR_perf_event_open (__NR_Linux + 318) -#define __NR_recvmmsg (__NR_Linux + 319) -#define __NR_accept4 (__NR_Linux + 320) -#define __NR_prlimit64 (__NR_Linux + 321) -#define __NR_fanotify_init (__NR_Linux + 322) -#define __NR_fanotify_mark (__NR_Linux + 323) -#define __NR_clock_adjtime (__NR_Linux + 324) -#define __NR_name_to_handle_at (__NR_Linux + 325) -#define __NR_open_by_handle_at (__NR_Linux + 326) -#define 
__NR_syncfs (__NR_Linux + 327)
-#define __NR_setns (__NR_Linux + 328)
-#define __NR_sendmmsg (__NR_Linux + 329)
-#define __NR_process_vm_readv (__NR_Linux + 330)
-#define __NR_process_vm_writev (__NR_Linux + 331)
-#define __NR_kcmp (__NR_Linux + 332)
-#define __NR_finit_module (__NR_Linux + 333)
-#define __NR_sched_setattr (__NR_Linux + 334)
-#define __NR_sched_getattr (__NR_Linux + 335)
-#define __NR_utimes (__NR_Linux + 336)
-#define __NR_renameat2 (__NR_Linux + 337)
-#define __NR_seccomp (__NR_Linux + 338)
-#define __NR_getrandom (__NR_Linux + 339)
-#define __NR_memfd_create (__NR_Linux + 340)
-#define __NR_bpf (__NR_Linux + 341)
-#define __NR_execveat (__NR_Linux + 342)
-#define __NR_membarrier (__NR_Linux + 343)
-#define __NR_userfaultfd (__NR_Linux + 344)
-#define __NR_mlock2 (__NR_Linux + 345)
-#define __NR_copy_file_range (__NR_Linux + 346)
-#define __NR_preadv2 (__NR_Linux + 347)
-#define __NR_pwritev2 (__NR_Linux + 348)
-#define __NR_statx (__NR_Linux + 349)
-#define __NR_io_pgetevents (__NR_Linux + 350)
-
-#define __NR_Linux_syscalls (__NR_io_pgetevents + 1)
-
-
-#define __IGNORE_select /* newselect */
-#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
-#define LINUX_GATEWAY_ADDR 0x100
+#define LINUX_GATEWAY_ADDR 0x100
 #endif /* _UAPI_ASM_PARISC_UNISTD_H_ */
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index e5de34d00b1a..8e5f1ab65c68 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
 obj-y	:= cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
 	   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-	   ptrace.o hardware.o inventory.o drivers.o \
+	   ptrace.o hardware.o inventory.o drivers.o alternative.o \
 	   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 	   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
new file mode 100644
index 000000000000..bf2274e01a96
--- /dev/null
+++ b/arch/parisc/kernel/alternative.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alternative live-patching for parisc.
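+ *
+ * Worked example of the NOP replacement done below (illustration
+ * only): a run of len instructions is not overwritten with len NOPs;
+ * a single nullified branch over the run is planted instead, encoded
+ * as 0xe8000002 + (len-2)*8, so len == 2 gives 0xe8000002
+ * ("b,n .+8") and each further instruction to skip adds 8 to the
+ * encoded word.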
+ * Copyright (C) 2018 Helge Deller <deller@gmx.de>
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/alternative.h>
+
+#include <linux/module.h>
+
+static int no_alternatives;
+static int __init setup_no_alternatives(char *str)
+{
+	no_alternatives = 1;
+	return 1;
+}
+__setup("no-alternatives", setup_no_alternatives);
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+	struct alt_instr *end, const char *module_name)
+{
+	struct alt_instr *entry;
+	int index = 0, applied = 0;
+	int num_cpus = num_online_cpus();
+
+	for (entry = start; entry < end; entry++, index++) {
+
+		u32 *from, len, cond, replacement;
+
+		from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
+		len = entry->len;
+		cond = entry->cond;
+		replacement = entry->replacement;
+
+		WARN_ON(!cond);
+
+		if (cond != ALT_COND_ALWAYS && no_alternatives)
+			continue;
+
+		pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+			index, cond, len, from, replacement);
+
+		if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
+			continue;
+		if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
+			continue;
+		if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
+			continue;
+
+		/*
+		 * If the PDC_MODEL capabilities word has the Non-coherent
+		 * IO-PDIR bit set (bit #61, big endian), we have to flush
+		 * and sync every time IO-PDIR is changed in Ike/Astro.
+		 */
+		if ((cond & ALT_COND_NO_IOC_FDC) &&
+			(boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
+			continue;
+
+		/* Want to replace pdtlb by a pdtlb,l instruction? */
+		if (replacement == INSN_PxTLB) {
+			replacement = *from;
+			if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
+				replacement |= (1 << 10); /* set el bit */
+		}
+
+		/*
+		 * Replace instruction with NOPs?
+		 * For long distance insert a branch instruction instead.
+		 */
+		if (replacement == INSN_NOP && len > 1)
+			replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
+
+		pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+			index, cond, len, from, replacement);
+
+		/* Replace instruction */
+		*from = replacement;
+		applied++;
+	}
+
+	pr_info("%s%salternatives: applied %d out of %d patches\n",
+		module_name ? : "", module_name ? " " : "",
+		applied, index);
+}
+
+
+void __init apply_alternatives_all(void)
+{
+	set_kernel_text_rw(1);
+
+	apply_alternatives((struct alt_instr *) &__alt_instructions,
+		(struct alt_instr *) &__alt_instructions_end, NULL);
+
+	set_kernel_text_rw(0);
+}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index b5b3cb00f1fb..43778420614b 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -877,6 +877,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 	int i;
 	unsigned long nsyms;
 	const char *strtab = NULL;
+	const Elf_Shdr *s;
+	char *secstrings;
 	Elf_Sym *newptr, *oldptr;
 	Elf_Shdr *symhdr = NULL;
 #ifdef DEBUG
@@ -948,6 +950,18 @@ int module_finalize(const Elf_Ehdr *hdr,
 	nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
 	DEBUGP("NEW num_symtab %lu\n", nsyms);
 	symhdr->sh_size = nsyms * sizeof(Elf_Sym);
+
+	/* find .altinstructions section */
+	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+		void *aseg = (void *) s->sh_addr;
+		char *secname = secstrings + s->sh_name;
+
+		if (!strcmp(".altinstructions", secname))
+			/* patch .altinstructions */
+			apply_alternatives(aseg, aseg + s->sh_size, me->name);
+	}
+
 	return 0;
 }
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 04c48f1ef3fb..239162355b58 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -404,7 +404,7 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag, order);
+	paddr = __get_free_pages(flag | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
@@ -429,7 +429,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
 	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
 		return NULL;
-	addr = (void *)__get_free_pages(flag, get_order(size));
+	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
 	if (addr)
 		*dma_handle = (dma_addr_t)virt_to_phys(addr);
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index cd227f1cf629..f2cf86ac279b 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -99,10 +99,6 @@ void __init dma_ops_init(void)
 	case pcxl2:
 		pa7300lc_init();
-	case pcxl: /* falls through */
-	case pcxs:
-	case pcxt:
-		hppa_dma_ops = &dma_direct_ops;
 		break;
 	default:
 		break;
@@ -305,86 +301,6 @@ static int __init parisc_init_resources(void)
 	return 0;
 }
-static int no_alternatives __initdata;
-static int __init setup_no_alternatives(char *str)
-{
-	no_alternatives = 1;
-	return 1;
-}
-__setup("no-alternatives", setup_no_alternatives);
-
-static void __init apply_alternatives_all(void)
-{
-	struct alt_instr *entry;
-	int index = 0, applied = 0;
-
-
-	pr_info("alternatives: %spatching kernel code\n",
-		no_alternatives ?
"NOT " : ""); - if (no_alternatives) - return; - - set_kernel_text_rw(1); - - for (entry = (struct alt_instr *) &__alt_instructions; - entry < (struct alt_instr *) &__alt_instructions_end; - entry++, index++) { - - u32 *from, len, cond, replacement; - - from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); - len = entry->len; - cond = entry->cond; - replacement = entry->replacement; - - WARN_ON(!cond); - pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", - index, cond, len, from, replacement); - - if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1)) - continue; - if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0)) - continue; - if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0)) - continue; - - /* - * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit - * set (bit #61, big endian), we have to flush and sync every - * time IO-PDIR is changed in Ike/Astro. - */ - if ((cond & ALT_COND_NO_IOC_FDC) && - (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)) - continue; - - /* Want to replace pdtlb by a pdtlb,l instruction? */ - if (replacement == INSN_PxTLB) { - replacement = *from; - if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */ - replacement |= (1 << 10); /* set el bit */ - } - - /* - * Replace instruction with NOPs? - * For long distance insert a branch instruction instead. - */ - if (replacement == INSN_NOP && len > 1) - replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ - - pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", - index, cond, len, from, replacement); - - /* Replace instruction */ - *from = replacement; - applied++; - } - - pr_info("alternatives: applied %d out of %d patches\n", applied, index); - - set_kernel_text_rw(0); -} - - extern void gsc_init(void); extern void processor_init(void); extern void ccio_init(void); diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index a9bc90dc4ae7..4f77bd9be66b 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -923,19 +923,24 @@ ENTRY(lws_table) END(lws_table) /* End of lws table */ +#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry .align 8 ENTRY(sys_call_table) .export sys_call_table,data -#include "syscall_table.S" +#ifdef CONFIG_64BIT +#include <asm/syscall_table_c32.h> /* Compat syscalls */ +#else +#include <asm/syscall_table_32.h> /* 32-bit native syscalls */ +#endif END(sys_call_table) #ifdef CONFIG_64BIT .align 8 ENTRY(sys_call_table64) -#define SYSCALL_TABLE_64BIT -#include "syscall_table.S" +#include <asm/syscall_table_64.h> /* 64-bit native syscalls */ END(sys_call_table64) #endif +#undef __SYSCALL /* All light-weight-syscall atomic operations diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S deleted file mode 100644 index fe3f2a49d2b1..000000000000 --- a/arch/parisc/kernel/syscall_table.S +++ /dev/null @@ -1,459 +0,0 @@ -/* System Call Table - * - * Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org> - * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org> - * Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org> - * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> - * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> - * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> - * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org> - * Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org> - * Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org> - 
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> - * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org> - * Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> - * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> - * Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT) -/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and - * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific - * implementation is required on wide palinux. Use ENTRY_COMP where - * the compatibility layer has a useful 32-bit implementation. - */ -#define ENTRY_SAME(_name_) .dword sys_##_name_ -#define ENTRY_DIFF(_name_) .dword sys32_##_name_ -#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented -#define ENTRY_OURS(_name_) .dword parisc_##_name_ -#define ENTRY_COMP(_name_) .dword compat_sys_##_name_ -#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT) -#define ENTRY_SAME(_name_) .dword sys_##_name_ -#define ENTRY_DIFF(_name_) .dword sys_##_name_ -#define ENTRY_UHOH(_name_) .dword sys_##_name_ -#define ENTRY_OURS(_name_) .dword sys_##_name_ -#define ENTRY_COMP(_name_) .dword sys_##_name_ -#else -#define ENTRY_SAME(_name_) .word sys_##_name_ -#define ENTRY_DIFF(_name_) .word sys_##_name_ -#define ENTRY_UHOH(_name_) .word sys_##_name_ -#define ENTRY_OURS(_name_) .word parisc_##_name_ -#define ENTRY_COMP(_name_) .word sys_##_name_ -#endif - -90: ENTRY_SAME(restart_syscall) /* 0 */ -91: ENTRY_SAME(exit) - ENTRY_SAME(fork_wrapper) - ENTRY_SAME(read) - ENTRY_SAME(write) - ENTRY_COMP(open) /* 5 */ - ENTRY_SAME(close) - ENTRY_SAME(waitpid) - ENTRY_SAME(creat) - ENTRY_SAME(link) - ENTRY_SAME(unlink) /* 10 */ - ENTRY_COMP(execve) - ENTRY_SAME(chdir) - /* See comments in kernel/time.c!!! Maybe we don't need this? */ - ENTRY_COMP(time) - ENTRY_SAME(mknod) - ENTRY_SAME(chmod) /* 15 */ - ENTRY_SAME(lchown) - ENTRY_SAME(socket) - /* struct stat is MAYBE identical wide and narrow ?? */ - ENTRY_COMP(newstat) - ENTRY_COMP(lseek) - ENTRY_SAME(getpid) /* 20 */ - /* the 'void * data' parameter may need re-packing in wide */ - ENTRY_COMP(mount) - /* concerned about struct sockaddr in wide/narrow */ - /* ---> I think sockaddr is OK unless the compiler packs the struct */ - /* differently to align the char array */ - ENTRY_SAME(bind) - ENTRY_SAME(setuid) - ENTRY_SAME(getuid) - ENTRY_COMP(stime) /* 25 */ - ENTRY_COMP(ptrace) - ENTRY_SAME(alarm) - /* see stat comment */ - ENTRY_COMP(newfstat) - ENTRY_SAME(pause) - /* struct utimbuf uses time_t which might vary */ - ENTRY_COMP(utime) /* 30 */ - /* struct sockaddr... */ - ENTRY_SAME(connect) - ENTRY_SAME(listen) - ENTRY_SAME(access) - ENTRY_SAME(nice) - /* struct sockaddr... 
*/ - ENTRY_SAME(accept) /* 35 */ - ENTRY_SAME(sync) - ENTRY_SAME(kill) - ENTRY_SAME(rename) - ENTRY_SAME(mkdir) - ENTRY_SAME(rmdir) /* 40 */ - ENTRY_SAME(dup) - ENTRY_SAME(pipe) - ENTRY_COMP(times) - /* struct sockaddr... */ - ENTRY_SAME(getsockname) - /* it seems possible brk() could return a >4G pointer... */ - ENTRY_SAME(brk) /* 45 */ - ENTRY_SAME(setgid) - ENTRY_SAME(getgid) - ENTRY_SAME(signal) - ENTRY_SAME(geteuid) - ENTRY_SAME(getegid) /* 50 */ - ENTRY_SAME(acct) - ENTRY_SAME(umount) - /* struct sockaddr... */ - ENTRY_SAME(getpeername) - ENTRY_COMP(ioctl) - ENTRY_COMP(fcntl) /* 55 */ - ENTRY_SAME(socketpair) - ENTRY_SAME(setpgid) - ENTRY_SAME(send) - ENTRY_SAME(newuname) - ENTRY_SAME(umask) /* 60 */ - ENTRY_SAME(chroot) - ENTRY_COMP(ustat) - ENTRY_SAME(dup2) - ENTRY_SAME(getppid) - ENTRY_SAME(getpgrp) /* 65 */ - ENTRY_SAME(setsid) - ENTRY_SAME(pivot_root) - /* I don't like this */ - ENTRY_UHOH(sgetmask) - ENTRY_UHOH(ssetmask) - ENTRY_SAME(setreuid) /* 70 */ - ENTRY_SAME(setregid) - ENTRY_SAME(mincore) - ENTRY_COMP(sigpending) - ENTRY_SAME(sethostname) - /* Following 3 have linux-common-code structs containing longs -( */ - ENTRY_COMP(setrlimit) /* 75 */ - ENTRY_COMP(getrlimit) - ENTRY_COMP(getrusage) - /* struct timeval and timezone are maybe?? consistent wide and narrow */ - ENTRY_COMP(gettimeofday) - ENTRY_COMP(settimeofday) - ENTRY_SAME(getgroups) /* 80 */ - ENTRY_SAME(setgroups) - /* struct socketaddr... */ - ENTRY_SAME(sendto) - ENTRY_SAME(symlink) - /* see stat comment */ - ENTRY_COMP(newlstat) - ENTRY_SAME(readlink) /* 85 */ - ENTRY_SAME(ni_syscall) /* was uselib */ - ENTRY_SAME(swapon) - ENTRY_SAME(reboot) - ENTRY_SAME(mmap2) - ENTRY_SAME(mmap) /* 90 */ - ENTRY_SAME(munmap) - ENTRY_COMP(truncate) - ENTRY_COMP(ftruncate) - ENTRY_SAME(fchmod) - ENTRY_SAME(fchown) /* 95 */ - ENTRY_SAME(getpriority) - ENTRY_SAME(setpriority) - ENTRY_SAME(recv) - ENTRY_COMP(statfs) - ENTRY_COMP(fstatfs) /* 100 */ - ENTRY_SAME(stat64) - ENTRY_SAME(ni_syscall) /* was socketcall */ - ENTRY_SAME(syslog) - /* even though manpage says struct timeval contains longs, ours has - * time_t and suseconds_t -- both of which are safe wide/narrow */ - ENTRY_COMP(setitimer) - ENTRY_COMP(getitimer) /* 105 */ - ENTRY_SAME(capget) - ENTRY_SAME(capset) - ENTRY_OURS(pread64) - ENTRY_OURS(pwrite64) - ENTRY_SAME(getcwd) /* 110 */ - ENTRY_SAME(vhangup) - ENTRY_SAME(fstat64) - ENTRY_SAME(vfork_wrapper) - /* struct rusage contains longs... */ - ENTRY_COMP(wait4) - ENTRY_SAME(swapoff) /* 115 */ - ENTRY_COMP(sysinfo) - ENTRY_SAME(shutdown) - ENTRY_SAME(fsync) - ENTRY_SAME(madvise) - ENTRY_SAME(clone_wrapper) /* 120 */ - ENTRY_SAME(setdomainname) - ENTRY_COMP(sendfile) - /* struct sockaddr... */ - ENTRY_SAME(recvfrom) - /* struct timex contains longs */ - ENTRY_COMP(adjtimex) - ENTRY_SAME(mprotect) /* 125 */ - /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */ - ENTRY_COMP(sigprocmask) - ENTRY_SAME(ni_syscall) /* create_module */ - ENTRY_SAME(init_module) - ENTRY_SAME(delete_module) - ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */ - /* time_t inside struct dqblk */ - ENTRY_SAME(quotactl) - ENTRY_SAME(getpgid) - ENTRY_SAME(fchdir) - ENTRY_SAME(bdflush) - ENTRY_SAME(sysfs) /* 135 */ - ENTRY_OURS(personality) - ENTRY_SAME(ni_syscall) /* for afs_syscall */ - ENTRY_SAME(setfsuid) - ENTRY_SAME(setfsgid) - /* I think this might work */ - ENTRY_SAME(llseek) /* 140 */ - ENTRY_COMP(getdents) - /* it is POSSIBLE that select will be OK because even though fd_set - * contains longs, the macros and sizes are clever. 
*/ - ENTRY_COMP(select) - ENTRY_SAME(flock) - ENTRY_SAME(msync) - /* struct iovec contains pointers */ - ENTRY_COMP(readv) /* 145 */ - ENTRY_COMP(writev) - ENTRY_SAME(getsid) - ENTRY_SAME(fdatasync) - /* struct __sysctl_args is a mess */ - ENTRY_COMP(sysctl) - ENTRY_SAME(mlock) /* 150 */ - ENTRY_SAME(munlock) - ENTRY_SAME(mlockall) - ENTRY_SAME(munlockall) - /* struct sched_param is ok for now */ - ENTRY_SAME(sched_setparam) - ENTRY_SAME(sched_getparam) /* 155 */ - ENTRY_SAME(sched_setscheduler) - ENTRY_SAME(sched_getscheduler) - ENTRY_SAME(sched_yield) - ENTRY_SAME(sched_get_priority_max) - ENTRY_SAME(sched_get_priority_min) /* 160 */ - ENTRY_COMP(sched_rr_get_interval) - ENTRY_COMP(nanosleep) - ENTRY_SAME(mremap) - ENTRY_SAME(setresuid) - ENTRY_SAME(getresuid) /* 165 */ - ENTRY_COMP(sigaltstack) - ENTRY_SAME(ni_syscall) /* query_module */ - ENTRY_SAME(poll) - /* structs contain pointers and an in_addr... */ - ENTRY_SAME(ni_syscall) /* was nfsservctl */ - ENTRY_SAME(setresgid) /* 170 */ - ENTRY_SAME(getresgid) - ENTRY_SAME(prctl) - /* signals need a careful review */ - ENTRY_SAME(rt_sigreturn_wrapper) - ENTRY_COMP(rt_sigaction) - ENTRY_COMP(rt_sigprocmask) /* 175 */ - ENTRY_COMP(rt_sigpending) - ENTRY_COMP(rt_sigtimedwait) - /* even though the struct siginfo_t is different, it appears like - * all the paths use values which should be same wide and narrow. - * Also the struct is padded to 128 bytes which means we don't have - * to worry about faulting trying to copy in a larger 64-bit - * struct from a 32-bit user-space app. - */ - ENTRY_COMP(rt_sigqueueinfo) - ENTRY_COMP(rt_sigsuspend) - ENTRY_SAME(chown) /* 180 */ - /* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */ - ENTRY_COMP(setsockopt) - ENTRY_COMP(getsockopt) - ENTRY_COMP(sendmsg) - ENTRY_COMP(recvmsg) - ENTRY_SAME(semop) /* 185 */ - ENTRY_SAME(semget) - ENTRY_COMP(semctl) - ENTRY_COMP(msgsnd) - ENTRY_COMP(msgrcv) - ENTRY_SAME(msgget) /* 190 */ - ENTRY_COMP(msgctl) - ENTRY_COMP(shmat) - ENTRY_SAME(shmdt) - ENTRY_SAME(shmget) - ENTRY_COMP(shmctl) /* 195 */ - ENTRY_SAME(ni_syscall) /* streams1 */ - ENTRY_SAME(ni_syscall) /* streams2 */ - ENTRY_SAME(lstat64) - ENTRY_OURS(truncate64) - ENTRY_OURS(ftruncate64) /* 200 */ - ENTRY_SAME(getdents64) - ENTRY_COMP(fcntl64) - ENTRY_SAME(ni_syscall) /* attrctl -- dead */ - ENTRY_SAME(ni_syscall) /* acl_get -- dead */ - ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */ - ENTRY_SAME(gettid) - ENTRY_OURS(readahead) - ENTRY_SAME(tkill) - ENTRY_COMP(sendfile64) - ENTRY_COMP(futex) /* 210 */ - ENTRY_COMP(sched_setaffinity) - ENTRY_COMP(sched_getaffinity) - ENTRY_SAME(ni_syscall) /* set_thread_area */ - ENTRY_SAME(ni_syscall) /* get_thread_area */ - ENTRY_COMP(io_setup) /* 215 */ - ENTRY_SAME(io_destroy) - ENTRY_COMP(io_getevents) - ENTRY_COMP(io_submit) - ENTRY_SAME(io_cancel) - ENTRY_SAME(ni_syscall) /* 220: was alloc_hugepages */ - ENTRY_SAME(ni_syscall) /* was free_hugepages */ - ENTRY_SAME(exit_group) - ENTRY_COMP(lookup_dcookie) - ENTRY_SAME(epoll_create) - ENTRY_SAME(epoll_ctl) /* 225 */ - ENTRY_SAME(epoll_wait) - ENTRY_SAME(remap_file_pages) - ENTRY_COMP(semtimedop) - ENTRY_COMP(mq_open) - ENTRY_SAME(mq_unlink) /* 230 */ - ENTRY_COMP(mq_timedsend) - ENTRY_COMP(mq_timedreceive) - ENTRY_COMP(mq_notify) - ENTRY_COMP(mq_getsetattr) - ENTRY_COMP(waitid) /* 235 */ - ENTRY_OURS(fadvise64_64) - ENTRY_SAME(set_tid_address) - ENTRY_SAME(setxattr) - ENTRY_SAME(lsetxattr) - ENTRY_SAME(fsetxattr) /* 240 */ - ENTRY_SAME(getxattr) - ENTRY_SAME(lgetxattr) - ENTRY_SAME(fgetxattr) - 
ENTRY_SAME(listxattr) - ENTRY_SAME(llistxattr) /* 245 */ - ENTRY_SAME(flistxattr) - ENTRY_SAME(removexattr) - ENTRY_SAME(lremovexattr) - ENTRY_SAME(fremovexattr) - ENTRY_COMP(timer_create) /* 250 */ - ENTRY_COMP(timer_settime) - ENTRY_COMP(timer_gettime) - ENTRY_SAME(timer_getoverrun) - ENTRY_SAME(timer_delete) - ENTRY_COMP(clock_settime) /* 255 */ - ENTRY_COMP(clock_gettime) - ENTRY_COMP(clock_getres) - ENTRY_COMP(clock_nanosleep) - ENTRY_SAME(tgkill) - ENTRY_COMP(mbind) /* 260 */ - ENTRY_COMP(get_mempolicy) - ENTRY_COMP(set_mempolicy) - ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ - ENTRY_SAME(add_key) - ENTRY_SAME(request_key) /* 265 */ - ENTRY_COMP(keyctl) - ENTRY_SAME(ioprio_set) - ENTRY_SAME(ioprio_get) - ENTRY_SAME(inotify_init) - ENTRY_SAME(inotify_add_watch) /* 270 */ - ENTRY_SAME(inotify_rm_watch) - ENTRY_SAME(migrate_pages) - ENTRY_COMP(pselect6) - ENTRY_COMP(ppoll) - ENTRY_COMP(openat) /* 275 */ - ENTRY_SAME(mkdirat) - ENTRY_SAME(mknodat) - ENTRY_SAME(fchownat) - ENTRY_COMP(futimesat) - ENTRY_SAME(fstatat64) /* 280 */ - ENTRY_SAME(unlinkat) - ENTRY_SAME(renameat) - ENTRY_SAME(linkat) - ENTRY_SAME(symlinkat) - ENTRY_SAME(readlinkat) /* 285 */ - ENTRY_SAME(fchmodat) - ENTRY_SAME(faccessat) - ENTRY_SAME(unshare) - ENTRY_COMP(set_robust_list) - ENTRY_COMP(get_robust_list) /* 290 */ - ENTRY_SAME(splice) - ENTRY_OURS(sync_file_range) - ENTRY_SAME(tee) - ENTRY_COMP(vmsplice) - ENTRY_COMP(move_pages) /* 295 */ - ENTRY_SAME(getcpu) - ENTRY_COMP(epoll_pwait) - ENTRY_COMP(statfs64) - ENTRY_COMP(fstatfs64) - ENTRY_COMP(kexec_load) /* 300 */ - ENTRY_COMP(utimensat) - ENTRY_COMP(signalfd) - ENTRY_SAME(ni_syscall) /* was timerfd */ - ENTRY_SAME(eventfd) - ENTRY_OURS(fallocate) /* 305 */ - ENTRY_SAME(timerfd_create) - ENTRY_COMP(timerfd_settime) - ENTRY_COMP(timerfd_gettime) - ENTRY_COMP(signalfd4) - ENTRY_SAME(eventfd2) /* 310 */ - ENTRY_SAME(epoll_create1) - ENTRY_SAME(dup3) - ENTRY_SAME(pipe2) - ENTRY_SAME(inotify_init1) - ENTRY_COMP(preadv) /* 315 */ - ENTRY_COMP(pwritev) - ENTRY_COMP(rt_tgsigqueueinfo) - ENTRY_SAME(perf_event_open) - ENTRY_COMP(recvmmsg) - ENTRY_SAME(accept4) /* 320 */ - ENTRY_SAME(prlimit64) - ENTRY_SAME(fanotify_init) - ENTRY_DIFF(fanotify_mark) - ENTRY_COMP(clock_adjtime) - ENTRY_SAME(name_to_handle_at) /* 325 */ - ENTRY_COMP(open_by_handle_at) - ENTRY_SAME(syncfs) - ENTRY_SAME(setns) - ENTRY_COMP(sendmmsg) - ENTRY_COMP(process_vm_readv) /* 330 */ - ENTRY_COMP(process_vm_writev) - ENTRY_SAME(kcmp) - ENTRY_SAME(finit_module) - ENTRY_SAME(sched_setattr) - ENTRY_SAME(sched_getattr) /* 335 */ - ENTRY_COMP(utimes) - ENTRY_SAME(renameat2) - ENTRY_SAME(seccomp) - ENTRY_SAME(getrandom) - ENTRY_SAME(memfd_create) /* 340 */ - ENTRY_SAME(bpf) - ENTRY_COMP(execveat) - ENTRY_SAME(membarrier) - ENTRY_SAME(userfaultfd) - ENTRY_SAME(mlock2) /* 345 */ - ENTRY_SAME(copy_file_range) - ENTRY_COMP(preadv2) - ENTRY_COMP(pwritev2) - ENTRY_SAME(statx) - ENTRY_COMP(io_pgetevents) /* 350 */ - - -.ifne (. 
- 90b) - (__NR_Linux_syscalls * (91b - 90b)) -.error "size of syscall table does not fit value of __NR_Linux_syscalls" -.endif - -#undef ENTRY_SAME -#undef ENTRY_DIFF -#undef ENTRY_UHOH -#undef ENTRY_COMP -#undef ENTRY_OURS diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile new file mode 100644 index 000000000000..c22a21c39f30 --- /dev/null +++ b/arch/parisc/kernel/syscalls/Makefile @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +syshdr_abis_unistd_32 := common,32 +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +syshdr_abis_unistd_64 := common,64 +$(uapi)/unistd_64.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_c32 := common,32 +systbl_abi_syscall_table_c32 := c32 +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h unistd_64.h +kapisyshdr-y += syscall_table_32.h \ + syscall_table_64.h \ + syscall_table_c32.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..9bbd2f9f56c8 --- /dev/null +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -0,0 +1,369 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for parisc +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> can be common, 64, or 32 for this file. 
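+#
+# As an illustration (the behaviour follows from the helper scripts
+# below): the row "5 common open sys_open compat_sys_open" makes
+# syscallhdr.sh emit "#define __NR_open 5" into the generated
+# unistd_32.h/unistd_64.h, while syscalltbl.sh emits
+# "__SYSCALL(5, sys_open, )" for the native tables and, because a
+# compat entry point is given, "__SYSCALL(5, compat_sys_open, )" for
+# the c32 table; gaps in the numbering are padded with sys_ni_syscall.
+# syscall.S above defines __SYSCALL(nr, entry, nargs) as
+# "ASM_ULONG_INSN entry", turning each row into one table slot.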
+# +0 common restart_syscall sys_restart_syscall +1 common exit sys_exit +2 common fork sys_fork_wrapper +3 common read sys_read +4 common write sys_write +5 common open sys_open compat_sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 common execve sys_execve compat_sys_execve +12 common chdir sys_chdir +13 common time sys_time compat_sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown +17 common socket sys_socket +18 common stat sys_newstat compat_sys_newstat +19 common lseek sys_lseek compat_sys_lseek +20 common getpid sys_getpid +21 common mount sys_mount compat_sys_mount +22 common bind sys_bind +23 common setuid sys_setuid +24 common getuid sys_getuid +25 common stime sys_stime compat_sys_stime +26 common ptrace sys_ptrace compat_sys_ptrace +27 common alarm sys_alarm +28 common fstat sys_newfstat compat_sys_newfstat +29 common pause sys_pause +30 common utime sys_utime compat_sys_utime +31 common connect sys_connect +32 common listen sys_listen +33 common access sys_access +34 common nice sys_nice +35 common accept sys_accept +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_pipe +43 common times sys_times compat_sys_times +44 common getsockname sys_getsockname +45 common brk sys_brk +46 common setgid sys_setgid +47 common getgid sys_getgid +48 common signal sys_signal +49 common geteuid sys_geteuid +50 common getegid sys_getegid +51 common acct sys_acct +52 common umount2 sys_umount +53 common getpeername sys_getpeername +54 common ioctl sys_ioctl compat_sys_ioctl +55 common fcntl sys_fcntl compat_sys_fcntl +56 common socketpair sys_socketpair +57 common setpgid sys_setpgid +58 common send sys_send +59 common uname sys_newuname +60 common umask sys_umask +61 common chroot sys_chroot +62 common ustat sys_ustat compat_sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 common pivot_root sys_pivot_root +68 common sgetmask sys_sgetmask sys32_unimplemented +69 common ssetmask sys_ssetmask sys32_unimplemented +70 common setreuid sys_setreuid +71 common setregid sys_setregid +72 common mincore sys_mincore +73 common sigpending sys_sigpending compat_sys_sigpending +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit compat_sys_setrlimit +76 common getrlimit sys_getrlimit compat_sys_getrlimit +77 common getrusage sys_getrusage compat_sys_getrusage +78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday +79 common settimeofday sys_settimeofday compat_sys_settimeofday +80 common getgroups sys_getgroups +81 common setgroups sys_setgroups +82 common sendto sys_sendto +83 common symlink sys_symlink +84 common lstat sys_newlstat compat_sys_newlstat +85 common readlink sys_readlink +86 common uselib sys_ni_syscall +87 common swapon sys_swapon +88 common reboot sys_reboot +89 common mmap2 sys_mmap2 +90 common mmap sys_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate compat_sys_truncate +93 common ftruncate sys_ftruncate compat_sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +98 common recv sys_recv +99 common statfs sys_statfs compat_sys_statfs +100 common fstatfs sys_fstatfs compat_sys_fstatfs +101 common stat64 
sys_stat64 +# 102 was socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer compat_sys_setitimer +105 common getitimer sys_getitimer compat_sys_getitimer +106 common capget sys_capget +107 common capset sys_capset +108 32 pread64 parisc_pread64 +108 64 pread64 sys_pread64 +109 32 pwrite64 parisc_pwrite64 +109 64 pwrite64 sys_pwrite64 +110 common getcwd sys_getcwd +111 common vhangup sys_vhangup +112 common fstat64 sys_fstat64 +113 common vfork sys_vfork_wrapper +114 common wait4 sys_wait4 compat_sys_wait4 +115 common swapoff sys_swapoff +116 common sysinfo sys_sysinfo compat_sys_sysinfo +117 common shutdown sys_shutdown +118 common fsync sys_fsync +119 common madvise sys_madvise +120 common clone sys_clone_wrapper +121 common setdomainname sys_setdomainname +122 common sendfile sys_sendfile compat_sys_sendfile +123 common recvfrom sys_recvfrom +124 common adjtimex sys_adjtimex compat_sys_adjtimex +125 common mprotect sys_mprotect +126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask +# 127 was create_module +128 common init_module sys_init_module +129 common delete_module sys_delete_module +# 130 was get_kernel_syms +131 common quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 32 personality parisc_personality +136 64 personality sys_personality +# 137 was afs_syscall +138 common setfsuid sys_setfsuid +139 common setfsgid sys_setfsgid +140 common _llseek sys_llseek +141 common getdents sys_getdents compat_sys_getdents +142 common _newselect sys_select compat_sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv compat_sys_readv +146 common writev sys_writev compat_sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 common _sysctl sys_sysctl compat_sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep compat_sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid +165 common getresuid sys_getresuid +166 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +# 167 was query_module +168 common poll sys_poll +# 169 was nfsservctl +170 common setresgid sys_setresgid +171 common getresgid sys_getresgid +172 common prctl sys_prctl +173 common rt_sigreturn sys_rt_sigreturn_wrapper +174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +180 common chown sys_chown +181 common setsockopt sys_setsockopt compat_sys_setsockopt +182 common getsockopt sys_getsockopt compat_sys_getsockopt +183 common sendmsg sys_sendmsg compat_sys_sendmsg +184 
common recvmsg sys_recvmsg compat_sys_recvmsg +185 common semop sys_semop +186 common semget sys_semget +187 common semctl sys_semctl compat_sys_semctl +188 common msgsnd sys_msgsnd compat_sys_msgsnd +189 common msgrcv sys_msgrcv compat_sys_msgrcv +190 common msgget sys_msgget +191 common msgctl sys_msgctl compat_sys_msgctl +192 common shmat sys_shmat compat_sys_shmat +193 common shmdt sys_shmdt +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +# 196 was getpmsg +# 197 was putpmsg +198 common lstat64 sys_lstat64 +199 32 truncate64 parisc_truncate64 +199 64 truncate64 sys_truncate64 +200 32 ftruncate64 parisc_ftruncate64 +200 64 ftruncate64 sys_ftruncate64 +201 common getdents64 sys_getdents64 +202 common fcntl64 sys_fcntl64 compat_sys_fcntl64 +# 203 was attrctl +# 204 was acl_get +# 205 was acl_set +206 common gettid sys_gettid +207 32 readahead parisc_readahead +207 64 readahead sys_readahead +208 common tkill sys_tkill +209 common sendfile64 sys_sendfile64 compat_sys_sendfile64 +210 common futex sys_futex compat_sys_futex +211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +# 213 was set_thread_area +# 214 was get_thread_area +215 common io_setup sys_io_setup compat_sys_io_setup +216 common io_destroy sys_io_destroy +217 common io_getevents sys_io_getevents compat_sys_io_getevents +218 common io_submit sys_io_submit compat_sys_io_submit +219 common io_cancel sys_io_cancel +# 220 was alloc_hugepages +# 221 was free_hugepages +222 common exit_group sys_exit_group +223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +224 common epoll_create sys_epoll_create +225 common epoll_ctl sys_epoll_ctl +226 common epoll_wait sys_epoll_wait +227 common remap_file_pages sys_remap_file_pages +228 common semtimedop sys_semtimedop compat_sys_semtimedop +229 common mq_open sys_mq_open compat_sys_mq_open +230 common mq_unlink sys_mq_unlink +231 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend +232 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +233 common mq_notify sys_mq_notify compat_sys_mq_notify +234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +235 common waitid sys_waitid compat_sys_waitid +236 32 fadvise64_64 parisc_fadvise64_64 +236 64 fadvise64_64 sys_fadvise64_64 +237 common set_tid_address sys_set_tid_address +238 common setxattr sys_setxattr +239 common lsetxattr sys_lsetxattr +240 common fsetxattr sys_fsetxattr +241 common getxattr sys_getxattr +242 common lgetxattr sys_lgetxattr +243 common fgetxattr sys_fgetxattr +244 common listxattr sys_listxattr +245 common llistxattr sys_llistxattr +246 common flistxattr sys_flistxattr +247 common removexattr sys_removexattr +248 common lremovexattr sys_lremovexattr +249 common fremovexattr sys_fremovexattr +250 common timer_create sys_timer_create compat_sys_timer_create +251 common timer_settime sys_timer_settime compat_sys_timer_settime +252 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +253 common timer_getoverrun sys_timer_getoverrun +254 common timer_delete sys_timer_delete +255 common clock_settime sys_clock_settime compat_sys_clock_settime +256 common clock_gettime sys_clock_gettime compat_sys_clock_gettime +257 common clock_getres sys_clock_getres compat_sys_clock_getres +258 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +259 common tgkill sys_tgkill +260 common mbind sys_mbind compat_sys_mbind 
+261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +# 263 was vserver +264 common add_key sys_add_key +265 common request_key sys_request_key +266 common keyctl sys_keyctl compat_sys_keyctl +267 common ioprio_set sys_ioprio_set +268 common ioprio_get sys_ioprio_get +269 common inotify_init sys_inotify_init +270 common inotify_add_watch sys_inotify_add_watch +271 common inotify_rm_watch sys_inotify_rm_watch +272 common migrate_pages sys_migrate_pages +273 common pselect6 sys_pselect6 compat_sys_pselect6 +274 common ppoll sys_ppoll compat_sys_ppoll +275 common openat sys_openat compat_sys_openat +276 common mkdirat sys_mkdirat +277 common mknodat sys_mknodat +278 common fchownat sys_fchownat +279 common futimesat sys_futimesat compat_sys_futimesat +280 common fstatat64 sys_fstatat64 +281 common unlinkat sys_unlinkat +282 common renameat sys_renameat +283 common linkat sys_linkat +284 common symlinkat sys_symlinkat +285 common readlinkat sys_readlinkat +286 common fchmodat sys_fchmodat +287 common faccessat sys_faccessat +288 common unshare sys_unshare +289 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +290 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +291 common splice sys_splice +292 32 sync_file_range parisc_sync_file_range +292 64 sync_file_range sys_sync_file_range +293 common tee sys_tee +294 common vmsplice sys_vmsplice compat_sys_vmsplice +295 common move_pages sys_move_pages compat_sys_move_pages +296 common getcpu sys_getcpu +297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +298 common statfs64 sys_statfs64 compat_sys_statfs64 +299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +300 common kexec_load sys_kexec_load compat_sys_kexec_load +301 common utimensat sys_utimensat compat_sys_utimensat +302 common signalfd sys_signalfd compat_sys_signalfd +# 303 was timerfd +304 common eventfd sys_eventfd +305 32 fallocate parisc_fallocate +305 64 fallocate sys_fallocate +306 common timerfd_create sys_timerfd_create +307 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +308 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +309 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +310 common eventfd2 sys_eventfd2 +311 common epoll_create1 sys_epoll_create1 +312 common dup3 sys_dup3 +313 common pipe2 sys_pipe2 +314 common inotify_init1 sys_inotify_init1 +315 common preadv sys_preadv compat_sys_preadv +316 common pwritev sys_pwritev compat_sys_pwritev +317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +318 common perf_event_open sys_perf_event_open +319 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +320 common accept4 sys_accept4 +321 common prlimit64 sys_prlimit64 +322 common fanotify_init sys_fanotify_init +323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark +324 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +325 common name_to_handle_at sys_name_to_handle_at +326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +327 common syncfs sys_syncfs +328 common setns sys_setns +329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +330 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv +331 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +332 common kcmp sys_kcmp +333 common finit_module sys_finit_module +334 common sched_setattr sys_sched_setattr +335 common sched_getattr 
sys_sched_getattr +336 common utimes sys_utimes compat_sys_utimes +337 common renameat2 sys_renameat2 +338 common seccomp sys_seccomp +339 common getrandom sys_getrandom +340 common memfd_create sys_memfd_create +341 common bpf sys_bpf +342 common execveat sys_execveat compat_sys_execveat +343 common membarrier sys_membarrier +344 common userfaultfd sys_userfaultfd +345 common mlock2 sys_mlock2 +346 common copy_file_range sys_copy_file_range +347 common preadv2 sys_preadv2 compat_sys_preadv2 +348 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +349 common statx sys_statx +350 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..50242b747d7c --- /dev/null +++ b/arch/parisc/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_PARISC_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..45b5bae26240 --- /dev/null +++ b/arch/parisc/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry compat ; do + if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then + emit $((nxt+offset)) $((nr+offset)) $compat + else + emit $((nxt+offset)) $((nr+offset)) $entry + fi + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8be31261aec8..50f27a656051 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -128,6 +128,7 @@ config PPC # # Please keep this list sorted alphabetically. 
# + select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE @@ -138,7 +139,6 @@ config PPC select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64 @@ -374,9 +374,9 @@ config PPC_ADV_DEBUG_DAC_RANGE depends on PPC_ADV_DEBUG_REGS && 44x default y -config ZONE_DMA32 +config ZONE_DMA bool - default y if PPC64 + default y if PPC_BOOK3E_64 config PGTABLE_LEVELS int @@ -556,7 +556,7 @@ config RELOCATABLE_TEST config CRASH_DUMP bool "Build a dump capture kernel" - depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) + depends on PPC64 || PPC_BOOK3S_32 || FSL_BOOKE || (44x && !SMP) select RELOCATABLE if PPC64 || 44x || FSL_BOOKE help Build a kernel suitable for use as a dump capture kernel. @@ -869,10 +869,6 @@ config ISA have an IBM RS/6000 or pSeries machine, say Y. If you have an embedded board, consult your board documentation. -config ZONE_DMA - bool - default y - config GENERIC_ISA_DMA bool depends on ISA_DMA_API @@ -1096,7 +1092,7 @@ config PHYSICAL_START_BOOL config PHYSICAL_START hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL - default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !NONSTATIC_KERNEL + default "0x02000000" if PPC_BOOK3S && CRASH_DUMP && !NONSTATIC_KERNEL default "0x00000000" config PHYSICAL_ALIGN @@ -1146,7 +1142,7 @@ config PIN_TLB_DATA config PIN_TLB_IMMR bool "Pinned TLB for IMMR" - depends on PIN_TLB + depends on PIN_TLB || PPC_EARLY_DEBUG_CPM default y config PIN_TLB_TEXT diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 8a2ce14d68d0..488c9edffa58 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -30,6 +30,10 @@ endif endif endif +ifdef CONFIG_PPC_BOOK3S_32 +KBUILD_CFLAGS += -mcpu=powerpc +endif + ifeq ($(CROSS_COMPILE),) KBUILD_DEFCONFIG := $(shell uname -m)_defconfig else @@ -152,7 +156,14 @@ endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) -CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) +# Clang unconditionally reserves r2 on ppc32 and does not support the flag +# https://bugs.llvm.org/show_bug.cgi?id=39555 +CFLAGS-$(CONFIG_PPC32) := $(call cc-option, -ffixed-r2) + +# Clang doesn't support -mmultiple / -mno-multiple +# https://bugs.llvm.org/show_bug.cgi?id=39556 +CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD)) + CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifdef CONFIG_PPC_BOOK3S_64 @@ -228,10 +239,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-vsx) KBUILD_CFLAGS += $(call cc-option,-mno-spe) KBUILD_CFLAGS += $(call cc-option,-mspe=no) -# Enable unit-at-a-time mode when possible. It shrinks the -# kernel considerably. -KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) - # FIXME: the module load should be taught about the additional relocs # generated by this. 
# revert to pre-gcc-4.4 behaviour of .eh_frame @@ -241,10 +248,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) # often slow when they are implemented at all KBUILD_CFLAGS += $(call cc-option,-mno-string) -ifdef CONFIG_6xx -KBUILD_CFLAGS += -mcpu=powerpc -endif - cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 @@ -317,6 +320,14 @@ PHONY += ppc64le_defconfig ppc64le_defconfig: $(call merge_into_defconfig,ppc64_defconfig,le) +PHONY += ppc64le_guest_defconfig +ppc64le_guest_defconfig: + $(call merge_into_defconfig,ppc64_defconfig,le guest) + +PHONY += ppc64_guest_defconfig +ppc64_guest_defconfig: + $(call merge_into_defconfig,ppc64_defconfig,be guest) + PHONY += powernv_be_defconfig powernv_be_defconfig: $(call merge_into_defconfig,powernv_defconfig,be) @@ -402,6 +413,9 @@ archclean: archprepare: checkbin +archheaders: + $(Q)$(MAKE) $(build)=arch/powerpc/kernel/syscalls all + ifdef CONFIG_STACKPROTECTOR prepare: stack_protector_prepare diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts index 538e42b1120d..b5861fa3836c 100644 --- a/arch/powerpc/boot/dts/bamboo.dts +++ b/arch/powerpc/boot/dts/bamboo.dts @@ -268,8 +268,10 @@ /* Outbound ranges, one memory and one IO, * later cannot be changed. Chip supports a second * IO range but we don't use it for now + * The chip also supports a larger memory range but + * it's not naturally aligned, so our code will break */ - ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000 + ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi index 88d8423f8ac5..bb7b9b9f3f5f 100644 --- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi @@ -70,14 +70,14 @@ cpu0: PowerPC,e6500@0 { device_type = "cpu"; reg = <0 1>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu1: PowerPC,e6500@2 { device_type = "cpu"; reg = <2 3>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi index f3f968c51f4b..388ba1b15f8c 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi @@ -75,28 +75,28 @@ cpu0: PowerPC,e6500@0 { device_type = "cpu"; reg = <0 1>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu1: PowerPC,e6500@2 { device_type = "cpu"; reg = <2 3>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu2: PowerPC,e6500@4 { device_type = "cpu"; reg = <4 5>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu3: PowerPC,e6500@6 { device_type = "cpu"; reg = <6 7>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi index 1b33f5157c8a..4f044b41a776 100644 --- 
a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi @@ -398,21 +398,6 @@ }; /include/ "qoriq-clockgen2.dtsi" - clockgen: global-utilities@e1000 { - compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0"; - reg = <0xe1000 0x1000>; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll0-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux0"; - }; - }; rcpm: global-utilities@e2000 { compatible = "fsl,b4-rcpm", "fsl,qoriq-rcpm-2.0"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts index 11bea3e6a43f..58ac17496c89 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts @@ -169,100 +169,100 @@ interrupt-map-mask = <0xff00 0 0 7>; interrupt-map = < /* IDSEL 0x11 func 0 - PCI slot 1 */ - 0x8800 0 0 1 &mpic 2 1 - 0x8800 0 0 2 &mpic 3 1 - 0x8800 0 0 3 &mpic 4 1 - 0x8800 0 0 4 &mpic 1 1 + 0x8800 0 0 1 &mpic 2 1 0 0 + 0x8800 0 0 2 &mpic 3 1 0 0 + 0x8800 0 0 3 &mpic 4 1 0 0 + 0x8800 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 1 - PCI slot 1 */ - 0x8900 0 0 1 &mpic 2 1 - 0x8900 0 0 2 &mpic 3 1 - 0x8900 0 0 3 &mpic 4 1 - 0x8900 0 0 4 &mpic 1 1 + 0x8900 0 0 1 &mpic 2 1 0 0 + 0x8900 0 0 2 &mpic 3 1 0 0 + 0x8900 0 0 3 &mpic 4 1 0 0 + 0x8900 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 2 - PCI slot 1 */ - 0x8a00 0 0 1 &mpic 2 1 - 0x8a00 0 0 2 &mpic 3 1 - 0x8a00 0 0 3 &mpic 4 1 - 0x8a00 0 0 4 &mpic 1 1 + 0x8a00 0 0 1 &mpic 2 1 0 0 + 0x8a00 0 0 2 &mpic 3 1 0 0 + 0x8a00 0 0 3 &mpic 4 1 0 0 + 0x8a00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 3 - PCI slot 1 */ - 0x8b00 0 0 1 &mpic 2 1 - 0x8b00 0 0 2 &mpic 3 1 - 0x8b00 0 0 3 &mpic 4 1 - 0x8b00 0 0 4 &mpic 1 1 + 0x8b00 0 0 1 &mpic 2 1 0 0 + 0x8b00 0 0 2 &mpic 3 1 0 0 + 0x8b00 0 0 3 &mpic 4 1 0 0 + 0x8b00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 4 - PCI slot 1 */ - 0x8c00 0 0 1 &mpic 2 1 - 0x8c00 0 0 2 &mpic 3 1 - 0x8c00 0 0 3 &mpic 4 1 - 0x8c00 0 0 4 &mpic 1 1 + 0x8c00 0 0 1 &mpic 2 1 0 0 + 0x8c00 0 0 2 &mpic 3 1 0 0 + 0x8c00 0 0 3 &mpic 4 1 0 0 + 0x8c00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 5 - PCI slot 1 */ - 0x8d00 0 0 1 &mpic 2 1 - 0x8d00 0 0 2 &mpic 3 1 - 0x8d00 0 0 3 &mpic 4 1 - 0x8d00 0 0 4 &mpic 1 1 + 0x8d00 0 0 1 &mpic 2 1 0 0 + 0x8d00 0 0 2 &mpic 3 1 0 0 + 0x8d00 0 0 3 &mpic 4 1 0 0 + 0x8d00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 6 - PCI slot 1 */ - 0x8e00 0 0 1 &mpic 2 1 - 0x8e00 0 0 2 &mpic 3 1 - 0x8e00 0 0 3 &mpic 4 1 - 0x8e00 0 0 4 &mpic 1 1 + 0x8e00 0 0 1 &mpic 2 1 0 0 + 0x8e00 0 0 2 &mpic 3 1 0 0 + 0x8e00 0 0 3 &mpic 4 1 0 0 + 0x8e00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 7 - PCI slot 1 */ - 0x8f00 0 0 1 &mpic 2 1 - 0x8f00 0 0 2 &mpic 3 1 - 0x8f00 0 0 3 &mpic 4 1 - 0x8f00 0 0 4 &mpic 1 1 + 0x8f00 0 0 1 &mpic 2 1 0 0 + 0x8f00 0 0 2 &mpic 3 1 0 0 + 0x8f00 0 0 3 &mpic 4 1 0 0 + 0x8f00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x12 func 0 - PCI slot 2 */ - 0x9000 0 0 1 &mpic 3 1 - 0x9000 0 0 2 &mpic 4 1 - 0x9000 0 0 3 &mpic 1 1 - 0x9000 0 0 4 &mpic 2 1 + 0x9000 0 0 1 &mpic 3 1 0 0 + 0x9000 0 0 2 &mpic 4 1 0 0 + 0x9000 0 0 3 &mpic 1 1 0 0 + 0x9000 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 1 - PCI slot 2 */ - 0x9100 0 0 1 &mpic 3 1 - 0x9100 0 0 2 &mpic 4 1 - 0x9100 0 0 3 &mpic 1 1 - 0x9100 0 0 4 &mpic 2 1 + 0x9100 0 0 1 &mpic 3 1 0 0 + 0x9100 0 0 2 &mpic 4 1 0 0 + 0x9100 0 0 3 &mpic 1 1 0 0 + 0x9100 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 2 - PCI slot 2 */ - 0x9200 0 
0 1 &mpic 3 1 - 0x9200 0 0 2 &mpic 4 1 - 0x9200 0 0 3 &mpic 1 1 - 0x9200 0 0 4 &mpic 2 1 + 0x9200 0 0 1 &mpic 3 1 0 0 + 0x9200 0 0 2 &mpic 4 1 0 0 + 0x9200 0 0 3 &mpic 1 1 0 0 + 0x9200 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 3 - PCI slot 2 */ - 0x9300 0 0 1 &mpic 3 1 - 0x9300 0 0 2 &mpic 4 1 - 0x9300 0 0 3 &mpic 1 1 - 0x9300 0 0 4 &mpic 2 1 + 0x9300 0 0 1 &mpic 3 1 0 0 + 0x9300 0 0 2 &mpic 4 1 0 0 + 0x9300 0 0 3 &mpic 1 1 0 0 + 0x9300 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 4 - PCI slot 2 */ - 0x9400 0 0 1 &mpic 3 1 - 0x9400 0 0 2 &mpic 4 1 - 0x9400 0 0 3 &mpic 1 1 - 0x9400 0 0 4 &mpic 2 1 + 0x9400 0 0 1 &mpic 3 1 0 0 + 0x9400 0 0 2 &mpic 4 1 0 0 + 0x9400 0 0 3 &mpic 1 1 0 0 + 0x9400 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 5 - PCI slot 2 */ - 0x9500 0 0 1 &mpic 3 1 - 0x9500 0 0 2 &mpic 4 1 - 0x9500 0 0 3 &mpic 1 1 - 0x9500 0 0 4 &mpic 2 1 + 0x9500 0 0 1 &mpic 3 1 0 0 + 0x9500 0 0 2 &mpic 4 1 0 0 + 0x9500 0 0 3 &mpic 1 1 0 0 + 0x9500 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 6 - PCI slot 2 */ - 0x9600 0 0 1 &mpic 3 1 - 0x9600 0 0 2 &mpic 4 1 - 0x9600 0 0 3 &mpic 1 1 - 0x9600 0 0 4 &mpic 2 1 + 0x9600 0 0 1 &mpic 3 1 0 0 + 0x9600 0 0 2 &mpic 4 1 0 0 + 0x9600 0 0 3 &mpic 1 1 0 0 + 0x9600 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 7 - PCI slot 2 */ - 0x9700 0 0 1 &mpic 3 1 - 0x9700 0 0 2 &mpic 4 1 - 0x9700 0 0 3 &mpic 1 1 - 0x9700 0 0 4 &mpic 2 1 + 0x9700 0 0 1 &mpic 3 1 0 0 + 0x9700 0 0 2 &mpic 4 1 0 0 + 0x9700 0 0 3 &mpic 1 1 0 0 + 0x9700 0 0 4 &mpic 2 1 0 0 // IDSEL 0x1c USB 0xe000 0 0 1 &i8259 12 2 diff --git a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts index 7ff62046a9ea..e64b91e321f6 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts @@ -136,100 +136,100 @@ interrupt-map-mask = <0xff00 0 0 7>; interrupt-map = < /* IDSEL 0x11 func 0 - PCI slot 1 */ - 0x8800 0 0 1 &mpic 2 1 - 0x8800 0 0 2 &mpic 3 1 - 0x8800 0 0 3 &mpic 4 1 - 0x8800 0 0 4 &mpic 1 1 + 0x8800 0 0 1 &mpic 2 1 0 0 + 0x8800 0 0 2 &mpic 3 1 0 0 + 0x8800 0 0 3 &mpic 4 1 0 0 + 0x8800 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 1 - PCI slot 1 */ - 0x8900 0 0 1 &mpic 2 1 - 0x8900 0 0 2 &mpic 3 1 - 0x8900 0 0 3 &mpic 4 1 - 0x8900 0 0 4 &mpic 1 1 + 0x8900 0 0 1 &mpic 2 1 0 0 + 0x8900 0 0 2 &mpic 3 1 0 0 + 0x8900 0 0 3 &mpic 4 1 0 0 + 0x8900 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 2 - PCI slot 1 */ - 0x8a00 0 0 1 &mpic 2 1 - 0x8a00 0 0 2 &mpic 3 1 - 0x8a00 0 0 3 &mpic 4 1 - 0x8a00 0 0 4 &mpic 1 1 + 0x8a00 0 0 1 &mpic 2 1 0 0 + 0x8a00 0 0 2 &mpic 3 1 0 0 + 0x8a00 0 0 3 &mpic 4 1 0 0 + 0x8a00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 3 - PCI slot 1 */ - 0x8b00 0 0 1 &mpic 2 1 - 0x8b00 0 0 2 &mpic 3 1 - 0x8b00 0 0 3 &mpic 4 1 - 0x8b00 0 0 4 &mpic 1 1 + 0x8b00 0 0 1 &mpic 2 1 0 0 + 0x8b00 0 0 2 &mpic 3 1 0 0 + 0x8b00 0 0 3 &mpic 4 1 0 0 + 0x8b00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 4 - PCI slot 1 */ - 0x8c00 0 0 1 &mpic 2 1 - 0x8c00 0 0 2 &mpic 3 1 - 0x8c00 0 0 3 &mpic 4 1 - 0x8c00 0 0 4 &mpic 1 1 + 0x8c00 0 0 1 &mpic 2 1 0 0 + 0x8c00 0 0 2 &mpic 3 1 0 0 + 0x8c00 0 0 3 &mpic 4 1 0 0 + 0x8c00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 5 - PCI slot 1 */ - 0x8d00 0 0 1 &mpic 2 1 - 0x8d00 0 0 2 &mpic 3 1 - 0x8d00 0 0 3 &mpic 4 1 - 0x8d00 0 0 4 &mpic 1 1 + 0x8d00 0 0 1 &mpic 2 1 0 0 + 0x8d00 0 0 2 &mpic 3 1 0 0 + 0x8d00 0 0 3 &mpic 4 1 0 0 + 0x8d00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 6 - PCI slot 1 */ - 0x8e00 0 0 1 &mpic 2 1 - 0x8e00 0 0 2 &mpic 3 1 - 0x8e00 0 0 3 &mpic 4 1 - 0x8e00 0 0 4 &mpic 1 1 + 0x8e00 0 0 1 &mpic 2 1 0 0 + 0x8e00 0 0 2 &mpic 3 
1 0 0 + 0x8e00 0 0 3 &mpic 4 1 0 0 + 0x8e00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x11 func 7 - PCI slot 1 */ - 0x8f00 0 0 1 &mpic 2 1 - 0x8f00 0 0 2 &mpic 3 1 - 0x8f00 0 0 3 &mpic 4 1 - 0x8f00 0 0 4 &mpic 1 1 + 0x8f00 0 0 1 &mpic 2 1 0 0 + 0x8f00 0 0 2 &mpic 3 1 0 0 + 0x8f00 0 0 3 &mpic 4 1 0 0 + 0x8f00 0 0 4 &mpic 1 1 0 0 /* IDSEL 0x12 func 0 - PCI slot 2 */ - 0x9000 0 0 1 &mpic 3 1 - 0x9000 0 0 2 &mpic 4 1 - 0x9000 0 0 3 &mpic 1 1 - 0x9000 0 0 4 &mpic 2 1 + 0x9000 0 0 1 &mpic 3 1 0 0 + 0x9000 0 0 2 &mpic 4 1 0 0 + 0x9000 0 0 3 &mpic 1 1 0 0 + 0x9000 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 1 - PCI slot 2 */ - 0x9100 0 0 1 &mpic 3 1 - 0x9100 0 0 2 &mpic 4 1 - 0x9100 0 0 3 &mpic 1 1 - 0x9100 0 0 4 &mpic 2 1 + 0x9100 0 0 1 &mpic 3 1 0 0 + 0x9100 0 0 2 &mpic 4 1 0 0 + 0x9100 0 0 3 &mpic 1 1 0 0 + 0x9100 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 2 - PCI slot 2 */ - 0x9200 0 0 1 &mpic 3 1 - 0x9200 0 0 2 &mpic 4 1 - 0x9200 0 0 3 &mpic 1 1 - 0x9200 0 0 4 &mpic 2 1 + 0x9200 0 0 1 &mpic 3 1 0 0 + 0x9200 0 0 2 &mpic 4 1 0 0 + 0x9200 0 0 3 &mpic 1 1 0 0 + 0x9200 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 3 - PCI slot 2 */ - 0x9300 0 0 1 &mpic 3 1 - 0x9300 0 0 2 &mpic 4 1 - 0x9300 0 0 3 &mpic 1 1 - 0x9300 0 0 4 &mpic 2 1 + 0x9300 0 0 1 &mpic 3 1 0 0 + 0x9300 0 0 2 &mpic 4 1 0 0 + 0x9300 0 0 3 &mpic 1 1 0 0 + 0x9300 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 4 - PCI slot 2 */ - 0x9400 0 0 1 &mpic 3 1 - 0x9400 0 0 2 &mpic 4 1 - 0x9400 0 0 3 &mpic 1 1 - 0x9400 0 0 4 &mpic 2 1 + 0x9400 0 0 1 &mpic 3 1 0 0 + 0x9400 0 0 2 &mpic 4 1 0 0 + 0x9400 0 0 3 &mpic 1 1 0 0 + 0x9400 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 5 - PCI slot 2 */ - 0x9500 0 0 1 &mpic 3 1 - 0x9500 0 0 2 &mpic 4 1 - 0x9500 0 0 3 &mpic 1 1 - 0x9500 0 0 4 &mpic 2 1 + 0x9500 0 0 1 &mpic 3 1 0 0 + 0x9500 0 0 2 &mpic 4 1 0 0 + 0x9500 0 0 3 &mpic 1 1 0 0 + 0x9500 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 6 - PCI slot 2 */ - 0x9600 0 0 1 &mpic 3 1 - 0x9600 0 0 2 &mpic 4 1 - 0x9600 0 0 3 &mpic 1 1 - 0x9600 0 0 4 &mpic 2 1 + 0x9600 0 0 1 &mpic 3 1 0 0 + 0x9600 0 0 2 &mpic 4 1 0 0 + 0x9600 0 0 3 &mpic 1 1 0 0 + 0x9600 0 0 4 &mpic 2 1 0 0 /* IDSEL 0x12 func 7 - PCI slot 2 */ - 0x9700 0 0 1 &mpic 3 1 - 0x9700 0 0 2 &mpic 4 1 - 0x9700 0 0 3 &mpic 1 1 - 0x9700 0 0 4 &mpic 2 1 + 0x9700 0 0 1 &mpic 3 1 0 0 + 0x9700 0 0 2 &mpic 4 1 0 0 + 0x9700 0 0 3 &mpic 1 1 0 0 + 0x9700 0 0 4 &mpic 2 1 0 0 // IDSEL 0x1c USB 0xe000 0 0 1 &i8259 12 2 diff --git a/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi index eeb7c65d5f22..50039d4fa278 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi @@ -97,6 +97,7 @@ &pci0 { compatible = "fsl,mpc8641-pcie"; device_type = "pci"; + #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; bus-range = <0x0 0xff>; @@ -123,6 +124,7 @@ &pci1 { compatible = "fsl,mpc8641-pcie"; device_type = "pci"; + #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; bus-range = <0x0 0xff>; diff --git a/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi b/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi index 25f81eea60e0..a13876c05c1e 100644 --- a/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi @@ -205,13 +205,13 @@ mdio@24000 { phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; - interrupts = <3 1>; + interrupts = <3 1 0 0>; reg = <0x0>; }; phy1: ethernet-phy@1 { interrupt-parent = <&mpic>; - interrupts = <2 1>; + interrupts = <2 1 0 0>; reg = <0x1>; }; diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi 
b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi index 51e975d7631a..872e4485dc3f 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi @@ -327,24 +327,6 @@ /include/ "qoriq-clockgen1.dtsi" global-utilities@e1000 { compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0"; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux2"; - }; - - mux3: mux3@60 { - #clock-cells = <0>; - reg = <0x60 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux3"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi index 941274c41f21..6318962e8d14 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi @@ -89,7 +89,7 @@ cpu0: PowerPC,e500mc@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_0>; fsl,portid-mapping = <0x80000000>; L2_0: l2-cache { @@ -99,7 +99,7 @@ cpu1: PowerPC,e500mc@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x40000000>; L2_1: l2-cache { @@ -109,7 +109,7 @@ cpu2: PowerPC,e500mc@2 { device_type = "cpu"; reg = <2>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x20000000>; L2_2: l2-cache { @@ -119,7 +119,7 @@ cpu3: PowerPC,e500mc@3 { device_type = "cpu"; reg = <3>; - clocks = <&mux3>; + clocks = <&clockgen 1 3>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x10000000>; L2_3: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi index 187676fa8d83..81bc75aca2e0 100644 --- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi @@ -354,24 +354,6 @@ /include/ "qoriq-clockgen1.dtsi" global-utilities@e1000 { compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0"; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux2"; - }; - - mux3: mux3@60 { - #clock-cells = <0>; - reg = <0x60 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux3"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi index 50b73e8e638f..db92f1151a48 100644 --- a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi @@ -90,7 +90,7 @@ cpu0: PowerPC,e500mc@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_0>; fsl,portid-mapping = <0x80000000>; L2_0: l2-cache { @@ -100,7 +100,7 @@ cpu1: PowerPC,e500mc@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x40000000>; L2_1: l2-cache { @@ -110,7 +110,7 @@ cpu2: PowerPC,e500mc@2 { device_type = 
"cpu"; reg = <2>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x20000000>; L2_2: l2-cache { @@ -120,7 +120,7 @@ cpu3: PowerPC,e500mc@3 { device_type = "cpu"; reg = <3>; - clocks = <&mux3>; + clocks = <&clockgen 1 3>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x10000000>; L2_3: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index a0252085f858..4da49b6dd3f5 100644 --- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -374,76 +374,6 @@ /include/ "qoriq-clockgen1.dtsi" global-utilities@e1000 { compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0"; - - pll2: pll2@840 { - #clock-cells = <1>; - reg = <0x840 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll2", "pll2-div2"; - }; - - pll3: pll3@860 { - #clock-cells = <1>; - reg = <0x860 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll3", "pll3-div2"; - }; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux2"; - }; - - mux3: mux3@60 { - #clock-cells = <0>; - reg = <0x60 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux3"; - }; - - mux4: mux4@80 { - #clock-cells = <0>; - reg = <0x80 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>; - clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2"; - clock-output-names = "cmux4"; - }; - - mux5: mux5@a0 { - #clock-cells = <0>; - reg = <0xa0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>; - clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2"; - clock-output-names = "cmux5"; - }; - - mux6: mux6@c0 { - #clock-cells = <0>; - reg = <0xc0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>; - clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2"; - clock-output-names = "cmux6"; - }; - - mux7: mux7@e0 { - #clock-cells = <0>; - reg = <0xe0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>; - clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2"; - clock-output-names = "cmux7"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi index d56a546b73e6..0a7c65a00e5e 100644 --- a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi @@ -94,7 +94,7 @@ cpu0: PowerPC,e500mc@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_0>; fsl,portid-mapping = <0x80000000>; L2_0: l2-cache { @@ -104,7 +104,7 @@ cpu1: PowerPC,e500mc@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x40000000>; L2_1: l2-cache { @@ -114,7 +114,7 @@ cpu2: PowerPC,e500mc@2 { device_type = "cpu"; reg = <2>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x20000000>; L2_2: l2-cache { @@ -124,7 +124,7 @@ cpu3: PowerPC,e500mc@3 { device_type = 
"cpu"; reg = <3>; - clocks = <&mux3>; + clocks = <&clockgen 1 3>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x10000000>; L2_3: l2-cache { @@ -134,7 +134,7 @@ cpu4: PowerPC,e500mc@4 { device_type = "cpu"; reg = <4>; - clocks = <&mux4>; + clocks = <&clockgen 1 4>; next-level-cache = <&L2_4>; fsl,portid-mapping = <0x08000000>; L2_4: l2-cache { @@ -144,7 +144,7 @@ cpu5: PowerPC,e500mc@5 { device_type = "cpu"; reg = <5>; - clocks = <&mux5>; + clocks = <&clockgen 1 5>; next-level-cache = <&L2_5>; fsl,portid-mapping = <0x04000000>; L2_5: l2-cache { @@ -154,7 +154,7 @@ cpu6: PowerPC,e500mc@6 { device_type = "cpu"; reg = <6>; - clocks = <&mux6>; + clocks = <&clockgen 1 6>; next-level-cache = <&L2_6>; fsl,portid-mapping = <0x02000000>; L2_6: l2-cache { @@ -164,7 +164,7 @@ cpu7: PowerPC,e500mc@7 { device_type = "cpu"; reg = <7>; - clocks = <&mux7>; + clocks = <&clockgen 1 7>; next-level-cache = <&L2_7>; fsl,portid-mapping = <0x01000000>; L2_7: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi index bfba0b4f1cbb..2d74ea85e5df 100644 --- a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi @@ -96,7 +96,7 @@ cpu0: PowerPC,e5500@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_0>; fsl,portid-mapping = <0x80000000>; L2_0: l2-cache { @@ -106,7 +106,7 @@ cpu1: PowerPC,e5500@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x40000000>; L2_1: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi index e2bd9313e632..16b454b504e2 100644 --- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi @@ -319,24 +319,6 @@ /include/ "qoriq-clockgen1.dtsi" global-utilities@e1000 { compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0"; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux2"; - }; - - mux3: mux3@60 { - #clock-cells = <0>; - reg = <0x60 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux3"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi index dbd57750fc02..ed89dbbdacf0 100644 --- a/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi @@ -102,7 +102,7 @@ cpu0: PowerPC,e5500@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_0>; fsl,portid-mapping = <0x80000000>; L2_0: l2-cache { @@ -112,7 +112,7 @@ cpu1: PowerPC,e5500@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x40000000>; L2_1: l2-cache { @@ -122,7 +122,7 @@ cpu2: PowerPC,e5500@2 { device_type = "cpu"; reg = <2>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x20000000>; L2_2: l2-cache { @@ -132,7 +132,7 @@ cpu3: PowerPC,e5500@3 { device_type = "cpu"; reg = <3>; - clocks = <&mux3>; + clocks = <&clockgen 1 3>; next-level-cache = <&L2_3>; 
fsl,portid-mapping = <0x10000000>; L2_3: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi index 88cd70de4f86..463c1ed9ffdd 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi @@ -34,53 +34,6 @@ clockgen: global-utilities@e1000 { compatible = "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; #clock-cells = <2>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0", "fixed-clock"; - clock-output-names = "sysclk"; - }; - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; - platform_pll: platform-pll@c00 { - #clock-cells = <1>; - reg = <0xc00 0x4>; - compatible = "fsl,qoriq-platform-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "platform-pll", "platform-pll-div2"; - }; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi index 6dfd7c5357ab..0361050bb56a 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi @@ -34,36 +34,6 @@ clockgen: global-utilities@e1000 { compatible = "fsl,qoriq-clockgen-2.0"; - ranges = <0x0 0xe1000 0x1000>; reg = <0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; #clock-cells = <2>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0", "fixed-clock"; - clock-output-names = "sysclk"; - }; - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; - platform_pll: platform-pll@c00 { - #clock-cells = <1>; - reg = <0xc00 0x4>; - compatible = "fsl,qoriq-platform-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "platform-pll", "platform-pll-div2"; - }; }; diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi index 4908af501098..d552044c5afc 100644 --- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi @@ -345,22 +345,6 @@ /include/ "qoriq-clockgen2.dtsi" global-utilities@e1000 { compatible = "fsl,t1023-clockgen", "fsl,qoriq-clockgen-2.0"; - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 4>; - compatible = "fsl,core-mux-clock"; - clocks = <&pll0 0>, <&pll0 1>; - clock-names = "pll0_0", "pll0_1"; - clock-output-names = "cmux0"; - }; - mux1: mux1@20 { - #clock-cells = 
<0>; - reg = <0x20 4>; - compatible = "fsl,core-mux-clock"; - clocks = <&pll0 0>, <&pll0 1>; - clock-names = "pll0_0", "pll0_1"; - clock-output-names = "cmux1"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi index 9d08a363bab3..d87ea13164f2 100644 --- a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi @@ -74,7 +74,7 @@ cpu0: PowerPC,e5500@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L2_1: l2-cache { @@ -84,7 +84,7 @@ cpu1: PowerPC,e5500@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; #cooling-cells = <2>; L2_2: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 145c7f43b5b6..315d0557eefc 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -425,50 +425,6 @@ /include/ "qoriq-clockgen2.dtsi" global-utilities@e1000 { compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0"; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll1-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll1-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux1"; - }; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll1-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux2"; - }; - - mux3: mux3@60 { - #clock-cells = <0>; - reg = <0x60 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0_0", "pll0_1", "pll0_2", - "pll1_0", "pll1_1", "pll1_2"; - clock-output-names = "cmux3"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi index 6db0ee8b1384..dd59e4b69480 100644 --- a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi @@ -74,7 +74,7 @@ cpu0: PowerPC,e5500@0 { device_type = "cpu"; reg = <0>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L2_1: l2-cache { @@ -84,7 +84,7 @@ cpu1: PowerPC,e5500@1 { device_type = "cpu"; reg = <1>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; #cooling-cells = <2>; L2_2: l2-cache { @@ -94,7 +94,7 @@ cpu2: PowerPC,e5500@2 { device_type = "cpu"; reg = <2>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_3>; #cooling-cells = <2>; L2_3: l2-cache { @@ -104,7 +104,7 @@ cpu3: PowerPC,e5500@3 { device_type = "cpu"; reg = <3>; - clocks = <&mux3>; + clocks = <&clockgen 1 3>; next-level-cache = <&L2_4>; #cooling-cells = <2>; L2_4: l2-cache { diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index 
a97296c64eb2..ecbb447920bc 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -535,28 +535,6 @@ /include/ "qoriq-clockgen2.dtsi" global-utilities@e1000 { compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0"; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll0-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>; - clock-names = "pll0", "pll0-div2", "pll0-div4", - "pll1", "pll1-div2", "pll1-div4"; - clock-output-names = "cmux1"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi index c2e57203910d..3f745de44284 100644 --- a/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi @@ -81,28 +81,28 @@ cpu0: PowerPC,e6500@0 { device_type = "cpu"; reg = <0 1>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu1: PowerPC,e6500@2 { device_type = "cpu"; reg = <2 3>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu2: PowerPC,e6500@4 { device_type = "cpu"; reg = <4 5>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu3: PowerPC,e6500@6 { device_type = "cpu"; reg = <6 7>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi index 68c4eadc19e3..fcac73486d48 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi @@ -950,67 +950,6 @@ /include/ "qoriq-clockgen2.dtsi" global-utilities@e1000 { compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; - - pll2: pll2@840 { - #clock-cells = <1>; - reg = <0x840 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll2", "pll2-div2", "pll2-div4"; - }; - - pll3: pll3@860 { - #clock-cells = <1>; - reg = <0x860 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll3", "pll3-div2", "pll3-div4"; - }; - - pll4: pll4@880 { - #clock-cells = <1>; - reg = <0x880 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll4", "pll4-div2", "pll4-div4"; - }; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>, - <&pll2 0>, <&pll2 1>, <&pll2 2>; - clock-names = "pll0", "pll0-div2", "pll0-div4", - "pll1", "pll1-div2", "pll1-div4", - "pll2", "pll2-div2", "pll2-div4"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>, - <&pll1 0>, <&pll1 1>, <&pll1 2>, - <&pll2 0>, <&pll2 1>, <&pll2 2>; - clock-names = "pll0", "pll0-div2", "pll0-div4", - "pll1", "pll1-div2", "pll1-div4", - "pll2", "pll2-div2", "pll2-div4"; - clock-output-names = "cmux1"; 
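A recurring pattern in the device tree changes above and below: the per-core mux nodes (mux0, mux1, ...) are deleted and each cpu node's clocks property becomes a two-cell specifier against the clockgen node itself, which now advertises #clock-cells = <2>. Per the qoriq-clock binding, the first cell selects the clock type (1 being the core-complex mux) and the second the instance, so <&clockgen 1 2> names what the old cmux2 node modelled, with the PLL/mux topology moved into the clockgen driver. Consumers are unaffected, since lookup still goes through the common clock framework; a minimal consumer-side sketch (hypothetical helper, not code from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	/* Resolve a cpu node's core clock via its clocks = <&clockgen 1 N> phandle. */
	static unsigned long example_cpu_clk_rate(struct device_node *cpu_np)
	{
		struct clk *clk = of_clk_get(cpu_np, 0);
		unsigned long rate;

		if (IS_ERR(clk))
			return 0;
		rate = clk_get_rate(clk);
		clk_put(clk);
		return rate;
	}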
- }; - - mux2: mux2@40 { - #clock-cells = <0>; - reg = <0x40 0x4>; - compatible = "fsl,qoriq-core-mux-2.0"; - clocks = <&pll3 0>, <&pll3 1>, <&pll3 2>, - <&pll4 0>, <&pll4 1>, <&pll4 2>; - clock-names = "pll3", "pll3-div2", "pll3-div4", - "pll4", "pll4-div2", "pll4-div4"; - clock-output-names = "cmux2"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi index 038cf8fadee4..632314c6faa9 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi @@ -90,84 +90,84 @@ cpu0: PowerPC,e6500@0 { device_type = "cpu"; reg = <0 1>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu1: PowerPC,e6500@2 { device_type = "cpu"; reg = <2 3>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu2: PowerPC,e6500@4 { device_type = "cpu"; reg = <4 5>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu3: PowerPC,e6500@6 { device_type = "cpu"; reg = <6 7>; - clocks = <&mux0>; + clocks = <&clockgen 1 0>; next-level-cache = <&L2_1>; fsl,portid-mapping = <0x80000000>; }; cpu4: PowerPC,e6500@8 { device_type = "cpu"; reg = <8 9>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x40000000>; }; cpu5: PowerPC,e6500@10 { device_type = "cpu"; reg = <10 11>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x40000000>; }; cpu6: PowerPC,e6500@12 { device_type = "cpu"; reg = <12 13>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x40000000>; }; cpu7: PowerPC,e6500@14 { device_type = "cpu"; reg = <14 15>; - clocks = <&mux1>; + clocks = <&clockgen 1 1>; next-level-cache = <&L2_2>; fsl,portid-mapping = <0x40000000>; }; cpu8: PowerPC,e6500@16 { device_type = "cpu"; reg = <16 17>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x20000000>; }; cpu9: PowerPC,e6500@18 { device_type = "cpu"; reg = <18 19>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x20000000>; }; cpu10: PowerPC,e6500@20 { device_type = "cpu"; reg = <20 21>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x20000000>; }; cpu11: PowerPC,e6500@22 { device_type = "cpu"; reg = <22 23>; - clocks = <&mux2>; + clocks = <&clockgen 1 2>; next-level-cache = <&L2_3>; fsl,portid-mapping = <0x20000000>; }; diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts index 647cae14c16d..be6ef3531b28 100644 --- a/arch/powerpc/boot/dts/mpc832x_rdb.dts +++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts @@ -311,13 +311,9 @@ compatible = "fsl,ucc-mdio"; phy00:ethernet-phy@0 { - interrupt-parent = <&ipic>; - interrupts = <0>; reg = <0x0>; }; phy04:ethernet-phy@4 { - interrupt-parent = <&ipic>; - interrupts = <0>; reg = <0x4>; }; }; diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index f045f8494bf9..b0491b8c0199 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -93,7 +93,8 @@ static void *serial_get_stdout_devp(void) if (devp == NULL) goto err_out; - if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) { + if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0 || + 
getprop(devp, "stdout-path", path, MAX_PATH_LEN) > 0) { devp = finddevice(path); if (devp == NULL) goto err_out; diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config index e0567dc41968..d592ba27b122 100644 --- a/arch/powerpc/configs/fsl-emb-nonhw.config +++ b/arch/powerpc/configs/fsl-emb-nonhw.config @@ -25,6 +25,7 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEVTMPFS_MOUNT=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index f686cc1eac0b..ceb3c770786f 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -246,7 +246,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y diff --git a/arch/powerpc/configs/guest.config b/arch/powerpc/configs/guest.config new file mode 100644 index 000000000000..8b8cd18ecd7c --- /dev/null +++ b/arch/powerpc/configs/guest.config @@ -0,0 +1,13 @@ +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_SCSI_VIRTIO=y +CONFIG_VIRTIO_NET=y +CONFIG_NET_FAILOVER=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI=y +CONFIG_KVM_GUEST=y +CONFIG_EPAPR_PARAVIRT=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VHOST_NET=y +CONFIG_VHOST=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index f71eddafb02f..c5f2005005d3 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -108,7 +108,6 @@ CONFIG_LATENCYTOP=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 62948d198d7f..50b610b48914 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -297,7 +297,6 @@ CONFIG_LATENCYTOP=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_SHA512=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f2515674a1e2..91fdb619b484 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -1,4 +1,3 @@ -CONFIG_PPC64=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y @@ -9,21 +8,22 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_LOG_CPU_MAX_BUF_SHIFT=13 +CONFIG_NUMA_BALANCING=y CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PARTITION_ADVANCED=y +CONFIG_PPC64=y +CONFIG_NR_CPUS=2048 CONFIG_PPC_SPLPAR=y CONFIG_DTL=y CONFIG_SCANLOG=m @@ -45,14 +45,11 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_PMAC64=y CONFIG_HZ_100=y -CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y CONFIG_CRASH_DUMP=y CONFIG_IRQ_ALL_CPUS=y -CONFIG_KSM=y -CONFIG_TRANSPARENT_HUGEPAGE=y 
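One note on the new guest.config added above: it is a kconfig fragment rather than a standalone defconfig. Assuming it is meant to be consumed the way kbuild handles other fragments found in arch/powerpc/configs (an assumption; the diff itself does not say), it would be layered onto a base configuration, e.g.:

	make ARCH=powerpc ppc64_defconfig guest.config

merging the virtio/KVM-guest options into the resulting .config.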
CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y CONFIG_HOTPLUG_PCI=y @@ -60,6 +57,23 @@ CONFIG_HOTPLUG_PCI_RPA=m CONFIG_HOTPLUG_PCI_RPA_DLPAR=m CONFIG_PCCARD=y CONFIG_ELECTRA_CF=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM_BOOK3S_64=m +CONFIG_KVM_BOOK3S_64_HV=m +CONFIG_VHOST_NET=m +CONFIG_OPROFILE=m +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BINFMT_MISC=m +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -163,7 +177,6 @@ CONFIG_TIGON3=y CONFIG_BNX2X=m CONFIG_CHELSIO_T1=m CONFIG_BE2NET=m -CONFIG_S2IO=m CONFIG_IBMVETH=m CONFIG_EHEA=m CONFIG_E100=y @@ -174,6 +187,7 @@ CONFIG_IXGBE=m CONFIG_I40E=m CONFIG_MLX4_EN=m CONFIG_MYRI10GE=m +CONFIG_S2IO=m CONFIG_PASEMI_MAC=y CONFIG_QLGE=m CONFIG_NETXEN_NIC=m @@ -284,7 +298,7 @@ CONFIG_REISERFS_FS_SECURITY=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y -CONFIG_XFS_FS=m +CONFIG_XFS_FS=y CONFIG_XFS_POSIX_ACL=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y @@ -323,25 +337,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_LATENCYTOP=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -CONFIG_SCHED_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_CODE_PATCHING_SELFTEST=y -CONFIG_FTR_FIXUP_SELFTEST=y -CONFIG_MSI_BITMAP_SELFTEST=y -CONFIG_XMON=y -CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y @@ -364,8 +359,20 @@ CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_CRYPTO_DEV_VMX=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM_BOOK3S_64=m -CONFIG_KVM_BOOK3S_64_HV=m -CONFIG_VHOST_NET=m CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_LATENCYTOP=y +CONFIG_FUNCTION_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_CODE_PATCHING_SELFTEST=y +CONFIG_FTR_FIXUP_SELFTEST=y +CONFIG_MSI_BITMAP_SELFTEST=y +CONFIG_XMON=y +CONFIG_BOOTX_TEXT=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7ee736f20774..53687c3a70c4 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1155,7 +1155,6 @@ CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_XMON=y CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_NETWORK_XFRM=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 5e09a40cbcbf..ea79c519863d 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -290,9 +290,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y -CONFIG_FTRACE=y CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_CODE_PATCHING_SELFTEST=y diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 3196d227e351..77ff7fb24823 100644 --- a/arch/powerpc/include/asm/Kbuild +++ 
b/arch/powerpc/include/asm/Kbuild @@ -1,3 +1,7 @@ +generated-y += syscall_table_32.h +generated-y += syscall_table_64.h +generated-y += syscall_table_c32.h +generated-y += syscall_table_spu.h generic-y += div64.h generic-y += export.h generic-y += irq_regs.h diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index ec691d489656..6f201b199c02 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -61,7 +61,6 @@ void RunModeException(struct pt_regs *regs); void single_step_exception(struct pt_regs *regs); void program_check_exception(struct pt_regs *regs); void alignment_exception(struct pt_regs *regs); -void slb_miss_bad_addr(struct pt_regs *regs); void StackOverflow(struct pt_regs *regs); void kernel_fp_unavailable_exception(struct pt_regs *regs); void altivec_unavailable_exception(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h index f2892c7ab73e..2a0a467d2985 100644 --- a/arch/powerpc/include/asm/book3s/32/hash.h +++ b/arch/powerpc/include/asm/book3s/32/hash.h @@ -26,6 +26,7 @@ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ #define _PAGE_DIRTY 0x080 /* C: page changed */ #define _PAGE_ACCESSED 0x100 /* R: page referenced */ +#define _PAGE_EXEC 0x200 /* software: exec allowed */ #define _PAGE_RW 0x400 /* software: user write access allowed */ #define _PAGE_SPECIAL 0x800 /* software: Special page */ diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index e38c91388c40..0c261ba2c826 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ #define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ + /* * 32-bit hash table MMU support */ @@ -9,6 +10,8 @@ * BATs */ +#include <asm/page.h> + /* Block size masks */ #define BL_128K 0x000 #define BL_256K 0x001 @@ -34,14 +37,20 @@ #define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \ ((x & 0x0000000e00000000ULL) >> 24) | \ ((x & 0x0000000100000000ULL) >> 30))) +#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \ + (((u64)(x) << 24) & 0x0000000e00000000ULL) | \ + (((u64)(x) << 30) & 0x0000000100000000ULL)) #else #define BAT_PHYS_ADDR(x) (x) +#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000) #endif struct ppc_bat { u32 batu; u32 batl; }; + +typedef pte_t *pgtable_t; #endif /* !__ASSEMBLY__ */ /* @@ -83,6 +92,12 @@ typedef struct { unsigned long vdso_base; } mm_context_t; +/* patch sites */ +extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2; +extern s32 patch__hash_page_B, patch__hash_page_C; +extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2; +extern s32 patch__flush_hash_B; + #endif /* !__ASSEMBLY__ */ /* We happily ignore the smaller BATs on 601, we don't actually use diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index 82e44b1a00ae..b5b955eb2fb7 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -25,10 +25,7 @@ extern void __bad_pte(pmd_t *pmd); extern struct kmem_cache *pgtable_cache[]; -#define PGT_CACHE(shift) ({ \ - BUG_ON(!(shift)); \ - pgtable_cache[(shift) - 1]; \ - }) +#define PGT_CACHE(shift) pgtable_cache[shift] static inline pgd_t *pgd_alloc(struct mm_struct *mm) { @@ -50,8 +47,6 @@ 
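An easy-to-miss detail in the PGT_CACHE() change above: the cache array is now indexed by the shift value directly instead of shift - 1, and the BUG_ON(!(shift)) guard is gone, so a shift of 0 resolves to an ordinary (presumably never-populated) slot rather than crashing. Call sites keep their shape; an illustrative allocation site (sketch only, not taken from this patch):

	/* Typical PGT_CACHE() consumer after the change; the cache is populated at init. */
	static pgd_t *example_pgd_alloc(struct mm_struct *mm)
	{
		return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
					GFP_KERNEL | __GFP_ZERO);
	}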
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) #define __pmd_free_tlb(tlb,x,a) do { } while (0) /* #define pgd_populate(mm, pmd, pte) BUG() */ -#ifndef CONFIG_BOOKE - static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte) { @@ -61,46 +56,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pte_page) { - *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); -} - -#define pmd_pgtable(pmd) pmd_page(pmd) -#else - -static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, - pte_t *pte) -{ - *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); -} - -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, - pgtable_t pte_page) -{ - *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); + *pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT); } -#define pmd_pgtable(pmd) pmd_page(pmd) -#endif +#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); +void pte_frag_destroy(void *pte_frag); +pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); +void pte_fragment_free(unsigned long *table, int kernel); static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - free_page((unsigned long)pte); + pte_fragment_free((unsigned long *)pte, 1); } static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) { - pgtable_page_dtor(ptepage); - __free_page(ptepage); + pte_fragment_free((unsigned long *)ptepage, 0); } static inline void pgtable_free(void *table, unsigned index_size) { if (!index_size) { - pgtable_page_dtor(virt_to_page(table)); - free_page((unsigned long)table); + pte_fragment_free((unsigned long *)table, 0); } else { BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); kmem_cache_free(PGT_CACHE(index_size), table); @@ -138,6 +118,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - pgtable_free_tlb(tlb, page_address(table), 0); + pgtable_free_tlb(tlb, table, 0); } #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index c21d33704633..49d76adb9bc5 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -10,9 +10,9 @@ /* And here we include common definitions */ #define _PAGE_KERNEL_RO 0 -#define _PAGE_KERNEL_ROX 0 +#define _PAGE_KERNEL_ROX (_PAGE_EXEC) #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) -#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) #define _PAGE_HPTEFLAGS _PAGE_HASHPTE @@ -66,11 +66,11 @@ static inline bool pte_user(pte_t pte) */ #define PAGE_NONE __pgprot(_PAGE_BASE) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER) 
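The thread running through these book3s/32 pgtable changes: _PAGE_EXEC (0x200, defined earlier in hash.h) turns execute permission into a real per-PTE software bit, so the _X protection variants here, plus pte_exec()/pte_mkexec()/pte_exprotect() and __ptep_set_access_flags() further down, all start honoring it instead of treating every mapping as executable. A minimal illustration of the new semantics (hypothetical helper, not part of the patch):

	/* Before this change pte_exec() was hardwired to true on book3s/32. Now: */
	static bool example_pte_allows_exec(pte_t pte)
	{
		return pte_present(pte) && pte_exec(pte); /* pte_exec() tests _PAGE_EXEC */
	}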
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) @@ -318,7 +318,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, int psize) { unsigned long set = pte_val(entry) & - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); pte_update(ptep, 0, set); @@ -328,24 +328,10 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) -/* - * Note that on Book E processors, the pmd contains the kernel virtual - * (lowmem) address of the pte page. The physical address is less useful - * because everything runs with translation enabled (even the TLB miss - * handler). On everything else the pmd contains the physical address - * of the pte page. -- paulus - */ -#ifndef CONFIG_BOOKE #define pmd_page_vaddr(pmd) \ - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) #define pmd_page(pmd) \ pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) -#else -#define pmd_page_vaddr(pmd) \ - ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) -#define pmd_page(pmd) \ - pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) -#endif /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) @@ -360,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define pte_offset_kernel(dir, addr) \ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) #define pte_offset_map(dir, addr) \ - ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) + ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \ + (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr)) #define pte_unmap(pte) kunmap_atomic(pte) /* @@ -384,7 +371,7 @@ static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } -static inline bool pte_exec(pte_t pte) { return true; } +static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } static inline int pte_present(pte_t pte) { @@ -451,7 +438,7 @@ static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_exprotect(pte_t pte) { - return pte; + return __pte(pte_val(pte) & ~_PAGE_EXEC); } static inline pte_t pte_mkclean(pte_t pte) @@ -466,7 +453,7 @@ static inline pte_t pte_mkold(pte_t pte) static inline pte_t pte_mkexec(pte_t pte) { - return pte; + return __pte(pte_val(pte) | _PAGE_EXEC); } static inline pte_t pte_mkpte(pte_t pte) @@ -524,7 +511,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { -#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) +#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the * helper pte_update() which does an atomic update. We need to do that * because a concurrent invalidation can clear _PAGE_HASHPTE. 
If it's a @@ -537,7 +524,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, else pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); -#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) +#elif defined(CONFIG_PTE_64BIT) /* Second case is 32-bit with 64-bit PTE. In this case, we * can just store as long as we do the two halves in the right order * with a barrier in between. This is possible because we take care, @@ -560,7 +547,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); -#elif defined(CONFIG_PPC_STD_MMU_32) +#else /* Third case is 32-bit hash table in UP mode, we need to preserve * the _PAGE_HASHPTE bit since we may not have invalidated the previous * translation in the hash yet (done in a subsequent flush_tlb_xxx()) @@ -568,9 +555,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, */ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | (pte_val(pte) & ~_PAGE_HASHPTE)); - -#else -#error "Not supported " #endif } diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 15bc16b1dc9c..cf5ba5254299 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H #define _ASM_POWERPC_BOOK3S_64_HASH_4K_H -/* - * Entries per page directory level. The PTE level must use a 64b record - * for each page table entry. The PMD and PGD level use a 32b record for - * each entry by assuming that each entry is page aligned. - */ + #define H_PTE_INDEX_SIZE 9 #define H_PMD_INDEX_SIZE 7 #define H_PUD_INDEX_SIZE 9 diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 6328857f259f..1ceee000c18d 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_ #define _ASM_POWERPC_BOOK3S_64_MMU_H_ +#include <asm/page.h> + #ifndef __ASSEMBLY__ /* * Page size definition @@ -24,6 +26,13 @@ struct mmu_psize_def { }; extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; +/* + * For BOOK3s 64 with 4k and 64K linux page size + * we want to use pointers, because the page table + * actually store pfn + */ +typedef pte_t *pgtable_t; + #endif /* __ASSEMBLY__ */ /* 64-bit classic hash table MMU */ diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 391ed2c3b697..4aba625389c4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -37,10 +37,7 @@ extern struct vmemmap_backing *vmemmap_list; #define MAX_PGTABLE_INDEX_SIZE 0xf extern struct kmem_cache *pgtable_cache[]; -#define PGT_CACHE(shift) ({ \ - BUG_ON(!(shift)); \ - pgtable_cache[(shift) - 1]; \ - }) +#define PGT_CACHE(shift) pgtable_cache[shift] extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); @@ -50,6 +47,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); #ifdef CONFIG_SMP extern void __tlb_remove_table(void *_table); #endif +void pte_frag_destroy(void *pte_frag); static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 
6c99e846a8c9..2e6ada28da64 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1304,7 +1304,7 @@ static inline int pgd_devmap(pgd_t pgd) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline const int pud_pfn(pud_t pud) +static inline int pud_pfn(pud_t pud) { /* * Currently all calls to pud_pfn() are gated around a pud_devmap() diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 66298461b640..40ea5b3781c6 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -71,7 +71,7 @@ extern struct ppc64_caches ppc64_caches; #else #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 extern long _get_L2CR(void); extern long _get_L3CR(void); extern void _set_L2CR(unsigned long); diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 3d5acd2b113a..2074b40f3fb5 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -33,14 +33,33 @@ unsigned int create_cond_branch(const unsigned int *addr, int patch_branch(unsigned int *addr, unsigned long target, int flags); int patch_instruction(unsigned int *addr, unsigned int instr); int raw_patch_instruction(unsigned int *addr, unsigned int instr); -int patch_instruction_site(s32 *addr, unsigned int instr); -int patch_branch_site(s32 *site, unsigned long target, int flags); static inline unsigned long patch_site_addr(s32 *site) { return (unsigned long)site + *site; } +static inline int patch_instruction_site(s32 *site, unsigned int instr) +{ + return patch_instruction((unsigned int *)patch_site_addr(site), instr); +} + +static inline int patch_branch_site(s32 *site, unsigned long target, int flags) +{ + return patch_branch((unsigned int *)patch_site_addr(site), target, flags); +} + +static inline int modify_instruction(unsigned int *addr, unsigned int clr, + unsigned int set) +{ + return patch_instruction(addr, (*addr & ~clr) | set); +} + +static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set) +{ + return modify_instruction((unsigned int *)patch_site_addr(site), clr, set); +} + int instr_is_relative_branch(unsigned int instr); int instr_is_relative_link_branch(unsigned int instr); int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 29f49a35d6ee..d05f0c28e515 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -44,6 +44,7 @@ extern int machine_check_e500(struct pt_regs *regs); extern int machine_check_e200(struct pt_regs *regs); extern int machine_check_47x(struct pt_regs *regs); int machine_check_8xx(struct pt_regs *regs); +int machine_check_83xx(struct pt_regs *regs); extern void cpu_down_flush_e500v2(void); extern void cpu_down_flush_e500mc(void); @@ -296,7 +297,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC) #define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE) #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ @@ -367,15 +368,15 @@ static 
inline void cpu_feature_keys_init(void) { } CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) -#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE) +#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | \ - CPU_FTR_COMMON) + CPU_FTR_COMMON | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | \ - CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) + CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON) #define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE) #define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 8fa394520af6..ebf66809f2d3 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -39,9 +39,6 @@ extern int dma_nommu_mmap_coherent(struct device *dev, * to ensure it is consistent. */ struct device; -extern void *__dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp); -extern void __dma_free_coherent(size_t size, void *vaddr); extern void __dma_sync(void *vaddr, size_t size, int direction); extern void __dma_sync_page(struct page *page, unsigned long offset, size_t size, int direction); @@ -52,8 +49,6 @@ extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); * Cache coherent cores. */ -#define __dma_alloc_coherent(dev, gfp, size, handle) NULL -#define __dma_free_coherent(size, addr) ((void)0) #define __dma_sync(addr, size, rw) ((void)0) #define __dma_sync_page(pg, off, sz, rw) ((void)0) @@ -108,11 +103,8 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) } #define HAVE_ARCH_DMA_SET_MASK 1 -extern int dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); -#define ARCH_HAS_DMA_MMAP_COHERENT - #endif /* __KERNEL__ */ #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 1e7a33592e29..188776befaf9 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -48,6 +48,10 @@ #define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt) +/* Alignment per CMA requirement. 
*/ +#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \ + max_t(unsigned long, MAX_ORDER - 1, pageblock_order)) + /* Firmware provided dump sections */ #define FADUMP_CPU_STATE_DATA 0x0001 #define FADUMP_HPTE_REGION 0x0002 @@ -141,6 +145,7 @@ struct fw_dump { unsigned long fadump_supported:1; unsigned long dump_active:1; unsigned long dump_registered:1; + unsigned long nocma:1; }; /* @@ -200,7 +205,7 @@ struct fad_crash_memory_ranges { unsigned long long size; }; -extern int is_fadump_boot_memory_area(u64 addr, ulong size); +extern int is_fadump_memory_area(u64 addr, ulong size); extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data); extern int fadump_reserve_mem(void); diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 33b6f9c892c8..40a6c9261a6b 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -221,6 +221,17 @@ label##3: \ FTR_ENTRY_OFFSET 953b-954b; \ .popsection; +#define START_BTB_FLUSH_SECTION \ +955: \ + +#define END_BTB_FLUSH_SECTION \ +956: \ + .pushsection __btb_flush_fixup,"a"; \ + .align 2; \ +957: \ + FTR_ENTRY_OFFSET 955b-957b; \ + FTR_ENTRY_OFFSET 956b-957b; \ + .popsection; #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -230,6 +241,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; +extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; void apply_feature_fixups(void); void setup_feature_keys(void); diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 383da1ab9e23..8d40565ad0c3 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -5,8 +5,6 @@ #ifdef CONFIG_HUGETLB_PAGE #include <asm/page.h> -extern struct kmem_cache *hugepte_cache; - #ifdef CONFIG_PPC_BOOK3S_64 #include <asm/book3s/64/hugetlb.h> @@ -76,7 +74,9 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, unsigned long idx = 0; pte_t *dir = hugepd_page(hpd); -#ifndef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_8xx + idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT; +#elif !defined(CONFIG_PPC_FSL_BOOK3E) idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd); #endif @@ -129,15 +129,14 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - pte_t pte; - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); flush_hugetlb_page(vma, addr); } #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS -extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty); +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty); static inline void arch_clear_hugepage_flags(struct page *page) { diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 33a4fc891947..463c63a9fcf1 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -335,6 +335,7 @@ #define H_SET_PARTITION_TABLE 0xF800 #define H_ENTER_NESTED 0xF804 #define H_TLB_INVALIDATE 0xF808 +#define H_COPY_TOFROM_GUEST 0xF80C /* 
Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index e746becd9d6f..7f19fbd3ba55 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -29,12 +29,14 @@ extern struct pci_dev *isa_bridge_pcidev; #include <linux/device.h> #include <linux/compiler.h> +#include <linux/mm.h> #include <asm/page.h> #include <asm/byteorder.h> #include <asm/synch.h> #include <asm/delay.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> +#include <asm/pgtable.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> @@ -804,6 +806,8 @@ extern void __iounmap_at(void *ea, unsigned long size); */ static inline unsigned long virt_to_phys(volatile void * address) { + WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address)); + return __pa((unsigned long)address); } @@ -827,7 +831,14 @@ static inline void * phys_to_virt(unsigned long address) /* * Change "struct page" to physical address. */ -#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) +static inline phys_addr_t page_to_phys(struct page *page) +{ + unsigned long pfn = page_to_pfn(page); + + WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !pfn_valid(pfn)); + + return PFN_PHYS(pfn); +} /* * 32 bits still uses virt_to_bus() for its implementation of DMA diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 35db0cbc9222..17524d222a7b 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -143,8 +143,6 @@ struct scatterlist; #ifdef CONFIG_PPC64 -#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0) - static inline void set_iommu_table_base(struct device *dev, struct iommu_table *base) { @@ -215,11 +213,12 @@ struct iommu_table_group { extern void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, unsigned long pe_num); -extern int iommu_add_device(struct device *dev); +extern int iommu_add_device(struct iommu_table_group *table_group, + struct device *dev); extern void iommu_del_device(struct device *dev); -extern int __init tce_iommu_bus_notifier_init(void); -extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction); +extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, + unsigned long entry, unsigned long *hpa, + enum dma_data_direction *direction); #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, @@ -227,7 +226,8 @@ static inline void iommu_register_group(struct iommu_table_group *table_group, { } -static inline int iommu_add_device(struct device *dev) +static inline int iommu_add_device(struct iommu_table_group *table_group, + struct device *dev) { return 0; } @@ -235,15 +235,8 @@ static inline int iommu_add_device(struct device *dev) static inline void iommu_del_device(struct device *dev) { } - -static inline int __init tce_iommu_bus_notifier_init(void) -{ - return 0; -} #endif /* !CONFIG_IOMMU_API */ -int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr); - #else static inline void *get_iommu_table_base(struct device *dev) diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h index fb59829983b8..3dbd47f2bffe 100644 --- a/arch/powerpc/include/asm/ipic.h +++ b/arch/powerpc/include/asm/ipic.h @@ -69,7 +69,6 @@ enum ipic_mcp_irq { IPIC_MCP_MU = 7, }; -extern int ipic_set_priority(unsigned int irq, unsigned int priority); extern void
ipic_set_highest_priority(unsigned int irq); extern void ipic_set_default_priority(void); extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq); diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 09f8e9ba69bc..38f1b879f569 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -188,6 +188,13 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr); +extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, + gva_t eaddr, void *to, void *from, + unsigned long n); +extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, + void *to, unsigned long n); +extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, + void *from, unsigned long n); extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p); @@ -196,8 +203,11 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, int table_index, u64 *pte_ret_p); extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); +extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, + unsigned int pshift, unsigned int lpid); extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, - unsigned int shift, struct kvm_memory_slot *memslot, + unsigned int shift, + const struct kvm_memory_slot *memslot, unsigned int lpid); extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, unsigned long gpa, @@ -215,16 +225,14 @@ extern int kvmppc_radix_init(void); extern void kvmppc_radix_exit(void); extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); -extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, - unsigned long gpa, unsigned int shift, - struct kvm_memory_slot *memslot, - unsigned int lpid); extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map); +extern void kvmppc_radix_flush_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot); extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info); /* XXX remove this export when load_last_inst() is generic */ @@ -242,7 +250,7 @@ extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, bool *writable); extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, unsigned long *rmap, long pte_index, int realmode); -extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot, +extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, unsigned long gfn, unsigned long psize); extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index); @@ -298,6 +306,7 @@ long kvmhv_nested_init(void); void kvmhv_nested_exit(void); void kvmhv_vm_nested_init(struct kvm *kvm); long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); +long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu); void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); void kvmhv_release_all_nested(struct kvm *kvm); long 
kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); @@ -307,7 +316,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); -long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); +long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu); void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 6d298145d564..21b1ed5df888 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -55,6 +55,7 @@ struct kvm_nested_guest { cpumask_t need_tlb_flush; cpumask_t cpu_in_guest; short prev_cpu[NR_CPUS]; + u8 radix; /* is this nested guest radix */ }; /* @@ -150,6 +151,18 @@ static inline bool kvm_is_radix(struct kvm *kvm) return kvm->arch.radix; } +static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu) +{ + bool radix; + + if (vcpu->arch.nested) + radix = vcpu->arch.nested->radix; + else + radix = kvm_is_radix(vcpu->kvm); + + return radix; +} + #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */ #endif @@ -624,8 +637,11 @@ extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long *rmapp, struct rmap_nested **n_rmap); extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, struct rmap_nested **n_rmap); +extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp, + unsigned long clr, unsigned long set, + unsigned long hpa, unsigned long nbytes); extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm, - struct kvm_memory_slot *memslot, + const struct kvm_memory_slot *memslot, unsigned long gpa, unsigned long hpa, unsigned long nbytes); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index fac6f631ed29..0f98f00da2ea 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -72,7 +72,7 @@ extern int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 @@ -793,6 +793,7 @@ struct kvm_vcpu_arch { /* For support of nested guests */ struct kvm_nested_guest *nested; u32 nested_vcpu_id; + gpa_t nested_io_gpr; #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING @@ -827,6 +828,8 @@ struct kvm_vcpu_arch { #define KVM_MMIO_REG_FQPR 0x00c0 #define KVM_MMIO_REG_VSX 0x0100 #define KVM_MMIO_REG_VMX 0x0180 +#define KVM_MMIO_REG_NESTED_GPR 0xffc0 + #define __KVM_HAVE_ARCH_WQP #define __KVM_HAVE_CREATE_DEVICE diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9b89b1918dfc..eb0d79f0ca45 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -224,7 +224,8 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, extern void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new); + const struct kvm_memory_slot *new, + enum kvm_mr_change change); extern 
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info); extern void kvmppc_core_flush_memslot(struct kvm *kvm, @@ -294,7 +295,8 @@ struct kvmppc_ops { void (*commit_memory_region)(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new); + const struct kvm_memory_slot *new, + enum kvm_mr_change change); int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, unsigned long end); int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end); @@ -326,6 +328,10 @@ struct kvmppc_ops { unsigned long flags); void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr); int (*enable_nested)(struct kvm *kvm); + int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, + int size); + int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, + int size); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index eb20eb3b8fb0..25607604a7a5 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -48,7 +48,7 @@ #define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000) /* Enable >32-bit physical addresses on 32-bit processor, only used - * by CONFIG_6xx currently as BookE supports that from day 1 + * by CONFIG_PPC_BOOK3S_32 currently as BookE supports that from day 1 */ #define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000) @@ -131,16 +131,37 @@ DECLARE_PER_CPU(int, next_tlbcam_idx); #endif enum { - MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx | - MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E | - MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX | - MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU | + MMU_FTRS_POSSIBLE = +#ifdef CONFIG_PPC_BOOK3S + MMU_FTR_HPTE_TABLE | +#endif +#ifdef CONFIG_PPC_8xx + MMU_FTR_TYPE_8xx | +#endif +#ifdef CONFIG_40x + MMU_FTR_TYPE_40x | +#endif +#ifdef CONFIG_44x + MMU_FTR_TYPE_44x | +#endif +#if defined(CONFIG_E200) || defined(CONFIG_E500) + MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX | +#endif +#ifdef CONFIG_PPC_47x + MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL | +#endif +#ifdef CONFIG_PPC_BOOK3S_32 + MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU | +#endif +#ifdef CONFIG_PPC_BOOK3E_64 MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS | +#endif +#ifdef CONFIG_PPC_BOOK3S_64 MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE | MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA | +#endif #ifdef CONFIG_PPC_RADIX_MMU MMU_FTR_TYPE_RADIX | #endif @@ -338,21 +359,11 @@ static inline void mmu_early_init_devtree(void) { } #endif /* __ASSEMBLY__ */ #endif -#if defined(CONFIG_PPC_STD_MMU_32) +#if defined(CONFIG_PPC_BOOK3S_32) /* 32-bit classic hash table MMU */ #include <asm/book3s/32/mmu-hash.h> -#elif defined(CONFIG_40x) -/* 40x-style software loaded TLB */ -# include <asm/mmu-40x.h> -#elif defined(CONFIG_44x) -/* 44x-style software loaded TLB */ -# include <asm/mmu-44x.h> -#elif defined(CONFIG_PPC_BOOK3E_MMU) -/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ -# include <asm/mmu-book3e.h> -#elif defined (CONFIG_PPC_8xx) -/* Motorola/Freescale 8xx software loaded TLB */ -# include <asm/mmu-8xx.h> +#elif defined(CONFIG_PPC_MMU_NOHASH) +#include <asm/nohash/mmu.h> #endif #endif /* __KERNEL__ */ diff --git 
a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 0381394a425b..6ee8195a2ffb 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -21,9 +21,12 @@ struct mm_iommu_table_group_mem_t; extern int isolate_lru_page(struct page *page); /* from internal.h */ extern bool mm_iommu_preregistered(struct mm_struct *mm); -extern long mm_iommu_get(struct mm_struct *mm, +extern long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem); +extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, + unsigned long entries, unsigned long dev_hpa, + struct mm_iommu_table_group_mem_t **pmem); extern long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_init(struct mm_struct *mm); @@ -32,15 +35,23 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size); extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( struct mm_struct *mm, unsigned long ua, unsigned long size); -extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, +extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua); +extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, + unsigned int pageshift, unsigned long *size); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); +#else +static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, + unsigned int pageshift, unsigned long *size) +{ + return false; +} #endif extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); extern void set_context(unsigned long id, pgd_t *pgd); @@ -217,13 +228,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline int arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) -{ - return 0; -} - -#ifndef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_BOOK3E_64 static inline void arch_exit_mmap(struct mm_struct *mm) { } @@ -247,6 +252,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, #ifdef CONFIG_PPC_MEM_KEYS bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign); +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm); #else /* CONFIG_PPC_MEM_KEYS */ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign) @@ -259,6 +265,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, #define thread_pkey_regs_save(thread) #define thread_pkey_regs_restore(new_thread, old_thread) #define thread_pkey_regs_init(thread) +#define arch_dup_pkeys(oldmm, mm) static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) { @@ -267,5 +274,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) #endif /* CONFIG_PPC_MEM_KEYS */ +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + arch_dup_pkeys(oldmm, mm); + return 0; +} + #endif /* __KERNEL__ */ #endif 
/* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h index 74f4edb5916e..74f4edb5916e 100644 --- a/arch/powerpc/include/asm/mmu-40x.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h index 295b3dbb2698..28aa3b339c5e 100644 --- a/arch/powerpc/include/asm/mmu-44x.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h @@ -111,6 +111,9 @@ typedef struct { unsigned long vdso_base; } mm_context_t; +/* patch sites */ +extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I; + #endif /* !__ASSEMBLY__ */ #ifndef CONFIG_PPC_EARLY_DEBUG_44x diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index fa05aa566ece..b0f764c827c0 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -190,6 +190,7 @@ typedef struct { struct slice_mask mask_8m; # endif #endif + void *pte_frag; } mm_context_t; #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) @@ -244,6 +245,9 @@ extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; #define mmu_virtual_psize MMU_PAGE_4K #elif defined(CONFIG_PPC_16K_PAGES) #define mmu_virtual_psize MMU_PAGE_16K +#define PTE_FRAG_NR 4 +#define PTE_FRAG_SIZE_SHIFT 12 +#define PTE_FRAG_SIZE (1UL << 12) #else #error "Unsupported PAGE_SIZE" #endif diff --git a/arch/powerpc/include/asm/nohash/32/mmu.h b/arch/powerpc/include/asm/nohash/32/mmu.h new file mode 100644 index 000000000000..7d94a36d57d2 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/mmu.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_MMU_H_ +#define _ASM_POWERPC_NOHASH_32_MMU_H_ + +#include <asm/page.h> + +#if defined(CONFIG_40x) +/* 40x-style software loaded TLB */ +#include <asm/nohash/32/mmu-40x.h> +#elif defined(CONFIG_44x) +/* 44x-style software loaded TLB */ +#include <asm/nohash/32/mmu-44x.h> +#elif defined(CONFIG_PPC_BOOK3E_MMU) +/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ +#include <asm/nohash/mmu-book3e.h> +#elif defined (CONFIG_PPC_8xx) +/* Motorola/Freescale 8xx software loaded TLB */ +#include <asm/nohash/32/mmu-8xx.h> +#endif + +#ifndef __ASSEMBLY__ +typedef pte_t *pgtable_t; +#endif + +#endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 8825953c225b..17963951bdb0 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -25,10 +25,7 @@ extern void __bad_pte(pmd_t *pmd); extern struct kmem_cache *pgtable_cache[]; -#define PGT_CACHE(shift) ({ \ - BUG_ON(!(shift)); \ - pgtable_cache[(shift) - 1]; \ - }) +#define PGT_CACHE(shift) pgtable_cache[shift] static inline pgd_t *pgd_alloc(struct mm_struct *mm) { @@ -61,11 +58,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pte_page) { - *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER | - _PMD_PRESENT); + *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT); } -#define pmd_pgtable(pmd) pmd_page(pmd) +#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) #else static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, @@ -77,31 +73,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate(struct 
mm_struct *mm, pmd_t *pmdp, pgtable_t pte_page) { - *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); + *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT); } -#define pmd_pgtable(pmd) pmd_page(pmd) +#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) #endif extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); +void pte_frag_destroy(void *pte_frag); +pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); +void pte_fragment_free(unsigned long *table, int kernel); static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - free_page((unsigned long)pte); + pte_fragment_free((unsigned long *)pte, 1); } static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) { - pgtable_page_dtor(ptepage); - __free_page(ptepage); + pte_fragment_free((unsigned long *)ptepage, 0); } static inline void pgtable_free(void *table, unsigned index_size) { if (!index_size) { - pgtable_page_dtor(virt_to_page(table)); - free_page((unsigned long)table); + pte_fragment_free((unsigned long *)table, 0); } else { BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); kmem_cache_free(PGT_CACHE(index_size), table); @@ -140,6 +137,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { tlb_flush_pgtable(tlb, address); - pgtable_free_tlb(tlb, page_address(table), 0); + pgtable_free_tlb(tlb, table, 0); } #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 3ffb0ff5a038..bed433358260 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -232,7 +232,13 @@ static inline unsigned long pte_update(pte_t *p, : "cc" ); #else /* PTE_ATOMIC_UPDATES */ unsigned long old = pte_val(*p); - *p = __pte((old & ~clr) | set); + unsigned long new = (old & ~clr) | set; + +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) + p->pte = p->pte1 = p->pte2 = p->pte3 = new; +#else + *p = __pte(new); +#endif #endif /* !PTE_ATOMIC_UPDATES */ #ifdef CONFIG_44x @@ -333,12 +339,12 @@ static inline int pte_young(pte_t pte) */ #ifndef CONFIG_BOOKE #define pmd_page_vaddr(pmd) \ - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) #define pmd_page(pmd) \ pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) #else #define pmd_page_vaddr(pmd) \ - ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) + ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) #define pmd_page(pmd) \ pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) #endif @@ -357,7 +363,8 @@ static inline int pte_young(pte_t pte) (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \ pte_index(addr)) #define pte_offset_map(dir, addr) \ - ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) + ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \ + (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr)) #define pte_unmap(pte) kunmap_atomic(pte) /* diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 661f4599f2fc..12c6811e344b 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -33,7 +33,7 @@ * is cleared in the TLB miss handler before the TLB entry is loaded. 
* - All other bits of the PTE are loaded into TLBLO without * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for - * software PTE bits. We actually use use bits 21, 24, 25, and + * software PTE bits. We actually use bits 21, 24, 25, and * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and * PRESENT. */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 6bfe041ef59d..c9e4b2d90f65 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -65,9 +65,6 @@ #define _PTE_NONE_MASK 0 -/* Until my rework is finished, 8xx still needs atomic PTE updates */ -#define PTE_ATOMIC_UPDATES 1 - #ifdef CONFIG_PPC_16K_PAGES #define _PAGE_PSIZE _PAGE_SPS #else diff --git a/arch/powerpc/include/asm/nohash/64/mmu.h b/arch/powerpc/include/asm/nohash/64/mmu.h new file mode 100644 index 000000000000..e6585480dfc4 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/mmu.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_MMU_H_ +#define _ASM_POWERPC_NOHASH_64_MMU_H_ + +/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ +#include <asm/nohash/mmu-book3e.h> + +#ifndef __ASSEMBLY__ +typedef struct page *pgtable_t; +#endif + +#endif /* _ASM_POWERPC_NOHASH_64_MMU_H_ */ diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index e2d62d033708..e95eb499a174 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -36,10 +36,7 @@ extern struct vmemmap_backing *vmemmap_list; #define MAX_PGTABLE_INDEX_SIZE 0xf extern struct kmem_cache *pgtable_cache[]; -#define PGT_CACHE(shift) ({ \ - BUG_ON(!(shift)); \ - pgtable_cache[(shift) - 1]; \ - }) +#define PGT_CACHE(shift) pgtable_cache[shift] static inline pgd_t *pgd_alloc(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h index e20072972e35..e20072972e35 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h new file mode 100644 index 000000000000..a037cb1efb57 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_MMU_H_ +#define _ASM_POWERPC_NOHASH_MMU_H_ + +#ifdef CONFIG_PPC64 +#include <asm/nohash/64/mmu.h> +#else +#include <asm/nohash/32/mmu.h> +#endif + +#endif /* _ASM_POWERPC_NOHASH_MMU_H_ */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 70ff23974b59..1ca1c1864b32 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -209,7 +209,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, /* Anything else just stores the PTE normally. That covers all 64-bit * cases, and 32-bit non-hash with 32-bit PTEs. 
*/ +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) + ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte); +#else *ptep = pte; +#endif /* * With hardware tablewalk, a sync is needed to ensure that diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index ff3866473afe..a55b01c90bb1 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -347,6 +347,7 @@ extern int opal_async_comp_init(void); extern int opal_sensor_init(void); extern int opal_hmi_handler_init(void); extern int opal_event_init(void); +int opal_power_control_init(void); extern int opal_machine_check(struct pt_regs *regs); extern bool opal_mce_check_early_recovery(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index f6a1265face2..5c5ea2413413 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -289,7 +289,7 @@ static inline bool pfn_valid(unsigned long pfn) * page tables at arbitrary addresses, this breaks and will have to change. */ #ifdef CONFIG_PPC64 -#define PD_HUGE 0x8000000000000000 +#define PD_HUGE 0x8000000000000000UL #else #define PD_HUGE 0x80000000 #endif @@ -335,23 +335,11 @@ void arch_free_page(struct page *page, int order); #endif struct vm_area_struct; -#ifdef CONFIG_PPC_BOOK3S_64 -/* - * For BOOK3s 64 with 4k and 64K linux page size - * we want to use pointers, because the page table - * actually store pfn - */ -typedef pte_t *pgtable_t; -#else -#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64) -typedef pte_t *pgtable_t; -#else -typedef struct page *pgtable_t; -#endif -#endif #include <asm-generic/memory_model.h> #endif /* __ASSEMBLY__ */ #include <asm/slice.h> +#define ARCH_ZONE_DMA_BITS 31 + #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index 5c378e9b78c8..683dfbc67ca8 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -22,7 +22,8 @@ #define PTE_FLAGS_OFFSET 0 #endif -#ifdef CONFIG_PPC_256K_PAGES +#if defined(CONFIG_PPC_256K_PAGES) || \ + (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */ #else #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 94d449031b18..aee4fcc24990 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -129,6 +129,7 @@ struct pci_controller { #endif /* CONFIG_PPC64 */ void *private_data; + struct npu *npu; }; /* These are used for config access before all the PCI probing diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 2af9ded80540..0c72f1897063 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -129,5 +129,9 @@ extern void pcibios_scan_phb(struct pci_controller *hose); extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev); extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index); +extern int pnv_npu2_init(struct pci_controller *hose); +extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, + unsigned long msr); +extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev); #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 16a49819da9a..35926cd6cd0b 100644 --- 
a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -39,4 +39,7 @@ (regs)->gpr[1] = current_stack_pointer(); \ asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \ } while (0) + +/* To support perf_regs sier update */ +extern bool is_sier_available(void); #endif diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 67a8a9585d50..e60aeb46d6a0 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -41,6 +41,8 @@ struct power_pmu { void (*get_mem_data_src)(union perf_mem_data_src *dsrc, u32 flags, struct pt_regs *regs); void (*get_mem_weight)(u64 *weight); + unsigned long group_constraint_mask; + unsigned long group_constraint_val; u64 (*bhrb_filter_map)(u64 branch_sample_type); void (*config_bhrb)(u64 pmu_bhrb_filter); void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index eccb30b38b47..3b0edf041b2e 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -3,7 +3,11 @@ #define _ASM_POWERPC_PGTABLE_TYPES_H /* PTE level */ +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) +typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t; +#else typedef struct { pte_basic_t pte; } pte_t; +#endif #define __pte(x) ((pte_t) { (x) }) static inline pte_basic_t pte_val(pte_t x) { diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 9679b7519a35..dad1d27e196d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[]; extern pgd_t swapper_pg_dir[]; -void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn); int dma_pfn_limit_to_zone(u64 pfn_limit); extern void paging_init(void); @@ -101,7 +100,7 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, /* can we use this in kvm */ unsigned long vmalloc_to_phys(void *vmalloc_addr); -void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); +void pgtable_cache_add(unsigned int shift); void pgtable_cache_init(void); #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32) @@ -110,6 +109,35 @@ void mark_initmem_nx(void); static inline void mark_initmem_nx(void) { } #endif +/* + * When used, PTE_FRAG_NR is defined in subarch pgtable.h + * so we are sure it is included when arriving here. 
+ */ +#ifdef PTE_FRAG_NR +static inline void *pte_frag_get(mm_context_t *ctx) +{ + return ctx->pte_frag; +} + +static inline void pte_frag_set(mm_context_t *ctx, void *p) +{ + ctx->pte_frag = p; +} +#else +#define PTE_FRAG_NR 1 +#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT +#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) + +static inline void *pte_frag_get(mm_context_t *ctx) +{ + return NULL; +} + +static inline void pte_frag_set(mm_context_t *ctx, void *p) +{ +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index a6e9e314c707..19a8834e0398 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -257,6 +257,7 @@ #define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe #define PPC_INST_MFVSRD 0x7c000066 #define PPC_INST_MTVSRD 0x7c000166 +#define PPC_INST_SC 0x44000002 #define PPC_INST_SLBFEE 0x7c0007a7 #define PPC_INST_SLBIA 0x7c0003e4 @@ -342,6 +343,8 @@ #define PPC_INST_SLW 0x7c000030 #define PPC_INST_SLD 0x7c000036 #define PPC_INST_SRW 0x7c000430 +#define PPC_INST_SRAW 0x7c000630 +#define PPC_INST_SRAWI 0x7c000670 #define PPC_INST_SRD 0x7c000436 #define PPC_INST_SRAD 0x7c000634 #define PPC_INST_SRADI 0x7c000674 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index b5d023680801..e0637730a8e7 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -480,26 +480,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) ori rd,rd,((KERNELBASE>>48)&0xFFFF);\ rotldi rd,rd,48 #else -/* - * On APUS (Amiga PowerPC cpu upgrade board), we don't know the - * physical base address of RAM at compile time. - */ #define toreal(rd) tophys(rd,rd) #define fromreal(rd) tovirt(rd,rd) -#define tophys(rd,rs) \ -0: addis rd,rs,-PAGE_OFFSET@h; \ - .section ".vtop_fixup","aw"; \ - .align 1; \ - .long 0b; \ - .previous - -#define tovirt(rd,rs) \ -0: addis rd,rs,PAGE_OFFSET@h; \ - .section ".ptov_fixup","aw"; \ - .align 1; \ - .long 0b; \ - .previous +#define tophys(rd, rs) addis rd, rs, -PAGE_OFFSET@h +#define tovirt(rd, rs) addis rd, rs, PAGE_OFFSET@h #endif #ifdef CONFIG_PPC_BOOK3S_64 @@ -821,4 +806,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) stringify_in_c(.long (_target) - . 
;) \ stringify_in_c(.previous) +#ifdef CONFIG_PPC_FSL_BOOK3E +#define BTB_FLUSH(reg) \ + lis reg,BUCSR_INIT@h; \ + ori reg,reg,BUCSR_INIT@l; \ + mtspr SPRN_BUCSR,reg; \ + isync; +#else +#define BTB_FLUSH(reg) +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index de52c3166ba4..1c98ef1f2d5b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -582,7 +582,7 @@ #define HID0_POWER9_RADIX __MASK(63 - 8) #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ @@ -769,6 +769,8 @@ #define SRR1_PROGTRAP 0x00020000 /* Trap */ #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ +#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */ + #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ #define HSRR1_DENORM 0x00100000 /* Denorm exception */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 1fffbba8d6a5..65676e2325b8 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -67,6 +67,13 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; #endif +#ifdef CONFIG_PPC_FSL_BOOK3E +void setup_spectre_v2(void); +#else +static inline void setup_spectre_v2(void) {}; +#endif +void do_btb_flush_fixups(void); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index d89beaba26ff..8b957aabb826 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -213,30 +213,18 @@ * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow * (i.e. carry out) is not stored anywhere, and is lost. */ -#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p (bh) && (bh) == 0) \ - __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ - else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ - __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ + __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ else \ - __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "r" ((USItype)(bh)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ + __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \ + : "=r" (sh), "=&r" (sl) \ + : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \ } while (0) /* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to @@ -248,44 +236,24 @@ * and LOW_DIFFERENCE. 
Overflow (i.e. carry out) is not stored anywhere, * and is lost. */ -#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p (ah) && (ah) == 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ - else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \ + __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ else if (__builtin_constant_p (bh) && (bh) == 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ - else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ else \ - __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \ + : "=r" (sh), "=&r" (sl) \ + : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \ } while (0) /* asm fragments for mul and div */ @@ -294,13 +262,10 @@ * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype * word product in HIGH_PROD and LOW_PROD. */ -#define umul_ppmm(ph, pl, m0, m1) \ +#define umul_ppmm(ph, pl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ - __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype)(ph)) \ - : "%r" (__m0), \ - "r" (__m1)); \ + __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ (pl) = __m0 * __m1; \ } while (0) @@ -312,9 +277,10 @@ * significant bit of DENOMINATOR must be 1, then the pre-processor symbol * UDIV_NEEDS_NORMALIZATION is defined to 1. */ -#define udiv_qrnnd(q, r, n1, n0, d) \ +#define udiv_qrnnd(q, r, n1, n0, d) \ do { \ - UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ + UWtype __d1, __d0, __q1, __q0; \ + UWtype __r1, __r0, __m; \ __d1 = __ll_highpart (d); \ __d0 = __ll_lowpart (d); \ \ @@ -325,7 +291,7 @@ if (__r1 < __m) \ { \ __q1--, __r1 += (d); \ - if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */ \ + if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */\ if (__r1 < __m) \ __q1--, __r1 += (d); \ } \ diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h index a595461c9cb0..44816cbc4198 100644 --- a/arch/powerpc/include/asm/slice.h +++ b/arch/powerpc/include/asm/slice.h @@ -10,6 +10,10 @@ #include <asm/nohash/32/slice.h> #endif +#ifndef __ASSEMBLY__ + +struct mm_struct; + #ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_HUGETLB_PAGE @@ -18,10 +22,6 @@ #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -#ifndef __ASSEMBLY__ - -struct mm_struct; - unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long flags, unsigned int psize, int topdown); @@ -34,8 +34,12 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, void slice_init_new_context_exec(struct mm_struct *mm); void slice_setup_new_exec(void); -#endif /* __ASSEMBLY__ */ +#else /* CONFIG_PPC_MM_SLICES */ + +static inline void slice_init_new_context_exec(struct mm_struct *mm) {} #endif /* CONFIG_PPC_MM_SLICES */ +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index ab9f3f0a8637..1a0e7a8b1c81 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -18,9 +18,8 @@ #include <linux/thread_info.h> /* ftrace syscalls requires exporting the sys_call_table */ -#ifdef CONFIG_FTRACE_SYSCALLS extern const unsigned long sys_call_table[]; -#endif /* CONFIG_FTRACE_SYSCALLS */ +extern const unsigned long compat_sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h deleted file mode 100644 index 01b5171ea189..000000000000 --- a/arch/powerpc/include/asm/systbl.h +++ /dev/null @@ -1,396 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * List of powerpc syscalls. 
For the meaning of the _SPU suffix see - * arch/powerpc/platforms/cell/spu_callbacks.c - */ - -SYSCALL(restart_syscall) -SYSCALL(exit) -PPC_SYS(fork) -SYSCALL_SPU(read) -SYSCALL_SPU(write) -COMPAT_SYS_SPU(open) -SYSCALL_SPU(close) -SYSCALL_SPU(waitpid) -SYSCALL_SPU(creat) -SYSCALL_SPU(link) -SYSCALL_SPU(unlink) -COMPAT_SYS(execve) -SYSCALL_SPU(chdir) -COMPAT_SYS_SPU(time) -SYSCALL_SPU(mknod) -SYSCALL_SPU(chmod) -SYSCALL_SPU(lchown) -SYSCALL(ni_syscall) -OLDSYS(stat) -COMPAT_SYS_SPU(lseek) -SYSCALL_SPU(getpid) -COMPAT_SYS(mount) -SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount) -SYSCALL_SPU(setuid) -SYSCALL_SPU(getuid) -COMPAT_SYS_SPU(stime) -COMPAT_SYS(ptrace) -SYSCALL_SPU(alarm) -OLDSYS(fstat) -SYSCALL(pause) -COMPAT_SYS(utime) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL_SPU(access) -SYSCALL_SPU(nice) -SYSCALL(ni_syscall) -SYSCALL_SPU(sync) -SYSCALL_SPU(kill) -SYSCALL_SPU(rename) -SYSCALL_SPU(mkdir) -SYSCALL_SPU(rmdir) -SYSCALL_SPU(dup) -SYSCALL_SPU(pipe) -COMPAT_SYS_SPU(times) -SYSCALL(ni_syscall) -SYSCALL_SPU(brk) -SYSCALL_SPU(setgid) -SYSCALL_SPU(getgid) -SYSCALL(signal) -SYSCALL_SPU(geteuid) -SYSCALL_SPU(getegid) -SYSCALL(acct) -SYSCALL(umount) -SYSCALL(ni_syscall) -COMPAT_SYS_SPU(ioctl) -COMPAT_SYS_SPU(fcntl) -SYSCALL(ni_syscall) -SYSCALL_SPU(setpgid) -SYSCALL(ni_syscall) -SYSX(sys_ni_syscall,sys_olduname,sys_olduname) -SYSCALL_SPU(umask) -SYSCALL_SPU(chroot) -COMPAT_SYS(ustat) -SYSCALL_SPU(dup2) -SYSCALL_SPU(getppid) -SYSCALL_SPU(getpgrp) -SYSCALL_SPU(setsid) -SYS32ONLY(sigaction) -SYSCALL_SPU(sgetmask) -SYSCALL_SPU(ssetmask) -SYSCALL_SPU(setreuid) -SYSCALL_SPU(setregid) -#define compat_sys_sigsuspend sys_sigsuspend -SYS32ONLY(sigsuspend) -SYSX(sys_ni_syscall,compat_sys_sigpending,sys_sigpending) -SYSCALL_SPU(sethostname) -COMPAT_SYS_SPU(setrlimit) -SYSX(sys_ni_syscall,compat_sys_old_getrlimit,sys_old_getrlimit) -COMPAT_SYS_SPU(getrusage) -COMPAT_SYS_SPU(gettimeofday) -COMPAT_SYS_SPU(settimeofday) -SYSCALL_SPU(getgroups) -SYSCALL_SPU(setgroups) -SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select) -SYSCALL_SPU(symlink) -OLDSYS(lstat) -SYSCALL_SPU(readlink) -SYSCALL(uselib) -SYSCALL(swapon) -SYSCALL(reboot) -SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir) -SYSCALL_SPU(mmap) -SYSCALL_SPU(munmap) -COMPAT_SYS_SPU(truncate) -COMPAT_SYS_SPU(ftruncate) -SYSCALL_SPU(fchmod) -SYSCALL_SPU(fchown) -SYSCALL_SPU(getpriority) -SYSCALL_SPU(setpriority) -SYSCALL(ni_syscall) -COMPAT_SYS(statfs) -COMPAT_SYS(fstatfs) -SYSCALL(ni_syscall) -COMPAT_SYS_SPU(socketcall) -SYSCALL_SPU(syslog) -COMPAT_SYS_SPU(setitimer) -COMPAT_SYS_SPU(getitimer) -COMPAT_SYS_SPU(newstat) -COMPAT_SYS_SPU(newlstat) -COMPAT_SYS_SPU(newfstat) -SYSX(sys_ni_syscall,sys_uname,sys_uname) -SYSCALL(ni_syscall) -SYSCALL_SPU(vhangup) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -COMPAT_SYS_SPU(wait4) -SYSCALL(swapoff) -COMPAT_SYS_SPU(sysinfo) -COMPAT_SYS(ipc) -SYSCALL_SPU(fsync) -SYS32ONLY(sigreturn) -PPC_SYS(clone) -SYSCALL_SPU(setdomainname) -SYSCALL_SPU(newuname) -SYSCALL(ni_syscall) -COMPAT_SYS_SPU(adjtimex) -SYSCALL_SPU(mprotect) -SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask) -SYSCALL(ni_syscall) -SYSCALL(init_module) -SYSCALL(delete_module) -SYSCALL(ni_syscall) -SYSCALL(quotactl) -SYSCALL_SPU(getpgid) -SYSCALL_SPU(fchdir) -SYSCALL_SPU(bdflush) -SYSCALL_SPU(sysfs) -SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality) -SYSCALL(ni_syscall) -SYSCALL_SPU(setfsuid) -SYSCALL_SPU(setfsgid) -SYSCALL_SPU(llseek) -COMPAT_SYS_SPU(getdents) -COMPAT_SPU_NEW(select) -SYSCALL_SPU(flock) -SYSCALL_SPU(msync) 
-COMPAT_SYS_SPU(readv) -COMPAT_SYS_SPU(writev) -SYSCALL_SPU(getsid) -SYSCALL_SPU(fdatasync) -COMPAT_SYS(sysctl) -SYSCALL_SPU(mlock) -SYSCALL_SPU(munlock) -SYSCALL_SPU(mlockall) -SYSCALL_SPU(munlockall) -SYSCALL_SPU(sched_setparam) -SYSCALL_SPU(sched_getparam) -SYSCALL_SPU(sched_setscheduler) -SYSCALL_SPU(sched_getscheduler) -SYSCALL_SPU(sched_yield) -SYSCALL_SPU(sched_get_priority_max) -SYSCALL_SPU(sched_get_priority_min) -COMPAT_SYS_SPU(sched_rr_get_interval) -COMPAT_SYS_SPU(nanosleep) -SYSCALL_SPU(mremap) -SYSCALL_SPU(setresuid) -SYSCALL_SPU(getresuid) -SYSCALL(ni_syscall) -SYSCALL_SPU(poll) -SYSCALL(ni_syscall) -SYSCALL_SPU(setresgid) -SYSCALL_SPU(getresgid) -SYSCALL_SPU(prctl) -COMPAT_SYS(rt_sigreturn) -COMPAT_SYS(rt_sigaction) -COMPAT_SYS(rt_sigprocmask) -COMPAT_SYS(rt_sigpending) -COMPAT_SYS(rt_sigtimedwait) -COMPAT_SYS(rt_sigqueueinfo) -COMPAT_SYS(rt_sigsuspend) -COMPAT_SYS_SPU(pread64) -COMPAT_SYS_SPU(pwrite64) -SYSCALL_SPU(chown) -SYSCALL_SPU(getcwd) -SYSCALL_SPU(capget) -SYSCALL_SPU(capset) -COMPAT_SYS(sigaltstack) -SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -PPC_SYS(vfork) -COMPAT_SYS_SPU(getrlimit) -COMPAT_SYS_SPU(readahead) -SYS32ONLY(mmap2) -SYS32ONLY(truncate64) -SYS32ONLY(ftruncate64) -SYSX(sys_ni_syscall,sys_stat64,sys_stat64) -SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64) -SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64) -SYSCALL(pciconfig_read) -SYSCALL(pciconfig_write) -SYSCALL(pciconfig_iobase) -SYSCALL(ni_syscall) -SYSCALL_SPU(getdents64) -SYSCALL_SPU(pivot_root) -SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64) -SYSCALL_SPU(madvise) -SYSCALL_SPU(mincore) -SYSCALL_SPU(gettid) -SYSCALL_SPU(tkill) -SYSCALL_SPU(setxattr) -SYSCALL_SPU(lsetxattr) -SYSCALL_SPU(fsetxattr) -SYSCALL_SPU(getxattr) -SYSCALL_SPU(lgetxattr) -SYSCALL_SPU(fgetxattr) -SYSCALL_SPU(listxattr) -SYSCALL_SPU(llistxattr) -SYSCALL_SPU(flistxattr) -SYSCALL_SPU(removexattr) -SYSCALL_SPU(lremovexattr) -SYSCALL_SPU(fremovexattr) -COMPAT_SYS_SPU(futex) -COMPAT_SYS_SPU(sched_setaffinity) -COMPAT_SYS_SPU(sched_getaffinity) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYS32ONLY(sendfile64) -COMPAT_SYS_SPU(io_setup) -SYSCALL_SPU(io_destroy) -COMPAT_SYS_SPU(io_getevents) -COMPAT_SYS_SPU(io_submit) -SYSCALL_SPU(io_cancel) -SYSCALL(set_tid_address) -SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64) -SYSCALL(exit_group) -COMPAT_SYS(lookup_dcookie) -SYSCALL_SPU(epoll_create) -SYSCALL_SPU(epoll_ctl) -SYSCALL_SPU(epoll_wait) -SYSCALL_SPU(remap_file_pages) -COMPAT_SYS_SPU(timer_create) -COMPAT_SYS_SPU(timer_settime) -COMPAT_SYS_SPU(timer_gettime) -SYSCALL_SPU(timer_getoverrun) -SYSCALL_SPU(timer_delete) -COMPAT_SYS_SPU(clock_settime) -COMPAT_SYS_SPU(clock_gettime) -COMPAT_SYS_SPU(clock_getres) -COMPAT_SYS_SPU(clock_nanosleep) -SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext) -SYSCALL_SPU(tgkill) -COMPAT_SYS_SPU(utimes) -COMPAT_SYS_SPU(statfs64) -COMPAT_SYS_SPU(fstatfs64) -SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64) -SYSCALL_SPU(rtas) -OLDSYS(debug_setcontext) -SYSCALL(ni_syscall) -COMPAT_SYS(migrate_pages) -COMPAT_SYS(mbind) -COMPAT_SYS(get_mempolicy) -COMPAT_SYS(set_mempolicy) -COMPAT_SYS(mq_open) -SYSCALL(mq_unlink) -COMPAT_SYS(mq_timedsend) -COMPAT_SYS(mq_timedreceive) -COMPAT_SYS(mq_notify) -COMPAT_SYS(mq_getsetattr) -COMPAT_SYS(kexec_load) -SYSCALL(add_key) -SYSCALL(request_key) -COMPAT_SYS(keyctl) -COMPAT_SYS(waitid) -SYSCALL(ioprio_set) -SYSCALL(ioprio_get) -SYSCALL(inotify_init) -SYSCALL(inotify_add_watch) -SYSCALL(inotify_rm_watch) 
-SYSCALL(spu_run) -SYSCALL(spu_create) -COMPAT_SYS(pselect6) -COMPAT_SYS(ppoll) -SYSCALL_SPU(unshare) -SYSCALL_SPU(splice) -SYSCALL_SPU(tee) -COMPAT_SYS_SPU(vmsplice) -COMPAT_SYS_SPU(openat) -SYSCALL_SPU(mkdirat) -SYSCALL_SPU(mknodat) -SYSCALL_SPU(fchownat) -COMPAT_SYS_SPU(futimesat) -SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64) -SYSCALL_SPU(unlinkat) -SYSCALL_SPU(renameat) -SYSCALL_SPU(linkat) -SYSCALL_SPU(symlinkat) -SYSCALL_SPU(readlinkat) -SYSCALL_SPU(fchmodat) -SYSCALL_SPU(faccessat) -COMPAT_SYS_SPU(get_robust_list) -COMPAT_SYS_SPU(set_robust_list) -COMPAT_SYS_SPU(move_pages) -SYSCALL_SPU(getcpu) -COMPAT_SYS(epoll_pwait) -COMPAT_SYS_SPU(utimensat) -COMPAT_SYS_SPU(signalfd) -SYSCALL_SPU(timerfd_create) -SYSCALL_SPU(eventfd) -COMPAT_SYS_SPU(sync_file_range2) -COMPAT_SYS(fallocate) -SYSCALL(subpage_prot) -COMPAT_SYS_SPU(timerfd_settime) -COMPAT_SYS_SPU(timerfd_gettime) -COMPAT_SYS_SPU(signalfd4) -SYSCALL_SPU(eventfd2) -SYSCALL_SPU(epoll_create1) -SYSCALL_SPU(dup3) -SYSCALL_SPU(pipe2) -SYSCALL(inotify_init1) -SYSCALL_SPU(perf_event_open) -COMPAT_SYS_SPU(preadv) -COMPAT_SYS_SPU(pwritev) -COMPAT_SYS(rt_tgsigqueueinfo) -SYSCALL(fanotify_init) -COMPAT_SYS(fanotify_mark) -SYSCALL_SPU(prlimit64) -SYSCALL_SPU(socket) -SYSCALL_SPU(bind) -SYSCALL_SPU(connect) -SYSCALL_SPU(listen) -SYSCALL_SPU(accept) -SYSCALL_SPU(getsockname) -SYSCALL_SPU(getpeername) -SYSCALL_SPU(socketpair) -SYSCALL_SPU(send) -SYSCALL_SPU(sendto) -COMPAT_SYS_SPU(recv) -COMPAT_SYS_SPU(recvfrom) -SYSCALL_SPU(shutdown) -COMPAT_SYS_SPU(setsockopt) -COMPAT_SYS_SPU(getsockopt) -COMPAT_SYS_SPU(sendmsg) -COMPAT_SYS_SPU(recvmsg) -COMPAT_SYS_SPU(recvmmsg) -SYSCALL_SPU(accept4) -SYSCALL_SPU(name_to_handle_at) -COMPAT_SYS_SPU(open_by_handle_at) -COMPAT_SYS_SPU(clock_adjtime) -SYSCALL_SPU(syncfs) -COMPAT_SYS_SPU(sendmmsg) -SYSCALL_SPU(setns) -COMPAT_SYS(process_vm_readv) -COMPAT_SYS(process_vm_writev) -SYSCALL(finit_module) -SYSCALL(kcmp) /* sys_kcmp */ -SYSCALL_SPU(sched_setattr) -SYSCALL_SPU(sched_getattr) -SYSCALL_SPU(renameat2) -SYSCALL_SPU(seccomp) -SYSCALL_SPU(getrandom) -SYSCALL_SPU(memfd_create) -SYSCALL_SPU(bpf) -COMPAT_SYS(execveat) -PPC64ONLY(switch_endian) -SYSCALL_SPU(userfaultfd) -SYSCALL_SPU(membarrier) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(ni_syscall) -SYSCALL(mlock2) -SYSCALL(copy_file_range) -COMPAT_SYS_SPU(preadv2) -COMPAT_SYS_SPU(pwritev2) -SYSCALL(kexec_file_load) -SYSCALL(statx) -SYSCALL(pkey_alloc) -SYSCALL(pkey_free) -SYSCALL(pkey_mprotect) -SYSCALL(rseq) -COMPAT_SYS(io_pgetevents) diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index b80d492ceb29..54bf7e68a7e1 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -43,7 +43,7 @@ struct div_result { /* Accessor functions for the timebase (RTC on 601) registers. 
*/ /* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 #define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC)) #else #define __USE_RTC() 0 diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index f0e571b2dc7c..e24c67d5ba75 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -40,7 +40,7 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) { -#ifdef CONFIG_PPC_STD_MMU_32 +#ifdef CONFIG_PPC_BOOK3S_32 if (pte_val(*ptep) & _PAGE_HASHPTE) flush_hash_entry(tlb->mm, ptep, address); #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 15bea9a0f260..ebc0b916dcf9 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #endif #define access_ok(type, addr, size) \ - (__chk_user_ptr(addr), \ + (__chk_user_ptr(addr), (void)(type), \ __access_ok((__force unsigned long)(addr), (size), get_fs())) /* diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index b0de85b477e1..a3c35e6d6ffb 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -11,8 +11,7 @@ #include <uapi/asm/unistd.h> - -#define NR_syscalls 389 +#define NR_syscalls __NR_syscalls #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index 3712152206f3..8ab8ba1b71bc 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -1,6 +1,8 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h +generated-y += unistd_64.h generic-y += param.h generic-y += poll.h generic-y += resource.h diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index 9e52c86ccbd3..ff91192407d1 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h @@ -46,6 +46,7 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_TRAP, PERF_REG_POWERPC_DAR, PERF_REG_POWERPC_DSISR, + PERF_REG_POWERPC_SIER, PERF_REG_POWERPC_MAX, }; #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 985534d0b448..5f84e3dc98d0 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -10,395 +10,10 @@ #ifndef _UAPI_ASM_POWERPC_UNISTD_H_ #define _UAPI_ASM_POWERPC_UNISTD_H_ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 
35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86 113 -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_query_module 166 -#define __NR_poll 167 -#define __NR_nfsservctl 168 -#define 
__NR_setresgid 169 -#define __NR_getresgid 170 -#define __NR_prctl 171 -#define __NR_rt_sigreturn 172 -#define __NR_rt_sigaction 173 -#define __NR_rt_sigprocmask 174 -#define __NR_rt_sigpending 175 -#define __NR_rt_sigtimedwait 176 -#define __NR_rt_sigqueueinfo 177 -#define __NR_rt_sigsuspend 178 -#define __NR_pread64 179 -#define __NR_pwrite64 180 -#define __NR_chown 181 -#define __NR_getcwd 182 -#define __NR_capget 183 -#define __NR_capset 184 -#define __NR_sigaltstack 185 -#define __NR_sendfile 186 -#define __NR_getpmsg 187 /* some people actually want streams */ -#define __NR_putpmsg 188 /* some people actually want streams */ -#define __NR_vfork 189 -#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ -#define __NR_readahead 191 -#ifndef __powerpc64__ /* these are 32-bit only */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#endif -#define __NR_pciconfig_read 198 -#define __NR_pciconfig_write 199 -#define __NR_pciconfig_iobase 200 -#define __NR_multiplexer 201 -#define __NR_getdents64 202 -#define __NR_pivot_root 203 -#ifndef __powerpc64__ -#define __NR_fcntl64 204 -#endif -#define __NR_madvise 205 -#define __NR_mincore 206 -#define __NR_gettid 207 -#define __NR_tkill 208 -#define __NR_setxattr 209 -#define __NR_lsetxattr 210 -#define __NR_fsetxattr 211 -#define __NR_getxattr 212 -#define __NR_lgetxattr 213 -#define __NR_fgetxattr 214 -#define __NR_listxattr 215 -#define __NR_llistxattr 216 -#define __NR_flistxattr 217 -#define __NR_removexattr 218 -#define __NR_lremovexattr 219 -#define __NR_fremovexattr 220 -#define __NR_futex 221 -#define __NR_sched_setaffinity 222 -#define __NR_sched_getaffinity 223 -/* 224 currently unused */ -#define __NR_tuxcall 225 #ifndef __powerpc64__ -#define __NR_sendfile64 226 -#endif -#define __NR_io_setup 227 -#define __NR_io_destroy 228 -#define __NR_io_getevents 229 -#define __NR_io_submit 230 -#define __NR_io_cancel 231 -#define __NR_set_tid_address 232 -#define __NR_fadvise64 233 -#define __NR_exit_group 234 -#define __NR_lookup_dcookie 235 -#define __NR_epoll_create 236 -#define __NR_epoll_ctl 237 -#define __NR_epoll_wait 238 -#define __NR_remap_file_pages 239 -#define __NR_timer_create 240 -#define __NR_timer_settime 241 -#define __NR_timer_gettime 242 -#define __NR_timer_getoverrun 243 -#define __NR_timer_delete 244 -#define __NR_clock_settime 245 -#define __NR_clock_gettime 246 -#define __NR_clock_getres 247 -#define __NR_clock_nanosleep 248 -#define __NR_swapcontext 249 -#define __NR_tgkill 250 -#define __NR_utimes 251 -#define __NR_statfs64 252 -#define __NR_fstatfs64 253 -#ifndef __powerpc64__ -#define __NR_fadvise64_64 254 -#endif -#define __NR_rtas 255 -#define __NR_sys_debug_setcontext 256 -/* Number 257 is reserved for vserver */ -#define __NR_migrate_pages 258 -#define __NR_mbind 259 -#define __NR_get_mempolicy 260 -#define __NR_set_mempolicy 261 -#define __NR_mq_open 262 -#define __NR_mq_unlink 263 -#define __NR_mq_timedsend 264 -#define __NR_mq_timedreceive 265 -#define __NR_mq_notify 266 -#define __NR_mq_getsetattr 267 -#define __NR_kexec_load 268 -#define __NR_add_key 269 -#define __NR_request_key 270 -#define __NR_keyctl 271 -#define __NR_waitid 272 -#define __NR_ioprio_set 273 -#define __NR_ioprio_get 274 -#define __NR_inotify_init 275 -#define __NR_inotify_add_watch 276 -#define __NR_inotify_rm_watch 277 -#define __NR_spu_run 278 -#define __NR_spu_create 279 -#define __NR_pselect6 280 -#define __NR_ppoll 281 
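With the numbers generated at build time (note the generated-y += unistd_32.h / unistd_64.h Kbuild hunk above), the UAPI header shrinks to an include switch, and NR_syscalls now follows the generated __NR_syscalls automatically instead of a hand-counted 389. The converted header has essentially this shape (condensed from the hunks in this diff):

#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
#define _UAPI_ASM_POWERPC_UNISTD_H_

#ifndef __powerpc64__
#include <asm/unistd_32.h>	/* generated; defines __NR_* and __NR_syscalls */
#else
#include <asm/unistd_64.h>	/* generated; defines __NR_* and __NR_syscalls */
#endif

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */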
-#define __NR_unshare 282 -#define __NR_splice 283 -#define __NR_tee 284 -#define __NR_vmsplice 285 -#define __NR_openat 286 -#define __NR_mkdirat 287 -#define __NR_mknodat 288 -#define __NR_fchownat 289 -#define __NR_futimesat 290 -#ifdef __powerpc64__ -#define __NR_newfstatat 291 +#include <asm/unistd_32.h> #else -#define __NR_fstatat64 291 +#include <asm/unistd_64.h> #endif -#define __NR_unlinkat 292 -#define __NR_renameat 293 -#define __NR_linkat 294 -#define __NR_symlinkat 295 -#define __NR_readlinkat 296 -#define __NR_fchmodat 297 -#define __NR_faccessat 298 -#define __NR_get_robust_list 299 -#define __NR_set_robust_list 300 -#define __NR_move_pages 301 -#define __NR_getcpu 302 -#define __NR_epoll_pwait 303 -#define __NR_utimensat 304 -#define __NR_signalfd 305 -#define __NR_timerfd_create 306 -#define __NR_eventfd 307 -#define __NR_sync_file_range2 308 -#define __NR_fallocate 309 -#define __NR_subpage_prot 310 -#define __NR_timerfd_settime 311 -#define __NR_timerfd_gettime 312 -#define __NR_signalfd4 313 -#define __NR_eventfd2 314 -#define __NR_epoll_create1 315 -#define __NR_dup3 316 -#define __NR_pipe2 317 -#define __NR_inotify_init1 318 -#define __NR_perf_event_open 319 -#define __NR_preadv 320 -#define __NR_pwritev 321 -#define __NR_rt_tgsigqueueinfo 322 -#define __NR_fanotify_init 323 -#define __NR_fanotify_mark 324 -#define __NR_prlimit64 325 -#define __NR_socket 326 -#define __NR_bind 327 -#define __NR_connect 328 -#define __NR_listen 329 -#define __NR_accept 330 -#define __NR_getsockname 331 -#define __NR_getpeername 332 -#define __NR_socketpair 333 -#define __NR_send 334 -#define __NR_sendto 335 -#define __NR_recv 336 -#define __NR_recvfrom 337 -#define __NR_shutdown 338 -#define __NR_setsockopt 339 -#define __NR_getsockopt 340 -#define __NR_sendmsg 341 -#define __NR_recvmsg 342 -#define __NR_recvmmsg 343 -#define __NR_accept4 344 -#define __NR_name_to_handle_at 345 -#define __NR_open_by_handle_at 346 -#define __NR_clock_adjtime 347 -#define __NR_syncfs 348 -#define __NR_sendmmsg 349 -#define __NR_setns 350 -#define __NR_process_vm_readv 351 -#define __NR_process_vm_writev 352 -#define __NR_finit_module 353 -#define __NR_kcmp 354 -#define __NR_sched_setattr 355 -#define __NR_sched_getattr 356 -#define __NR_renameat2 357 -#define __NR_seccomp 358 -#define __NR_getrandom 359 -#define __NR_memfd_create 360 -#define __NR_bpf 361 -#define __NR_execveat 362 -#define __NR_switch_endian 363 -#define __NR_userfaultfd 364 -#define __NR_membarrier 365 -#define __NR_mlock2 378 -#define __NR_copy_file_range 379 -#define __NR_preadv2 380 -#define __NR_pwritev2 381 -#define __NR_kexec_file_load 382 -#define __NR_statx 383 -#define __NR_pkey_alloc 384 -#define __NR_pkey_free 385 -#define __NR_pkey_mprotect 386 -#define __NR_rseq 387 -#define __NR_io_pgetevents 388 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 53d4b8d5b54d..cb7f0bb9ee71 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -69,7 +69,7 @@ obj-$(CONFIG_FA_DUMP) += fadump.o ifdef CONFIG_PPC32 obj-$(CONFIG_E500) += idle_e500.o endif -obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o +obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o ifdef CONFIG_FSL_BOOKE @@ -160,16 +160,6 @@ extra-$(CONFIG_ALTIVEC) += vector.o extra-$(CONFIG_PPC64) += entry_64.o extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o -extra-y += systbl_chk.i 
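The Makefile lines being removed around here drop the old systbl_chk build-time consistency check, which the generated tables make redundant. Further below, the btext.c and cacheinfo.c hunks replace direct device_node->type accesses with of_node_is_type(), part of the wider effort to retire that cached pointer. For reference, the helper behaves roughly like this (paraphrased from the include/linux/of.h of this era; not part of this diff):

static inline bool of_node_is_type(const struct device_node *np,
				   const char *type)
{
	/* of_node_get_device_type() reads the "device_type" property */
	const char *match = of_node_get_device_type(np);

	return np && match && type && !strcmp(match, type);
}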
-$(obj)/systbl.o: systbl_chk - -quiet_cmd_systbl_chk = CALL $< - cmd_systbl_chk = $(CONFIG_SHELL) $< $(obj)/systbl_chk.i - -PHONY += systbl_chk -systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i - $(call cmd,systbl_chk) - ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE $(obj)/built-in.a: prom_init_check diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index b4241ed1456e..6dfceaa820e4 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -232,20 +232,12 @@ static int btext_initialize(struct device_node *np) int __init btext_find_display(int allow_nonstdout) { - const char *name; - struct device_node *np = NULL; + struct device_node *np = of_stdout; int rc = -ENODEV; - name = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (name != NULL) { - np = of_find_node_by_path(name); - if (np != NULL) { - if (strcmp(np->type, "display") != 0) { - printk("boot stdout isn't a display !\n"); - of_node_put(np); - np = NULL; - } - } + if (!of_node_is_type(np, "display")) { + printk("boot stdout isn't a display !\n"); + np = NULL; } if (np) rc = btext_initialize(np); diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index be57bd07596d..53102764fd2f 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -428,7 +428,7 @@ static void link_cache_lists(struct cache *smaller, struct cache *bigger) static void do_subsidiary_caches_debugcheck(struct cache *cache) { WARN_ON_ONCE(cache->level != 1); - WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu")); + WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu")); } static void do_subsidiary_caches(struct cache *cache) diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index fa3c2c91290c..8c069e96c478 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -326,7 +326,7 @@ _GLOBAL(__save_cpu_setup) lis r5,cpu_state_storage@h ori r5,r5,cpu_state_storage@l - /* Save HID0 (common to all CONFIG_6xx cpus) */ + /* Save HID0 (common to all CONFIG_PPC_BOOK3S_32 cpus) */ mfspr r3,SPRN_HID0 stw r3,CS_HID0(r5) diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 8d142e5d84cd..5fbc890d1094 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -17,7 +17,7 @@ #include <asm/processor.h> #include <asm/cputable.h> #include <asm/ppc_asm.h> -#include <asm/mmu-book3e.h> +#include <asm/nohash/mmu-book3e.h> #include <asm/asm-offsets.h> #include <asm/mpc85xx.h> diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2da01340c84c..1eab54bc6ee9 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1141,6 +1141,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_generic, .platform = "ppc603", }, +#ifdef CONFIG_PPC_83xx { /* e300c1 (a 603e core, plus some) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00830000, @@ -1151,7 +1152,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .platform = "ppc603", }, { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ @@ -1165,7 +1166,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = 
machine_check_83xx, .platform = "ppc603", }, { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ @@ -1179,7 +1180,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, @@ -1196,12 +1197,13 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, +#endif { /* default match, we assume split I/D cache & TB (non-601)... */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index f9fe2080ceb9..9c9bcaae2f75 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -6,7 +6,6 @@ * busses using the iommu infrastructure */ -#include <linux/export.h> #include <asm/iommu.h> /* @@ -106,11 +105,6 @@ static u64 dma_iommu_get_required_mask(struct device *dev) return mask; } -int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == IOMMU_MAPPING_ERROR; -} - struct dma_map_ops dma_iommu_ops = { .alloc = dma_iommu_alloc_coherent, .free = dma_iommu_free_coherent, @@ -121,6 +115,4 @@ struct dma_map_ops dma_iommu_ops = { .map_page = dma_iommu_map_page, .unmap_page = dma_iommu_unmap_page, .get_required_mask = dma_iommu_get_required_mask, - .mapping_error = dma_iommu_mapping_error, }; -EXPORT_SYMBOL(dma_iommu_ops); diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 5fc335f4d9cd..7d5fc9751622 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -50,16 +50,15 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = { .alloc = __dma_nommu_alloc_coherent, .free = __dma_nommu_free_coherent, .mmap = dma_nommu_mmap_coherent, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, .dma_supported = swiotlb_dma_supported, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = dma_direct_mapping_error, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, + .sync_sg_for_device = dma_direct_sync_sg_for_device, .get_required_mask = swiotlb_powerpc_get_required, }; @@ -108,12 +107,8 @@ int __init swiotlb_setup_bus_notifier(void) void __init swiotlb_detect_4g(void) { - if ((memblock_end_of_DRAM() - 1) > 0xffffffff) { + if ((memblock_end_of_DRAM() - 1) > 0xffffffff) ppc_swiotlb_enable = 1; -#ifdef CONFIG_ZONE_DMA32 - limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT); -#endif - } } static int __init check_swiotlb_enabled(void) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index dbfc7056d7df..b1903ebb2e9c 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -50,7 +50,8 @@ 
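In the dma-swiotlb.c hunk above, powerpc_swiotlb_dma_ops now points at the generic dma_direct_* helpers rather than the swiotlb_* entry points, since the direct-mapping code of this era decides on its own when a bounce buffer is needed. A simplified model of that idea (assumption: the real dma_direct_map_page() differs in detail between kernel versions, and the swiotlb bounce path is elided here):

static dma_addr_t direct_map_page_model(struct device *dev, struct page *page,
					unsigned long offset, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	/* The real code first tries a swiotlb bounce buffer instead. */
	if (!dma_capable(dev, dma_addr, size))
		return DMA_MAPPING_ERROR;

	/* Cache maintenance, needed on non-coherent platforms. */
	arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}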
static int dma_nommu_dma_supported(struct device *dev, u64 mask) return 1; #ifdef CONFIG_FSL_SOC - /* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however + /* + * Freescale gets another chance via ZONE_DMA, however * that will have to be refined if/when they support iommus */ return 1; @@ -62,18 +63,12 @@ static int dma_nommu_dma_supported(struct device *dev, u64 mask) #endif } +#ifndef CONFIG_NOT_COHERENT_CACHE void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { void *ret; -#ifdef CONFIG_NOT_COHERENT_CACHE - ret = __dma_alloc_coherent(dev, size, dma_handle, flag); - if (ret == NULL) - return NULL; - *dma_handle += get_dma_offset(dev); - return ret; -#else struct page *page; int node = dev_to_node(dev); #ifdef CONFIG_FSL_SOC @@ -94,13 +89,10 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, } switch (zone) { +#ifdef CONFIG_ZONE_DMA case ZONE_DMA: flag |= GFP_DMA; break; -#ifdef CONFIG_ZONE_DMA32 - case ZONE_DMA32: - flag |= GFP_DMA32; - break; #endif }; #endif /* CONFIG_FSL_SOC */ @@ -113,19 +105,15 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, *dma_handle = __pa(ret) + get_dma_offset(dev); return ret; -#endif } void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { -#ifdef CONFIG_NOT_COHERENT_CACHE - __dma_free_coherent(size, vaddr); -#else free_pages((unsigned long)vaddr, get_order(size)); -#endif } +#endif /* !CONFIG_NOT_COHERENT_CACHE */ static void *dma_nommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, @@ -210,10 +198,15 @@ static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl, return nents; } -static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg, +static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, unsigned long attrs) { + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); } static u64 dma_nommu_get_required_mask(struct device *dev) @@ -247,6 +240,8 @@ static inline void dma_nommu_unmap_page(struct device *dev, enum dma_data_direction direction, unsigned long attrs) { + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync(bus_to_virt(dma_address), size, direction); } #ifdef CONFIG_NOT_COHERENT_CACHE diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 6cae6b56ffd6..3230137469ab 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1808,10 +1808,10 @@ static int eeh_freeze_dbgfs_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get, - eeh_enable_dbgfs_set, "0x%llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get, - eeh_freeze_dbgfs_set, "0x%llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get, + eeh_enable_dbgfs_set, "0x%llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get, + eeh_freeze_dbgfs_set, "0x%llx\n"); #endif static int __init eeh_init_proc(void) @@ -1819,12 +1819,12 @@ static int __init eeh_init_proc(void) if (machine_is(pseries) || machine_is(powernv)) { proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show); #ifdef CONFIG_DEBUG_FS - debugfs_create_file("eeh_enable", 0600, - powerpc_debugfs_root, NULL, - &eeh_enable_dbgfs_ops); - debugfs_create_file("eeh_max_freezes", 0600, - 
powerpc_debugfs_root, NULL, - &eeh_freeze_dbgfs_ops); + debugfs_create_file_unsafe("eeh_enable", 0600, + powerpc_debugfs_root, NULL, + &eeh_enable_dbgfs_ops); + debugfs_create_file_unsafe("eeh_max_freezes", 0600, + powerpc_debugfs_root, NULL, + &eeh_freeze_dbgfs_ops); #endif } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 9446248eb6b8..99eab7bc7edc 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -60,7 +60,7 @@ static int eeh_result_priority(enum pci_ers_result result) } }; -const char *pci_ers_result_name(enum pci_ers_result result) +static const char *pci_ers_result_name(enum pci_ers_result result) { switch (result) { case PCI_ERS_RESULT_NONE: diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 61c9356bf9c9..227e57f980df 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -35,7 +35,7 @@ */ static DEFINE_SPINLOCK(eeh_eventlist_lock); -static struct semaphore eeh_eventlist_sem; +static DECLARE_COMPLETION(eeh_eventlist_event); static LIST_HEAD(eeh_eventlist); /** @@ -55,7 +55,7 @@ static int eeh_event_handler(void * dummy) struct eeh_pe *pe; while (!kthread_should_stop()) { - if (down_interruptible(&eeh_eventlist_sem)) + if (wait_for_completion_interruptible(&eeh_eventlist_event)) break; /* Fetch EEH event from the queue */ @@ -102,9 +102,6 @@ int eeh_event_init(void) struct task_struct *t; int ret = 0; - /* Initialize semaphore */ - sema_init(&eeh_eventlist_sem, 0); - t = kthread_run(eeh_event_handler, NULL, "eehd"); if (IS_ERR(t)) { ret = PTR_ERR(t); @@ -142,7 +139,7 @@ int eeh_send_failure_event(struct eeh_pe *pe) spin_unlock_irqrestore(&eeh_eventlist_lock, flags); /* For the EEH daemon to kick in */ - up(&eeh_eventlist_sem); + complete(&eeh_eventlist_event); return 0; } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 77decded1175..0768dfd8a64e 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -200,14 +200,14 @@ transfer_to_handler: cmplw r1,r9 /* if r1 <= ksp_limit */ ble- stack_ovf /* then the kernel stack overflowed */ 5: -#if defined(CONFIG_6xx) || defined(CONFIG_E500) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) CURRENT_THREAD_INFO(r9, r1) tophys(r9,r9) /* check local flags */ lwz r12,TI_LOCAL_FLAGS(r9) mtcrf 0x01,r12 bt- 31-TLF_NAPPING,4f bt- 31-TLF_SLEEPING,7f -#endif /* CONFIG_6xx || CONFIG_E500 */ +#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */ .globl transfer_to_handler_cont transfer_to_handler_cont: 3: @@ -273,7 +273,7 @@ reenable_mmu: /* re-enable mmu so we can */ RFI /* jump to handler, enable MMU */ #endif /* CONFIG_TRACE_IRQFLAGS */ -#if defined (CONFIG_6xx) || defined(CONFIG_E500) +#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) 4: rlwinm r12,r12,0,~_TLF_NAPPING stw r12,TI_LOCAL_FLAGS(r9) b power_save_ppc32_restore @@ -612,7 +612,7 @@ ppc_swapcontext: handle_page_fault: stw r4,_DAR(r1) addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 andis.
r0,r5,DSISR_DABRMATCH@h bne- handle_dabr_fault #endif @@ -629,7 +629,7 @@ handle_page_fault: bl bad_page_fault b ret_from_except_full -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 /* We have a data breakpoint exception - handle it */ handle_dabr_fault: SAVE_NVGPRS(r1) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 7b1693adff2a..435927f549c4 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -54,6 +54,9 @@ SYS_CALL_TABLE: .tc sys_call_table[TC],sys_call_table +COMPAT_SYS_CALL_TABLE: + .tc compat_sys_call_table[TC],compat_sys_call_table + /* This value is used to mark exception frames on the stack. */ exception_marker: .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER @@ -80,6 +83,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) std r0,GPR0(r1) std r10,GPR1(r1) beq 2f /* if from kernel mode */ +#ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION +#endif ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) 2: std r2,GPR2(r1) std r3,GPR3(r1) @@ -173,7 +181,7 @@ system_call: /* label this so stack traces look sane */ ld r11,SYS_CALL_TABLE@toc(2) andis. r10,r10,_TIF_32BIT@h beq 15f - addi r11,r11,8 /* use 32-bit syscall entries */ + ld r11,COMPAT_SYS_CALL_TABLE@toc(2) clrldi r3,r3,32 clrldi r4,r4,32 clrldi r5,r5,32 @@ -181,7 +189,7 @@ system_call: /* label this so stack traces look sane */ clrldi r7,r7,32 clrldi r8,r8,32 15: - slwi r0,r0,4 + slwi r0,r0,3 barrier_nospec_asm /* @@ -286,6 +294,10 @@ BEGIN_FTR_SECTION HMT_MEDIUM_LOW END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + std r8, PACATMSCRATCH(r13) +#endif + ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ ld r2,GPR2(r1) ld r1,GPR1(r1) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 6d6e144a28ce..afb638778f44 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -296,7 +296,8 @@ ret_from_mc_except: andi. 
r10,r11,MSR_PR; /* save stack pointer */ \ beq 1f; /* branch around if supervisor */ \ ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ -1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ +1: type##_BTB_FLUSH \ + cmpdi cr1,r1,0; /* check if SP makes sense */ \ bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ @@ -328,6 +329,29 @@ ret_from_mc_except: #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 +#ifdef CONFIG_PPC_FSL_BOOK3E +#define GEN_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + beq 1f; \ + BTB_FLUSH(r10) \ + 1: \ + END_BTB_FLUSH_SECTION + +#define CRIT_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r10) \ + END_BTB_FLUSH_SECTION + +#define DBG_BTB_FLUSH CRIT_BTB_FLUSH +#define MC_BTB_FLUSH CRIT_BTB_FLUSH +#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH +#else +#define GEN_BTB_FLUSH +#define CRIT_BTB_FLUSH +#define DBG_BTB_FLUSH +#define GDBELL_BTB_FLUSH +#endif + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 89d32bb79d5e..9e253ce27e08 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -995,7 +995,16 @@ EXC_COMMON_BEGIN(h_data_storage_common) bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD +BEGIN_MMU_FTR_SECTION + ld r4,PACA_EXGEN+EX_DAR(r13) + lwz r5,PACA_EXGEN+EX_DSISR(r13) + std r4,_DAR(r1) + std r5,_DSISR(r1) + li r5,SIGSEGV + bl bad_page_fault +MMU_FTR_SECTION_ELSE bl unknown_exception +ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) b ret_from_except @@ -1031,7 +1040,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early) EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) EXCEPTION_PROLOG_COMMON_3(0xe60) addi r3,r1,STACK_FRAME_OVERHEAD - BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */ + BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */ cmpdi cr0,r3,0 /* Windup the stack. */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 761b28b1427d..45a8d0be1c96 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -35,6 +35,7 @@ #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/slab.h> +#include <linux/cma.h> #include <asm/debugfs.h> #include <asm/page.h> @@ -46,6 +47,9 @@ static struct fw_dump fw_dump; static struct fadump_mem_struct fdm; static const struct fadump_mem_struct *fdm_active; +#ifdef CONFIG_CMA +static struct cma *fadump_cma; +#endif static DEFINE_MUTEX(fadump_mutex); struct fad_crash_memory_ranges *crash_memory_ranges; @@ -53,6 +57,67 @@ int crash_memory_ranges_size; int crash_mem_ranges; int max_crash_mem_ranges; +#ifdef CONFIG_CMA +/* + * fadump_cma_init() - Initialize CMA area from the fadump reserved memory + * + * This function initializes a CMA area from fadump reserved memory. + * The total size of fadump reserved memory covers boot memory size + * + cpu data size + hpte size and metadata. + * Initialize only the area equivalent to boot memory size for CMA use. + * The remaining portion of fadump reserved memory will not be given + * to CMA and pages for those will stay reserved. Boot memory size is + * aligned per CMA requirement to satisfy the cma_init_reserved_mem() call. + * Even if that call fails for some reason, we still have the memory + * reservation with us and we can still continue doing fadump.
+ */ +int __init fadump_cma_init(void) +{ + unsigned long long base, size; + int rc; + + if (!fw_dump.fadump_enabled) + return 0; + + /* + * Do not use CMA if user has provided fadump=nocma kernel parameter. + * Return 1 to continue with fadump old behaviour. + */ + if (fw_dump.nocma) + return 1; + + base = fw_dump.reserve_dump_area_start; + size = fw_dump.boot_memory_size; + + if (!size) + return 0; + + rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma); + if (rc) { + pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc); + /* + * Though the CMA init has failed we still have memory + * reservation with us. The reserved memory will be + * blocked from production system usage. Hence return 1, + * so that we can continue with fadump. + */ + return 1; + } + + /* + * So we now have successfully initialized cma area for fadump. + */ + pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx " + "bytes of memory reserved for firmware-assisted dump\n", + cma_get_size(fadump_cma), + (unsigned long)cma_get_base(fadump_cma) >> 20, + fw_dump.reserve_dump_area_size); + return 1; +} +#else +static int __init fadump_cma_init(void) { return 1; } +#endif /* CONFIG_CMA */ + /* Scan the Firmware Assisted dump configuration details. */ int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) @@ -118,13 +183,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, /* * If fadump is registered, check if the memory provided - * falls within boot memory area. + * falls within boot memory area and reserved memory area. */ -int is_fadump_boot_memory_area(u64 addr, ulong size) +int is_fadump_memory_area(u64 addr, ulong size) { + u64 d_start = fw_dump.reserve_dump_area_start; + u64 d_end = d_start + fw_dump.reserve_dump_area_size; + if (!fw_dump.dump_registered) return 0; + if (((addr + size) > d_start) && (addr <= d_end)) + return 1; + return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size; } @@ -172,6 +243,35 @@ static int is_boot_memory_area_contiguous(void) return ret; } +/* + * Returns true, if there are no holes in reserved memory area, + * false otherwise. + */ +static bool is_reserved_memory_area_contiguous(void) +{ + struct memblock_region *reg; + unsigned long start, end; + unsigned long d_start = fw_dump.reserve_dump_area_start; + unsigned long d_end = d_start + fw_dump.reserve_dump_area_size; + + for_each_memblock(memory, reg) { + start = max(d_start, (unsigned long)reg->base); + end = min(d_end, (unsigned long)(reg->base + reg->size)); + if (d_start < end) { + /* Memory hole from d_start to start */ + if (start > d_start) + break; + + if (end == d_end) + return true; + + d_start = end + 1; + } + } + + return false; +} + /* Print firmware assisted dump configurations for debugging purpose. */ static void fadump_show_config(void) { @@ -378,8 +478,15 @@ int __init fadump_reserve_mem(void) */ if (fdm_active) fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len); - else + else { fw_dump.boot_memory_size = fadump_calculate_reserve_size(); +#ifdef CONFIG_CMA + if (!fw_dump.nocma) + fw_dump.boot_memory_size = + ALIGN(fw_dump.boot_memory_size, + FADUMP_CMA_ALIGNMENT); +#endif + } /* * Calculate the memory boundary. 
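The CMA conversion above exists so that the (potentially large) fadump boot-memory reservation is not dead weight: in the production kernel, CMA-backed pages can service movable allocations while the range as a whole stays reclaimable. A generic sketch of the CMA calls involved (names follow the hunk above; grab_range() is a hypothetical consumer showing how a CMA area is claimed, not something fadump itself does):

#include <linux/cma.h>

static struct cma *fadump_cma;

static int __init reserve_for_fadump(phys_addr_t base, phys_addr_t size)
{
	/* Pages remain usable for movable allocations until claimed. */
	return cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
}

static struct page *grab_range(size_t nr_pages)
{
	/* Migrates any movable users away, then returns the range. */
	return cma_alloc(fadump_cma, nr_pages, 0, false);
}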
@@ -426,8 +533,9 @@ int __init fadump_reserve_mem(void) fw_dump.fadumphdr_addr = be64_to_cpu(fdm_active->rmr_region.destination_address) + be64_to_cpu(fdm_active->rmr_region.source_len); - pr_debug("fadumphdr_addr = %p\n", - (void *) fw_dump.fadumphdr_addr); + pr_debug("fadumphdr_addr = %pa\n", &fw_dump.fadumphdr_addr); + fw_dump.reserve_dump_area_start = base; + fw_dump.reserve_dump_area_size = size; } else { size = get_fadump_area_size(); @@ -455,10 +563,11 @@ int __init fadump_reserve_mem(void) (unsigned long)(size >> 20), (unsigned long)(base >> 20), (unsigned long)(memblock_phys_mem_size() >> 20)); - } - fw_dump.reserve_dump_area_start = base; - fw_dump.reserve_dump_area_size = size; + fw_dump.reserve_dump_area_start = base; + fw_dump.reserve_dump_area_size = size; + return fadump_cma_init(); + } return 1; } @@ -477,6 +586,10 @@ static int __init early_fadump_param(char *p) fw_dump.fadump_enabled = 1; else if (strncmp(p, "off", 3) == 0) fw_dump.fadump_enabled = 0; + else if (strncmp(p, "nocma", 5) == 0) { + fw_dump.fadump_enabled = 1; + fw_dump.nocma = 1; + } return 0; } @@ -525,8 +638,10 @@ static int register_fw_dump(struct fadump_mem_struct *fdm) break; case -3: if (!is_boot_memory_area_contiguous()) - pr_err("Can't have holes in boot memory area while " - "registering fadump\n"); + pr_err("Can't have holes in boot memory area while registering fadump\n"); + else if (!is_reserved_memory_area_contiguous()) + pr_err("Can't have holes in reserved memory area while" + " registering fadump\n"); printk(KERN_ERR "Failed to register firmware-assisted kernel" " dump. Parameter Error(%d).\n", rc); @@ -1229,7 +1344,7 @@ static int fadump_unregister_dump(struct fadump_mem_struct *fdm) return 0; } -static int fadump_invalidate_dump(struct fadump_mem_struct *fdm) +static int fadump_invalidate_dump(const struct fadump_mem_struct *fdm) { int rc = 0; unsigned int wait_time; @@ -1260,9 +1375,8 @@ void fadump_cleanup(void) { /* Invalidate the registration only if dump is active. */ if (fw_dump.dump_active) { - init_fadump_mem_struct(&fdm, - be64_to_cpu(fdm_active->cpu_state_data.destination_address)); - fadump_invalidate_dump(&fdm); + /* pass the same memory dump structure provided by platform */ + fadump_invalidate_dump(fdm_active); } else if (fw_dump.dump_registered) { /* Un-register Firmware-assisted dump if it was registered. 
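Just below, the open-coded fadump_region fops is replaced by DEFINE_SHOW_ATTRIBUTE(fadump_region). From memory of the <linux/seq_file.h> helper of this era, the macro expands to essentially what is being deleted, plus an .owner initializer:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(fadump_region): */
static int fadump_region_open(struct inode *inode, struct file *file)
{
	return single_open(file, fadump_region_show, inode->i_private);
}

static const struct file_operations fadump_region_fops = {
	.owner		= THIS_MODULE,
	.open		= fadump_region_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};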
*/ fadump_unregister_dump(&fdm); @@ -1531,17 +1645,7 @@ static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered, 0644, fadump_register_show, fadump_register_store); -static int fadump_region_open(struct inode *inode, struct file *file) -{ - return single_open(file, fadump_region_show, inode->i_private); -} - -static const struct file_operations fadump_region_fops = { - .open = fadump_region_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fadump_region); static void fadump_init_files(void) { diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 61ca27929355..05b08db3901d 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -176,10 +176,10 @@ __after_mmu_off: bl reloc_offset li r24,0 /* cpu# */ bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 bl reloc_offset bl init_idle_6xx -#endif /* CONFIG_6xx */ +#endif /* CONFIG_PPC_BOOK3S_32 */ /* @@ -393,7 +393,9 @@ DataAccess: bne 1f /* if not, try to put a PTE */ mfspr r4,SPRN_DAR /* into the hash table */ rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ +BEGIN_MMU_FTR_SECTION bl hash_page +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) 1: lwz r5,_DSISR(r11) /* get DSISR value */ mfspr r4,SPRN_DAR EXC_XFER_LITE(0x300, handle_page_fault) @@ -408,7 +410,9 @@ InstructionAccess: beq 1f /* if so, try to put a PTE */ li r3,0 /* into the hash table */ mr r4,r12 /* SRR0 is fault address */ +BEGIN_MMU_FTR_SECTION bl hash_page +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) 1: mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ EXC_XFER_LITE(0x400, handle_page_fault) @@ -499,7 +503,7 @@ InstructionTLBMiss: lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2,SPRN_SPRG_THREAD - li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ + li r1,_PAGE_USER|_PAGE_PRESENT|_PAGE_EXEC /* low addresses tested as user */ lwz r2,PGDIR(r2) bge- 112f mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ @@ -836,10 +840,10 @@ __secondary_start: lis r3,-KERNELBASE@h mr r4,r24 bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 lis r3,-KERNELBASE@h bl init_idle_6xx -#endif /* CONFIG_6xx */ +#endif /* CONFIG_PPC_BOOK3S_32 */ /* get current_thread_info and current */ lis r1,secondary_ti@ha @@ -880,14 +884,14 @@ __secondary_start: /* * Those generic dummy functions are kept for CPUs not - * included in CONFIG_6xx + * included in CONFIG_PPC_BOOK3S_32 */ -#if !defined(CONFIG_6xx) +#if !defined(CONFIG_PPC_BOOK3S_32) _ENTRY(__save_cpu_setup) blr _ENTRY(__restore_cpu_setup) blr -#endif /* !defined(CONFIG_6xx) */ +#endif /* !defined(CONFIG_PPC_BOOK3S_32) */ /* diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 37e4a7cf0065..bf23c19c92d6 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -40,6 +40,7 @@ #include <asm/ptrace.h> #include <asm/synch.h> #include <asm/export.h> +#include <asm/code-patching-asm.h> #include "head_booke.h" @@ -382,10 +383,9 @@ interrupt_base: /* Increment, rollover, and store TLB index */ addi r13,r13,1 + patch_site 0f, patch__tlb_44x_hwater_D /* Compare with watermark (instruction gets patched) */ - .globl tlb_44x_patch_hwater_D -tlb_44x_patch_hwater_D: - cmpwi 0,r13,1 /* reserve entries */ +0: cmpwi 0,r13,1 /* reserve entries */ ble 5f li r13,0 5: @@ -478,10 +478,9 @@ tlb_44x_patch_hwater_D: /* Increment, 
rollover, and store TLB index */ addi r13,r13,1 + patch_site 0f, patch__tlb_44x_hwater_I /* Compare with watermark (instruction gets patched) */ - .globl tlb_44x_patch_hwater_I -tlb_44x_patch_hwater_I: - cmpwi 0,r13,1 /* reserve entries */ +0: cmpwi 0,r13,1 /* reserve entries */ ble 5f li r13,0 5: diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 3b67b9533c82..57deb1e9ffea 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -106,6 +106,23 @@ turn_on_mmu: mtspr SPRN_SRR0,r0 rfi /* enables MMU */ + +#ifdef CONFIG_PERF_EVENTS + .align 4 + + .globl itlb_miss_counter +itlb_miss_counter: + .space 4 + + .globl dtlb_miss_counter +dtlb_miss_counter: + .space 4 + + .globl instruction_counter +instruction_counter: + .space 4 +#endif + /* * Exception entry code. This code runs with address translation * turned off, i.e. using physical addresses. @@ -149,6 +166,9 @@ turn_on_mmu: li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ mtmsr r10; \ stw r0,GPR0(r11); \ + lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \ + addi r10, r10, STACK_FRAME_REGS_MARKER@l; \ + stw r10, 8(r11); \ SAVE_4GPRS(3, r11); \ SAVE_2GPRS(7, r11) @@ -275,7 +295,7 @@ SystemCall: . = 0x1100 /* * For the MPC8xx, this is a software tablewalk to load the instruction - * TLB. The task switch loads the M_TW register with the pointer to the first + * TLB. The task switch loads the M_TWB register with the pointer to the first * level table. * If we discover there is no second level table (value is zero) or if there * is an invalid pte, we load that into the TLB, which causes another fault @@ -285,186 +305,154 @@ SystemCall: */ #ifdef CONFIG_8xx_CPU15 -#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr) \ - addi tmp, addr, PAGE_SIZE; \ - tlbie tmp; \ - addi tmp, addr, -PAGE_SIZE; \ - tlbie tmp +#define INVALIDATE_ADJACENT_PAGES_CPU15(addr) \ + addi addr, addr, PAGE_SIZE; \ + tlbie addr; \ + addi addr, addr, -(PAGE_SIZE << 1); \ + tlbie addr; \ + addi addr, addr, PAGE_SIZE #else -#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr) +#define INVALIDATE_ADJACENT_PAGES_CPU15(addr) #endif InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH0, r10 +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mtspr SPRN_SPRG_SCRATCH1, r11 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) - mtspr SPRN_SPRG_SCRATCH2, r12 #endif /* If we are faulting a kernel address, we have to use the * kernel page tables. */ mfspr r10, SPRN_SRR0 /* Get effective address of fault */ - INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10) + INVALIDATE_ADJACENT_PAGES_CPU15(r10) + mtspr SPRN_MD_EPN, r10 /* Only modules will cause ITLB Misses as we always * pin the first 8MB of kernel memory */ -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) - mfcr r12 -#endif #ifdef ITLB_MISS_KERNEL + mfcr r11 #if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) - andis. 
r11, r10, 0x8000 /* Address >= 0x80000000 */ + cmpi cr0, r10, 0 /* Address >= 0x80000000 */ #else - rlwinm r11, r10, 16, 0xfff8 - cmpli cr0, r11, PAGE_OFFSET@h + rlwinm r10, r10, 16, 0xfff8 + cmpli cr0, r10, PAGE_OFFSET@h #ifndef CONFIG_PIN_TLB_TEXT /* It is assumed that kernel code fits into the first 8M page */ -0: cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h +0: cmpli cr7, r10, (PAGE_OFFSET + 0x0800000)@h patch_site 0b, patch__itlbmiss_linmem_top #endif #endif #endif - mfspr r11, SPRN_M_TW /* Get level 1 table */ + mfspr r10, SPRN_M_TWB /* Get level 1 table */ #ifdef ITLB_MISS_KERNEL #if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) - beq+ 3f + bge+ 3f #else blt+ 3f #endif #ifndef CONFIG_PIN_TLB_TEXT blt cr7, ITLBMissLinear #endif - lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha + rlwinm r10, r10, 0, 20, 31 + oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: #endif - /* Insert level 1 index */ - rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 - lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ + mtspr SPRN_MI_TWC, r10 /* Set segment attributes */ - /* Extract level 2 index */ - rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 -#ifdef CONFIG_HUGETLB_PAGE - mtcr r11 - bt- 28, 10f /* bit 28 = Large page (8M) */ - bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ -#endif - rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ + mtspr SPRN_MD_TWC, r10 + mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ -4: -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) - mtcr r12 +#ifdef ITLB_MISS_KERNEL + mtcr r11 #endif - /* Load the MI_TWC with the attributes for this "segment." */ - mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ - #ifdef CONFIG_SWAP rlwinm r11, r10, 32-5, _PAGE_PRESENT and r11, r11, r10 rlwimi r10, r11, 0, _PAGE_PRESENT #endif - li r11, RPN_PATTERN | 0x200 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. * Software indicator bits 22, 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior * of the MMU. 
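The reworked 8xx miss handlers above lean on the MMU assist registers: M_TWB yields the address of the level-1 entry for the faulting EA, and writing that entry to MD_TWC makes a read of MD_TWC return the address of the level-2 PTE. In plain C, the lookup they implement is the classic two-level walk (generic model, 4k pages assumed, purely illustrative):

static unsigned int pte_lookup(const unsigned int *pgdir, unsigned int ea)
{
	const unsigned int *pte_table;

	/* Level 1: top 10 bits of the EA index the page directory. */
	pte_table = (const unsigned int *)(pgdir[ea >> 22] & ~0xfffu);

	/* Level 2: the next 10 bits index the PTE table. */
	return pte_table[(ea >> 12) & 0x3ff];
}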
*/ - rlwimi r11, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ - rlwimi r10, r11, 0, 0x0ff0 /* Set 22, 24-27, clear 20,23 */ + rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */ + rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ + ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH0 +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mfspr r11, SPRN_SPRG_SCRATCH1 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) - mfspr r12, SPRN_SPRG_SCRATCH2 #endif rfi patch_site 0b, patch__itlbmiss_exit_1 #ifdef CONFIG_PERF_EVENTS patch_site 0f, patch__itlbmiss_perf -0: lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha - lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) - addi r11, r11, 1 - stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) -#endif +0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) + addi r10, r10, 1 + stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH0 +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mfspr r11, SPRN_SPRG_SCRATCH1 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) - mfspr r12, SPRN_SPRG_SCRATCH2 #endif rfi - -#ifdef CONFIG_HUGETLB_PAGE -10: /* 8M pages */ -#ifdef CONFIG_PPC_16K_PAGES - /* Extract level 2 index */ - rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 - /* Add level 2 base */ - rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 -#else - /* Level 2 base */ - rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK #endif - lwz r10, 0(r10) /* Get the pte */ - b 4b -20: /* 512k pages */ - /* Extract level 2 index */ - rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 - /* Add level 2 base */ - rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 - lwz r10, 0(r10) /* Get the pte */ - b 4b +#ifndef CONFIG_PIN_TLB_TEXT +ITLBMissLinear: + mtcr r11 + /* Set 8M byte page and mark it valid */ + li r11, MI_PS8MEG | MI_SVALID + mtspr SPRN_MI_TWC, r11 + rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ + ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ + _PAGE_PRESENT + mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ + +0: mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi + patch_site 0b, patch__itlbmiss_exit_2 #endif . = 0x1200 DataStoreTLBMiss: mtspr SPRN_SPRG_SCRATCH0, r10 mtspr SPRN_SPRG_SCRATCH1, r11 - mtspr SPRN_SPRG_SCRATCH2, r12 - mfcr r12 + mfcr r11 /* If we are faulting a kernel address, we have to use the * kernel page tables. 
*/ mfspr r10, SPRN_MD_EPN - rlwinm r11, r10, 16, 0xfff8 - cmpli cr0, r11, PAGE_OFFSET@h - mfspr r11, SPRN_M_TW /* Get level 1 table */ - blt+ 3f - rlwinm r11, r10, 16, 0xfff8 + rlwinm r10, r10, 16, 0xfff8 + cmpli cr0, r10, PAGE_OFFSET@h #ifndef CONFIG_PIN_TLB_IMMR - cmpli cr0, r11, VIRT_IMMR_BASE@h + cmpli cr6, r10, VIRT_IMMR_BASE@h #endif -0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h +0: cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h patch_site 0b, patch__dtlbmiss_linmem_top + + mfspr r10, SPRN_M_TWB /* Get level 1 table */ + blt+ 3f #ifndef CONFIG_PIN_TLB_IMMR -0: beq- DTLBMissIMMR +0: beq- cr6, DTLBMissIMMR patch_site 0b, patch__dtlbmiss_immr_jmp #endif blt cr7, DTLBMissLinear - lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha + rlwinm r10, r10, 0, 20, 31 + oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: - - /* Insert level 1 index */ - rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 - lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ - - /* We have a pte table, so load fetch the pte from the table. - */ - /* Extract level 2 index */ - rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 -#ifdef CONFIG_HUGETLB_PAGE mtcr r11 - bt- 28, 10f /* bit 28 = Large page (8M) */ - bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ -#endif - rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ + lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ + + mtspr SPRN_MD_TWC, r11 + mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ -4: - mtcr r12 /* Insert the Guarded flag into the TWC from the Linux PTE. * It is bit 27 of both the Linux PTE and the TWC (at least @@ -503,44 +491,55 @@ DataStoreTLBMiss: 0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r12, SPRN_SPRG_SCRATCH2 rfi patch_site 0b, patch__dtlbmiss_exit_1 #ifdef CONFIG_PERF_EVENTS patch_site 0f, patch__dtlbmiss_perf -0: lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha - lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) - addi r11, r11, 1 - stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) -#endif +0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) + addi r10, r10, 1 + stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r12, SPRN_SPRG_SCRATCH2 rfi - -#ifdef CONFIG_HUGETLB_PAGE -10: /* 8M pages */ - /* Extract level 2 index */ -#ifdef CONFIG_PPC_16K_PAGES - rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 - /* Add level 2 base */ - rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 -#else - /* Level 2 base */ - rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK #endif - lwz r10, 0(r10) /* Get the pte */ - b 4b -20: /* 512k pages */ - /* Extract level 2 index */ - rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 - /* Add level 2 base */ - rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 - lwz r10, 0(r10) /* Get the pte */ - b 4b -#endif +DTLBMissIMMR: + mtcr r11 + /* Set 512k byte guarded page and mark it valid */ + li r10, MD_PS512K | MD_GUARDED | MD_SVALID + mtspr SPRN_MD_TWC, r10 + mfspr r10, SPRN_IMMR /* Get current IMMR */ + rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ + ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ + _PAGE_PRESENT | _PAGE_NO_CACHE + mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ + + li r11, RPN_PATTERN + mtspr SPRN_DAR, r11 /* Tag DAR */ + +0: mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi + 
patch_site 0b, patch__dtlbmiss_exit_2 + +DTLBMissLinear: + mtcr r11 + /* Set 8M byte page and mark it valid */ + li r11, MD_PS8MEG | MD_SVALID + mtspr SPRN_MD_TWC, r11 + rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ + ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ + _PAGE_PRESENT + mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ + + li r11, RPN_PATTERN + mtspr SPRN_DAR, r11 /* Tag DAR */ + +0: mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi + patch_site 0b, patch__dtlbmiss_exit_3 /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction @@ -625,16 +624,13 @@ DataBreakpoint: . = 0x1d00 InstructionBreakpoint: mtspr SPRN_SPRG_SCRATCH0, r10 - mtspr SPRN_SPRG_SCRATCH1, r11 - lis r10, (instruction_counter - PAGE_OFFSET)@ha - lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10) - addi r11, r11, -1 - stw r11, (instruction_counter - PAGE_OFFSET)@l(r10) + lwz r10, (instruction_counter - PAGE_OFFSET)@l(0) + addi r10, r10, -1 + stw r10, (instruction_counter - PAGE_OFFSET)@l(0) lis r10, 0xffff ori r10, r10, 0x01 mtspr SPRN_COUNTA, r10 mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 rfi #else EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) @@ -644,67 +640,6 @@ InstructionBreakpoint: . = 0x2000 -/* - * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM. - * not enough space in the DataStoreTLBMiss area. - */ -DTLBMissIMMR: - mtcr r12 - /* Set 512k byte guarded page and mark it valid */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID - mtspr SPRN_MD_TWC, r10 - mfspr r10, SPRN_IMMR /* Get current IMMR */ - rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT | _PAGE_NO_CACHE - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - mtspr SPRN_DAR, r11 /* Tag DAR */ - -0: mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r12, SPRN_SPRG_SCRATCH2 - rfi - patch_site 0b, patch__dtlbmiss_exit_2 - -DTLBMissLinear: - mtcr r12 - /* Set 8M byte page and mark it valid */ - li r11, MD_PS8MEG | MD_SVALID - mtspr SPRN_MD_TWC, r11 - rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - mtspr SPRN_DAR, r11 /* Tag DAR */ - -0: mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r12, SPRN_SPRG_SCRATCH2 - rfi - patch_site 0b, patch__dtlbmiss_exit_3 - -#ifndef CONFIG_PIN_TLB_TEXT -ITLBMissLinear: - mtcr r12 - /* Set 8M byte page and mark it valid */ - li r11, MI_PS8MEG | MI_SVALID - mtspr SPRN_MI_TWC, r11 - rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ - ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ - -0: mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r12, SPRN_SPRG_SCRATCH2 - rfi - patch_site 0b, patch__itlbmiss_exit_2 -#endif - /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. @@ -712,12 +647,13 @@ ITLBMissLinear: /* define if you don't want to use self modifying code */ #define NO_SELF_MODIFYING_CODE FixupDAR:/* Entry point for dcbx workaround. 
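The FixupDAR workaround (entry point above, body continuing below) exists because buggy dcbx/dcbi forms on the 8xx do not set DAR; the handler re-fetches the faulting instruction, decodes the register fields, and computes the effective address by hand. The computation, expressed in C (a sketch; the real code also filters which opcodes need the workaround):

static unsigned long dcbx_effective_address(unsigned int insn,
					    const unsigned long *gpr)
{
	unsigned int ra = (insn >> 16) & 0x1f;	/* RA field, X-form */
	unsigned int rb = (insn >> 11) & 0x1f;	/* RB field, X-form */

	/* EA = (RA ? GPR[RA] : 0) + GPR[RB] */
	return (ra ? gpr[ra] : 0) + gpr[rb];
}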
*/ - mtspr SPRN_SPRG_SCRATCH2, r10 + mtspr SPRN_M_TW, r10 /* fetch instruction from memory. */ mfspr r10, SPRN_SRR0 + mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 cmpli cr0, r11, PAGE_OFFSET@h - mfspr r11, SPRN_M_TW /* Get level 1 table */ + mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ 3f rlwinm r11, r10, 16, 0xfff8 @@ -727,17 +663,17 @@ FixupDAR:/* Entry point for dcbx workaround. */ /* create physical page address from effective address */ tophys(r11, r10) blt- cr7, 201f - lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha - /* Insert level 1 index */ -3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 + mfspr r11, SPRN_M_TWB /* Get level 1 table */ + rlwinm r11, r11, 0, 20, 31 + oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha +3: lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + mtspr SPRN_MD_TWC, r11 mtcr r11 + mfspr r11, SPRN_MD_TWC + lwz r11, 0(r11) /* Get the pte */ bt 28,200f /* bit 28 = Large page (8M) */ bt 29,202f /* bit 29 = Large page (8M or 512K) */ - rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */ - /* Insert level 2 index */ - rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 - lwz r11, 0(r11) /* Get the pte */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 201: lwz r11,0(r11) @@ -756,26 +692,15 @@ FixupDAR:/* Entry point for dcbx workaround. */ beq+ 142f cmpwi cr0, r10, 1964 /* Is icbi? */ beq+ 142f -141: mfspr r10,SPRN_SPRG_SCRATCH2 +141: mfspr r10,SPRN_M_TW b DARFixed /* Nope, go back to normal TLB processing */ - /* concat physical page address(r11) and page offset(r10) */ 200: -#ifdef CONFIG_PPC_16K_PAGES - rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 - rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 -#else - rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK -#endif - lwz r11, 0(r11) /* Get the pte */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 b 201b 202: - rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 - rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 - lwz r11, 0(r11) /* Get the pte */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31 b 201b @@ -802,7 +727,7 @@ modified_instr: bne+ 143f subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */ 143: mtdar r10 /* store faulting EA in DAR */ - mfspr r10,SPRN_SPRG_SCRATCH2 + mfspr r10,SPRN_M_TW b DARFixed /* Go back to normal TLB handling */ #else mfctr r10 @@ -856,7 +781,7 @@ modified_instr: mfdar r11 mtctr r11 /* restore ctr reg from DAR */ mtdar r10 /* save fault EA to DAR */ - mfspr r10,SPRN_SPRG_SCRATCH2 + mfspr r10,SPRN_M_TW b DARFixed /* Go back to normal TLB handling */ /* special handling for r10,r11 since these are modified already */ @@ -891,7 +816,7 @@ start_here: lis r6, swapper_pg_dir@ha tophys(r6,r6) - mtspr SPRN_M_TW, r6 + mtspr SPRN_M_TWB, r6 bl early_init /* We have to do this with MMU on */ @@ -1065,17 +990,3 @@ swapper_pg_dir: */ abatron_pteptrs: .space 8 - -#ifdef CONFIG_PERF_EVENTS - .globl itlb_miss_counter -itlb_miss_counter: - .space 4 - - .globl dtlb_miss_counter -dtlb_miss_counter: - .space 4 - - .globl instruction_counter -instruction_counter: - .space 4 -#endif diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index d0862a100d29..15ac51072eb3 100644 --- a/arch/powerpc/kernel/head_booke.h +++ 
b/arch/powerpc/kernel/head_booke.h @@ -43,6 +43,9 @@ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ mr r11, r1; \ beq 1f; \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r11) \ +END_BTB_FLUSH_SECTION \ /* if from user, start at top of this thread's kernel stack */ \ lwz r11, THREAD_INFO-THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ @@ -128,6 +131,9 @@ stw r9,_CCR(r8); /* save CR on stack */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r10) \ +END_BTB_FLUSH_SECTION \ andi. r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index e2750b856c8f..2386ce2a9c6e 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -453,6 +453,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -547,6 +554,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index f0dc680e659a..d0625480b59e 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -47,6 +47,7 @@ #include <asm/fadump.h> #include <asm/vio.h> #include <asm/tce.h> +#include <asm/mmu_context.h> #define DBG(...) @@ -197,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev, if (unlikely(npages == 0)) { if (printk_ratelimit()) WARN_ON(1); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } if (should_fail_iommu(dev)) - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; /* * We don't need to disable preemption here because any CPU can @@ -277,7 +278,7 @@ again: } else { /* Give up */ spin_unlock_irqrestore(&(pool->lock), flags); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } } @@ -309,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, unsigned long attrs) { unsigned long entry; - dma_addr_t ret = IOMMU_MAPPING_ERROR; + dma_addr_t ret = DMA_MAPPING_ERROR; int build_fail; entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); - if (unlikely(entry == IOMMU_MAPPING_ERROR)) - return IOMMU_MAPPING_ERROR; + if (unlikely(entry == DMA_MAPPING_ERROR)) + return DMA_MAPPING_ERROR; entry += tbl->it_offset; /* Offset into real TCE table */ ret = entry << tbl->it_page_shift; /* Set the return dma address */ @@ -327,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, /* tbl->it_ops->set() only returns non-zero for transient errors. * Clean up the table bitmap in this case and return - * IOMMU_MAPPING_ERROR. For all other errors the functionality is + * DMA_MAPPING_ERROR. For all other errors the functionality is * not altered. 
*/ if (unlikely(build_fail)) { __iommu_free(tbl, ret, npages); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /* Flush/invalidate TLB caches if necessary */ @@ -477,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); /* Handle failure */ - if (unlikely(entry == IOMMU_MAPPING_ERROR)) { + if (unlikely(entry == DMA_MAPPING_ERROR)) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) dev_info(dev, "iommu_alloc failed, tbl %p " @@ -544,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, */ if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = IOMMU_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; } @@ -562,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, npages = iommu_num_pages(s->dma_address, s->dma_length, IOMMU_PAGE_SIZE(tbl)); __iommu_free(tbl, vaddr, npages); - s->dma_address = IOMMU_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -776,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) { - dma_addr_t dma_handle = IOMMU_MAPPING_ERROR; + dma_addr_t dma_handle = DMA_MAPPING_ERROR; void *vaddr; unsigned long uaddr; unsigned int npages, align; @@ -796,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, mask >> tbl->it_page_shift, align, attrs); - if (dma_handle == IOMMU_MAPPING_ERROR) { + if (dma_handle == DMA_MAPPING_ERROR) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { dev_info(dev, "iommu_alloc failed, tbl %p " @@ -868,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> tbl->it_page_shift, io_order, 0); - if (mapping == IOMMU_MAPPING_ERROR) { + if (mapping == DMA_MAPPING_ERROR) { free_pages((unsigned long)ret, order); return NULL; } @@ -993,15 +994,19 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa) } EXPORT_SYMBOL_GPL(iommu_tce_check_gpa); -long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction) +long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, + unsigned long entry, unsigned long *hpa, + enum dma_data_direction *direction) { long ret; + unsigned long size = 0; ret = tbl->it_ops->exchange(tbl, entry, hpa, direction); if (!ret && ((*direction == DMA_FROM_DEVICE) || - (*direction == DMA_BIDIRECTIONAL))) + (*direction == DMA_BIDIRECTIONAL)) && + !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, + &size)) SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT)); /* if (unlikely(ret)) @@ -1073,11 +1078,8 @@ void iommu_release_ownership(struct iommu_table *tbl) } EXPORT_SYMBOL_GPL(iommu_release_ownership); -int iommu_add_device(struct device *dev) +int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) { - struct iommu_table *tbl; - struct iommu_table_group_link *tgl; - /* * The sysfs entries should be populated before * binding IOMMU group. 
If sysfs entries isn't @@ -1093,32 +1095,10 @@ int iommu_add_device(struct device *dev) return -EBUSY; } - tbl = get_iommu_table_base(dev); - if (!tbl) { - pr_debug("%s: Skipping device %s with no tbl\n", - __func__, dev_name(dev)); - return 0; - } - - tgl = list_first_entry_or_null(&tbl->it_group_list, - struct iommu_table_group_link, next); - if (!tgl) { - pr_debug("%s: Skipping device %s with no group\n", - __func__, dev_name(dev)); - return 0; - } pr_debug("%s: Adding %s to iommu group %d\n", - __func__, dev_name(dev), - iommu_group_id(tgl->table_group->group)); - - if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) { - pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n", - __func__, IOMMU_PAGE_SIZE(tbl), - PAGE_SIZE, dev_name(dev)); - return -EINVAL; - } + __func__, dev_name(dev), iommu_group_id(table_group->group)); - return iommu_group_add_device(tgl->table_group->group, dev); + return iommu_group_add_device(table_group->group, dev); } EXPORT_SYMBOL_GPL(iommu_add_device); @@ -1138,31 +1118,4 @@ void iommu_del_device(struct device *dev) iommu_group_remove_device(dev); } EXPORT_SYMBOL_GPL(iommu_del_device); - -static int tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - return iommu_add_device(dev); - case BUS_NOTIFY_DEL_DEVICE: - if (dev->iommu_group) - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block tce_iommu_bus_nb = { - .notifier_call = tce_iommu_bus_notifier, -}; - -int __init tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); - return 0; -} #endif /* CONFIG_IOMMU_API */ diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index fda3ae48480c..0e7099da4f25 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -327,8 +327,7 @@ static int isa_bridge_notify(struct notifier_block *nb, unsigned long action, /* Check if we have no ISA device, and this happens to be one, * register it as such if it has an OF device */ - if (!isa_bridge_devnode && devnode && devnode->type && - !strcmp(devnode->type, "isa")) + if (!isa_bridge_devnode && of_node_is_type(devnode, "isa")) isa_bridge_find_late(pdev, devnode); return 0; diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 5b9dce17f0c9..7cea5978f21f 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -192,7 +192,7 @@ static int __init add_legacy_soc_port(struct device_node *np, /* Add port, irq will be dealt with later. We passed a translated * IO port value. 
It will be fixed up later along with the irq */ - if (tsi && !strcmp(tsi->type, "tsi-bridge")) + if (of_node_is_type(tsi, "tsi-bridge")) return add_legacy_port(np, -1, UPIO_TSI, addr, addr, 0, legacy_port_flags, 0); else @@ -400,8 +400,7 @@ void __init find_legacy_serial_ports(void) /* Next, fill our array with ISA ports */ for_each_node_by_type(np, "serial") { struct device_node *isa = of_get_parent(np); - if (isa && (!strcmp(isa->name, "isa") || - !strcmp(isa->name, "lpc"))) { + if (of_node_name_eq(isa, "isa") || of_node_name_eq(isa, "lpc")) { if (of_device_is_available(np)) { index = add_legacy_isa_port(np, isa); if (index >= 0 && np == stdout) @@ -415,11 +414,12 @@ void __init find_legacy_serial_ports(void) /* Next, try to locate PCI ports */ for (np = NULL; (np = of_find_all_nodes(np));) { struct device_node *pci, *parent = of_get_parent(np); - if (parent && !strcmp(parent->name, "isa")) { + if (of_node_name_eq(parent, "isa")) { of_node_put(parent); continue; } - if (strcmp(np->name, "serial") && strcmp(np->type, "serial")) { + if (!of_node_name_eq(np, "serial") && + !of_node_is_type(np, "serial")) { of_node_put(parent); continue; } diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c index c77e95e9b384..0d20c7ad40fa 100644 --- a/arch/powerpc/kernel/machine_kexec_file_64.c +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -24,7 +24,6 @@ #include <linux/slab.h> #include <linux/kexec.h> -#include <linux/memblock.h> #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <asm/ima.h> @@ -47,59 +46,6 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, } /** - * arch_kexec_walk_mem - call func(data) for each unreserved memory block - * @kbuf: Context info for the search. Also passed to @func. - * @func: Function to call for each memory block. - * - * This function is used by kexec_add_buffer and kexec_locate_mem_hole - * to find unreserved memory to load kexec segments into. - * - * Return: The memory walk will stop when func returns a non-zero value - * and that value will be returned. If all free regions are visited without - * func returning non-zero, then zero will be returned. - */ -int arch_kexec_walk_mem(struct kexec_buf *kbuf, - int (*func)(struct resource *, void *)) -{ - int ret = 0; - u64 i; - phys_addr_t mstart, mend; - struct resource res = { }; - - if (kbuf->top_down) { - for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0, - &mstart, &mend, NULL) { - /* - * In memblock, end points to the first byte after the - * range while in kexec, end points to the last byte - * in the range. - */ - res.start = mstart; - res.end = mend - 1; - ret = func(&res, kbuf); - if (ret) - break; - } - } else { - for_each_free_mem_range(i, NUMA_NO_NODE, 0, &mstart, &mend, - NULL) { - /* - * In memblock, end points to the first byte after the - * range while in kexec, end points to the last byte - * in the range. - */ - res.start = mstart; - res.end = mend - 1; - ret = func(&res, kbuf); - if (ret) - break; - } - } - - return ret; -} - -/** * setup_purgatory - initialize the purgatory's global variables * @image: kexec image. * @slave_code: Slave code for the purgatory. 
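A pattern repeated through the isa-bridge.c, legacy_serial.c and pci_of_scan.c hunks above is the replacement of open-coded strcmp() tests on a device node's name or device_type with the NULL-safe helpers of_node_is_type() and of_node_name_eq() from <linux/of.h>. A minimal sketch of what the conversion buys — the function names below are hypothetical stand-ins for the real call sites, written against the helper signatures in <linux/of.h>:

#include <linux/of.h>
#include <linux/string.h>

/* Old pattern: each pointer has to be checked by hand before strcmp(),
 * since a missing "device_type" property leaves np->type NULL. */
static bool node_is_isa_old(const struct device_node *np)
{
	return np && np->type && !strcmp(np->type, "isa");
}

/* New pattern: of_node_is_type() tolerates both a NULL node and a node
 * without a "device_type" property, so the call sites above drop their
 * defensive checks without changing behaviour. */
static bool node_is_isa_new(const struct device_node *np)
{
	return of_node_is_type(np, "isa");
}

The same reasoning applies to of_node_name_eq(), which in addition matches the node name up to any unit address (a "serial@4500" node still matches "serial"), which is why the find_legacy_serial_ports() hunk can compare against bare names.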
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 695b24a2d954..57d2ffb2d45c 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -153,7 +153,7 @@ _GLOBAL(call_setup_cpu) mtctr r5 bctr -#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) +#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32) /* This gets called by via-pmu.c to switch the PLL selection * on 750fx CPU. This function should really be moved to some @@ -223,7 +223,7 @@ _GLOBAL(low_choose_7447a_dfs) mtmsr r7 blr -#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ +#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */ /* * complement mask on the msr then "or" some values on. diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 22e9d281324d..38b03a330cd2 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -563,8 +563,6 @@ static int nvram_pstore_init(void) nvram_pstore_info.buf = oops_data; nvram_pstore_info.bufsize = oops_data_sz; - spin_lock_init(&nvram_pstore_info.buf_lock); - rc = pstore_register(&nvram_pstore_info); if (rc && (rc != -EPERM)) /* Print error only when pstore.backend == nvram */ @@ -809,6 +807,7 @@ static long dev_nvram_ioctl(struct file *file, unsigned int cmd, #ifdef CONFIG_PPC_PMAC case OBSOLETE_PMAC_NVRAM_GET_OFFSET: printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); + /* fall through */ case IOC_NVRAM_GET_OFFSET: { int part, offset; diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 98f04725def7..24191ea2d9a7 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -125,16 +125,13 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_bus *bus, int devfn) { struct pci_dev *dev; - const char *type; dev = pci_alloc_dev(bus); if (!dev) return NULL; - type = of_get_property(node, "device_type", NULL); - if (type == NULL) - type = ""; - pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); + pr_debug(" create device, devfn: %x, type: %s\n", devfn, + of_node_get_device_type(node)); dev->dev.of_node = of_node_get(node); dev->dev.parent = bus->bridge; @@ -167,12 +164,12 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); - if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { + if (of_node_is_type(node, "pci") || of_node_is_type(node, "pciex")) { /* a PCI-PCI bridge */ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; dev->rom_base_reg = PCI_ROM_ADDRESS1; set_pcie_hotplug_bridge(dev); - } else if (!strcmp(type, "cardbus")) { + } else if (of_node_is_type(node, "cardbus")) { dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; } else { dev->hdr_type = PCI_HEADER_TYPE_NORMAL; diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c index 58eaa3ddf7b9..2de71faca911 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c @@ -29,7 +29,7 @@ static void dummy_perf(struct pt_regs *regs) { #if defined(CONFIG_FSL_EMB_PERFMON) mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE); -#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx) +#elif defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32) if (cur_cpu_spec->pmc_type == PPC_PMC_IBM) mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO)); #else diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fe758cedb93f..4181ec715f88 100644 --- a/arch/powerpc/kernel/prom.c +++ 
b/arch/powerpc/kernel/prom.c @@ -124,12 +124,12 @@ static void __init move_device_tree(void) size = fdt_totalsize(initial_boot_params); if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || - overlaps_crashkernel(start, size) || - overlaps_initrd(start, size)) { + !memblock_is_memory(start + size - 1) || + overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) { p = __va(memblock_phys_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); initial_boot_params = p; - DBG("Moved device tree to 0x%p\n", p); + DBG("Moved device tree to 0x%px\n", p); } DBG("<- move_device_tree\n"); @@ -689,7 +689,7 @@ void __init early_init_devtree(void *params) { phys_addr_t limit; - DBG(" -> early_init_devtree(%p)\n", params); + DBG(" -> early_init_devtree(%px)\n", params); /* Too early to BUG_ON(), do it by hand */ if (!early_init_dt_verify(params)) @@ -749,7 +749,7 @@ void __init early_init_devtree(void *params) memblock_allow_resize(); memblock_dump_all(); - DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); + DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size()); /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 714c3480c52d..cdd5d1d3ae41 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -3263,32 +3263,40 @@ static inline int do_seccomp(struct pt_regs *regs) { return 0; } */ long do_syscall_trace_enter(struct pt_regs *regs) { + u32 flags; + user_exit(); - if (test_thread_flag(TIF_SYSCALL_EMU)) { - /* - * A nonzero return code from tracehook_report_syscall_entry() - * tells us to prevent the syscall execution, but we are not - * going to execute it anyway. - * - * Returning -1 will skip the syscall execution. We want to - * avoid clobbering any register also, thus, not 'gotoing' - * skip label. - */ - if (tracehook_report_syscall_entry(regs)) - ; - return -1; - } + flags = READ_ONCE(current_thread_info()->flags) & + (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE); - /* - * The tracer may decide to abort the syscall, if so tracehook - * will return !0. Note that the tracer may also just change - * regs->gpr[0] to an invalid syscall number, that is handled - * below on the exit path. - */ - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) - goto skip; + if (flags) { + int rc = tracehook_report_syscall_entry(regs); + + if (unlikely(flags & _TIF_SYSCALL_EMU)) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us to prevent + * the syscall execution, but we are not going to + * execute it anyway. + * + * Returning -1 will skip the syscall execution. We want + * to avoid clobbering any registers, so we don't goto + * the skip label below. + */ + return -1; + } + + if (rc) { + /* + * The tracer decided to abort the syscall. Note that + * the tracer may also just change regs->gpr[0] to an + * invalid syscall number, that is handled below on the + * exit path. + */ + goto skip; + } + } /* Run seccomp after ptrace; allow it to set gpr[3]. */ if (do_seccomp(regs)) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index f6f469fc4073..9b8631533e02 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -4,6 +4,7 @@ // // Copyright 2018, Michael Ellerman, IBM Corporation. 
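The security.c hunk that follows adds the Spectre v2 mitigation for FSL Book3E in the kernel's usual two-phase boot pattern: an early_param() hook (handle_nospectre_v2()) that merely records the command-line request while the system is barely up, and a setup_spectre_v2() call made later from setup_arch() once it is safe to act. A minimal sketch of that shape, assuming hypothetical names (example_opt, handle_example, setup_example) in place of the real ones:

#include <linux/init.h>
#include <linux/printk.h>

static bool example_disabled;

/* Phase 1: runs while the command line is parsed, long before
 * initcalls, so it may only record the request. Returning 0 tells
 * the parser the option was consumed. */
static int __init handle_example(char *p)
{
	example_disabled = true;
	return 0;
}
early_param("example_opt", handle_example);

/* Phase 2: invoked from setup_arch() once acting is safe, e.g. when
 * code patching is available; only now is the recorded flag applied. */
void __init setup_example(void)
{
	if (example_disabled)
		pr_info("example_opt: mitigation disabled on request\n");
	/* else: leave the mitigation enabled, as setup_spectre_v2()
	 * does via btb_flush_enabled */
}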
+#include <linux/cpu.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/seq_buf.h> @@ -22,10 +23,14 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_SW = 0x2, COUNT_CACHE_FLUSH_HW = 0x4, }; -static enum count_cache_flush_type count_cache_flush_type; +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; bool barrier_nospec_enabled; static bool no_nospec; +static bool btb_flush_enabled; +#ifdef CONFIG_PPC_FSL_BOOK3E +static bool no_spectrev2; +#endif static void enable_barrier_nospec(bool enable) { @@ -101,6 +106,23 @@ static __init int barrier_nospec_debugfs_init(void) device_initcall(barrier_nospec_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +#ifdef CONFIG_PPC_FSL_BOOK3E +static int __init handle_nospectre_v2(char *p) +{ + no_spectrev2 = true; + + return 0; +} +early_param("nospectre_v2", handle_nospectre_v2); +void setup_spectre_v2(void) +{ + if (no_spectrev2) + do_btb_flush_fixups(); + else + btb_flush_enabled = true; +} +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { @@ -191,8 +213,11 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) seq_buf_printf(&s, "(hardware accelerated)"); - } else + } else if (btb_flush_enabled) { + seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); + } else { seq_buf_printf(&s, "Vulnerable"); + } seq_buf_printf(&s, "\n"); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 93ee3703b42f..ca00fbb97cf8 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -687,7 +687,7 @@ int check_legacy_ioport(unsigned long base_port) return ret; parent = of_get_parent(np); if (parent) { - if (strcmp(parent->type, "isa") == 0) + if (of_node_is_type(parent, "isa")) ret = 0; of_node_put(parent); } @@ -800,7 +800,7 @@ static __init void print_system_info(void) #ifdef CONFIG_PPC_BOOK3S_64 pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); #endif -#ifdef CONFIG_PPC_STD_MMU_32 +#ifdef CONFIG_PPC_BOOK3S_32 pr_info("Hash_size = 0x%lx\n", Hash_size); #endif pr_info("phys_mem_size = 0x%llx\n", @@ -830,7 +830,7 @@ static __init void print_system_info(void) if (htab_hash_mask) pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); #endif -#ifdef CONFIG_PPC_STD_MMU_32 +#ifdef CONFIG_PPC_BOOK3S_32 if (Hash) pr_info("Hash = 0x%p\n", Hash); if (Hash_mask) @@ -974,6 +974,7 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); setup_barrier_nospec(); + setup_spectre_v2(); paging_init(); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 81909600013a..947f904688b0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -59,7 +59,6 @@ unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; -EXPORT_SYMBOL(ISA_DMA_THRESHOLD); EXPORT_SYMBOL(DMA_MODE_READ); EXPORT_SYMBOL(DMA_MODE_WRITE); @@ -101,8 +100,7 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) */ notrace void __init machine_init(u64 dt_ptr) { - unsigned int *addr = (unsigned int *)((unsigned long)&patch__memset_nocache + - patch__memset_nocache); + unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache); unsigned long insn; /* Configure static keys first, now that we're relocated. 
*/ @@ -240,7 +238,7 @@ void __init exc_lvl_early_init(void) void __init setup_power_save(void) { -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 if (cpu_has_feature(CPU_FTR_CAN_DOZE) || cpu_has_feature(CPU_FTR_CAN_NAP)) ppc_md.power_save = ppc6xx_idle; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index e6474a45cef5..2d47cc79e5b3 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -470,9 +470,9 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, return 1; if (sigret) { - /* Set up the sigreturn trampoline: li r0,sigret; sc */ - if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) - || __put_user(0x44000002UL, &frame->tramp[1])) + /* Set up the sigreturn trampoline: li 0,sigret; sc */ + if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0]) + || __put_user(PPC_INST_SC, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); @@ -619,9 +619,9 @@ static int save_tm_user_regs(struct pt_regs *regs, if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; if (sigret) { - /* Set up the sigreturn trampoline: li r0,sigret; sc */ - if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) - || __put_user(0x44000002UL, &frame->tramp[1])) + /* Set up the sigreturn trampoline: li 0,sigret; sc */ + if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0]) + || __put_user(PPC_INST_SC, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); @@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs, /* If TM bits are set to the reserved value, it's an invalid context */ if (MSR_TM_RESV(msr_hi)) return 1; - /* Pull in the MSR TM bits from the user context */ + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. + * + * Pull in the MSR TM bits from the user context + */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. After recheckpointing, the @@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, } #endif + preempt_enable(); + return 0; } #endif @@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn) { struct rt_sigframe __user *rt_sf; struct pt_regs *regs = current_pt_regs(); + int tm_restore = 0; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct ucontext __user *uc_transact; unsigned long msr_hi; unsigned long tmp; - int tm_restore = 0; #endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -1192,11 +1210,19 @@ SYSCALL_DEFINE0(rt_sigreturn) goto bad; } } - if (!tm_restore) - /* Fall through, for non-TM restore */ + if (!tm_restore) { + /* + * Unset regs->msr because ucontext MSR TS is not + * set, and recheckpoint was not called. 
This avoid + * hitting a TM Bad thing at RFID + */ + regs->msr &= ~MSR_TS_MASK; + } + /* Fall through, for non-TM restore */ #endif - if (do_setcontext(&rt_sf->uc, regs, 1)) - goto bad; + if (!tm_restore) + if (do_setcontext(&rt_sf->uc, regs, 1)) + goto bad; /* * It's not clear whether or why it is desirable to save the diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 83d51bf586c7..0935fe6c282a 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, if (MSR_TM_RESV(msr)) return -EINVAL; - /* pull in MSR TS bits from user context */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); - - /* - * Ensure that TM is enabled in regs->msr before we leave the signal - * handler. It could be the case that (a) user disabled the TM bit - * through the manipulation of the MSR bits in uc_mcontext or (b) the - * TM bit was disabled because a sufficient number of context switches - * happened whilst in the signal handler and load_tm overflowed, - * disabling the TM bit. In either case we can end up with an illegal - * TM state leading to a TM Bad Thing when we return to userspace. - */ - regs->msr |= MSR_TM; - /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); @@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, tm_enable(); /* Make sure the transaction is marked as failed */ tsk->thread.tm_texasr |= TEXASR_FS; + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* pull in MSR TS bits from user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); + + /* + * Ensure that TM is enabled in regs->msr before we leave the signal + * handler. It could be the case that (a) user disabled the TM bit + * through the manipulation of the MSR bits in uc_mcontext or (b) the + * TM bit was disabled because a sufficient number of context switches + * happened whilst in the signal handler and load_tm overflowed, + * disabling the TM bit. In either case we can end up with an illegal + * TM state leading to a TM Bad Thing when we return to userspace. + * + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. 
+ */ + regs->msr |= MSR_TM; + /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&tsk->thread); @@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, regs->msr |= MSR_VEC; } + preempt_enable(); + return err; } #endif @@ -598,11 +614,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) long err = 0; /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */ - err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); + err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) | + (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); /* li r0, __NR_[rt_]sigreturn| */ - err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]); + err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[1]); /* sc */ - err |= __put_user(0x44000002UL, &tramp[2]); + err |= __put_user(PPC_INST_SC, &tramp[2]); /* Minimal traceback info */ for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++) @@ -740,11 +757,23 @@ SYSCALL_DEFINE0(rt_sigreturn) &uc_transact->uc_mcontext)) goto badframe; } - else - /* Fall through, for non-TM restore */ #endif - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) - goto badframe; + /* Fall through, for non-TM restore */ + if (!MSR_TM_ACTIVE(msr)) { + /* + * Unset MSR[TS] on the thread regs since MSR from user + * context does not have MSR active, and recheckpoint was + * not called since restore_tm_sigcontexts() was not called + * also. + * + * If not unsetting it, the code can RFID to userspace with + * MSR[TS] set, but without CPU in the proper state, + * causing a TM bad thing. + */ + current->thread.regs->msr &= ~MSR_TS_MASK; + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) + goto badframe; + } if (restore_altstack(&uc->uc_stack)) goto badframe; diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile new file mode 100644 index 000000000000..27b48954808d --- /dev/null +++ b/arch/powerpc/kernel/syscalls/Makefile @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +syshdr_abis_unistd_32 := common,nospu,32 +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +syshdr_abis_unistd_64 := common,nospu,64 +$(uapi)/unistd_64.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +systbl_abis_syscall_table_32 := common,nospu,32 +systbl_abi_syscall_table_32 := 32 +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_64 := common,nospu,64 +systbl_abi_syscall_table_64 := 64 +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_c32 := common,nospu,32 +systbl_abi_syscall_table_c32 := c32 +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_spu := common,spu 
+systbl_abi_syscall_table_spu := spu +$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h unistd_64.h +kapisyshdr-y += syscall_table_32.h \ + syscall_table_64.h \ + syscall_table_c32.h \ + syscall_table_spu.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..db3bbb8744af --- /dev/null +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -0,0 +1,427 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for powerpc +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> can be common, spu, nospu, 64, or 32 for this file. +# +0 nospu restart_syscall sys_restart_syscall +1 nospu exit sys_exit +2 nospu fork ppc_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open compat_sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 nospu execve sys_execve compat_sys_execve +12 common chdir sys_chdir +13 common time sys_time compat_sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown +17 common break sys_ni_syscall +18 32 oldstat sys_stat sys_ni_syscall +18 64 oldstat sys_ni_syscall +18 spu oldstat sys_ni_syscall +19 common lseek sys_lseek compat_sys_lseek +20 common getpid sys_getpid +21 nospu mount sys_mount compat_sys_mount +22 32 umount sys_oldumount +22 64 umount sys_ni_syscall +22 spu umount sys_ni_syscall +23 common setuid sys_setuid +24 common getuid sys_getuid +25 common stime sys_stime compat_sys_stime +26 nospu ptrace sys_ptrace compat_sys_ptrace +27 common alarm sys_alarm +28 32 oldfstat sys_fstat sys_ni_syscall +28 64 oldfstat sys_ni_syscall +28 spu oldfstat sys_ni_syscall +29 nospu pause sys_pause +30 nospu utime sys_utime compat_sys_utime +31 common stty sys_ni_syscall +32 common gtty sys_ni_syscall +33 common access sys_access +34 common nice sys_nice +35 common ftime sys_ni_syscall +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_pipe +43 common times sys_times compat_sys_times +44 common prof sys_ni_syscall +45 common brk sys_brk +46 common setgid sys_setgid +47 common getgid sys_getgid +48 nospu signal sys_signal +49 common geteuid sys_geteuid +50 common getegid sys_getegid +51 nospu acct sys_acct +52 nospu umount2 sys_umount +53 common lock sys_ni_syscall +54 common ioctl sys_ioctl compat_sys_ioctl +55 common fcntl sys_fcntl compat_sys_fcntl +56 common mpx sys_ni_syscall +57 common setpgid sys_setpgid +58 common ulimit sys_ni_syscall +59 32 oldolduname sys_olduname +59 64 oldolduname sys_ni_syscall +59 spu oldolduname sys_ni_syscall +60 common umask sys_umask +61 common chroot sys_chroot +62 nospu ustat sys_ustat compat_sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 32 sigaction sys_sigaction compat_sys_sigaction +67 64 sigaction sys_ni_syscall +67 spu sigaction sys_ni_syscall +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid +71 common setregid sys_setregid +72 32 sigsuspend sys_sigsuspend 
+72 64 sigsuspend sys_ni_syscall +72 spu sigsuspend sys_ni_syscall +73 32 sigpending sys_sigpending compat_sys_sigpending +73 64 sigpending sys_ni_syscall +73 spu sigpending sys_ni_syscall +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit compat_sys_setrlimit +76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit +76 64 getrlimit sys_ni_syscall +76 spu getrlimit sys_ni_syscall +77 common getrusage sys_getrusage compat_sys_getrusage +78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday +79 common settimeofday sys_settimeofday compat_sys_settimeofday +80 common getgroups sys_getgroups +81 common setgroups sys_setgroups +82 32 select ppc_select sys_ni_syscall +82 64 select sys_ni_syscall +82 spu select sys_ni_syscall +83 common symlink sys_symlink +84 32 oldlstat sys_lstat sys_ni_syscall +84 64 oldlstat sys_ni_syscall +84 spu oldlstat sys_ni_syscall +85 common readlink sys_readlink +86 nospu uselib sys_uselib +87 nospu swapon sys_swapon +88 nospu reboot sys_reboot +89 32 readdir sys_old_readdir compat_sys_old_readdir +89 64 readdir sys_ni_syscall +89 spu readdir sys_ni_syscall +90 common mmap sys_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate compat_sys_truncate +93 common ftruncate sys_ftruncate compat_sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +98 common profil sys_ni_syscall +99 nospu statfs sys_statfs compat_sys_statfs +100 nospu fstatfs sys_fstatfs compat_sys_fstatfs +101 common ioperm sys_ni_syscall +102 common socketcall sys_socketcall compat_sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer compat_sys_setitimer +105 common getitimer sys_getitimer compat_sys_getitimer +106 common stat sys_newstat compat_sys_newstat +107 common lstat sys_newlstat compat_sys_newlstat +108 common fstat sys_newfstat compat_sys_newfstat +109 32 olduname sys_uname +109 64 olduname sys_ni_syscall +109 spu olduname sys_ni_syscall +110 common iopl sys_ni_syscall +111 common vhangup sys_vhangup +112 common idle sys_ni_syscall +113 common vm86 sys_ni_syscall +114 common wait4 sys_wait4 compat_sys_wait4 +115 nospu swapoff sys_swapoff +116 common sysinfo sys_sysinfo compat_sys_sysinfo +117 nospu ipc sys_ipc compat_sys_ipc +118 common fsync sys_fsync +119 32 sigreturn sys_sigreturn compat_sys_sigreturn +119 64 sigreturn sys_ni_syscall +119 spu sigreturn sys_ni_syscall +120 nospu clone ppc_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common modify_ldt sys_ni_syscall +124 common adjtimex sys_adjtimex compat_sys_adjtimex +125 common mprotect sys_mprotect +126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask +126 64 sigprocmask sys_ni_syscall +126 spu sigprocmask sys_ni_syscall +127 common create_module sys_ni_syscall +128 nospu init_module sys_init_module +129 nospu delete_module sys_delete_module +130 common get_kernel_syms sys_ni_syscall +131 nospu quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 32 personality sys_personality ppc64_personality +136 64 personality ppc64_personality +136 spu personality ppc64_personality +137 common afs_syscall sys_ni_syscall +138 common setfsuid sys_setfsuid +139 common setfsgid sys_setfsgid +140 common _llseek sys_llseek +141 common getdents sys_getdents compat_sys_getdents +142 common _newselect sys_select compat_sys_select +143 common flock 
sys_flock +144 common msync sys_msync +145 common readv sys_readv compat_sys_readv +146 common writev sys_writev compat_sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 nospu _sysctl sys_sysctl compat_sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep compat_sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid +165 common getresuid sys_getresuid +166 common query_module sys_ni_syscall +167 common poll sys_poll +168 common nfsservctl sys_ni_syscall +169 common setresgid sys_setresgid +170 common getresgid sys_getresgid +171 common prctl sys_prctl +172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +179 common pread64 sys_pread64 compat_sys_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +181 common chown sys_chown +182 common getcwd sys_getcwd +183 common capget sys_capget +184 common capset sys_capset +185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack +186 32 sendfile sys_sendfile compat_sys_sendfile +186 64 sendfile sys_sendfile64 +186 spu sendfile sys_sendfile64 +187 common getpmsg sys_ni_syscall +188 common putpmsg sys_ni_syscall +189 nospu vfork ppc_vfork +190 common ugetrlimit sys_getrlimit compat_sys_getrlimit +191 common readahead sys_readahead compat_sys_readahead +192 32 mmap2 sys_mmap2 compat_sys_mmap2 +193 32 truncate64 sys_truncate64 compat_sys_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +195 32 stat64 sys_stat64 +196 32 lstat64 sys_lstat64 +197 32 fstat64 sys_fstat64 +198 nospu pciconfig_read sys_pciconfig_read +199 nospu pciconfig_write sys_pciconfig_write +200 nospu pciconfig_iobase sys_pciconfig_iobase +201 common multiplexer sys_ni_syscall +202 common getdents64 sys_getdents64 +203 common pivot_root sys_pivot_root +204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +205 common madvise sys_madvise +206 common mincore sys_mincore +207 common gettid sys_gettid +208 common tkill sys_tkill +209 common setxattr sys_setxattr +210 common lsetxattr sys_lsetxattr +211 common fsetxattr sys_fsetxattr +212 common getxattr sys_getxattr +213 common lgetxattr sys_lgetxattr +214 common fgetxattr sys_fgetxattr +215 common listxattr sys_listxattr +216 common llistxattr sys_llistxattr +217 common flistxattr sys_flistxattr +218 common removexattr sys_removexattr +219 common lremovexattr sys_lremovexattr +220 common fremovexattr sys_fremovexattr +221 common futex sys_futex compat_sys_futex +222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +223 common 
sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +# 224 unused +225 common tuxcall sys_ni_syscall +226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64 +227 common io_setup sys_io_setup compat_sys_io_setup +228 common io_destroy sys_io_destroy +229 common io_getevents sys_io_getevents compat_sys_io_getevents +230 common io_submit sys_io_submit compat_sys_io_submit +231 common io_cancel sys_io_cancel +232 nospu set_tid_address sys_set_tid_address +233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +234 nospu exit_group sys_exit_group +235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +236 common epoll_create sys_epoll_create +237 common epoll_ctl sys_epoll_ctl +238 common epoll_wait sys_epoll_wait +239 common remap_file_pages sys_remap_file_pages +240 common timer_create sys_timer_create compat_sys_timer_create +241 common timer_settime sys_timer_settime compat_sys_timer_settime +242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +243 common timer_getoverrun sys_timer_getoverrun +244 common timer_delete sys_timer_delete +245 common clock_settime sys_clock_settime compat_sys_clock_settime +246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime +247 common clock_getres sys_clock_getres compat_sys_clock_getres +248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +249 32 swapcontext ppc_swapcontext ppc32_swapcontext +249 64 swapcontext ppc64_swapcontext +249 spu swapcontext sys_ni_syscall +250 common tgkill sys_tgkill +251 common utimes sys_utimes compat_sys_utimes +252 common statfs64 sys_statfs64 compat_sys_statfs64 +253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +254 32 fadvise64_64 ppc_fadvise64_64 +254 spu fadvise64_64 sys_ni_syscall +255 common rtas sys_rtas +256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall +256 64 sys_debug_setcontext sys_ni_syscall +256 spu sys_debug_setcontext sys_ni_syscall +# 257 reserved for vserver +258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages +259 nospu mbind sys_mbind compat_sys_mbind +260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +262 nospu mq_open sys_mq_open compat_sys_mq_open +263 nospu mq_unlink sys_mq_unlink +264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend +265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +266 nospu mq_notify sys_mq_notify compat_sys_mq_notify +267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +268 nospu kexec_load sys_kexec_load compat_sys_kexec_load +269 nospu add_key sys_add_key +270 nospu request_key sys_request_key +271 nospu keyctl sys_keyctl compat_sys_keyctl +272 nospu waitid sys_waitid compat_sys_waitid +273 nospu ioprio_set sys_ioprio_set +274 nospu ioprio_get sys_ioprio_get +275 nospu inotify_init sys_inotify_init +276 nospu inotify_add_watch sys_inotify_add_watch +277 nospu inotify_rm_watch sys_inotify_rm_watch +278 nospu spu_run sys_spu_run +279 nospu spu_create sys_spu_create +280 nospu pselect6 sys_pselect6 compat_sys_pselect6 +281 nospu ppoll sys_ppoll compat_sys_ppoll +282 common unshare sys_unshare +283 common splice sys_splice +284 common tee sys_tee +285 common vmsplice sys_vmsplice compat_sys_vmsplice +286 common openat sys_openat compat_sys_openat +287 common mkdirat sys_mkdirat +288 common mknodat sys_mknodat +289 common fchownat sys_fchownat +290 common futimesat sys_futimesat compat_sys_futimesat +291 32 fstatat64 sys_fstatat64 +291 64 
newfstatat sys_newfstatat +291 spu newfstatat sys_newfstatat +292 common unlinkat sys_unlinkat +293 common renameat sys_renameat +294 common linkat sys_linkat +295 common symlinkat sys_symlinkat +296 common readlinkat sys_readlinkat +297 common fchmodat sys_fchmodat +298 common faccessat sys_faccessat +299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +301 common move_pages sys_move_pages compat_sys_move_pages +302 common getcpu sys_getcpu +303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +304 common utimensat sys_utimensat compat_sys_utimensat +305 common signalfd sys_signalfd compat_sys_signalfd +306 common timerfd_create sys_timerfd_create +307 common eventfd sys_eventfd +308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +309 nospu fallocate sys_fallocate compat_sys_fallocate +310 nospu subpage_prot sys_subpage_prot +311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +313 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +314 common eventfd2 sys_eventfd2 +315 common epoll_create1 sys_epoll_create1 +316 common dup3 sys_dup3 +317 common pipe2 sys_pipe2 +318 nospu inotify_init1 sys_inotify_init1 +319 common perf_event_open sys_perf_event_open +320 common preadv sys_preadv compat_sys_preadv +321 common pwritev sys_pwritev compat_sys_pwritev +322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +323 nospu fanotify_init sys_fanotify_init +324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +325 common prlimit64 sys_prlimit64 +326 common socket sys_socket +327 common bind sys_bind +328 common connect sys_connect +329 common listen sys_listen +330 common accept sys_accept +331 common getsockname sys_getsockname +332 common getpeername sys_getpeername +333 common socketpair sys_socketpair +334 common send sys_send +335 common sendto sys_sendto +336 common recv sys_recv compat_sys_recv +337 common recvfrom sys_recvfrom compat_sys_recvfrom +338 common shutdown sys_shutdown +339 common setsockopt sys_setsockopt compat_sys_setsockopt +340 common getsockopt sys_getsockopt compat_sys_getsockopt +341 common sendmsg sys_sendmsg compat_sys_sendmsg +342 common recvmsg sys_recvmsg compat_sys_recvmsg +343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +344 common accept4 sys_accept4 +345 common name_to_handle_at sys_name_to_handle_at +346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +348 common syncfs sys_syncfs +349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +350 common setns sys_setns +351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv +352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +353 nospu finit_module sys_finit_module +354 nospu kcmp sys_kcmp +355 common sched_setattr sys_sched_setattr +356 common sched_getattr sys_sched_getattr +357 common renameat2 sys_renameat2 +358 common seccomp sys_seccomp +359 common getrandom sys_getrandom +360 common memfd_create sys_memfd_create +361 common bpf sys_bpf +362 nospu execveat sys_execveat compat_sys_execveat +363 32 switch_endian sys_ni_syscall +363 64 switch_endian ppc_switch_endian +363 spu switch_endian sys_ni_syscall +364 common userfaultfd sys_userfaultfd +365 common membarrier sys_membarrier +378 nospu mlock2 sys_mlock2 
+379 nospu copy_file_range sys_copy_file_range +380 common preadv2 sys_preadv2 compat_sys_preadv2 +381 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +382 nospu kexec_file_load sys_kexec_file_load +383 nospu statx sys_statx +384 nospu pkey_alloc sys_pkey_alloc +385 nospu pkey_free sys_pkey_free +386 nospu pkey_mprotect sys_pkey_mprotect +387 nospu rseq sys_rseq +388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..c0a9a32937f1 --- /dev/null +++ b/arch/powerpc/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,37 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_POWERPC_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" + printf "\n" +) > "$out" diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..fd620490a542 --- /dev/null +++ b/arch/powerpc/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s,sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s,%s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry compat ; do + if [ "$my_abi" = "c32" ] && [ ! 
-z "$compat" ]; then + emit $((nxt+offset)) $((nr+offset)) $compat + else + emit $((nxt+offset)) $((nr+offset)) $entry + fi + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 755dc98a57ae..e8e93c2c7d03 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -457,7 +457,7 @@ static ssize_t __used \ #define HAS_PPC_PMC_CLASSIC 1 #define HAS_PPC_PMC_IBM 1 #define HAS_PPC_PMC_PA6T 1 -#elif defined(CONFIG_6xx) +#elif defined(CONFIG_PPC_BOOK3S_32) #define HAS_PPC_PMC_CLASSIC 1 #define HAS_PPC_PMC_IBM 1 #define HAS_PPC_PMC_G4 1 diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 919a32746ede..23265a28740b 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -16,28 +16,6 @@ #include <asm/ppc_asm.h> -#ifdef CONFIG_PPC64 -#define SYSCALL(func) .8byte DOTSYM(sys_##func),DOTSYM(sys_##func) -#define COMPAT_SYS(func) .8byte DOTSYM(sys_##func),DOTSYM(compat_sys_##func) -#define PPC_SYS(func) .8byte DOTSYM(ppc_##func),DOTSYM(ppc_##func) -#define OLDSYS(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) -#define SYS32ONLY(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) -#define PPC64ONLY(func) .8byte DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall) -#define SYSX(f, f3264, f32) .8byte DOTSYM(f),DOTSYM(f3264) -#else -#define SYSCALL(func) .long sys_##func -#define COMPAT_SYS(func) .long sys_##func -#define PPC_SYS(func) .long ppc_##func -#define OLDSYS(func) .long sys_##func -#define SYS32ONLY(func) .long sys_##func -#define PPC64ONLY(func) .long sys_ni_syscall -#define SYSX(f, f3264, f32) .long f32 -#endif -#define SYSCALL_SPU(func) SYSCALL(func) -#define COMPAT_SYS_SPU(func) COMPAT_SYS(func) -#define COMPAT_SPU_NEW(func) COMPAT_SYS(func) -#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) - .section .rodata,"a" #ifdef CONFIG_PPC64 @@ -46,5 +24,21 @@ .globl sys_call_table sys_call_table: +#ifdef CONFIG_PPC64 +#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry) +#include <asm/syscall_table_64.h> +#undef __SYSCALL +#else +#define __SYSCALL(nr, entry, nargs) .long entry +#include <asm/syscall_table_32.h> +#undef __SYSCALL +#endif -#include <asm/systbl.h> +#ifdef CONFIG_COMPAT +.globl compat_sys_call_table +compat_sys_call_table: +#define compat_sys_sigsuspend sys_sigsuspend +#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry) +#include <asm/syscall_table_c32.h> +#undef __SYSCALL +#endif diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c deleted file mode 100644 index 4653258722ac..000000000000 --- a/arch/powerpc/kernel/systbl_chk.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * This file, when run through CPP produces a list of syscall numbers - * in the order of systbl.h. That way we can check for gaps and syscalls - * that are out of order. - * - * Unfortunately, we cannot check for the correct ordering of entries - * using SYSX(). - * - * Copyright © IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include <asm/unistd.h> - -#define SYSCALL(func) __NR_##func -#define COMPAT_SYS(func) __NR_##func -#define PPC_SYS(func) __NR_##func -#ifdef CONFIG_PPC64 -#define OLDSYS(func) -1 -#define SYS32ONLY(func) -1 -#define PPC64ONLY(func) __NR_##func -#else -#define OLDSYS(func) __NR_old##func -#define SYS32ONLY(func) __NR_##func -#define PPC64ONLY(func) -1 -#endif -#define SYSX(f, f3264, f32) -1 - -#define SYSCALL_SPU(func) SYSCALL(func) -#define COMPAT_SYS_SPU(func) COMPAT_SYS(func) -#define COMPAT_SPU_NEW(func) COMPAT_SYS(_new##func) -#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) - -/* Just insert a marker for ni_syscalls */ -#define __NR_ni_syscall -1 - -/* - * These are the known exceptions. - * Hopefully, there will be no more. - */ -#define __NR_llseek __NR__llseek -#undef __NR_umount -#define __NR_umount __NR_umount2 -#define __NR_old_getrlimit __NR_getrlimit -#define __NR_newstat __NR_stat -#define __NR_newlstat __NR_lstat -#define __NR_newfstat __NR_fstat -#define __NR_newuname __NR_uname -#define __NR_sysctl __NR__sysctl -#define __NR_olddebug_setcontext __NR_sys_debug_setcontext - -/* We call sys_ugetrlimit for syscall number __NR_getrlimit */ -#define getrlimit ugetrlimit - -START_TABLE -#include <asm/systbl.h> -END_TABLE NR_syscalls diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index b65c8a34ad6e..29746dc28df5 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -107,7 +107,7 @@ static int is_b_op(unsigned int op) static unsigned long find_bl_target(unsigned long ip, unsigned int op) { - static int offset; + int offset; offset = (op & 0x03fffffc); /* make it signed */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 9a86572db1ef..00af2c4febf4 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1434,7 +1434,8 @@ void program_check_exception(struct pt_regs *regs) goto bail; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " - "at %lx (msr 0x%lx)\n", regs->nip, regs->msr); + "at %lx (msr 0x%lx) tm_scratch=%llx\n", + regs->nip, regs->msr, get_paca()->tm_scratch); die("Unrecoverable exception", regs, SIGABRT); } } diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65b3bdb99f0b..7725a9714736 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -671,15 +671,18 @@ static void __init vdso_setup_syscall_map(void) { unsigned int i; extern unsigned long *sys_call_table; +#ifdef CONFIG_PPC64 + extern unsigned long *compat_sys_call_table; +#endif extern unsigned long sys_ni_syscall; for (i = 0; i < NR_syscalls; i++) { #ifdef CONFIG_PPC64 - if (sys_call_table[i*2] != sys_ni_syscall) + if (sys_call_table[i] != sys_ni_syscall) vdso_data->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f); - if (sys_call_table[i*2+1] != sys_ni_syscall) + if (compat_sys_call_table[i] != sys_ni_syscall) vdso_data->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); #else /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 434581bcd5b4..ad1c77f71f54 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -170,6 +170,14 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ +#ifdef CONFIG_PPC_FSL_BOOK3E + . 
= ALIGN(8); + __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { + __start__btb_flush_fixup = .; + *(__btb_flush_fixup) + __stop__btb_flush_fixup = .; + } +#endif EXCEPTION_TABLE(0) NOTES :kernel :notes @@ -206,12 +214,6 @@ SECTIONS .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA - __vtop_table_begin = .; - KEEP(*(.vtop_fixup)); - __vtop_table_end = .; - __ptov_table_begin = .; - KEEP(*(.ptov_fixup)); - __ptov_table_end = .; } .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { @@ -308,6 +310,10 @@ SECTIONS #ifdef CONFIG_PPC32 .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA +#ifdef CONFIG_UBSAN + *(.data..Lubsan_data*) + *(.data..Lubsan_type*) +#endif *(.data.rel*) *(SDATA_MAIN) *(.sdata2) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index fd9893bc7aa1..bd1a677dd9e4 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -830,9 +830,10 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new) + const struct kvm_memory_slot *new, + enum kvm_mr_change change) { - kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new); + kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) @@ -850,9 +851,10 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) return kvm->arch.kvm_ops->test_age_hva(kvm, hva); } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); + return 0; } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index c615617e78ac..6f2d2fb4e098 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm) srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { + /* Mutual exclusion with kvm_unmap_hva_range etc. */ + spin_lock(&kvm->mmu_lock); /* * This assumes it is acceptable to lose reference and * change bits across a reset. 
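/*
 * Sketch (not from this patch, names simplified): the
 * spin_lock(&kvm->mmu_lock) added around the rmap wipe here pairs with
 * the MMU notifier side, which - per the "Called with kvm->mmu_lock
 * held" comments later in this series - touches the same rmap arrays
 * under the same lock. Schematically:
 */
static void rmap_walk_sketch(struct kvm *kvm, struct kvm_memory_slot *slot,
			     unsigned long gfn)
{
	spin_lock(&kvm->mmu_lock);	/* excludes kvmppc_rmap_reset() */
	/* ... read or modify slot->arch.rmap[gfn - slot->base_gfn] ... */
	spin_unlock(&kvm->mmu_lock);
}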
*/ memset(memslot->arch.rmap, 0, memslot->npages * sizeof(*memslot->arch.rmap)); + spin_unlock(&kvm->mmu_lock); } srcu_read_unlock(&kvm->srcu, srcu_idx); } @@ -896,11 +899,12 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm, gfn = memslot->base_gfn; rmapp = memslot->arch.rmap; + if (kvm_is_radix(kvm)) { + kvmppc_radix_flush_memslot(kvm, memslot); + return; + } + for (n = memslot->npages; n; --n, ++gfn) { - if (kvm_is_radix(kvm)) { - kvm_unmap_radix(kvm, memslot, gfn); - continue; - } /* * Testing the present bit without locking is OK because * the memslot has been marked invalid already, and hence diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index d68162ee159b..fb88167a402a 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -29,6 +29,103 @@ */ static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 }; +unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, + gva_t eaddr, void *to, void *from, + unsigned long n) +{ + unsigned long quadrant, ret = n; + int old_pid, old_lpid; + bool is_load = !!to; + + /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ + if (kvmhv_on_pseries()) + return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, + __pa(to), __pa(from), n); + + quadrant = 1; + if (!pid) + quadrant = 2; + if (is_load) + from = (void *) (eaddr | (quadrant << 62)); + else + to = (void *) (eaddr | (quadrant << 62)); + + preempt_disable(); + + /* switch the lpid first to avoid running host with unallocated pid */ + old_lpid = mfspr(SPRN_LPID); + if (old_lpid != lpid) + mtspr(SPRN_LPID, lpid); + if (quadrant == 1) { + old_pid = mfspr(SPRN_PID); + if (old_pid != pid) + mtspr(SPRN_PID, pid); + } + isync(); + + pagefault_disable(); + if (is_load) + ret = raw_copy_from_user(to, from, n); + else + ret = raw_copy_to_user(to, from, n); + pagefault_enable(); + + /* switch the pid first to avoid running host with unallocated pid */ + if (quadrant == 1 && pid != old_pid) + mtspr(SPRN_PID, old_pid); + if (lpid != old_lpid) + mtspr(SPRN_LPID, old_lpid); + isync(); + + preempt_enable(); + + return ret; +} +EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix); + +static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, + void *to, void *from, unsigned long n) +{ + int lpid = vcpu->kvm->arch.lpid; + int pid = vcpu->arch.pid; + + /* This would cause a data segment intr so don't allow the access */ + if (eaddr & (0x3FFUL << 52)) + return -EINVAL; + + /* Should we be using the nested lpid */ + if (vcpu->arch.nested) + lpid = vcpu->arch.nested->shadow_lpid; + + /* If accessing quadrant 3 then pid is expected to be 0 */ + if (((eaddr >> 62) & 0x3) == 0x3) + pid = 0; + + eaddr &= ~(0xFFFUL << 52); + + return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n); +} + +long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to, + unsigned long n) +{ + long ret; + + ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n); + if (ret > 0) + memset(to + (n - ret), 0, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix); + +long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from, + unsigned long n) +{ + return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n); +} +EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix); + int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p) @@ -197,8 +294,8 @@ int kvmppc_mmu_radix_xlate(struct 
kvm_vcpu *vcpu, gva_t eaddr, return 0; } -static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, - unsigned int pshift, unsigned int lpid) +void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, + unsigned int pshift, unsigned int lpid) { unsigned long psize = PAGE_SIZE; int psi; @@ -284,7 +381,8 @@ static void kvmppc_pmd_free(pmd_t *pmdp) /* Called with kvm->mmu_lock held */ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, - unsigned int shift, struct kvm_memory_slot *memslot, + unsigned int shift, + const struct kvm_memory_slot *memslot, unsigned int lpid) { @@ -683,6 +781,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, pte_t pte, *ptep; unsigned int shift, level; int ret; + bool large_enable; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; @@ -732,12 +831,15 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, pte = *ptep; local_irq_enable(); + /* If we're logging dirty pages, always map single pages */ + large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES); + /* Get pte level from shift/size */ - if (shift == PUD_SHIFT && + if (large_enable && shift == PUD_SHIFT && (gpa & (PUD_SIZE - PAGE_SIZE)) == (hva & (PUD_SIZE - PAGE_SIZE))) { level = 2; - } else if (shift == PMD_SHIFT && + } else if (large_enable && shift == PMD_SHIFT && (gpa & (PMD_SIZE - PAGE_SIZE)) == (hva & (PMD_SIZE - PAGE_SIZE))) { level = 1; @@ -857,7 +959,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, return ret; } -/* Called with kvm->lock held */ +/* Called with kvm->mmu_lock held */ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { @@ -872,7 +974,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, return 0; } -/* Called with kvm->lock held */ +/* Called with kvm->mmu_lock held */ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { @@ -880,18 +982,24 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; int ref = 0; + unsigned long old, *rmapp; ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { - kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, - gpa, shift); + old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, + gpa, shift); /* XXX need to flush tlb here? 
*/ + /* Also clear bit in ptes in shadow pgtable for nested guests */ + rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; + kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, + old & PTE_RPN_MASK, + 1UL << shift); ref = 1; } return ref; } -/* Called with kvm->lock held */ +/* Called with kvm->mmu_lock held */ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { @@ -915,15 +1023,23 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, pte_t *ptep; unsigned int shift; int ret = 0; + unsigned long old, *rmapp; ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { ret = 1; if (shift) ret = 1 << (shift - PAGE_SHIFT); - kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, - gpa, shift); + spin_lock(&kvm->mmu_lock); + old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, + gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); + /* Also clear bit in ptes in shadow pgtable for nested guests */ + rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; + kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0, + old & PTE_RPN_MASK, + 1UL << shift); + spin_unlock(&kvm->mmu_lock); } return ret; } @@ -953,6 +1069,26 @@ long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, return 0; } +void kvmppc_radix_flush_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + unsigned long n; + pte_t *ptep; + unsigned long gpa; + unsigned int shift; + + gpa = memslot->base_gfn << PAGE_SHIFT; + spin_lock(&kvm->mmu_lock); + for (n = memslot->npages; n; --n) { + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + if (ptep && pte_present(*ptep)) + kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, + kvm->arch.lpid); + gpa += PAGE_SIZE; + } + spin_unlock(&kvm->mmu_lock); +} + static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info, int psize, int *indexp) { diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 62a8d03ba7e9..532ab79734c7 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -397,12 +397,13 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } -static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry) +static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, + unsigned long entry) { unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; - iommu_tce_xchg(tbl, entry, &hpa, &dir); + iommu_tce_xchg(mm, tbl, entry, &hpa, &dir); } static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, @@ -433,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, unsigned long hpa = 0; long ret; - if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir))) + if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir))) return H_TOO_HARD; if (dir == DMA_NONE) @@ -441,7 +442,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret != H_SUCCESS) - iommu_tce_xchg(tbl, entry, &hpa, &dir); + iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir); return ret; } @@ -487,7 +488,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (mm_iommu_mapped_inc(mem)) return H_TOO_HARD; - ret = iommu_tce_xchg(tbl, entry, &hpa, &dir); + ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); return H_TOO_HARD; @@ -566,7 +567,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long 
liobn, entry, ua, dir); if (ret != H_SUCCESS) { - kvmppc_clear_tce(stit->tbl, entry); + kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); goto unlock_exit; } } @@ -655,7 +656,8 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, iommu_tce_direction(tce)); if (ret != H_SUCCESS) { - kvmppc_clear_tce(stit->tbl, entry); + kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, + entry); goto unlock_exit; } } @@ -704,7 +706,7 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, return ret; WARN_ON_ONCE(1); - kvmppc_clear_tce(stit->tbl, entry); + kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); } } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a56f8413758a..5a066fc299e1 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -985,6 +985,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) kvmppc_set_gpr(vcpu, 3, 0); vcpu->arch.hcall_needed = 0; return -EINTR; + } else if (ret == H_TOO_HARD) { + kvmppc_set_gpr(vcpu, 3, 0); + vcpu->arch.hcall_needed = 0; + return RESUME_HOST; } break; case H_TLB_INVALIDATE: @@ -992,7 +996,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (nesting_enabled(vcpu->kvm)) ret = kvmhv_do_nested_tlbie(vcpu); break; - + case H_COPY_TOFROM_GUEST: + ret = H_FUNCTION; + if (nesting_enabled(vcpu->kvm)) + ret = kvmhv_copy_tofrom_guest_nested(vcpu); + break; default: return RESUME_HOST; } @@ -1336,7 +1344,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } -static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) +static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; int srcu_idx; @@ -1394,7 +1402,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmhv_nested_page_fault(vcpu); + r = kvmhv_nested_page_fault(run, vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; case BOOK3S_INTERRUPT_H_INST_STORAGE: @@ -1404,7 +1412,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) vcpu->arch.fault_dsisr |= DSISR_ISSTORE; srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmhv_nested_page_fault(vcpu); + r = kvmhv_nested_page_fault(run, vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; @@ -4059,7 +4067,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, if (!nested) r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); else - r = kvmppc_handle_nested_exit(vcpu); + r = kvmppc_handle_nested_exit(kvm_run, vcpu); } vcpu->arch.ret = r; @@ -4371,7 +4379,8 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new) + const struct kvm_memory_slot *new, + enum kvm_mr_change change) { unsigned long npages = mem->memory_size >> PAGE_SHIFT; @@ -4383,6 +4392,23 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, */ if (npages) atomic64_inc(&kvm->arch.mmio_update); + + /* + * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels + * have already called kvm_arch_flush_shadow_memslot() to + * flush shadow mappings. For KVM_MR_CREATE we have no + * previous mappings. So the only case to handle is + * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit + * has been changed. 
+ * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES + * to get rid of any THP PTEs in the partition-scoped page tables + * so we can track dirtiness at the page level; we flush when + * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to + * using THP PTEs. + */ + if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && + ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) + kvmppc_radix_flush_memslot(kvm, old); } /* @@ -4532,12 +4558,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); + kvmppc_rmap_reset(kvm); + kvm->arch.process_table = 0; + /* Mutual exclusion with kvm_unmap_hva_range etc. */ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 0; + spin_unlock(&kvm->mmu_lock); kvmppc_free_radix(kvm); kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); - kvmppc_rmap_reset(kvm); - kvm->arch.radix = 0; - kvm->arch.process_table = 0; return 0; } @@ -4549,12 +4578,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) err = kvmppc_init_vm_radix(kvm); if (err) return err; - + kvmppc_rmap_reset(kvm); + /* Mutual exclusion with kvm_unmap_hva_range etc. */ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 1; + spin_unlock(&kvm->mmu_lock); kvmppc_free_hpt(&kvm->arch.hpt); kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); - kvmppc_rmap_reset(kvm); - kvm->arch.radix = 1; return 0; } @@ -5214,6 +5245,44 @@ static int kvmhv_enable_nested(struct kvm *kvm) return 0; } +static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, + int size) +{ + int rc = -EINVAL; + + if (kvmhv_vcpu_is_radix(vcpu)) { + rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size); + + if (rc > 0) + rc = -EINVAL; + } + + /* For now quadrants are the only way to access nested guest memory */ + if (rc && vcpu->arch.nested) + rc = -EAGAIN; + + return rc; +} + +static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, + int size) +{ + int rc = -EINVAL; + + if (kvmhv_vcpu_is_radix(vcpu)) { + rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size); + + if (rc > 0) + rc = -EINVAL; + } + + /* For now quadrants are the only way to access nested guest memory */ + if (rc && vcpu->arch.nested) + rc = -EAGAIN; + + return rc; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5254,6 +5323,8 @@ static struct kvmppc_ops kvm_ops_hv = { .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, .enable_nested = kvmhv_enable_nested, + .load_from_eaddr = kvmhv_load_from_eaddr, + .store_to_eaddr = kvmhv_store_to_eaddr, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 401d2ecbebc5..735e0ac6f5b2 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -195,6 +195,26 @@ void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, vcpu->arch.ppr = hr->ppr; } +static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr) +{ + /* No need to reflect the page fault to L1, we've handled it */ + vcpu->arch.trap = 0; + + /* + * Since the L2 gprs have already been written back into L1 memory when + * we complete the mmio, store the L1 memory location of the L2 gpr + * being loaded into by the mmio so that the loaded value can be + * written there in kvmppc_complete_mmio_load() + */ + if (((vcpu->arch.io_gpr & 
KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR) + && (vcpu->mmio_is_write == 0)) { + vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr + + offsetof(struct pt_regs, + gpr[vcpu->arch.io_gpr]); + vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR; + } +} + long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) { long int err, r; @@ -316,6 +336,11 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (r == -EINTR) return H_INTERRUPT; + if (vcpu->mmio_needed) { + kvmhv_nested_mmio_needed(vcpu, regs_ptr); + return H_TOO_HARD; + } + return vcpu->arch.trap; } @@ -437,6 +462,81 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) } /* + * Handle the H_COPY_TOFROM_GUEST hcall. + * r4 = L1 lpid of nested guest + * r5 = pid + * r6 = eaddr to access + * r7 = to buffer (L1 gpa) + * r8 = from buffer (L1 gpa) + * r9 = n bytes to copy + */ +long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) +{ + struct kvm_nested_guest *gp; + int l1_lpid = kvmppc_get_gpr(vcpu, 4); + int pid = kvmppc_get_gpr(vcpu, 5); + gva_t eaddr = kvmppc_get_gpr(vcpu, 6); + gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7); + gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8); + void *buf; + unsigned long n = kvmppc_get_gpr(vcpu, 9); + bool is_load = !!gp_to; + long rc; + + if (gp_to && gp_from) /* One must be NULL to determine the direction */ + return H_PARAMETER; + + if (eaddr & (0xFFFUL << 52)) + return H_PARAMETER; + + buf = kzalloc(n, GFP_KERNEL); + if (!buf) + return H_NO_MEM; + + gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false); + if (!gp) { + rc = H_PARAMETER; + goto out_free; + } + + mutex_lock(&gp->tlb_lock); + + if (is_load) { + /* Load from the nested guest into our buffer */ + rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, + eaddr, buf, NULL, n); + if (rc) + goto not_found; + + /* Write what was loaded into our buffer back to the L1 guest */ + rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n); + if (rc) + goto not_found; + } else { + /* Load the data to be stored from the L1 guest into our buf */ + rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n); + if (rc) + goto not_found; + + /* Store from our buffer into the nested guest */ + rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, + eaddr, NULL, buf, n); + if (rc) + goto not_found; + } + +out_unlock: + mutex_unlock(&gp->tlb_lock); + kvmhv_put_nested(gp); +out_free: + kfree(buf); + return rc; +not_found: + rc = H_NOT_FOUND; + goto out_unlock; +} + +/* * Reload the partition table entry for a guest. * Caller must hold gp->tlb_lock. */ @@ -480,6 +580,7 @@ struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) if (shadow_lpid < 0) goto out_free2; gp->shadow_lpid = shadow_lpid; + gp->radix = 1; memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); @@ -687,6 +788,57 @@ void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, *n_rmap = NULL; } +static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap, + unsigned long clr, unsigned long set, + unsigned long hpa, unsigned long mask) +{ + struct kvm_nested_guest *gp; + unsigned long gpa; + unsigned int shift, lpid; + pte_t *ptep; + + gpa = n_rmap & RMAP_NESTED_GPA_MASK; + lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; + gp = kvmhv_find_nested(kvm, lpid); + if (!gp) + return; + + /* Find the pte */ + ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + /* + * If the pte is present and the pfn is still the same, update the pte. 
+ * If the pfn has changed then this is a stale rmap entry, the nested + * gpa actually points somewhere else now, and there is nothing to do. + * XXX A future optimisation would be to remove the rmap entry here. + */ + if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) { + __radix_pte_update(ptep, clr, set); + kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); + } +} + +/* + * For a given list of rmap entries, update the rc bits in all ptes in shadow + * page tables for nested guests which are referenced by the rmap list. + */ +void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp, + unsigned long clr, unsigned long set, + unsigned long hpa, unsigned long nbytes) +{ + struct llist_node *entry = ((struct llist_head *) rmapp)->first; + struct rmap_nested *cursor; + unsigned long rmap, mask; + + if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED)) + return; + + mask = PTE_RPN_MASK & ~(nbytes - 1); + hpa &= mask; + + for_each_nest_rmap_safe(cursor, entry, &rmap) + kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask); +} + static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, unsigned long hpa, unsigned long mask) { @@ -723,7 +875,7 @@ static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp, /* called with kvm->mmu_lock held */ void kvmhv_remove_nest_rmap_range(struct kvm *kvm, - struct kvm_memory_slot *memslot, + const struct kvm_memory_slot *memslot, unsigned long gpa, unsigned long hpa, unsigned long nbytes) { @@ -1049,7 +1201,7 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, struct kvm *kvm = vcpu->kvm; bool writing = !!(dsisr & DSISR_ISSTORE); u64 pgflags; - bool ret; + long ret; /* Are the rc bits set in the L1 partition scoped pte? */ pgflags = _PAGE_ACCESSED; @@ -1062,16 +1214,22 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */ ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing, gpte.raddr, kvm->arch.lpid); - spin_unlock(&kvm->mmu_lock); - if (!ret) - return -EINVAL; + if (!ret) { + ret = -EINVAL; + goto out_unlock; + } /* Set the rc bit in the pte of the shadow_pgtable for the nest guest */ ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa, gp->shadow_lpid); if (!ret) - return -EINVAL; - return 0; + ret = -EINVAL; + else + ret = 0; + +out_unlock: + spin_unlock(&kvm->mmu_lock); + return ret; } static inline int kvmppc_radix_level_to_shift(int level) @@ -1099,7 +1257,8 @@ static inline int kvmppc_radix_shift_to_level(int shift) } /* called with gp->tlb_lock held */ -static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, +static long int __kvmhv_nested_page_fault(struct kvm_run *run, + struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp) { struct kvm *kvm = vcpu->kvm; @@ -1180,9 +1339,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, kvmppc_core_queue_data_storage(vcpu, ea, dsisr); return RESUME_GUEST; } - /* passthrough of emulated MMIO case... */ - pr_err("emulated MMIO passthrough?\n"); - return -EINVAL; + + /* passthrough of emulated MMIO case */ + return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); } if (memslot->flags & KVM_MEM_READONLY) { if (writing) { @@ -1220,6 +1379,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, return ret; shift = kvmppc_radix_level_to_shift(level); } + /* Align gfn to the start of the page */ + gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT; /* 3. 
Compute the pte we need to insert for nest_gpa -> host r_addr */ @@ -1227,6 +1388,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, perm |= gpte.may_read ? 0UL : _PAGE_READ; perm |= gpte.may_write ? 0UL : _PAGE_WRITE; perm |= gpte.may_execute ? 0UL : _PAGE_EXEC; + /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */ + perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED; + perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY; pte = __pte(pte_val(pte) & ~perm); /* What size pte can we insert? */ @@ -1264,13 +1428,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, return RESUME_GUEST; } -long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) +long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu) { struct kvm_nested_guest *gp = vcpu->arch.nested; long int ret; mutex_lock(&gp->tlb_lock); - ret = __kvmhv_nested_page_fault(vcpu, gp); + ret = __kvmhv_nested_page_fault(run, vcpu, gp); mutex_unlock(&gp->tlb_lock); return ret; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index a67cf1cdeda4..3b3791ed74a6 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -107,7 +107,7 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); /* Update the dirty bitmap of a memslot */ -void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot, +void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, unsigned long gfn, unsigned long psize) { unsigned long npages; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 4efd65d9e828..811a3c2fb0e9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -587,6 +587,7 @@ void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) case PVR_POWER8: case PVR_POWER8E: case PVR_POWER8NVL: + case PVR_POWER9: vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | BOOK3S_HFLAG_NEW_TLBIE; break; @@ -1913,7 +1914,8 @@ static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new) + const struct kvm_memory_slot *new, + enum kvm_mr_change change) { return; } diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index b0b2bfc2ff51..f27ee57ab46e 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1015,17 +1015,7 @@ static int xics_debug_show(struct seq_file *m, void *private) return 0; } -static int xics_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, xics_debug_show, inode->i_private); -} - -static const struct file_operations xics_debug_fops = { - .open = xics_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(xics_debug); static void xics_debugfs_init(struct kvmppc_xics *xics) { diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index ad4a370703d3..f78d002f0fe0 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1968,17 +1968,7 @@ static int xive_debug_show(struct seq_file *m, void *private) return 0; } -static int xive_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, xive_debug_show, inode->i_private); -} - -static const struct file_operations xive_debug_fops = { - .open 
= xive_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(xive_debug); static void xive_debugfs_init(struct kvmppc_xive *xive) { diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index a9ca016da670..dbec4128bb51 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1833,7 +1833,8 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new) + const struct kvm_memory_slot *new, + enum kvm_mr_change change) { } diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 051af7d97327..4e5081e58409 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -75,6 +75,10 @@ PPC_LL r1, VCPU_HOST_STACK(r4) PPC_LL r2, HOST_R2(r1) +START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_PID lwz r8, VCPU_HOST_PID(r4) PPC_LL r11, VCPU_SHARED(r4) diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 94f04fcb373e..962ee90a0dfe 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -20,7 +20,7 @@ #define KVM_E500_H #include <linux/kvm_host.h> -#include <asm/mmu-book3e.h> +#include <asm/nohash/mmu-book3e.h> #include <asm/tlb.h> #include <asm/cputhreads.h> diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 3f8189eb56ed..fde1de08b4d7 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va vcpu->arch.pwrmgtcr0 = spr_val; break; + case SPRN_BUCSR: + /* + * If we are here, it means that we have already flushed the + * branch predictor, so just return to guest. + */ + break; + /* extra exceptions */ #ifdef CONFIG_SPE_POSSIBLE case SPRN_IVOR32: diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 8f2985e46f6f..c3f312b2bcb3 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -757,10 +757,11 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) return 0; } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { /* The page will get remapped properly on its next fault */ kvm_unmap_hva(kvm, hva); + return 0; } /*****************************************/ diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2869a299c4ed..b90a7d154180 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -331,10 +331,17 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; struct kvmppc_pte pte; - int r; + int r = -EINVAL; vcpu->stat.st++; + if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) + r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, + size); + + if ((!r) || (r == -EAGAIN)) + return r; + r = kvmppc_xlate(vcpu, *eaddr, data ? 
XLATE_DATA : XLATE_INST, XLATE_WRITE, &pte); if (r < 0) @@ -367,10 +374,17 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; struct kvmppc_pte pte; - int rc; + int rc = -EINVAL; vcpu->stat.ld++; + if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) + rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, + size); + + if ((!rc) || (rc == -EAGAIN)) + return rc; + rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, XLATE_READ, &pte); if (rc) @@ -518,7 +532,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PPC_UNSET_IRQ: case KVM_CAP_PPC_IRQ_LEVEL: case KVM_CAP_ENABLE_CAP: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_ONE_REG: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: @@ -543,8 +556,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: case KVM_CAP_SPAPR_TCE_64: - /* fallthrough */ + r = 1; + break; case KVM_CAP_SPAPR_TCE_VFIO: + r = !!cpu_has_feature(CPU_FTR_HVMODE); + break; case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: @@ -696,7 +712,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - kvmppc_core_commit_memory_region(kvm, mem, old, new); + kvmppc_core_commit_memory_region(kvm, mem, old, new, change); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, @@ -1192,6 +1208,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, kvmppc_set_vmx_byte(vcpu, gpr); break; #endif +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + case KVM_MMIO_REG_NESTED_GPR: + if (kvmppc_need_byteswap(vcpu)) + gpr = swab64(gpr); + kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, + sizeof(gpr)); + break; +#endif default: BUG(); } @@ -2084,8 +2108,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, - struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap) { int r; @@ -2273,15 +2297,6 @@ long kvm_arch_vm_ioctl(struct file *filp, break; } - case KVM_ENABLE_CAP: - { - struct kvm_enable_cap cap; - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - goto out; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_CREATE_SPAPR_TCE_64: { struct kvm_create_spapr_tce_64 create_tce_64; diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 89502cbccb1b..506413a2c25e 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -204,22 +204,6 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags) return patch_instruction(addr, create_branch(addr, target, flags)); } -int patch_branch_site(s32 *site, unsigned long target, int flags) -{ - unsigned int *addr; - - addr = (unsigned int *)((unsigned long)site + *site); - return patch_instruction(addr, create_branch(addr, target, flags)); -} - -int patch_instruction_site(s32 *site, unsigned int instr) -{ - unsigned int *addr; - - addr = (unsigned int *)((unsigned long)site + *site); - return patch_instruction(addr, instr); -} - bool is_offset_in_branch_range(long offset) { /* diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index e613b02bb2f0..5169cc805464 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -118,7 +118,7 @@ 
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } #ifdef CONFIG_PPC_BOOK3S_64 -void do_stf_entry_barrier_fixups(enum stf_barrier_type types) +static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) { unsigned int instrs[3], *dest; long *start, *end; @@ -168,7 +168,7 @@ void do_stf_entry_barrier_fixups(enum stf_barrier_type types) : "unknown"); } -void do_stf_exit_barrier_fixups(enum stf_barrier_type types) +static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) { unsigned int instrs[6], *dest; long *start, *end; @@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } + +static void patch_btb_flush_section(long *curr) +{ + unsigned int *start, *end; + + start = (void *)curr + *curr; + end = (void *)curr + *(curr + 1); + for (; start < end; start++) { + pr_devel("patching dest %lx\n", (unsigned long)start); + patch_instruction(start, PPC_INST_NOP); + } +} + +void do_btb_flush_fixups(void) +{ + long *start, *end; + + start = PTRRELOC(&__start__btb_flush_fixup); + end = PTRRELOC(&__stop__btb_flush_fixup); + + for (; start < end; start += 2) + patch_btb_flush_section(start); +} #endif /* CONFIG_PPC_FSL_BOOK3E */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index 12d92518e898..ea2b9af08a48 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -29,6 +29,7 @@ #include <asm/mmu.h> #include <asm/page.h> #include <asm/cacheflush.h> +#include <asm/code-patching.h> #include "mmu_decl.h" @@ -43,22 +44,13 @@ unsigned long tlb_47x_boltmap[1024/8]; static void ppc44x_update_tlb_hwater(void) { - extern unsigned int tlb_44x_patch_hwater_D[]; - extern unsigned int tlb_44x_patch_hwater_I[]; - /* The TLB miss handlers hard codes the watermark in a cmpli * instruction to improve performances rather than loading it * from the global variable. 
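/*
 * Sketch of modify_instruction_site() as used by the two hunks below;
 * the site-to-address math follows the patch_instruction_site() helper
 * removed from code-patching.c above (a patch site stores the offset
 * from itself to the patched instruction). Assumed shape, not verbatim:
 */
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	/* clear the masked bits, then OR in the new field */
	return patch_instruction(addr, (*addr & ~clr) | set);
}

static inline int modify_instruction_site(s32 *site, unsigned int clr,
					  unsigned int set)
{
	unsigned int *addr = (unsigned int *)((unsigned long)site + *site);

	return modify_instruction(addr, clr, set);
}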
Thus, we patch the instructions * in the 2 TLB miss handlers when updating the value */ - tlb_44x_patch_hwater_D[0] = (tlb_44x_patch_hwater_D[0] & 0xffff0000) | - tlb_44x_hwater; - flush_icache_range((unsigned long)&tlb_44x_patch_hwater_D[0], - (unsigned long)&tlb_44x_patch_hwater_D[1]); - tlb_44x_patch_hwater_I[0] = (tlb_44x_patch_hwater_I[0] & 0xffff0000) | - tlb_44x_hwater; - flush_icache_range((unsigned long)&tlb_44x_patch_hwater_I[0], - (unsigned long)&tlb_44x_patch_hwater_I[1]); + modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater); + modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater); } /* diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index 01b7f5107c3a..bfa503cff351 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -100,11 +100,7 @@ static void __init mmu_mapin_immr(void) static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped) { - unsigned int instr = *(unsigned int *)patch_site_addr(site); - - instr &= 0xffff0000; - instr |= (unsigned long)__va(mapped) >> 16; - patch_instruction_site(site, instr); + modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16); } unsigned long __init mmu_mapin_ram(unsigned long top) @@ -175,12 +171,12 @@ void set_context(unsigned long id, pgd_t *pgd) *(ptr + 1) = pgd; #endif - /* Register M_TW will contain base address of level 1 table minus the + /* Register M_TWB will contain base address of level 1 table minus the * lower part of the kernel PGDIR base address, so that all accesses to * level 1 table are done relative to lower part of kernel PGDIR base * address. */ - mtspr(SPRN_M_TW, __pa(pgd) - offset); + mtspr(SPRN_M_TWB, __pa(pgd) - offset); /* Update context */ mtspr(SPRN_M_CASID, id - 1); diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index ca96e7be4d0e..f965fc33a8b7 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -15,10 +15,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o -obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o +obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \ + $(hash64-y) mmu_context_book3s64.o \ + pgtable-book3s64.o pgtable-frag.o +obj-$(CONFIG_PPC32) += pgtable-frag.o obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o -obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o -obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o +obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o +obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o ifdef CONFIG_PPC_BOOK3S_64 obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o @@ -47,7 +50,7 @@ ifdef CONFIG_PPC_PTDUMP obj-$(CONFIG_4xx) += dump_linuxpagetables-generic.o obj-$(CONFIG_PPC_8xx) += dump_linuxpagetables-8xx.o obj-$(CONFIG_PPC_BOOK3E_MMU) += dump_linuxpagetables-generic.o -obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o +obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o dump_bats.o dump_sr.o obj-$(CONFIG_PPC_BOOK3S_64) += dump_linuxpagetables-book3s64.o endif obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index b6e7b5952ab5..e955539686a4 100644 --- 
a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -29,7 +29,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/highmem.h> -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/export.h> #include <asm/tlbflush.h> @@ -151,8 +151,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi * Allocate DMA-coherent memory space and return both the kernel remapped * virtual and bus address for that space. */ -void * -__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) +void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct page *page; struct ppc_vm_region *c; @@ -223,7 +223,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t /* * Set the "dma handle" */ - *handle = page_to_phys(page); + *dma_handle = phys_to_dma(dev, page_to_phys(page)); do { SetPageReserved(page); @@ -249,12 +249,12 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t no_page: return NULL; } -EXPORT_SYMBOL(__dma_alloc_coherent); /* * free a page as defined by the above mapping. */ -void __dma_free_coherent(size_t size, void *vaddr) +void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) { struct ppc_vm_region *c; unsigned long flags, addr; @@ -309,7 +309,6 @@ void __dma_free_coherent(size_t size, void *vaddr) __func__, vaddr); dump_stack(); } -EXPORT_SYMBOL(__dma_free_coherent); /* * make an area consistent. @@ -401,7 +400,7 @@ EXPORT_SYMBOL(__dma_sync_page); /* * Return the PFN for a given cpu virtual address returned by - * __dma_alloc_coherent. This is used by dma_mmap_coherent() + * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent() */ unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr) { diff --git a/arch/powerpc/mm/dump_bats.c b/arch/powerpc/mm/dump_bats.c new file mode 100644 index 000000000000..a0d23e96e841 --- /dev/null +++ b/arch/powerpc/mm/dump_bats.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018, Christophe Leroy CS S.I. + * <christophe.leroy@c-s.fr> + * + * This dumps the content of BATS + */ + +#include <asm/debugfs.h> +#include <asm/pgtable.h> +#include <asm/cpu_has_feature.h> + +static char *pp_601(int k, int pp) +{ + if (pp == 0) + return k ? "NA" : "RWX"; + if (pp == 1) + return k ? "ROX" : "RWX"; + if (pp == 2) + return k ? "RWX" : "RWX"; + return k ? 
"ROX" : "ROX"; +} + +static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper) +{ + u32 blpi = upper & 0xfffe0000; + u32 k = (upper >> 2) & 3; + u32 pp = upper & 3; + phys_addr_t pbn = PHYS_BAT_ADDR(lower); + u32 bsm = lower & 0x3ff; + u32 size = (bsm + 1) << 17; + + seq_printf(m, "%d: ", idx); + if (!(lower & 0x40)) { + seq_puts(m, " -\n"); + return; + } + + seq_printf(m, "0x%08x-0x%08x ", blpi, blpi + size - 1); +#ifdef CONFIG_PHYS_64BIT + seq_printf(m, "0x%016llx ", pbn); +#else + seq_printf(m, "0x%08x ", pbn); +#endif + + seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp)); + + if (lower & _PAGE_WRITETHRU) + seq_puts(m, "write through "); + if (lower & _PAGE_NO_CACHE) + seq_puts(m, "no cache "); + if (lower & _PAGE_COHERENT) + seq_puts(m, "coherent "); + seq_puts(m, "\n"); +} + +#define BAT_SHOW_601(_m, _n, _l, _u) bat_show_601(_m, _n, mfspr(_l), mfspr(_u)) + +static int bats_show_601(struct seq_file *m, void *v) +{ + seq_puts(m, "---[ Block Address Translation ]---\n"); + + BAT_SHOW_601(m, 0, SPRN_IBAT0L, SPRN_IBAT0U); + BAT_SHOW_601(m, 1, SPRN_IBAT1L, SPRN_IBAT1U); + BAT_SHOW_601(m, 2, SPRN_IBAT2L, SPRN_IBAT2U); + BAT_SHOW_601(m, 3, SPRN_IBAT3L, SPRN_IBAT3U); + + return 0; +} + +static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool is_d) +{ + u32 bepi = upper & 0xfffe0000; + u32 bl = (upper >> 2) & 0x7ff; + u32 k = upper & 3; + phys_addr_t brpn = PHYS_BAT_ADDR(lower); + u32 size = (bl + 1) << 17; + + seq_printf(m, "%d: ", idx); + if (k == 0) { + seq_puts(m, " -\n"); + return; + } + + seq_printf(m, "0x%08x-0x%08x ", bepi, bepi + size - 1); +#ifdef CONFIG_PHYS_64BIT + seq_printf(m, "0x%016llx ", brpn); +#else + seq_printf(m, "0x%08x ", brpn); +#endif + + if (k == 1) + seq_puts(m, "User "); + else if (k == 2) + seq_puts(m, "Kernel "); + else + seq_puts(m, "Kernel/User "); + + if (lower & BPP_RX) + seq_puts(m, is_d ? "RO " : "EXEC "); + else if (lower & BPP_RW) + seq_puts(m, is_d ? "RW " : "EXEC "); + else + seq_puts(m, is_d ? 
"NA " : "NX "); + + if (lower & _PAGE_WRITETHRU) + seq_puts(m, "write through "); + if (lower & _PAGE_NO_CACHE) + seq_puts(m, "no cache "); + if (lower & _PAGE_COHERENT) + seq_puts(m, "coherent "); + if (lower & _PAGE_GUARDED) + seq_puts(m, "guarded "); + seq_puts(m, "\n"); +} + +#define BAT_SHOW_603(_m, _n, _l, _u, _d) bat_show_603(_m, _n, mfspr(_l), mfspr(_u), _d) + +static int bats_show_603(struct seq_file *m, void *v) +{ + seq_puts(m, "---[ Instruction Block Address Translation ]---\n"); + + BAT_SHOW_603(m, 0, SPRN_IBAT0L, SPRN_IBAT0U, false); + BAT_SHOW_603(m, 1, SPRN_IBAT1L, SPRN_IBAT1U, false); + BAT_SHOW_603(m, 2, SPRN_IBAT2L, SPRN_IBAT2U, false); + BAT_SHOW_603(m, 3, SPRN_IBAT3L, SPRN_IBAT3U, false); + if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) { + BAT_SHOW_603(m, 4, SPRN_IBAT4L, SPRN_IBAT4U, false); + BAT_SHOW_603(m, 5, SPRN_IBAT5L, SPRN_IBAT5U, false); + BAT_SHOW_603(m, 6, SPRN_IBAT6L, SPRN_IBAT6U, false); + BAT_SHOW_603(m, 7, SPRN_IBAT7L, SPRN_IBAT7U, false); + } + + seq_puts(m, "\n---[ Data Block Address Translation ]---\n"); + + BAT_SHOW_603(m, 0, SPRN_DBAT0L, SPRN_DBAT0U, true); + BAT_SHOW_603(m, 1, SPRN_DBAT1L, SPRN_DBAT1U, true); + BAT_SHOW_603(m, 2, SPRN_DBAT2L, SPRN_DBAT2U, true); + BAT_SHOW_603(m, 3, SPRN_DBAT3L, SPRN_DBAT3U, true); + if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) { + BAT_SHOW_603(m, 4, SPRN_DBAT4L, SPRN_DBAT4U, true); + BAT_SHOW_603(m, 5, SPRN_DBAT5L, SPRN_DBAT5U, true); + BAT_SHOW_603(m, 6, SPRN_DBAT6L, SPRN_DBAT6U, true); + BAT_SHOW_603(m, 7, SPRN_DBAT7L, SPRN_DBAT7U, true); + } + + return 0; +} + +static int bats_open(struct inode *inode, struct file *file) +{ + if (cpu_has_feature(CPU_FTR_601)) + return single_open(file, bats_show_601, NULL); + + return single_open(file, bats_show_603, NULL); +} + +static const struct file_operations bats_fops = { + .open = bats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init bats_init(void) +{ + struct dentry *debugfs_file; + + debugfs_file = debugfs_create_file("block_address_translation", 0400, + powerpc_debugfs_root, NULL, &bats_fops); + return debugfs_file ? 0 : -ENOMEM; +} +device_initcall(bats_init); diff --git a/arch/powerpc/mm/dump_linuxpagetables-generic.c b/arch/powerpc/mm/dump_linuxpagetables-generic.c index 1e3829ec1348..3fe98a0974c6 100644 --- a/arch/powerpc/mm/dump_linuxpagetables-generic.c +++ b/arch/powerpc/mm/dump_linuxpagetables-generic.c @@ -21,13 +21,11 @@ static const struct flag_info flag_array[] = { .set = "rw", .clear = "r ", }, { -#ifndef CONFIG_PPC_BOOK3S_32 .mask = _PAGE_EXEC, .val = _PAGE_EXEC, .set = " X ", .clear = " ", }, { -#endif .mask = _PAGE_PRESENT, .val = _PAGE_PRESENT, .set = "present", diff --git a/arch/powerpc/mm/dump_sr.c b/arch/powerpc/mm/dump_sr.c new file mode 100644 index 000000000000..501843664bb9 --- /dev/null +++ b/arch/powerpc/mm/dump_sr.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018, Christophe Leroy CS S.I. 
+ * <christophe.leroy@c-s.fr> + * + * This dumps the content of Segment Registers + */ + +#include <asm/debugfs.h> + +static void seg_show(struct seq_file *m, int i) +{ + u32 val = mfsrin(i << 28); + + seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i); + seq_printf(m, "Kern key %d ", (val >> 30) & 1); + seq_printf(m, "User key %d ", (val >> 29) & 1); + if (val & 0x80000000) { + seq_printf(m, "Device 0x%03x", (val >> 20) & 0x1ff); + seq_printf(m, "-0x%05x", val & 0xfffff); + } else { + if (val & 0x10000000) + seq_puts(m, "No Exec "); + seq_printf(m, "VSID 0x%06x", val & 0xffffff); + } + seq_puts(m, "\n"); +} + +static int sr_show(struct seq_file *m, void *v) +{ + int i; + + seq_puts(m, "---[ User Segments ]---\n"); + for (i = 0; i < TASK_SIZE >> 28; i++) + seg_show(m, i); + + seq_puts(m, "\n---[ Kernel Segments ]---\n"); + for (; i < 16; i++) + seg_show(m, i); + + return 0; +} + +static int sr_open(struct inode *inode, struct file *file) +{ + return single_open(file, sr_show, NULL); +} + +static const struct file_operations sr_fops = { + .open = sr_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init sr_init(void) +{ + struct dentry *debugfs_file; + + debugfs_file = debugfs_create_file("segment_registers", 0400, + powerpc_debugfs_root, NULL, &sr_fops); + return debugfs_file ? 0 : -ENOMEM; +} +device_initcall(sr_init); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1697e903bbf2..a6dcfda3e11e 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -226,7 +226,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, static bool bad_kernel_fault(bool is_exec, unsigned long error_code, unsigned long address) { - if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) { + /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */ + if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT | + DSISR_PROTFAULT))) { printk_ratelimited(KERN_CRIT "kernel tried to execute" " exec-protected page (%lx) -" "exploit attempt? (uid: %d)\n", @@ -341,10 +343,21 @@ static inline void cmo_account_page_fault(void) static inline void cmo_account_page_fault(void) { } #endif /* CONFIG_PPC_SMLPAR */ -#ifdef CONFIG_PPC_STD_MMU -static void sanity_check_fault(bool is_write, unsigned long error_code) +#ifdef CONFIG_PPC_BOOK3S +static void sanity_check_fault(bool is_write, bool is_user, + unsigned long error_code, unsigned long address) { /* + * Userspace trying to access kernel address, we get PROTFAULT for that. + */ + if (is_user && address >= TASK_SIZE) { + pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n", + current->comm, current->pid, address, + from_kuid(&init_user_ns, current_uid())); + return; + } + + /* * For hash translation mode, we should never get a * PROTFAULT. Any update to pte to reduce access will result in us * removing the hash page table entry, thus resulting in a DSISR_NOHPTE @@ -373,12 +386,15 @@ static void sanity_check_fault(bool is_write, unsigned long error_code) * For radix, we can get prot fault for autonuma case, because radix * page table will have them marked noaccess for user. 
*/ - if (!radix_enabled() && !is_write) - WARN_ON_ONCE(error_code & DSISR_PROTFAULT); + if (radix_enabled() || is_write) + return; + + WARN_ON_ONCE(error_code & DSISR_PROTFAULT); } #else -static void sanity_check_fault(bool is_write, unsigned long error_code) { } -#endif /* CONFIG_PPC_STD_MMU */ +static void sanity_check_fault(bool is_write, bool is_user, + unsigned long error_code, unsigned long address) { } +#endif /* CONFIG_PPC_BOOK3S */ /* * Define the correct "is_write" bit in error_code based @@ -435,7 +451,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, } /* Additional sanity check(s) */ - sanity_check_fault(is_write, error_code); + sanity_check_fault(is_write, is_user, error_code, address); /* * The kernel should never take an execute fault nor should it @@ -636,21 +652,23 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) switch (TRAP(regs)) { case 0x300: case 0x380: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "data at address 0x%08lx\n", regs->dar); + case 0xe00: + pr_alert("BUG: %s at 0x%08lx\n", + regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" : + "Unable to handle kernel data access", regs->dar); break; case 0x400: case 0x480: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "instruction fetch\n"); + pr_alert("BUG: Unable to handle kernel instruction fetch%s", + regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n"); break; case 0x600: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "unaligned access at address 0x%08lx\n", regs->dar); + pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n", + regs->dar); break; default: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "unknown fault\n"); + pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n", + regs->dar); break; } printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index 26acf6c8c20c..1e2df3e9f9ea 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -28,6 +28,7 @@ #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/feature-fixups.h> +#include <asm/code-patching-asm.h> #ifdef CONFIG_SMP .section .bss @@ -337,11 +338,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */ SET_V(r5) /* set V (valid) bit */ + patch_site 0f, patch__hash_page_A0 + patch_site 1f, patch__hash_page_A1 + patch_site 2f, patch__hash_page_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -_GLOBAL(hash_page_patch_A) - addis r0,r7,Hash_base@h /* base address of hash table */ - rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ - rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ +0: addis r0,r7,Hash_base@h /* base address of hash table */ +1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ +2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r3,r3,r0 /* make primary hash */ li r0,8 /* PTEs/group */ @@ -366,10 +369,10 @@ _GLOBAL(hash_page_patch_A) bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ beq+ found_slot + patch_site 0f, patch__hash_page_B /* Search the secondary PTEG for a matching PTE */ ori r5,r5,PTE_H /* set H (secondary hash) bit */ -_GLOBAL(hash_page_patch_B) - xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ +0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ xori r4,r4,(-PTEG_SIZE & 0xffff) 
addi r4,r4,-HPTE_SIZE mtctr r0 @@ -393,10 +396,10 @@ _GLOBAL(hash_page_patch_B) addi r6,r6,1 stw r6,primary_pteg_full@l(r4) + patch_site 0f, patch__hash_page_C /* Search the secondary PTEG for an empty slot */ ori r5,r5,PTE_H /* set H (secondary hash) bit */ -_GLOBAL(hash_page_patch_C) - xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ +0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ xori r4,r4,(-PTEG_SIZE & 0xffff) addi r4,r4,-HPTE_SIZE mtctr r0 @@ -577,11 +580,13 @@ _GLOBAL(flush_hash_pages) stwcx. r8,0,r5 /* update the pte */ bne- 33b + patch_site 0f, patch__flush_hash_A0 + patch_site 1f, patch__flush_hash_A1 + patch_site 2f, patch__flush_hash_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -_GLOBAL(flush_hash_patch_A) - addis r8,r7,Hash_base@h /* base address of hash table */ - rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ - rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ +0: addis r8,r7,Hash_base@h /* base address of hash table */ +1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ +2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r8,r0,r8 /* make primary hash */ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ @@ -593,11 +598,11 @@ _GLOBAL(flush_hash_patch_A) bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ beq+ 3f + patch_site 0f, patch__flush_hash_B /* Search the secondary PTEG for a matching PTE */ ori r11,r11,PTE_H /* set H (secondary hash) bit */ li r0,8 /* PTEs/group */ -_GLOBAL(flush_hash_patch_B) - xoris r12,r8,Hash_msk>>16 /* compute secondary hash */ +0: xoris r12,r8,Hash_msk>>16 /* compute secondary hash */ xori r12,r12,(-PTEG_SIZE & 0xffff) addi r12,r12,-HPTE_SIZE mtctr r0 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8cf035e68378..9e732bb2c84a 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -42,6 +42,8 @@ EXPORT_SYMBOL(HPAGE_SHIFT); #define hugepd_none(hpd) (hpd_val(hpd) == 0) +#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *))) + pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { /* @@ -61,14 +63,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, int num_hugepd; if (pshift >= pdshift) { - cachep = hugepte_cache; + cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); + } else if (IS_ENABLED(CONFIG_PPC_8xx)) { + cachep = PGT_CACHE(PTE_INDEX_SIZE); + num_hugepd = 1; } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; } - new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); @@ -264,7 +269,7 @@ static void hugepd_free_rcu_callback(struct rcu_head *head) unsigned int i; for (i = 0; i < batch->index; i++) - kmem_cache_free(hugepte_cache, batch->ptes[i]); + kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]); free_page((unsigned long)batch); } @@ -277,7 +282,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) if (atomic_read(&tlb->mm->mm_users) < 2 || mm_is_thread_local(tlb->mm)) { - kmem_cache_free(hugepte_cache, hugepte); + kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte); put_cpu_var(hugepd_freelist_cur); return; } @@ -289,7 +294,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) (*batchp)->ptes[(*batchp)->index++] = hugepte; if ((*batchp)->index == 
HUGEPD_FREELIST_SIZE) { - call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback); + call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback); *batchp = NULL; } put_cpu_var(hugepd_freelist_cur); @@ -329,6 +334,9 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); + else if (IS_ENABLED(CONFIG_PPC_8xx)) + pgtable_free_tlb(tlb, hugepte, + get_hugepd_cache_index(PTE_INDEX_SIZE)); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); @@ -652,7 +660,6 @@ static int __init hugepage_setup_sz(char *str) } __setup("hugepagesz=", hugepage_setup_sz); -struct kmem_cache *hugepte_cache; static int __init hugetlbpage_init(void) { int psize; @@ -699,24 +706,13 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. */ - if (pdshift > shift) - pgtable_cache_add(pdshift - shift, NULL); + if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx)) + pgtable_cache_add(PTE_INDEX_SIZE); + else if (pdshift > shift) + pgtable_cache_add(pdshift - shift); #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) - else if (!hugepte_cache) { - /* - * Create a kmem cache for hugeptes. The bottom bits in - * the pte have size information encoded in them, so - * align them to allow this - */ - hugepte_cache = kmem_cache_create("hugepte-cache", - sizeof(pte_t), - HUGEPD_SHIFT_MASK + 1, - 0, NULL); - if (hugepte_cache == NULL) - panic("%s: Unable to create kmem cache " - "for hugeptes\n", __func__); - - } + else + pgtable_cache_add(PTE_T_ORDER); #endif } diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c index 2b656e67f2ea..1e6910eb70ed 100644 --- a/arch/powerpc/mm/init-common.c +++ b/arch/powerpc/mm/init-common.c @@ -25,22 +25,40 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> -static void pgd_ctor(void *addr) -{ - memset(addr, 0, PGD_TABLE_SIZE); +#define CTOR(shift) static void ctor_##shift(void *addr) \ +{ \ + memset(addr, 0, sizeof(void *) << (shift)); \ } -static void pud_ctor(void *addr) -{ - memset(addr, 0, PUD_TABLE_SIZE); -} +CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7); +CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15); -static void pmd_ctor(void *addr) +static inline void (*ctor(int shift))(void *) { - memset(addr, 0, PMD_TABLE_SIZE); + BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15); + + switch (shift) { + case 0: return ctor_0; + case 1: return ctor_1; + case 2: return ctor_2; + case 3: return ctor_3; + case 4: return ctor_4; + case 5: return ctor_5; + case 6: return ctor_6; + case 7: return ctor_7; + case 8: return ctor_8; + case 9: return ctor_9; + case 10: return ctor_10; + case 11: return ctor_11; + case 12: return ctor_12; + case 13: return ctor_13; + case 14: return ctor_14; + case 15: return ctor_15; + } + return NULL; } -struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; +struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1]; EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */ /* @@ -50,7 +68,7 @@ EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */ * everything else. Caches created by this function are used for all * the higher level pagetables, and for hugepage pagetables. 
*/ -void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) +void pgtable_cache_add(unsigned int shift) { char *name; unsigned long table_size = sizeof(void *) << shift; @@ -71,19 +89,19 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) * moment, gcc doesn't seem to recognize is_power_of_2 as a * constant expression, so so much for that. */ BUG_ON(!is_power_of_2(minalign)); - BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); if (PGT_CACHE(shift)) return; /* Already have a cache of this size */ align = max_t(unsigned long, align, minalign); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); - new = kmem_cache_create(name, table_size, align, 0, ctor); + new = kmem_cache_create(name, table_size, align, 0, ctor(shift)); if (!new) panic("Could not allocate pgtable cache for order %d", shift); kfree(name); - pgtable_cache[shift - 1] = new; + pgtable_cache[shift] = new; pr_debug("Allocated pgtable cache for order %d\n", shift); } @@ -91,15 +109,15 @@ EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */ void pgtable_cache_init(void) { - pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); + pgtable_cache_add(PGD_INDEX_SIZE); - if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX)) - pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); + if (PMD_CACHE_INDEX) + pgtable_cache_add(PMD_CACHE_INDEX); /* * In all current configs, when the PUD index exists it's the * same size as either the pgd or pmd index except with THP enabled * on book3s 64 */ - if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX)) - pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor); + if (PUD_CACHE_INDEX) + pgtable_cache_add(PUD_CACHE_INDEX); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0a64fffabee1..20394e52fe27 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void) } #endif -static bool zone_limits_final; - /* - * The memory zones past TOP_ZONE are managed by generic mm code. - * These should be set to zero since that's what every other - * architecture does. + * Zones usage: + * + * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be + * everything else. GFP_DMA32 page allocations automatically fall back to + * ZONE_DMA. + * + * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to + * inform the generic DMA mapping code. 32-bit only devices (if not handled + * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get + * otherwise served by ZONE_DMA. */ -static unsigned long max_zone_pfns[MAX_NR_ZONES] = { - [0 ... TOP_ZONE ] = ~0UL, - [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0 -}; - -/* - * Restrict the specified zone and all more restrictive zones - * to be below the specified pfn. May not be called after - * paging_init(). 
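Two things worth illustrating from the init-common.c hunk above: a macro-generated zeroing constructor per table size replaces the pgd/pud/pmd trio, and pgtable_cache[] is now indexed directly by shift (with one extra slot) instead of shift - 1, which makes an order-0 cache representable. A compact standalone model under those assumptions; pick_ctor, cache_add and MAX_INDEX are illustrative stand-ins, and the kmem_cache machinery is reduced to a pointer array.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define MAX_INDEX 3	/* the kernel uses 15 */

/* one zeroing constructor per power-of-two table size */
#define CTOR(shift) static void ctor_##shift(void *addr) \
{ \
	memset(addr, 0, sizeof(void *) << (shift)); \
}

CTOR(0) CTOR(1) CTOR(2) CTOR(3)

static void (*pick_ctor(int shift))(void *)
{
	switch (shift) {
	case 0: return ctor_0;
	case 1: return ctor_1;
	case 2: return ctor_2;
	case 3: return ctor_3;
	}
	return NULL;
}

/* index directly by shift; the extra slot makes shift 0 legal */
static void *pgcache[MAX_INDEX + 1];

static void cache_add(unsigned int shift, void *cache)
{
	assert(shift <= MAX_INDEX);	/* no more BUG_ON(shift < 1) */
	if (!pgcache[shift])
		pgcache[shift] = cache;
}

int main(void)
{
	void *table[1 << MAX_INDEX];

	pick_ctor(2)(table);	/* zero a 4-pointer table */
	cache_add(0, table);	/* an order-0 entry is now valid */
	return 0;
}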
- */ -void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit) -{ - int i; - - if (WARN_ON(zone_limits_final)) - return; - - for (i = zone; i >= 0; i--) { - if (max_zone_pfns[i] > pfn_limit) - max_zone_pfns[i] = pfn_limit; - } -} +static unsigned long max_zone_pfns[MAX_NR_ZONES]; /* * Find the least restrictive zone that is entirely below the @@ -324,11 +308,14 @@ void __init paging_init(void) printk(KERN_DEBUG "Memory hole size: %ldMB\n", (long int)((top_of_ram - total_ram) >> 20)); +#ifdef CONFIG_ZONE_DMA + max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT); +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM - limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT); + max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif - limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT); - zone_limits_final = true; + free_area_init_nodes(max_zone_pfns); mark_nonram_nosave(); @@ -503,7 +490,7 @@ EXPORT_SYMBOL(flush_icache_user_range); void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { -#ifdef CONFIG_PPC_STD_MMU +#ifdef CONFIG_PPC_BOOK3S /* * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held @@ -541,7 +528,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, } hash_preload(vma->vm_mm, address, is_exec, trap); -#endif /* CONFIG_PPC_STD_MMU */ +#endif /* CONFIG_PPC_BOOK3S */ #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ && defined(CONFIG_HUGETLB_PAGE) if (is_vm_hugetlb_page(vma)) diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c index f84e14f23e50..bb52320b7369 100644 --- a/arch/powerpc/mm/mmu_context.c +++ b/arch/powerpc/mm/mmu_context.c @@ -15,6 +15,7 @@ #include <linux/sched/mm.h> #include <asm/mmu_context.h> +#include <asm/pgalloc.h> #if defined(CONFIG_PPC32) static inline void switch_mm_pgdir(struct task_struct *tsk, @@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, switch_mmu_context(prev, next, tsk); } +#ifdef CONFIG_PPC32 +void arch_exit_mmap(struct mm_struct *mm) +{ + void *frag = pte_frag_get(&mm->context); + + if (frag) + pte_frag_destroy(frag); +} +#endif diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 510f103d7813..f720c5cc0b5e 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -164,21 +164,6 @@ static void destroy_contexts(mm_context_t *ctx) } } -static void pte_frag_destroy(void *pte_frag) -{ - int count; - struct page *page; - - page = virt_to_page(pte_frag); - /* drop all the pending references */ - count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; - /* We allow PTE_FRAG_NR fragments from a PTE page */ - if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) { - pgtable_page_dtor(page); - __free_page(page); - } -} - static void pmd_frag_destroy(void *pmd_frag) { int count; diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 56c2234cc6ae..a712a650a8b6 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -36,6 +36,8 @@ struct mm_iommu_table_group_mem_t { u64 ua; /* userspace address */ u64 entries; /* number of entries in hpas[] */ u64 *hpas; /* vmalloc'ed */ +#define MM_IOMMU_TABLE_INVALID_HPA ((uint64_t)-1) + u64 dev_hpa; /* Device memory base address */ }; static long 
mm_iommu_adjust_locked_vm(struct mm_struct *mm, @@ -126,7 +128,8 @@ static int mm_iommu_move_page_from_cma(struct page *page) return 0; } -long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, +static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, + unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem) { struct mm_iommu_table_group_mem_t *mem; @@ -140,12 +143,6 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { - if ((mem->ua == ua) && (mem->entries == entries)) { - ++mem->used; - *pmem = mem; - goto unlock_exit; - } - /* Overlap? */ if ((mem->ua < (ua + (entries << PAGE_SHIFT))) && (ua < (mem->ua + @@ -156,11 +153,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } - ret = mm_iommu_adjust_locked_vm(mm, entries, true); - if (ret) - goto unlock_exit; + if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { + ret = mm_iommu_adjust_locked_vm(mm, entries, true); + if (ret) + goto unlock_exit; - locked_entries = entries; + locked_entries = entries; + } mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { @@ -168,6 +167,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, goto unlock_exit; } + if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) { + mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); + mem->dev_hpa = dev_hpa; + goto good_exit; + } + mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA; + /* * For a starting point for a maximum page size calculation * we use @ua and @entries natural alignment to allow IOMMU pages @@ -236,6 +242,7 @@ populate: mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } +good_exit: atomic64_set(&mem->mapped, 1); mem->used = 1; mem->ua = ua; @@ -252,13 +259,31 @@ unlock_exit: return ret; } -EXPORT_SYMBOL_GPL(mm_iommu_get); + +long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, + struct mm_iommu_table_group_mem_t **pmem) +{ + return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA, + pmem); +} +EXPORT_SYMBOL_GPL(mm_iommu_new); + +long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, + unsigned long entries, unsigned long dev_hpa, + struct mm_iommu_table_group_mem_t **pmem) +{ + return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem); +} +EXPORT_SYMBOL_GPL(mm_iommu_newdev); static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) { long i; struct page *page = NULL; + if (!mem->hpas) + return; + for (i = 0; i < mem->entries; ++i) { if (!mem->hpas[i]) continue; @@ -300,6 +325,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) { long ret = 0; + unsigned long entries, dev_hpa; mutex_lock(&mem_list_mutex); @@ -321,9 +347,12 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) } /* @mapped became 0 so now mappings are disabled, release the region */ + entries = mem->entries; + dev_hpa = mem->dev_hpa; mm_iommu_release(mem); - mm_iommu_adjust_locked_vm(mm, mem->entries, false); + if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) + mm_iommu_adjust_locked_vm(mm, entries, false); unlock_exit: mutex_unlock(&mem_list_mutex); @@ -368,27 +397,32 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm, return ret; } -struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, +struct mm_iommu_table_group_mem_t *mm_iommu_get(struct 
mm_struct *mm, unsigned long ua, unsigned long entries) { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; + mutex_lock(&mem_list_mutex); + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua == ua) && (mem->entries == entries)) { ret = mem; + ++mem->used; break; } } + mutex_unlock(&mem_list_mutex); + return ret; } -EXPORT_SYMBOL_GPL(mm_iommu_find); +EXPORT_SYMBOL_GPL(mm_iommu_get); long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; - u64 *va = &mem->hpas[entry]; + u64 *va; if (entry >= mem->entries) return -EFAULT; @@ -396,6 +430,12 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (pageshift > mem->pageshift) return -EFAULT; + if (!mem->hpas) { + *hpa = mem->dev_hpa + (ua - mem->ua); + return 0; + } + + va = &mem->hpas[entry]; *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; @@ -406,7 +446,6 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; - void *va = &mem->hpas[entry]; unsigned long *pa; if (entry >= mem->entries) @@ -415,7 +454,12 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (pageshift > mem->pageshift) return -EFAULT; - pa = (void *) vmalloc_to_phys(va); + if (!mem->hpas) { + *hpa = mem->dev_hpa + (ua - mem->ua); + return 0; + } + + pa = (void *) vmalloc_to_phys(&mem->hpas[entry]); if (!pa) return -EFAULT; @@ -435,6 +479,9 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua) if (!mem) return; + if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) + return; + entry = (ua - mem->ua) >> PAGE_SHIFT; va = &mem->hpas[entry]; @@ -445,6 +492,33 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua) *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY; } +bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, + unsigned int pageshift, unsigned long *size) +{ + struct mm_iommu_table_group_mem_t *mem; + unsigned long end; + + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { + if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) + continue; + + end = mem->dev_hpa + (mem->entries << PAGE_SHIFT); + if ((mem->dev_hpa <= hpa) && (hpa < end)) { + /* + * Since the IOMMU page size might be bigger than + * PAGE_SIZE, the amount of preregistered memory + * starting from @hpa might be smaller than 1<<pageshift + * and the caller needs to distinguish this situation. + */ + *size = min(1UL << pageshift, end - hpa); + return true; + } + } + + return false; +} +EXPORT_SYMBOL_GPL(mm_iommu_is_devmem); + long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) { if (atomic64_inc_not_zero(&mem->mapped)) diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 2faca46ad720..22d71a58167f 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -372,7 +372,6 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) { pr_hard("initing context for mm @%p\n", mm); -#ifdef CONFIG_PPC_MM_SLICES /* * We have MMU_NO_CONTEXT set to be ~0. Hence check * explicitly against context.id == 0. 
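Before leaving the mm_iommu hunks above: a region now carries either a pinned-pages array (hpas[]) or a flat device-memory base (dev_hpa), and mm_iommu_ua_to_hpa() selects the path by whether hpas is NULL. A simplified standalone model of the two translations; the struct, helper name, and PAGE_SHIFT value are illustrative, and errors are reduced to -1.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 16

struct region {
	uint64_t ua, entries, dev_hpa;
	uint64_t *hpas;		/* NULL for device memory */
};

static int ua_to_hpa(const struct region *r, uint64_t ua, uint64_t *hpa)
{
	uint64_t entry = (ua - r->ua) >> PAGE_SHIFT;

	if (entry >= r->entries)
		return -1;
	if (!r->hpas) {		/* device memory: flat offset */
		*hpa = r->dev_hpa + (ua - r->ua);
		return 0;
	}
	/* pinned system memory: page frame plus in-page offset */
	*hpa = r->hpas[entry] | (ua & ((1ULL << PAGE_SHIFT) - 1));
	return 0;
}

int main(void)
{
	struct region dev = { .ua = 0x100000000ULL, .entries = 4,
			      .dev_hpa = 0x6000000000ULL, .hpas = NULL };
	uint64_t hpa;

	if (!ua_to_hpa(&dev, dev.ua + 0x123, &hpa))
		printf("hpa = 0x%llx\n", (unsigned long long)hpa);
	return 0;
}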
This ensures that we properly @@ -382,9 +381,9 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) */ if (mm->context.id == 0) slice_init_new_context_exec(mm); -#endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; + pte_frag_set(&mm->context, NULL); return 0; } @@ -487,4 +486,3 @@ void __init mmu_context_init(void) next_context = FIRST_CONTEXT; nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1; } - diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 8574fbbc45e0..c4a717da65eb 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -155,7 +155,7 @@ struct tlbcam { }; #endif -#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx) /* 6xx have BATS */ /* FSL_BOOKE have TLBCAM */ /* 8xx have LTLB */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index ce28ae5ca080..87f0dd004295 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1475,7 +1475,7 @@ static int dt_update_callback(struct notifier_block *nb, switch (action) { case OF_RECONFIG_UPDATE_PROPERTY: - if (!of_prop_cmp(update->dn->type, "cpu") && + if (of_node_is_type(update->dn, "cpu") && !of_prop_cmp(update->prop->name, "ibm,associativity")) { u32 core_id; of_property_read_u32(update->dn, "reg", &core_id); diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 9f93c9f985c5..f3c31f5e1026 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -244,6 +244,9 @@ static pmd_t *get_pmd_from_cache(struct mm_struct *mm) { void *pmd_frag, *ret; + if (PMD_FRAG_NR == 1) + return NULL; + spin_lock(&mm->page_table_lock); ret = mm->context.pmd_frag; if (ret) { @@ -322,91 +325,6 @@ void pmd_fragment_free(unsigned long *pmd) } } -static pte_t *get_pte_from_cache(struct mm_struct *mm) -{ - void *pte_frag, *ret; - - spin_lock(&mm->page_table_lock); - ret = mm->context.pte_frag; - if (ret) { - pte_frag = ret + PTE_FRAG_SIZE; - /* - * If we have taken up all the fragments mark PTE page NULL - */ - if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) - pte_frag = NULL; - mm->context.pte_frag = pte_frag; - } - spin_unlock(&mm->page_table_lock); - return (pte_t *)ret; -} - -static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) -{ - void *ret = NULL; - struct page *page; - - if (!kernel) { - page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - } else { - page = alloc_page(PGALLOC_GFP); - if (!page) - return NULL; - } - - atomic_set(&page->pt_frag_refcount, 1); - - ret = page_address(page); - /* - * if we support only one fragment just return the - * allocated page. - */ - if (PTE_FRAG_NR == 1) - return ret; - spin_lock(&mm->page_table_lock); - /* - * If we find pgtable_page set, we return - * the allocated page with single fragement - * count. 
- */ - if (likely(!mm->context.pte_frag)) { - atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR); - mm->context.pte_frag = ret + PTE_FRAG_SIZE; - } - spin_unlock(&mm->page_table_lock); - - return (pte_t *)ret; -} - -pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) -{ - pte_t *pte; - - pte = get_pte_from_cache(mm); - if (pte) - return pte; - - return __alloc_for_ptecache(mm, kernel); -} - -void pte_fragment_free(unsigned long *table, int kernel) -{ - struct page *page = virt_to_page(table); - - BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0); - if (atomic_dec_and_test(&page->pt_frag_refcount)) { - if (!kernel) - pgtable_page_dtor(page); - __free_page(page); - } -} - static inline void pgtable_free(void *table, int index) { switch (index) { diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c new file mode 100644 index 000000000000..af23a587f019 --- /dev/null +++ b/arch/powerpc/mm/pgtable-frag.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Handling Page Tables through page fragments + * + */ + +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/mm.h> +#include <linux/percpu.h> +#include <linux/hardirq.h> +#include <linux/hugetlb.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/tlb.h> + +void pte_frag_destroy(void *pte_frag) +{ + int count; + struct page *page; + + page = virt_to_page(pte_frag); + /* drop all the pending references */ + count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; + /* We allow PTE_FRAG_NR fragments from a PTE page */ + if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) { + pgtable_page_dtor(page); + __free_page(page); + } +} + +static pte_t *get_pte_from_cache(struct mm_struct *mm) +{ + void *pte_frag, *ret; + + if (PTE_FRAG_NR == 1) + return NULL; + + spin_lock(&mm->page_table_lock); + ret = pte_frag_get(&mm->context); + if (ret) { + pte_frag = ret + PTE_FRAG_SIZE; + /* + * If we have taken up all the fragments mark PTE page NULL + */ + if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) + pte_frag = NULL; + pte_frag_set(&mm->context, pte_frag); + } + spin_unlock(&mm->page_table_lock); + return (pte_t *)ret; +} + +static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) +{ + void *ret = NULL; + struct page *page; + + if (!kernel) { + page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } + } else { + page = alloc_page(PGALLOC_GFP); + if (!page) + return NULL; + } + + atomic_set(&page->pt_frag_refcount, 1); + + ret = page_address(page); + /* + * if we support only one fragment just return the + * allocated page. + */ + if (PTE_FRAG_NR == 1) + return ret; + spin_lock(&mm->page_table_lock); + /* + * If we find pgtable_page set, we return + * the allocated page with single fragment + * count.
+ */ + if (likely(!pte_frag_get(&mm->context))) { + atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR); + pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE); + } + spin_unlock(&mm->page_table_lock); + + return (pte_t *)ret; +} + +pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) +{ + pte_t *pte; + + pte = get_pte_from_cache(mm); + if (pte) + return pte; + + return __alloc_for_ptecache(mm, kernel); +} + +void pte_fragment_free(unsigned long *table, int kernel) +{ + struct page *page = virt_to_page(table); + + BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0); + if (atomic_dec_and_test(&page->pt_frag_refcount)) { + if (!kernel) + pgtable_page_dtor(page); + __free_page(page); + } +} diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 010e1c616cb2..d3d61d29b4f1 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -74,7 +74,7 @@ static struct page *maybe_pte_to_page(pte_t pte) * support falls into the same category. */ -static pte_t set_pte_filter(pte_t pte) +static pte_t set_pte_filter_hash(pte_t pte) { if (radix_enabled()) return pte; @@ -93,14 +93,12 @@ static pte_t set_pte_filter(pte_t pte) return pte; } -static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, - int dirty) -{ - return pte; -} - #else /* CONFIG_PPC_BOOK3S */ +static pte_t set_pte_filter_hash(pte_t pte) { return pte; } + +#endif /* CONFIG_PPC_BOOK3S */ + /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. @@ -109,6 +107,9 @@ static pte_t set_pte_filter(pte_t pte) { struct page *pg; + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return set_pte_filter_hash(pte); + /* No exec permission in the first place, move on */ if (!pte_exec(pte) || !pte_looks_normal(pte)) return pte; @@ -138,6 +139,9 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, { struct page *pg; + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return pte; + /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, @@ -172,8 +176,6 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, return pte_mkexec(pte); } -#endif /* CONFIG_PPC_BOOK3S */ - /* * set_pte stores a linux PTE into the linux page table. 
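The new pgtable-frag.c above carves a page into PTE_FRAG_NR page-table fragments and frees the backing page only once the shared pt_frag_refcount drains. A hedged userspace sketch of that lifetime rule, simplified to take one reference per handed-out fragment, whereas the kernel sets PTE_FRAG_NR references up front and recovers the page from a fragment pointer with virt_to_page().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_NR   4		/* fragments carved from one page */
#define FRAG_SIZE 1024

struct frag_page {
	atomic_int refcount;	/* stands in for page->pt_frag_refcount */
	char data[FRAG_NR * FRAG_SIZE];
};

static struct frag_page *cur;	/* stands in for the per-mm cache */
static int next_frag;

static char *frag_alloc(void)
{
	if (!cur || next_frag == FRAG_NR) {
		cur = calloc(1, sizeof(*cur));
		if (!cur)
			return NULL;
		atomic_init(&cur->refcount, 0);
		next_frag = 0;
	}
	atomic_fetch_add(&cur->refcount, 1);	/* one ref per fragment */
	return cur->data + FRAG_SIZE * next_frag++;
}

static void frag_free(char *frag, struct frag_page *page)
{
	(void)frag;
	/* the last outstanding fragment frees the whole page */
	if (atomic_fetch_sub(&page->refcount, 1) == 1) {
		if (page == cur)
			cur = NULL;
		free(page);
	}
}

int main(void)
{
	char *a = frag_alloc();
	char *b = frag_alloc();
	struct frag_page *page = cur;

	frag_free(a, page);
	frag_free(b, page);	/* refcount hits zero, page released */
	printf("page released after both fragments were freed\n");
	return 0;
}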
*/ @@ -221,9 +223,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, } #ifdef CONFIG_HUGETLB_PAGE -extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) { #ifdef HUGETLB_NEED_PRELOAD /* diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index bda3c6f1bd32..d67215248d82 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[]; __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - pte_t *pte; + if (!slab_is_available()) + return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); - if (slab_is_available()) { - pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); - } else { - pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); - if (pte) - clear_page(pte); - } - return pte; + return (pte_t *)pte_fragment_alloc(mm, address, 1); } pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *ptepage; - - gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT; - - ptepage = alloc_pages(flags, 0); - if (!ptepage) - return NULL; - if (!pgtable_page_ctor(ptepage)) { - __free_page(ptepage); - return NULL; - } - return ptepage; + return (pgtable_t)pte_fragment_alloc(mm, address, 0); } void __iomem * @@ -160,7 +143,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call * Don't allow anybody to remap normal RAM that we're using. * mem_init() sets high_memory so only do the check after that. */ - if (slab_is_available() && (p < virt_to_phys(high_memory)) && + if (slab_is_available() && p <= virt_to_phys(high_memory - 1) && page_is_ram(__phys_to_pfn(p))) { printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n", (unsigned long long)p, __builtin_return_address(0)); @@ -260,7 +243,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) ktext = ((char *)v >= _stext && (char *)v < etext) || ((char *)v >= _sinittext && (char *)v < _einittext); map_kernel_page(v, p, ktext ? 
PAGE_KERNEL_TEXT : PAGE_KERNEL); -#ifdef CONFIG_PPC_STD_MMU_32 +#ifdef CONFIG_PPC_BOOK3S_32 if (ktext) hash_preload(&init_mm, v, false, 0x300); #endif diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index b271b283c785..587807763737 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -6,20 +6,21 @@ */ #include <asm/mman.h> +#include <asm/mmu_context.h> #include <asm/setup.h> #include <linux/pkeys.h> #include <linux/of_device.h> DEFINE_STATIC_KEY_TRUE(pkey_disabled); -bool pkey_execute_disable_supported; int pkeys_total; /* Total pkeys as per device tree */ -bool pkeys_devtree_defined; /* pkey property exported by device tree */ u32 initial_allocation_mask; /* Bits set for the initially allocated keys */ u32 reserved_allocation_mask; /* Bits set for reserved keys */ -u64 pkey_amr_mask; /* Bits in AMR not to be touched */ -u64 pkey_iamr_mask; /* Bits in AMR not to be touched */ -u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */ -int execute_only_key = 2; +static bool pkey_execute_disable_supported; +static bool pkeys_devtree_defined; /* property exported by device tree */ +static u64 pkey_amr_mask; /* Bits in AMR not to be touched */ +static u64 pkey_iamr_mask; /* Bits in AMR not to be touched */ +static u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */ +static int execute_only_key = 2; #define AMR_BITS_PER_PKEY 2 #define AMR_RD_BIT 0x1UL @@ -57,7 +58,7 @@ static inline bool pkey_mmu_enabled(void) return cpu_has_feature(CPU_FTR_PKEY); } -int pkey_initialize(void) +static int pkey_initialize(void) { int os_reserved, i; @@ -414,3 +415,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, return pkey_access_permitted(vma_pkey(vma), write, execute); } + +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm) +{ + if (static_branch_likely(&pkey_disabled)) + return; + + /* Duplicate the oldmm pkey state in mm: */ + mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm); + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; +} diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f6f575bae3bc..3f4193201ee7 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -31,6 +31,7 @@ #include <asm/prom.h> #include <asm/mmu.h> #include <asm/machdep.h> +#include <asm/code-patching.h> #include "mmu_decl.h" @@ -52,7 +53,7 @@ struct batrange { /* stores address ranges mapped by BATs */ phys_addr_t v_block_mapped(unsigned long va) { int b; - for (b = 0; b < 4; ++b) + for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (va >= bat_addrs[b].start && va < bat_addrs[b].limit) return bat_addrs[b].phys + (va - bat_addrs[b].start); return 0; @@ -64,7 +65,7 @@ phys_addr_t v_block_mapped(unsigned long va) unsigned long p_block_mapped(phys_addr_t pa) { int b; - for (b = 0; b < 4; ++b) + for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (pa >= bat_addrs[b].phys && pa < (bat_addrs[b].limit-bat_addrs[b].start) +bat_addrs[b].phys) @@ -182,22 +183,8 @@ void __init MMU_init_hw(void) unsigned int hmask, mb, mb2; unsigned int n_hpteg, lg_n_hpteg; - extern unsigned int hash_page_patch_A[]; - extern unsigned int hash_page_patch_B[], hash_page_patch_C[]; - extern unsigned int hash_page[]; - extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[]; - - if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) { - /* - * Put a blr (procedure return) instruction at the - * start of hash_page, since we can still get DSI - * exceptions on a 603. 
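On the pkeys.c hunk above: the constants kept at the top encode two AMR bits per protection key (AMR_BITS_PER_PKEY), one read-deny and one write-deny. A standalone sketch of that bit packing; the top-down shift convention used here is illustrative rather than the exact ISA bit numbering, and no SPR access is modeled.

#include <stdint.h>
#include <stdio.h>

#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL

/* place a key's two permission bits within a 64-bit AMR image,
 * key 0 highest (illustrative layout) */
static uint64_t pkey_amr_bits(int pkey, uint64_t bits)
{
	return bits << (62 - pkey * AMR_BITS_PER_PKEY);
}

int main(void)
{
	/* deny both read and write for key 2 */
	uint64_t amr = pkey_amr_bits(2, AMR_RD_BIT | AMR_WR_BIT);

	printf("AMR image: 0x%016llx\n", (unsigned long long)amr);
	return 0;
}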
- */ - hash_page[0] = 0x4e800020; - flush_icache_range((unsigned long) &hash_page[0], - (unsigned long) &hash_page[1]); + if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) return; - } if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105); @@ -244,31 +231,19 @@ void __init MMU_init_hw(void) if (lg_n_hpteg > 16) mb2 = 16 - LG_HPTEG_SIZE; - hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff) - | ((unsigned int)(Hash) >> 16); - hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6); - hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6); - hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask; - hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask; - - /* - * Ensure that the locations we've patched have been written - * out from the data cache and invalidated in the instruction - * cache, on those machines with split caches. - */ - flush_icache_range((unsigned long) &hash_page_patch_A[0], - (unsigned long) &hash_page_patch_C[1]); + modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16); + modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6); + modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6); + modify_instruction_site(&patch__hash_page_B, 0xffff, hmask); + modify_instruction_site(&patch__hash_page_C, 0xffff, hmask); /* * Patch up the instructions in hashtable.S:flush_hash_page */ - flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff) - | ((unsigned int)(Hash) >> 16); - flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6); - flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6); - flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask; - flush_icache_range((unsigned long) &flush_hash_patch_A[0], - (unsigned long) &flush_hash_patch_B[1]); + modify_instruction_site(&patch__flush_hash_A0, 0xffff, (unsigned int)Hash >> 16); + modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6); + modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6); + modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); } diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 7fd20c52a8ec..9ed90064f542 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -70,6 +70,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) std r15,EX_TLB_R15(r12) std r10,EX_TLB_CR(r12) #ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. 
r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) #endif TLB_MISS_PROLOG_STATS diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 47fc6660845d..c2d5192ed64f 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -152,6 +152,10 @@ ___PPC_RS(a) | ___PPC_RB(s)) #define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_SRAW(d, a, s) EMIT(PPC_INST_SRAW | ___PPC_RA(d) | \ + ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_SRAWI(d, a, i) EMIT(PPC_INST_SRAWI | ___PPC_RA(d) | \ + ___PPC_RS(a) | __PPC_SH(i)) #define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(s)) #define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index d5bfe24bb3b5..91d223cf512b 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -379,18 +379,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, hash)); break; case BPF_ANC | SKF_AD_VLAN_TAG: - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, vlan_tci)); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { - PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT); - } else { - PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); - PPC_SRWI(r_A, r_A, 12); - } + break; + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: + PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET()); + if (PKT_VLAN_PRESENT_BIT) + PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT); + if (PKT_VLAN_PRESENT_BIT < 7) + PPC_ANDI(r_A, r_A, 1); break; case BPF_ANC | SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 9393e231cbc2..7ce57657d3b8 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -529,9 +529,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, if (imm != 0) PPC_SRDI(dst_reg, dst_reg, imm); break; + case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ + PPC_SRAW(dst_reg, dst_reg, src_reg); + goto bpf_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ PPC_SRAD(dst_reg, dst_reg, src_reg); break; + case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ + PPC_SRAWI(dst_reg, dst_reg, imm); + goto bpf_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ if (imm != 0) PPC_SRADI(dst_reg, dst_reg, imm); diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index 8d26d7416481..bb2d94c8cbe6 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile @@ -16,4 +16,4 @@ oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \ cell/spu_task_sync.o oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o -oprofile-$(CONFIG_6xx) += op_model_7450.o +oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index bf094c5a4bd9..a11132865504 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c @@ -212,7 +212,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) model = &op_model_pa6t; break; #endif -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 case PPC_OPROFILE_G4: model = &op_model_7450; break; diff --git a/arch/powerpc/perf/core-book3s.c 
b/arch/powerpc/perf/core-book3s.c index 81f8a0c838ae..b0723002a396 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> @@ -130,6 +131,14 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ +bool is_sier_available(void) +{ + if (ppmu->flags & PPMU_HAS_SIER) + return true; + + return false; +} + static bool regs_use_siar(struct pt_regs *regs) { /* @@ -864,6 +873,8 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw, int i, j; unsigned long addf = ppmu->add_fields; unsigned long tadd = ppmu->test_adder; + unsigned long grp_mask = ppmu->group_constraint_mask; + unsigned long grp_val = ppmu->group_constraint_val; if (n_ev > ppmu->n_counter) return -1; @@ -884,15 +895,23 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw, for (i = 0; i < n_ev; ++i) { nv = (value | cpuhw->avalues[i][0]) + (value & cpuhw->avalues[i][0] & addf); - if ((((nv + tadd) ^ value) & mask) != 0 || - (((nv + tadd) ^ cpuhw->avalues[i][0]) & - cpuhw->amasks[i][0]) != 0) + + if (((((nv + tadd) ^ value) & mask) & (~grp_mask)) != 0) + break; + + if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0]) + & (~grp_mask)) != 0) break; + value = nv; mask |= cpuhw->amasks[i][0]; } - if (i == n_ev) - return 0; /* all OK */ + if (i == n_ev) { + if ((value & mask & grp_mask) != (mask & grp_val)) + return -1; + else + return 0; /* all OK */ + } /* doesn't work, gather alternatives... */ if (!ppmu->get_alternatives) @@ -2148,7 +2167,7 @@ static bool pmc_overflow(unsigned long val) /* * Performance monitor interrupt stuff */ -static void perf_event_interrupt(struct pt_regs *regs) +static void __perf_event_interrupt(struct pt_regs *regs) { int i, j; struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); @@ -2232,6 +2251,14 @@ static void perf_event_interrupt(struct pt_regs *regs) irq_exit(); } +static void perf_event_interrupt(struct pt_regs *regs) +{ + u64 start_clock = sched_clock(); + + __perf_event_interrupt(regs); + perf_sample_event_took(sched_clock() - start_clock); +} + static int power_pmu_prepare_cpu(unsigned int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 6954636b16d1..f292a3f284f1 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -28,13 +28,13 @@ static DEFINE_MUTEX(nest_init_lock); static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc); static struct imc_pmu **per_nest_pmu_arr; static cpumask_t nest_imc_cpumask; -struct imc_pmu_ref *nest_imc_refc; +static struct imc_pmu_ref *nest_imc_refc; static int nest_pmus; /* Core IMC data structures and variables */ static cpumask_t core_imc_cpumask; -struct imc_pmu_ref *core_imc_refc; +static struct imc_pmu_ref *core_imc_refc; static struct imc_pmu *core_imc_pmu; /* Thread IMC data structures and variables */ @@ -43,7 +43,7 @@ static DEFINE_PER_CPU(u64 *, thread_imc_mem); static struct imc_pmu *thread_imc_pmu; static int thread_imc_mem_size; -struct imc_pmu *imc_event_to_pmu(struct perf_event *event) +static struct imc_pmu *imc_event_to_pmu(struct perf_event *event) { return container_of(event->pmu, struct imc_pmu, pmu); } diff --git a/arch/powerpc/perf/isa207-common.c 
b/arch/powerpc/perf/isa207-common.c index 177de814286f..a6c24d866b2f 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -148,6 +148,14 @@ static bool is_thresh_cmp_valid(u64 event) return true; } +static unsigned int dc_ic_rld_quad_l1_sel(u64 event) +{ + unsigned int cache; + + cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK; + return cache; +} + static inline u64 isa207_find_source(u64 idx, u32 sub_idx) { u64 ret = PERF_MEM_NA; @@ -226,8 +234,13 @@ void isa207_get_mem_weight(u64 *weight) u64 mmcra = mfspr(SPRN_MMCRA); u64 exp = MMCRA_THR_CTR_EXP(mmcra); u64 mantissa = MMCRA_THR_CTR_MANT(mmcra); + u64 sier = mfspr(SPRN_SIER); + u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - *weight = mantissa << (2 * exp); + if (val == 0 || val == 7) + *weight = 0; + else + *weight = mantissa << (2 * exp); } int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) @@ -274,19 +287,27 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) } if (unit >= 6 && unit <= 9) { - /* - * L2/L3 events contain a cache selector field, which is - * supposed to be programmed into MMCRC. However MMCRC is only - * HV writable, and there is no API for guest kernels to modify - * it. The solution is for the hypervisor to initialise the - * field to zeroes, and for us to only ever allow events that - * have a cache selector of zero. The bank selector (bit 3) is - * irrelevant, as long as the rest of the value is 0. - */ - if (cache & 0x7) + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + mask |= CNST_CACHE_GROUP_MASK; + value |= CNST_CACHE_GROUP_VAL(event & 0xff); + + mask |= CNST_CACHE_PMC4_MASK; + if (pmc == 4) + value |= CNST_CACHE_PMC4_VAL; + } else if (cache & 0x7) { + /* + * L2/L3 events contain a cache selector field, which is + * supposed to be programmed into MMCRC. However MMCRC is only + * HV writable, and there is no API for guest kernels to modify + * it. The solution is for the hypervisor to initialise the + * field to zeroes, and for us to only ever allow events that + * have a cache selector of zero. The bank selector (bit 3) is + * irrelevant, as long as the rest of the value is 0. 
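Tying the two perf hunks together: power_check_constraints() (previous file) now masks grp_mask out of the per-event checks and applies a single group-wide test after merging, and isa207_get_constraint() above feeds it the POWER9 cache-group encoding. A reduced standalone version of that final test, using the CNST_CACHE_PMC4 encoding from the header hunk below; the accumulated mask/value pair is faked rather than merged from real events.

#include <stdint.h>
#include <stdio.h>

#define CNST_CACHE_PMC4_VAL  (1ULL << 54)
#define CNST_CACHE_PMC4_MASK CNST_CACHE_PMC4_VAL

int main(void)
{
	uint64_t grp_mask = CNST_CACHE_PMC4_MASK;
	uint64_t grp_val  = CNST_CACHE_PMC4_VAL;
	/* pretend the merge loop accumulated these across the group */
	uint64_t mask  = grp_mask;	/* fields the events constrain */
	uint64_t value = grp_mask;	/* merged per-event values */

	if ((value & mask & grp_mask) == (mask & grp_val))
		printf("event group schedulable\n");
	else
		printf("reject: group constraint violated\n");
	return 0;
}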
+ */ return -1; + } - } else if (event & EVENT_IS_L1) { + } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) { mask |= CNST_L1_QUAL_MASK; value |= CNST_L1_QUAL_VAL(cache); } @@ -389,11 +410,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev, /* In continuous sampling mode, update SDAR on TLB miss */ mmcra_sdar_mode(event[i], &mmcra); - if (event[i] & EVENT_IS_L1) { - cache = event[i] >> EVENT_CACHE_SEL_SHIFT; - mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; - cache >>= 1; - mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + cache = dc_ic_rld_quad_l1_sel(event[i]); + mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT; + } else { + if (event[i] & EVENT_IS_L1) { + cache = dc_ic_rld_quad_l1_sel(event[i]); + mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT; + } } if (is_event_marked(event[i])) { diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 0028f4b9490d..91350f42a662 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -134,6 +134,11 @@ #define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16) #define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK) +#define CNST_CACHE_GROUP_VAL(v) (((v) & 0xffull) << 55) +#define CNST_CACHE_GROUP_MASK CNST_CACHE_GROUP_VAL(0xff) +#define CNST_CACHE_PMC4_VAL (1ull << 54) +#define CNST_CACHE_PMC4_MASK CNST_CACHE_PMC4_VAL + /* * For NC we are counting up to 4 events. This requires three bits, and we need * the fifth event to overflow and set the 4th bit. To achieve that we bias the @@ -163,8 +168,8 @@ #define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) #define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) #define MMCR1_FAB_SHIFT 36 -#define MMCR1_DC_QUAL_SHIFT 47 -#define MMCR1_IC_QUAL_SHIFT 46 +#define MMCR1_DC_IC_QUAL_MASK 0x3 +#define MMCR1_DC_IC_QUAL_SHIFT 46 /* MMCR1 Combine bits macro for power9 */ #define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2)) diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 09ceea6175ba..5c36b3a8d47a 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -69,6 +69,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap), PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), + PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), }; u64 perf_reg_value(struct pt_regs *regs, int idx) @@ -76,6 +77,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX)) return 0; + if (idx == PERF_REG_POWERPC_SIER && + (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || + IS_ENABLED(CONFIG_PPC32) || + !is_sier_available())) + return 0; + return regs_get_register(regs, pt_regs_offset[idx]); } diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index e012b1030a5b..0ff9c43733e9 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -63,16 +63,8 @@ * MMCRA[9:11] = thresh_cmp[0:2] * MMCRA[12:18] = thresh_cmp[3:9] * - * if unit == 6 or unit == 7 - * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL) - * else if unit == 8 or unit == 9: - * if cache_sel[0] == 0: # L3 bank - * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0) - * else if cache_sel[0] == 1: - * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1) - * else if cache_sel[1]: # L1 event - * MMCR1[16] = cache_sel[2] - * MMCR1[17] = cache_sel[3] + * MMCR1[16] = cache_sel[2] + * MMCR1[17] = cache_sel[3] * * if mark: * MMCRA[63] = 1 (SAMPLE_ENABLE) @@ -179,8 +171,6 
@@ CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE); CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS); CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL); -CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS); -CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST); CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL); CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL); CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS); @@ -205,8 +195,6 @@ static struct attribute *power9_events_attr[] = { CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS), CACHE_EVENT_PTR(PM_DATA_FROM_L3), CACHE_EVENT_PTR(PM_L3_PREF_ALL), - CACHE_EVENT_PTR(PM_L2_ST_MISS), - CACHE_EVENT_PTR(PM_L2_ST), CACHE_EVENT_PTR(PM_BR_MPRED_CMPL), CACHE_EVENT_PTR(PM_BR_CMPL), CACHE_EVENT_PTR(PM_DTLB_MISS), @@ -354,8 +342,8 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = PM_L2_ST, - [ C(RESULT_MISS) ] = PM_L2_ST_MISS, + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, @@ -427,6 +415,8 @@ static struct power_pmu power9_pmu = { .n_counter = MAX_PMU_COUNTERS, .add_fields = ISA207_ADD_FIELDS, .test_adder = ISA207_TEST_ADDER, + .group_constraint_mask = CNST_CACHE_PMC4_MASK, + .group_constraint_val = CNST_CACHE_PMC4_VAL, .compute_mmcr = isa207_compute_mmcr, .config_bhrb = power9_config_bhrb, .bhrb_filter_map = power9_bhrb_filter_map, diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index a886c2c22097..f467247fd1c4 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -47,7 +47,7 @@ static int __init warp_probe(void) if (!of_machine_is_compatible("pika,warp")) return 0; - /* For __dma_alloc_coherent */ + /* For __dma_nommu_alloc_coherent */ ISA_DMA_THRESHOLD = ~0L; return 1; @@ -179,9 +179,9 @@ static int pika_setup_leds(void) } for_each_child_of_node(np, child) - if (strcmp(child->name, "green") == 0) + if (of_node_name_eq(child, "green")) green_led = of_get_gpio(child, 0); - else if (strcmp(child->name, "red") == 0) + else if (of_node_name_eq(child, "red")) red_led = of_get_gpio(child, 0); of_node_put(np); diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index f5bbd4563342..f2610a02844a 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c @@ -223,8 +223,6 @@ static void __init ocm_init_node(int count, struct device_node *node) INIT_LIST_HEAD(&ocm->c.list); ocm->ready = 1; - - return; } static int ocm_debugfs_show(struct seq_file *m, void *v) @@ -242,9 +240,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys); seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); - seq_printf(m, "MemTotal(C) : %d Bytes\n", ocm->c.memtotal); - - seq_printf(m, "\n"); + seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys); seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); @@ -256,9 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) blk->size, blk->owner); } - seq_printf(m, "\n"); - - seq_printf(m, "C.PhysAddr : 0x%llx\n", ocm->c.phys); + seq_printf(m, "\nC.PhysAddr : 0x%llx\n", ocm->c.phys); seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); seq_printf(m, "C.MemFree : %d 
Bytes\n", ocm->c.memfree); @@ -268,7 +262,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) blk->size, blk->owner); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } return 0; @@ -338,7 +332,6 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, ocm_blk = kzalloc(sizeof(*ocm_blk), GFP_KERNEL); if (!ocm_blk) { - printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); rh_free(ocm_reg->rh, offset); break; } @@ -392,10 +385,8 @@ static int __init ppc4xx_ocm_init(void) return 0; ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL); - if (!ocm_nodes) { - printk(KERN_ERR "PPC4XX OCM: failed to allocate OCM nodes!\n"); + if (!ocm_nodes) return -ENOMEM; - } ocm_count = count; count = 0; diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index 5aca523551ae..e6e2adcc7b64 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -1399,7 +1399,6 @@ static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port) printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index); iounmap(mbase); - return; } static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata = @@ -2081,7 +2080,6 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) const u32 *pval; int portno; unsigned int dcrs; - const char *val; /* First, proceed to core initialization as we assume there's * only one PCIe core in the system @@ -2127,10 +2125,9 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) * Resulting from this setup this PCIe port will be configured * as root-complex or as endpoint. */ - val = of_get_property(port->node, "device_type", NULL); - if (!strcmp(val, "pci-endpoint")) { + if (of_node_is_type(port->node, "pci-endpoint")) { port->endpoint = 1; - } else if (!strcmp(val, "pci")) { + } else if (of_node_is_type(port->node, "pci")) { port->endpoint = 0; } else { printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n", diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig index b59eab6cbb1b..0c495823152c 100644 --- a/arch/powerpc/platforms/512x/Kconfig +++ b/arch/powerpc/platforms/512x/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_MPC512x bool "512x-based boards" - depends on 6xx + depends on PPC_BOOK3S_32 select COMMON_CLK select FSL_SOC select IPIC diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 55a587070342..67f8c2d8fc0e 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_MPC52xx bool "52xx-based boards" - depends on 6xx + depends on PPC_BOOK3S_32 select COMMON_CLK select PPC_PCI_CHOICE diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 1ecbf176d35a..61538869e88a 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -82,11 +82,9 @@ static void __init efika_pcisetup(void) return; } - for (pcictrl = NULL;;) { - pcictrl = of_get_next_child(root, pcictrl); - if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0)) + for_each_child_of_node(root, pcictrl) + if (of_node_name_eq(pcictrl, "pci")) break; - } of_node_put(root); diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig index 1947a88bc69f..1af81de1c4e6 100644 --- a/arch/powerpc/platforms/82xx/Kconfig +++ b/arch/powerpc/platforms/82xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: 
GPL-2.0 menuconfig PPC_82xx bool "82xx-based boards (PQ II)" - depends on 6xx + depends on PPC_BOOK3S_32 if PPC_82xx @@ -54,7 +54,7 @@ config PQ2ADS config 8260 bool - depends on 6xx + depends on PPC_BOOK3S_32 select CPM2 help The MPC8260 is a typical embedded CPU made by Freescale. Selecting diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig index 071f53b0c0a0..ff0c69dfdf1a 100644 --- a/arch/powerpc/platforms/83xx/Kconfig +++ b/arch/powerpc/platforms/83xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig PPC_83xx bool "83xx-based boards" - depends on 6xx + depends on PPC_BOOK3S_32 select PPC_UDBG_16550 select PPC_PCI_CHOICE select FSL_PCI if PCI diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index d75c9816a5c9..2b6589fe812d 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c @@ -14,6 +14,7 @@ #include <linux/of_platform.h> #include <linux/pci.h> +#include <asm/debug.h> #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/ipic.h> @@ -150,3 +151,19 @@ void __init mpc83xx_setup_arch(void) mpc83xx_setup_pci(); } + +int machine_check_83xx(struct pt_regs *regs) +{ + u32 mask = 1 << (31 - IPIC_MCP_WDT); + + if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask)) + return machine_check_generic(regs); + ipic_clear_mcp_status(mask); + + if (debugger_fault_handler(regs)) + return 1; + + die("Watchdog NMI Reset", regs, 0); + + return 1; +} diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index ac191a7a1337..b0dac307bebf 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -68,16 +68,6 @@ void __init corenet_gen_setup_arch(void) swiotlb_detect_4g(); -#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32) - /* - * Inbound windows don't cover the full lower 4 GiB - * due to conflicts with PCICSRBAR and outbound windows, - * so limit the DMA32 zone to 2 GiB, to allow consistent - * allocations to succeed. - */ - limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT)); -#endif - pr_info("%s board\n", ppc_md.name); mpc85xx_qe_init(); diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index b63a8548366f..27631c607f3d 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -45,15 +45,6 @@ static void __init qemu_e500_setup_arch(void) fsl_pci_assign_primary(); swiotlb_detect_4g(); -#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32) - /* - * Inbound windows don't cover the full lower 4 GiB - * due to conflicts with PCICSRBAR and outbound windows, - * so limit the DMA32 zone to 2 GiB, to allow consistent - * allocations to succeed. 
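The 83xx misc.c hunk above routes watchdog-sourced machine checks: unless the MSR flags an MCP event and the IPIC status shows the watchdog bit, it falls back to the generic handler; otherwise it acknowledges the source and dies. A standalone model of that decision flow, with register reads replaced by plain parameters and an illustrative WDT bit position.

#include <stdio.h>

#define SRR1_MCE_MCP	0x00080000u	/* illustrative encoding */
#define WDT_MCP_BIT	(1u << 7)	/* illustrative IPIC_MCP_WDT slot */

/* returns 1 if handled as a watchdog NMI, 0 to fall back */
static int machine_check_83xx_model(unsigned int msr, unsigned int mcp)
{
	if (!(msr & SRR1_MCE_MCP) || !(mcp & WDT_MCP_BIT))
		return 0;	/* not ours: generic handler */
	/* ack the source (ipic_clear_mcp_status), then escalate */
	printf("Watchdog NMI Reset\n");
	return 1;
}

int main(void)
{
	printf("handled: %d\n",
	       machine_check_83xx_model(SRR1_MCE_MCP, WDT_MCP_BIT));
	return 0;
}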
- */ - limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT)); -#endif mpc85xx_smp_init(); } diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c index dac36ba82fea..2d1652108ba1 100644 --- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c @@ -39,7 +39,7 @@ struct device_node *cpld_node; */ static void t1042rdb_set_monitor_port(enum fsl_diu_monitor_port port) { - static void __iomem *cpld_base; + void __iomem *cpld_base; cpld_base = of_iomap(cpld_node, 0); if (!cpld_base) { diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig index bcd179d3ed92..df692aa6b578 100644 --- a/arch/powerpc/platforms/86xx/Kconfig +++ b/arch/powerpc/platforms/86xx/Kconfig @@ -2,7 +2,7 @@ config PPC_86xx menuconfig PPC_86xx bool "86xx-based boards" - depends on 6xx + depends on PPC_BOOK3S_32 select FSL_SOC select ALTIVEC help diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 020e84a47a32..9f2c1ecc85c3 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -86,8 +86,7 @@ smp_86xx_kick_cpu(int nr) mdelay(1); /* Restore the exception vector */ - *vector = save_vector; - flush_icache_range((unsigned long) vector, (unsigned long) vector + 4); + patch_instruction(vector, save_vector); local_irq_restore(flags); diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 260a56b7602d..5c48dd823e15 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -40,7 +40,7 @@ config EPAPR_PARAVIRT config PPC_NATIVE bool - depends on 6xx || PPC64 + depends on PPC_BOOK3S_32 || PPC64 help Support for running natively on the hardware, i.e. without a hypervisor. 
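The one-line t1042rdb_diu.c change above deserves a note: cpld_base is mapped and unmapped within a single call, so the old static qualifier only preserved a stale pointer between calls. A small sketch of the corrected lifetime, using stand-in map/unmap helpers in place of of_iomap()/iounmap().

#include <stdio.h>

static void *cpld_iomap(void)     { return (void *)0x1000; } /* stand-in */
static void cpld_iounmap(void *p) { (void)p; }		      /* stand-in */

static void set_monitor_port(int port)
{
	void *cpld_base = cpld_iomap();	/* was: static void *cpld_base */

	if (!cpld_base)
		return;
	printf("route DIU to port %d via %p\n", port, cpld_base);
	cpld_iounmap(cpld_base);	/* mapping dies with the call */
}

int main(void)
{
	set_monitor_port(1);
	return 0;
}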
This option is not user-selectable but should @@ -48,7 +48,7 @@ config PPC_NATIVE config PPC_OF_BOOT_TRAMPOLINE bool "Support booting from Open Firmware or yaboot" - depends on 6xx || PPC64 + depends on PPC_BOOK3S_32 || PPC64 default y help Support for booting from Open Firmware or yaboot using an @@ -197,7 +197,7 @@ endmenu config PPC601_SYNC_FIX bool "Workarounds for PPC601 bugs" - depends on 6xx && PPC_PMAC + depends on PPC_BOOK3S_32 && PPC_PMAC help Some versions of the PPC601 (the first PowerPC chip) have bugs which mean that extra synchronization instructions are required near @@ -211,7 +211,7 @@ config PPC601_SYNC_FIX config TAU bool "On-chip CPU temperature sensor support" - depends on 6xx + depends on PPC_BOOK3S_32 help G3 and G4 processors have an on-chip temperature sensor called the 'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index f4e2c5729374..ab176fd3dfb5 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -24,6 +24,7 @@ choice config PPC_BOOK3S_32 bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx" select PPC_FPU + select PPC_HAVE_PMU_SUPPORT config PPC_85xx bool "Freescale 85xx" @@ -179,11 +180,6 @@ config PPC_BOOK3E def_bool y depends on PPC_BOOK3E_64 -config 6xx - def_bool y - depends on PPC32 && PPC_BOOK3S - select PPC_HAVE_PMU_SUPPORT - config E500 select FSL_EMB_PERFMON select PPC_FSL_BOOK3E @@ -266,7 +262,7 @@ config PHYS_64BIT config ALTIVEC bool "AltiVec Support" - depends on 6xx || PPC_BOOK3S_64 || (PPC_E500MC && PPC64) + depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64) ---help--- This option enables kernel support for the Altivec extensions to the PowerPC processor. The kernel currently supports saving and restoring @@ -316,14 +312,6 @@ config SPE If in doubt, say Y here.
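
A note on the mechanics behind these Kconfig hunks: once the 6xx symbol is deleted, the build no longer emits the CONFIG_6xx preprocessor macro, so every C and assembly guard has to move to the surviving CONFIG_PPC_BOOK3S_32, exactly as the powermac cache.S and sleep.S hunks further down do. A minimal sketch of that pattern; the helper name here is hypothetical, not kernel code:

/* Hedged sketch: book3s_32_setup() is a hypothetical helper. */
#ifdef CONFIG_PPC_BOOK3S_32		/* formerly: #ifdef CONFIG_6xx */
void book3s_32_setup(void);
#else
static inline void book3s_32_setup(void) { }
#endif
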
-config PPC_STD_MMU - def_bool y - depends on PPC_BOOK3S - -config PPC_STD_MMU_32 - def_bool y - depends on PPC_STD_MMU && PPC32 - config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y depends on PPC_BOOK3S_64 @@ -358,7 +346,7 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION config PPC_MMU_NOHASH def_bool y - depends on !PPC_STD_MMU + depends on !PPC_BOOK3S config PPC_BOOK3E_MMU def_bool y @@ -412,7 +400,8 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON + depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \ + GAMECUBE_COMMON || AMIGAONE default n if PPC_47x default y diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig index 03dc1e37c25b..e03d26d41957 100644 --- a/arch/powerpc/platforms/amigaone/Kconfig +++ b/arch/powerpc/platforms/amigaone/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config AMIGAONE bool "Eyetech AmigaOne/MAI Teron" - depends on 6xx && BROKEN_ON_SMP + depends on PPC_BOOK3S_32 && BROKEN_ON_SMP select PPC_I8259 select PPC_INDIRECT_PCI select PPC_UDBG_16550 diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index b926438d73af..27ee65b89099 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c @@ -53,7 +53,7 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np) int i; struct device_node *tmp_np; - if (strcasecmp(np->type, "spe")) { + if (!of_node_is_type(np, "spe")) { for (i = 0; i < cbe_regs_map_count; i++) if (cbe_regs_maps[i].cpu_node == np || cbe_regs_maps[i].be_node == np) @@ -70,8 +70,8 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np) tmp_np = tmp_np->parent; /* on a correct devicetree we won't get up to root */ BUG_ON(!tmp_np); - } while (strcasecmp(tmp_np->type, "cpu") && - strcasecmp(tmp_np->type, "be")); + } while (!of_node_is_type(tmp_np, "cpu") && + !of_node_is_type(tmp_np, "be")); np->data = cbe_find_map(tmp_np); diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 12352a58072a..af2a3c15e0ec 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -654,7 +654,6 @@ static const struct dma_map_ops dma_iommu_fixed_ops = { .dma_supported = dma_suported_and_switch, .map_page = dma_fixed_map_page, .unmap_page = dma_fixed_unmap_page, - .mapping_error = dma_iommu_mapping_error, }; static void cell_dma_dev_setup(struct device *dev) diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 7d31b8d14661..e2e1371a71e2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -131,7 +131,7 @@ static int cell_setup_phb(struct pci_controller *phb) np = phb->dn; model = of_get_property(np, "model", NULL); - if (model == NULL || strcmp(np->name, "pci")) + if (model == NULL || !of_node_name_eq(np, "pci")) return 0; /* Setup workarounds for spider */ @@ -168,8 +168,7 @@ static int __init cell_publish_devices(void) * platform devices for the PCI host bridges */ for_each_child_of_node(root, np) { - if (np->type == NULL || (strcmp(np->type, "pci") != 0 && - strcmp(np->type, "pciex") != 0)) + if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex")) continue; of_platform_device_create(np, NULL, NULL); } diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index 8ae86200ef6c..125f2a5f02de 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++
b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -34,20 +34,9 @@ */ static void *spu_syscall_table[] = { -#define SYSCALL(func) sys_ni_syscall, -#define COMPAT_SYS(func) sys_ni_syscall, -#define PPC_SYS(func) sys_ni_syscall, -#define OLDSYS(func) sys_ni_syscall, -#define SYS32ONLY(func) sys_ni_syscall, -#define PPC64ONLY(func) sys_ni_syscall, -#define SYSX(f, f3264, f32) sys_ni_syscall, - -#define SYSCALL_SPU(func) sys_##func, -#define COMPAT_SYS_SPU(func) sys_##func, -#define COMPAT_SPU_NEW(func) sys_##func, -#define SYSX_SPU(f, f3264, f32) f, - -#include <asm/systbl.h> +#define __SYSCALL(nr, entry, nargs) entry, +#include <asm/syscall_table_spu.h> +#undef __SYSCALL }; long spu_sys_callback(struct spu_syscall_block *s) diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index f7e36373f6e0..bed935c51ec2 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -458,7 +458,6 @@ static void init_affinity_node(int cbe) struct device_node *vic_dn, *last_spu_dn; phandle avoid_ph; const phandle *vic_handles; - const char *name; int lenp, i, added; last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, @@ -480,12 +479,7 @@ static void init_affinity_node(int cbe) if (!vic_dn) continue; - /* a neighbour might be spe, mic-tm, or bif0 */ - name = of_get_property(vic_dn, "name", NULL); - if (!name) - continue; - - if (strcmp(name, "spe") == 0) { + if (of_node_name_eq(vic_dn, "spe") ) { spu = devnode_spu(cbe, vic_dn); avoid_ph = last_spu_dn->phandle; } else { @@ -498,7 +492,7 @@ static void init_affinity_node(int cbe) spu = neighbour_spu(cbe, vic_dn, last_spu_dn); if (!spu) continue; - if (!strcmp(name, "mic-tm")) { + if (of_node_name_eq(vic_dn, "mic-tm")) { last_spu->has_mem_affinity = 1; spu->has_mem_affinity = 1; } diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig index ead99eff875a..43a2484aad49 100644 --- a/arch/powerpc/platforms/chrp/Kconfig +++ b/arch/powerpc/platforms/chrp/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_CHRP bool "Common Hardware Reference Platform (CHRP) based machines" - depends on 6xx + depends on PPC_BOOK3S_32 select HAVE_PCSPKR_PLATFORM select MPIC select PPC_I8259 diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 5ddb57b82921..b020c757d2bf 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -230,8 +230,8 @@ chrp_find_bridges(void) else if (strncmp(machine, "Pegasos", 7) == 0) is_pegasos = 1; } - for (dev = root->child; dev != NULL; dev = dev->sibling) { - if (dev->type == NULL || strcmp(dev->type, "pci") != 0) + for_each_child_of_node(root, dev) { + if (!of_node_is_type(dev, "pci")) continue; ++index; /* The GG2 bridge on the LongTrail doesn't have an address */ diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index d6d8ffc0271e..e66644e0fb40 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -280,20 +280,14 @@ static __init void chrp_init(void) node = of_find_node_by_path(property); if (!node) return; - property = of_get_property(node, "device_type", NULL); - if (!property) - goto out_put; - if (strcmp(property, "serial")) + if (!of_node_is_type(node, "serial")) goto out_put; /* * The 9pin connector is either /failsafe * or /pci@80000000/isa@C/serial@i2F8 * The optional graphics card has also type 'serial' in VGA mode. 
*/ - property = of_get_property(node, "name", NULL); - if (!property) - goto out_put; - if (!strcmp(property, "failsafe") || !strcmp(property, "serial")) + if (of_node_name_eq(node, "failsafe") || of_node_name_eq(node, "serial")) add_preferred_console("ttyS", 0, NULL); out_put: of_node_put(node); diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig index 8ea16db5ff48..527d4aa46537 100644 --- a/arch/powerpc/platforms/embedded6xx/Kconfig +++ b/arch/powerpc/platforms/embedded6xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config EMBEDDED6xx bool "Embedded 6xx/7xx/7xxx-based boards" - depends on 6xx && BROKEN_ON_SMP + depends on PPC_BOOK3S_32 && BROKEN_ON_SMP config LINKSTATION bool "Linkstation / Kurobox(HG) from Buffalo" diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index e3821379e86f..13fba004b7e7 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -604,10 +604,8 @@ void __init maple_pci_init(void) printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n"); return; } - for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) { - if (!np->type) - continue; - if (strcmp(np->type, "pci") && strcmp(np->type, "ht")) + for_each_child_of_node(root, np) { + if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht")) continue; if ((of_device_is_compatible(np, "u4-pcie") || of_device_is_compatible(np, "u3-agp")) && diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 53384eb42a76..d18d16489a15 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -255,15 +255,13 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size) chan->ring_size = ring_size; - chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, + chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, ring_size * sizeof(u64), &chan->ring_dma, GFP_KERNEL); if (!chan->ring_virt) return -ENOMEM; - memset(chan->ring_virt, 0, ring_size * sizeof(u64)); - return 0; } EXPORT_SYMBOL(pasemi_dma_alloc_ring); diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index c3c64172482d..fdc839d93837 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -27,6 +27,7 @@ #include <linux/pci.h> #include <asm/pci-bridge.h> +#include <asm/isa-bridge.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> @@ -108,6 +109,61 @@ static int workaround_5945(struct pci_bus *bus, unsigned int devfn, return 1; } +#ifdef CONFIG_PPC_PASEMI_NEMO +#define PXP_ERR_CFG_REG 0x4 +#define PXP_IGNORE_PCIE_ERRORS 0x800 +#define SB600_BUS 5 + +static void sb600_set_flag(int bus) +{ + static void __iomem *iob_mapbase = NULL; + struct resource res; + struct device_node *dn; + int err; + + if (iob_mapbase == NULL) { + dn = of_find_compatible_node(NULL, "isa", "pasemi,1682m-iob"); + if (!dn) { + pr_crit("NEMO SB600 missing iob node\n"); + return; + } + + err = of_address_to_resource(dn, 0, &res); + of_node_put(dn); + + if (err) { + pr_crit("NEMO SB600 missing resource\n"); + return; + } + + pr_info("NEMO SB600 IOB base %08llx\n",res.start); + + iob_mapbase = ioremap(res.start + 0x100, 0x94); + } + + if (iob_mapbase != NULL) { + if (bus == SB600_BUS) { + /* + * This is the SB600's bus, tell the PCI-e root port + * to allow non-zero devices to enumerate. 
+ */ + out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) | PXP_IGNORE_PCIE_ERRORS); + } else { + /* + * Only scan device 0 on other busses + */ + out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) & ~PXP_IGNORE_PCIE_ERRORS); + } + } +} + +#else + +static void sb600_set_flag(int bus) +{ +} +#endif + static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { @@ -126,6 +182,8 @@ static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn, addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset); + sb600_set_flag(bus->number); + /* * Note: the caller has already checked that offset is * suitably aligned and that len is 1, 2 or 4. @@ -160,6 +218,8 @@ static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn, addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset); + sb600_set_flag(bus->number); + /* * Note: the caller has already checked that offset is * suitably aligned and that len is 1, 2 or 4. @@ -210,6 +270,12 @@ static int __init pas_add_bridge(struct device_node *dev) /* Interpret the "ranges" property */ pci_process_bridge_OF_ranges(hose, dev, 1); + /* + * Scan for an isa bridge. This is needed to find the SB600 on the nemo + * and does nothing on machines without one. + */ + isa_bridge_find_early(hose); + return 0; } diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 9a6eb04cca83..c0532999f854 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -34,6 +34,7 @@ #include <asm/prom.h> #include <asm/iommu.h> #include <asm/machdep.h> +#include <asm/i8259.h> #include <asm/mpic.h> #include <asm/smp.h> #include <asm/time.h> @@ -72,6 +73,40 @@ static void __noreturn pas_restart(char *cmd) out_le32(reset_reg, 0x6000000); } +#ifdef CONFIG_PPC_PASEMI_NEMO +void pas_shutdown(void) +{ + /* Set the PLD bit that makes the SB600 think the power button is being pressed */ + void __iomem *pld_map = ioremap(0xf5000000,4096); + while (1) + out_8(pld_map+7,0x01); +} + +/* RTC platform device structure as is not in device tree */ +static struct resource rtc_resource[] = {{ + .name = "rtc", + .start = 0x70, + .end = 0x71, + .flags = IORESOURCE_IO, +}, { + .name = "rtc", + .start = 8, + .end = 8, + .flags = IORESOURCE_IRQ, +}}; + +static inline void nemo_init_rtc(void) +{ + platform_device_register_simple("rtc_cmos", -1, rtc_resource, 2); +} + +#else + +static inline void nemo_init_rtc(void) +{ +} +#endif + #ifdef CONFIG_SMP static arch_spinlock_t timebase_lock; static unsigned long timebase; @@ -183,6 +218,42 @@ static int __init pas_setup_mce_regs(void) } machine_device_initcall(pasemi, pas_setup_mce_regs); +#ifdef CONFIG_PPC_PASEMI_NEMO +static void sb600_8259_cascade(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int cascade_irq = i8259_irq(); + + if (cascade_irq) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static void nemo_init_IRQ(struct mpic *mpic) +{ + struct device_node *np; + int gpio_virq; + /* Connect the SB600's legacy i8259 controller */ + np = of_find_node_by_path("/pxp@0,e0000000"); + i8259_init(np, 0); + of_node_put(np); + + gpio_virq = irq_create_mapping(NULL, 3); + irq_set_irq_type(gpio_virq, IRQ_TYPE_LEVEL_HIGH); + irq_set_chained_handler(gpio_virq, sb600_8259_cascade); + mpic_unmask_irq(irq_get_irq_data(gpio_virq)); + + irq_set_default_host(mpic->irqhost); +} + +#else + +static inline void 
nemo_init_IRQ(struct mpic *mpic) +{ +} +#endif + static __init void pas_init_IRQ(void) { struct device_node *np; @@ -243,6 +314,8 @@ static __init void pas_init_IRQ(void) mpic_unmask_irq(irq_get_irq_data(nmi_virq)); } + nemo_init_IRQ(mpic); + of_node_put(mpic_node); of_node_put(root); } @@ -404,6 +477,8 @@ static int __init pasemi_publish_devices(void) /* Publish OF platform devices for SDC and other non-PCI devices */ of_platform_bus_probe(NULL, pasemi_bus_ids, NULL); + nemo_init_rtc(); + return 0; } machine_device_initcall(pasemi, pasemi_publish_devices); @@ -418,6 +493,17 @@ static int __init pas_probe(void) !of_machine_is_compatible("pasemi,pwrficient")) return 0; +#ifdef CONFIG_PPC_PASEMI_NEMO + /* + * Check for the Nemo motherboard here, if we are running on one + * change the machine definition to fit + */ + if (of_machine_is_compatible("pasemi,nemo")) { + pm_power_off = pas_shutdown; + ppc_md.name = "A-EON Amigaone X1000"; + } +#endif + iommu_init_early_pasemi(); return 1; diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S index 27862feee4a5..f0641b6e6075 100644 --- a/arch/powerpc/platforms/powermac/cache.S +++ b/arch/powerpc/platforms/powermac/cache.S @@ -28,7 +28,7 @@ */ _GLOBAL(flush_disable_caches) -#ifndef CONFIG_6xx +#ifndef CONFIG_PPC_BOOK3S_32 blr #else BEGIN_FTR_SECTION @@ -356,4 +356,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR) mtmsr r11 /* restore DR and EE */ isync blr -#endif /* CONFIG_6xx */ +#endif /* CONFIG_PPC_BOOK3S_32 */ diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index ed2f54b3f173..c3e5ee8b5175 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -51,7 +51,7 @@ #define DBG(fmt...) 
#endif -#ifdef CONFIG_6xx +#ifdef CONFIG_PPC_BOOK3S_32 extern int powersave_lowspeed; #endif @@ -173,9 +173,9 @@ static long ohare_htw_scc_enable(struct device_node *node, long param, macio = macio_find(node, 0); if (!macio) return -ENODEV; - if (!strcmp(node->name, "ch-a")) + if (of_node_name_eq(node, "ch-a")) chan_mask = MACIO_FLAG_SCCA_ON; - else if (!strcmp(node->name, "ch-b")) + else if (of_node_name_eq(node, "ch-b")) chan_mask = MACIO_FLAG_SCCB_ON; else return -ENODEV; @@ -610,9 +610,9 @@ static long core99_scc_enable(struct device_node *node, long param, long value) macio = macio_find(node, 0); if (!macio) return -ENODEV; - if (!strcmp(node->name, "ch-a")) + if (of_node_name_eq(node, "ch-a")) chan_mask = MACIO_FLAG_SCCA_ON; - else if (!strcmp(node->name, "ch-b")) + else if (of_node_name_eq(node, "ch-b")) chan_mask = MACIO_FLAG_SCCB_ON; else return -ENODEV; @@ -1392,8 +1392,7 @@ static long g5_mpic_enable(struct device_node *node, long param, long value) if (parent == NULL) return 0; - is_u3 = strcmp(parent->name, "u3") == 0 || - strcmp(parent->name, "u4") == 0; + is_u3 = of_node_name_eq(parent, "u3") || of_node_name_eq(parent, "u4"); of_node_put(parent); if (!is_u3) return 0; @@ -1471,6 +1470,7 @@ static long g5_i2s_enable(struct device_node *node, long param, long value) case 2: if (macio->type == macio_shasta) break; + /* fall through */ default: return -ENODEV; } diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index d4d411820597..4de058a20d2b 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -617,7 +617,7 @@ static void __init kw_i2c_probe(void) * but not for now */ child = of_get_next_child(np, NULL); - multibus = !child || strcmp(child->name, "i2c-bus"); + multibus = !of_node_name_eq(child, "i2c-bus"); of_node_put(child); /* For a multibus setup, we get the bus count based on the @@ -917,10 +917,9 @@ static void __init smu_i2c_probe(void) * type as older device trees mix i2c busses and other things * at the same level */ - for (busnode = NULL; - (busnode = of_get_next_child(controller, busnode)) != NULL;) { - if (strcmp(busnode->type, "i2c") && - strcmp(busnode->type, "i2c-bus")) + for_each_child_of_node(controller, busnode) { + if (!of_node_is_type(busnode, "i2c") && + !of_node_is_type(busnode, "i2c-bus")) continue; reg = of_get_property(busnode, "reg", NULL); if (reg == NULL) @@ -1206,7 +1205,7 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, if (bus != pmac_i2c_find_bus(np)) continue; for (p = whitelist; p->name != NULL; p++) { - if (strcmp(np->name, p->name)) + if (!of_node_name_eq(np, p->name)) continue; if (p->compatible && !of_device_is_compatible(np, p->compatible)) diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 04527d13d5a4..3d7420503c37 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -501,9 +501,7 @@ static void __init init_p2pbridge(void) /* XXX it would be better here to identify the specific PCI-PCI bridge chip we have. 
*/ p2pbridge = of_find_node_by_name(NULL, "pci-bridge"); - if (p2pbridge == NULL - || p2pbridge->parent == NULL - || strcmp(p2pbridge->parent->name, "pci") != 0) + if (p2pbridge == NULL || !of_node_name_eq(p2pbridge->parent, "pci")) goto done; if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) { DBG("Can't find PCI infos for PCI<->PCI bridge\n"); @@ -828,14 +826,14 @@ static int __init pmac_add_bridge(struct device_node *dev) if (of_device_is_compatible(dev, "uni-north")) { primary = setup_uninorth(hose, &rsrc); disp_name = "UniNorth"; - } else if (strcmp(dev->name, "pci") == 0) { + } else if (of_node_name_eq(dev, "pci")) { /* XXX assume this is a mpc106 (grackle) */ setup_grackle(hose); disp_name = "Grackle (MPC106)"; - } else if (strcmp(dev->name, "bandit") == 0) { + } else if (of_node_name_eq(dev, "bandit")) { setup_bandit(hose, &rsrc); disp_name = "Bandit"; - } else if (strcmp(dev->name, "chaos") == 0) { + } else if (of_node_name_eq(dev, "chaos")) { setup_chaos(hose, &rsrc); disp_name = "Chaos"; primary = 0; @@ -914,16 +912,14 @@ void __init pmac_pci_init(void) "of device tree\n"); return; } - for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) { - if (np->name == NULL) - continue; - if (strcmp(np->name, "bandit") == 0 - || strcmp(np->name, "chaos") == 0 - || strcmp(np->name, "pci") == 0) { + for_each_child_of_node(root, np) { + if (of_node_name_eq(np, "bandit") + || of_node_name_eq(np, "chaos") + || of_node_name_eq(np, "pci")) { if (pmac_add_bridge(np) == 0) of_node_get(np); } - if (strcmp(np->name, "ht") == 0) { + if (of_node_name_eq(np, "ht")) { of_node_get(np); ht = np; } @@ -983,7 +979,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev) /* Firewire & GMAC were disabled after PCI probe, the driver is * claiming them, we must re-enable them now. 
*/ - if (uninorth_child && !strcmp(node->name, "firewire") && + if (uninorth_child && of_node_name_eq(node, "firewire") && (of_device_is_compatible(node, "pci106b,18") || of_device_is_compatible(node, "pci106b,30") || of_device_is_compatible(node, "pci11c1,5811"))) { @@ -991,7 +987,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev) pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1); updatecfg = 1; } - if (uninorth_child && !strcmp(node->name, "ethernet") && + if (uninorth_child && of_node_name_eq(node, "ethernet") && of_device_is_compatible(node, "gmac")) { pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1); updatecfg = 1; @@ -1262,4 +1258,3 @@ struct pci_controller_ops pmac_pci_controller_ops = { .enable_device_hook = pmac_pci_enable_device_hook, #endif }; - diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index fd2e210559c8..62311e84a423 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -101,9 +101,8 @@ static void macio_gpio_init_one(struct macio_chip *macio) * Find the "gpio" parent node */ - for (gparent = NULL; - (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;) - if (strcmp(gparent->name, "gpio") == 0) + for_each_child_of_node(macio->of_node, gparent) + if (of_node_name_eq(gparent, "gpio")) break; if (gparent == NULL) return; @@ -313,7 +312,7 @@ static void uninorth_install_pfunc(void) * Install handlers for the hwclock child if any */ for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) - if (strcmp(np->name, "hw-clock") == 0) { + if (of_node_name_eq(np, "hw-clock")) { unin_hwclock = np; break; } diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 57bbff465964..c292ffac2ed4 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -417,7 +417,7 @@ int of_irq_parse_oldworld(struct device_node *device, int index, if (ints != NULL) break; device = device->parent; - if (device && strcmp(device->type, "pci") != 0) + if (!of_node_is_type(device, "pci")) break; } if (ints == NULL) @@ -553,13 +553,13 @@ void __init pmac_pic_init(void) for_each_node_with_property(np, "interrupt-controller") { /* Skip /chosen/interrupt-controller */ - if (strcmp(np->name, "chosen") == 0) + if (of_node_name_eq(np, "chosen")) continue; /* It seems like at least one person wants * to use BootX on a machine with an AppleKiwi * controller which happens to pretend to be an * interrupt controller too. */ - if (strcmp(np->name, "AppleKiwi") == 0) + if (of_node_name_eq(np, "AppleKiwi")) continue; /* I think we found one ! 
*/ of_irq_dflt_pic = np; diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 2f00e3daafb0..2e8221e20ee8 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -560,15 +560,9 @@ static int __init check_pmac_serial_console(void) } pr_debug("stdout is %pOF\n", prom_stdout); - name = of_get_property(prom_stdout, "name", NULL); - if (!name) { - pr_debug(" stdout package has no name !\n"); - goto not_found; - } - - if (strcmp(name, "ch-a") == 0) + if (of_node_name_eq(prom_stdout, "ch-a")) offset = 0; - else if (strcmp(name, "ch-b") == 0) + else if (of_node_name_eq(prom_stdout, "ch-b")) offset = 1; else goto not_found; diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index f89808b9713d..fb64b09cad9d 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -56,7 +56,7 @@ * vector that will be called by the ROM on wakeup */ _GLOBAL(low_sleep_handler) -#ifndef CONFIG_6xx +#ifndef CONFIG_PPC_BOOK3S_32 blr #else mflr r0 @@ -394,5 +394,5 @@ sleep_storage: .long 0 .balign L1_CACHE_BYTES, 0 -#endif /* CONFIG_6xx */ +#endif /* CONFIG_PPC_BOOK3S_32 */ .section .text diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 447da6db450a..35be6e0b886d 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -832,8 +832,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - *vector = save_vector; - flush_icache_range((unsigned long) vector, (unsigned long) vector + 4); + patch_instruction(vector, save_vector); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c index 64f38f0d15ed..12158bb4fed7 100644 --- a/arch/powerpc/platforms/powermac/udbg_adb.c +++ b/arch/powerpc/platforms/powermac/udbg_adb.c @@ -194,7 +194,7 @@ int __init udbg_adb_init(int force_btext) */ for_each_node_by_name(np, "keyboard") { struct device_node *parent = of_get_parent(np); - int found = (parent && strcmp(parent->type, "adb") == 0); + int found = of_node_is_type(parent, "adb"); of_node_put(parent); if (found) break; diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 8901973ed683..415b74d7c253 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -87,7 +87,7 @@ void udbg_scc_init(int force_scc) for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) { if (ch == stdout) ch_def = of_node_get(ch); - if (strcmp(ch->name, "ch-a") == 0) + if (of_node_name_eq(ch, "ch-a")) ch_a = of_node_get(ch); } if (ch_def == NULL && !force_scc) diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index abc0be7507c8..f38078976c5d 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -564,8 +564,8 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) static int pnv_eeh_get_phb_state(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; s64 rc; int result = 0; @@ -603,8 +603,8 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe) static int pnv_eeh_get_pe_state(struct eeh_pe *pe) { struct pnv_phb *phb = 
pe->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; s64 rc; int result; diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 75b935252981..d7f742ed48ba 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -9,32 +9,19 @@ * License as published by the Free Software Foundation. */ -#include <linux/slab.h> #include <linux/mmu_notifier.h> #include <linux/mmu_context.h> #include <linux/of.h> -#include <linux/export.h> #include <linux/pci.h> #include <linux/memblock.h> -#include <linux/iommu.h> #include <linux/sizes.h> #include <asm/debugfs.h> -#include <asm/tlb.h> #include <asm/powernv.h> -#include <asm/reg.h> -#include <asm/opal.h> -#include <asm/io.h> -#include <asm/iommu.h> -#include <asm/pnv-pci.h> -#include <asm/msi_bitmap.h> #include <asm/opal.h> -#include "powernv.h" #include "pci.h" -#define npu_to_phb(x) container_of(x, struct pnv_phb, npu) - /* * spinlock to protect initialisation of an npu_context for a particular * mm_struct. @@ -133,15 +120,25 @@ static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe, return pe; } -long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num, +static long pnv_npu_unset_window(struct iommu_table_group *table_group, + int num); + +static long pnv_npu_set_window(struct iommu_table_group *table_group, int num, struct iommu_table *tbl) { + struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, + table_group); struct pnv_phb *phb = npe->phb; int64_t rc; const unsigned long size = tbl->it_indirect_levels ? tbl->it_level_size : tbl->it_size; const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; const __u64 win_size = tbl->it_size << tbl->it_page_shift; + int num2 = (num == 0) ? 1 : 0; + + /* NPU has just one TVE so if there is another table, remove it first */ + if (npe->table_group.tables[num2]) + pnv_npu_unset_window(&npe->table_group, num2); pe_info(npe, "Setting up window %llx..%llx pg=%lx\n", start_addr, start_addr + win_size - 1, @@ -167,11 +164,16 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num, return 0; } -long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num) +static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num) { + struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, + table_group); struct pnv_phb *phb = npe->phb; int64_t rc; + if (!npe->table_group.tables[num]) + return 0; + pe_info(npe, "Removing DMA window\n"); rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number, @@ -210,7 +212,8 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe) if (!gpe) return; - rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]); + rc = pnv_npu_set_window(&npe->table_group, 0, + gpe->table_group.tables[0]); /* * NVLink devices use the same TCE table configuration as @@ -235,7 +238,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe) if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev) return -EINVAL; - rc = pnv_npu_unset_window(npe, 0); + rc = pnv_npu_unset_window(&npe->table_group, 0); if (rc != OPAL_SUCCESS) return rc; @@ -288,11 +291,15 @@ void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass) } } +#ifdef CONFIG_IOMMU_API /* Switch ownership from platform code to external user (e.g. 
VFIO) */ -void pnv_npu_take_ownership(struct pnv_ioda_pe *npe) +static void pnv_npu_take_ownership(struct iommu_table_group *table_group) { + struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, + table_group); struct pnv_phb *phb = npe->phb; int64_t rc; + struct pci_dev *gpdev = NULL; /* * Note: NPU has just a single TVE in the hardware which means that @@ -301,7 +308,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe) * if it was enabled at the moment of ownership change. */ if (npe->table_group.tables[0]) { - pnv_npu_unset_window(npe, 0); + pnv_npu_unset_window(&npe->table_group, 0); return; } @@ -314,30 +321,315 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe) return; } pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false); + + get_gpu_pci_dev_and_pe(npe, &gpdev); + if (gpdev) + pnv_npu2_unmap_lpar_dev(gpdev); } -struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe) +static void pnv_npu_release_ownership(struct iommu_table_group *table_group) { - struct pnv_phb *phb = npe->phb; - struct pci_bus *pbus = phb->hose->bus; - struct pci_dev *npdev, *gpdev = NULL, *gptmp; - struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev); + struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, + table_group); + struct pci_dev *gpdev = NULL; + + get_gpu_pci_dev_and_pe(npe, &gpdev); + if (gpdev) + pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV); +} + +static struct iommu_table_group_ops pnv_pci_npu_ops = { + .set_window = pnv_npu_set_window, + .unset_window = pnv_npu_unset_window, + .take_ownership = pnv_npu_take_ownership, + .release_ownership = pnv_npu_release_ownership, +}; +#endif /* !CONFIG_IOMMU_API */ + +/* + * NPU2 ATS + */ +/* Maximum possible number of ATSD MMIO registers per NPU */ +#define NV_NMMU_ATSD_REGS 8 +#define NV_NPU_MAX_PE_NUM 16 + +/* + * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or + * up to 3 x (GPU + 2xNPUs) (POWER9). + */ +struct npu_comp { + struct iommu_table_group table_group; + int pe_num; + struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM]; +}; + +/* An NPU descriptor, valid for POWER9 only */ +struct npu { + int index; + __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS]; + unsigned int mmio_atsd_count; + + /* Bitmask for MMIO register usage */ + unsigned long mmio_atsd_usage; + + /* Do we need to explicitly flush the nest mmu? 
*/ + bool nmmu_flush; + + struct npu_comp npucomp; +}; + +#ifdef CONFIG_IOMMU_API +static long pnv_npu_peers_create_table_userspace( + struct iommu_table_group *table_group, + int num, __u32 page_shift, __u64 window_size, __u32 levels, + struct iommu_table **ptbl) +{ + struct npu_comp *npucomp = container_of(table_group, struct npu_comp, + table_group); + + if (!npucomp->pe_num || !npucomp->pe[0] || + !npucomp->pe[0]->table_group.ops || + !npucomp->pe[0]->table_group.ops->create_table) + return -EFAULT; + + return npucomp->pe[0]->table_group.ops->create_table( + &npucomp->pe[0]->table_group, num, page_shift, + window_size, levels, ptbl); +} + +static long pnv_npu_peers_set_window(struct iommu_table_group *table_group, + int num, struct iommu_table *tbl) +{ + int i, j; + long ret = 0; + struct npu_comp *npucomp = container_of(table_group, struct npu_comp, + table_group); + + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + + if (!pe->table_group.ops->set_window) + continue; + + ret = pe->table_group.ops->set_window(&pe->table_group, + num, tbl); + if (ret) + break; + } + + if (ret) { + for (j = 0; j < i; ++j) { + struct pnv_ioda_pe *pe = npucomp->pe[j]; + + if (!pe->table_group.ops->unset_window) + continue; + + ret = pe->table_group.ops->unset_window( + &pe->table_group, num); + if (ret) + break; + } + } else { + table_group->tables[num] = iommu_tce_table_get(tbl); + } + + return ret; +} - if (!gpe || !gpdev) +static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group, + int num) +{ + int i, j; + long ret = 0; + struct npu_comp *npucomp = container_of(table_group, struct npu_comp, + table_group); + + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + + WARN_ON(npucomp->table_group.tables[num] != + table_group->tables[num]); + if (!npucomp->table_group.tables[num]) + continue; + + if (!pe->table_group.ops->unset_window) + continue; + + ret = pe->table_group.ops->unset_window(&pe->table_group, num); + if (ret) + break; + } + + if (ret) { + for (j = 0; j < i; ++j) { + struct pnv_ioda_pe *pe = npucomp->pe[j]; + + if (!npucomp->table_group.tables[num]) + continue; + + if (!pe->table_group.ops->set_window) + continue; + + ret = pe->table_group.ops->set_window(&pe->table_group, + num, table_group->tables[num]); + if (ret) + break; + } + } else if (table_group->tables[num]) { + iommu_tce_table_put(table_group->tables[num]); + table_group->tables[num] = NULL; + } + + return ret; +} + +static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group) +{ + int i; + struct npu_comp *npucomp = container_of(table_group, struct npu_comp, + table_group); + + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + + if (!pe->table_group.ops->take_ownership) + continue; + pe->table_group.ops->take_ownership(&pe->table_group); + } +} + +static void pnv_npu_peers_release_ownership( + struct iommu_table_group *table_group) +{ + int i; + struct npu_comp *npucomp = container_of(table_group, struct npu_comp, + table_group); + + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + + if (!pe->table_group.ops->release_ownership) + continue; + pe->table_group.ops->release_ownership(&pe->table_group); + } +} + +static struct iommu_table_group_ops pnv_npu_peers_ops = { + .get_table_size = pnv_pci_ioda2_get_table_size, + .create_table = pnv_npu_peers_create_table_userspace, + .set_window = pnv_npu_peers_set_window, + .unset_window = pnv_npu_peers_unset_window, 
+ .take_ownership = pnv_npu_peers_take_ownership, + .release_ownership = pnv_npu_peers_release_ownership, +}; + +static void pnv_comp_attach_table_group(struct npu_comp *npucomp, + struct pnv_ioda_pe *pe) +{ + if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM)) + return; + + npucomp->pe[npucomp->pe_num] = pe; + ++npucomp->pe_num; +} + +struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) +{ + struct iommu_table_group *table_group; + struct npu_comp *npucomp; + struct pci_dev *gpdev = NULL; + struct pci_controller *hose; + struct pci_dev *npdev = NULL; + + list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) { + npdev = pnv_pci_get_npu_dev(gpdev, 0); + if (npdev) + break; + } + + if (!npdev) + /* It is not an NPU attached device, skip */ + return NULL; + + hose = pci_bus_to_host(npdev->bus); + + if (hose->npu) { + table_group = &hose->npu->npucomp.table_group; + + if (!table_group->group) { + table_group->ops = &pnv_npu_peers_ops; + iommu_register_group(table_group, + hose->global_number, + pe->pe_number); + } + } else { + /* Create a group for 1 GPU and attached NPUs for POWER8 */ + pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); + table_group = &pe->npucomp->table_group; + table_group->ops = &pnv_npu_peers_ops; + iommu_register_group(table_group, hose->global_number, + pe->pe_number); + } + + /* Steal capabilities from a GPU PE */ + table_group->max_dynamic_windows_supported = + pe->table_group.max_dynamic_windows_supported; + table_group->tce32_start = pe->table_group.tce32_start; + table_group->tce32_size = pe->table_group.tce32_size; + table_group->max_levels = pe->table_group.max_levels; + if (!table_group->pgsizes) + table_group->pgsizes = pe->table_group.pgsizes; + + npucomp = container_of(table_group, struct npu_comp, table_group); + pnv_comp_attach_table_group(npucomp, pe); + + return table_group; +} + +struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) +{ + struct iommu_table_group *table_group; + struct npu_comp *npucomp; + struct pci_dev *gpdev = NULL; + struct pci_dev *npdev; + struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev); + + WARN_ON(!(pe->flags & PNV_IODA_PE_DEV)); + if (!gpe) return NULL; - list_for_each_entry(npdev, &pbus->devices, bus_list) { - gptmp = pnv_pci_get_gpu_dev(npdev); + /* + * IODA2 bridges get this set up from pci_controller_ops::setup_bridge + * but NPU bridges do not have this hook defined so we do it here. + * We do not setup other table group parameters as they won't be used + * anyway - NVLink bridges are subordinate PEs. + */ + pe->table_group.ops = &pnv_pci_npu_ops; + + table_group = iommu_group_get_iommudata( + iommu_group_get(&gpdev->dev)); + + /* + * On P9 NPU PHB and PCI PHB support different page sizes, + * keep only matching. We expect here that NVLink bridge PE pgsizes is + * initialized by the caller.
+ */ + table_group->pgsizes &= pe->table_group.pgsizes; + npucomp = container_of(table_group, struct npu_comp, table_group); + pnv_comp_attach_table_group(npucomp, pe); + + list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) { + struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev); - if (gptmp != gpdev) + if (gpdevtmp != gpdev) continue; - pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev)); - iommu_group_add_device(gpe->table_group.group, &npdev->dev); + iommu_add_device(table_group, &npdev->dev); } - return gpe; + return table_group; } +#endif /* CONFIG_IOMMU_API */ /* Maximum number of nvlinks per npu */ #define NV_MAX_LINKS 6 @@ -490,7 +782,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context, int i, j; struct npu *npu; struct pci_dev *npdev; - struct pnv_phb *nphb; for (i = 0; i <= max_npu2_index; i++) { mmio_atsd_reg[i].reg = -1; @@ -505,8 +796,10 @@ static void acquire_atsd_reg(struct npu_context *npu_context, if (!npdev) continue; - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; + npu = pci_bus_to_host(npdev->bus)->npu; + if (!npu) + continue; + mmio_atsd_reg[i].npu = npu; mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); while (mmio_atsd_reg[i].reg < 0) { @@ -671,9 +964,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, u32 nvlink_index; struct device_node *nvlink_dn; struct mm_struct *mm = current->mm; - struct pnv_phb *nphb; struct npu *npu; struct npu_context *npu_context; + struct pci_controller *hose; /* * At present we don't support GPUs connected to multiple NPUs and I'm @@ -681,13 +974,14 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, */ struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); - if (!firmware_has_feature(FW_FEATURE_OPAL)) - return ERR_PTR(-ENODEV); - if (!npdev) /* No nvlink associated with this GPU device */ return ERR_PTR(-ENODEV); + /* We only support DR/PR/HV in pnv_npu2_map_lpar_dev() */ + if (flags & ~(MSR_DR | MSR_PR | MSR_HV)) + return ERR_PTR(-EINVAL); + nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) @@ -701,20 +995,10 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, return ERR_PTR(-EINVAL); } - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; - - /* - * Setup the NPU context table for a particular GPU. These need to be - * per-GPU as we need the tables to filter ATSDs when there are no - * active contexts on a particular GPU. It is safe for these to be - * called concurrently with destroy as the OPAL call takes appropriate - * locks and refcounts on init/destroy. 
- */ - rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, - PCI_DEVID(gpdev->bus->number, gpdev->devfn)); - if (rc < 0) - return ERR_PTR(-ENOSPC); + hose = pci_bus_to_host(npdev->bus); + npu = hose->npu; + if (!npu) + return ERR_PTR(-ENODEV); /* * We store the npu pci device so we can more easily get at the @@ -726,9 +1010,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, if (npu_context->release_cb != cb || npu_context->priv != priv) { spin_unlock(&npu_context_lock); - opal_npu_destroy_context(nphb->opal_id, mm->context.id, - PCI_DEVID(gpdev->bus->number, - gpdev->devfn)); return ERR_PTR(-EINVAL); } @@ -754,9 +1035,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, if (rc) { kfree(npu_context); - opal_npu_destroy_context(nphb->opal_id, mm->context.id, - PCI_DEVID(gpdev->bus->number, - gpdev->devfn)); return ERR_PTR(rc); } @@ -776,7 +1054,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, */ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev); - if (!nphb->npu.nmmu_flush) { + if (!npu->nmmu_flush) { /* * If we're not explicitly flushing ourselves we need to mark * the thread for global flushes @@ -809,27 +1087,24 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, struct pci_dev *gpdev) { int removed; - struct pnv_phb *nphb; struct npu *npu; struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); struct device_node *nvlink_dn; u32 nvlink_index; + struct pci_controller *hose; if (WARN_ON(!npdev)) return; - if (!firmware_has_feature(FW_FEATURE_OPAL)) + hose = pci_bus_to_host(npdev->bus); + npu = hose->npu; + if (!npu) return; - - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return; WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); - opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, - PCI_DEVID(gpdev->bus->number, gpdev->devfn)); spin_lock(&npu_context_lock); removed = kref_put(&npu_context->kref, pnv_npu2_release_context); spin_unlock(&npu_context_lock); @@ -857,13 +1132,12 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea, u64 rc = 0, result = 0; int i, is_write; struct page *page[1]; + const char __user *u; + char c; /* mmap_sem should be held so the struct_mm must be present */ struct mm_struct *mm = context->mm; - if (!firmware_has_feature(FW_FEATURE_OPAL)) - return -ENODEV; - WARN_ON(!rwsem_is_locked(&mm->mmap_sem)); for (i = 0; i < count; i++) { @@ -872,18 +1146,17 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea, is_write ? FOLL_WRITE : 0, page, NULL, NULL); - /* - * To support virtualised environments we will have to do an - * access to the page to ensure it gets faulted into the - * hypervisor. For the moment virtualisation is not supported in - * other areas so leave the access out. 
- */ if (rc != 1) { status[i] = rc; result = -EFAULT; continue; } + /* Make sure partition scoped tree gets a pte */ + u = page_address(page[0]); + if (__get_user(c, u)) + result = -EFAULT; + status[i] = 0; put_page(page[0]); } @@ -892,42 +1165,127 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea, } EXPORT_SYMBOL(pnv_npu2_handle_fault); -int pnv_npu2_init(struct pnv_phb *phb) +int pnv_npu2_init(struct pci_controller *hose) { unsigned int i; u64 mmio_atsd; - struct device_node *dn; - struct pci_dev *gpdev; static int npu_index; - uint64_t rc = 0; - - phb->npu.nmmu_flush = - of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush"); - for_each_child_of_node(phb->hose->dn, dn) { - gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn)); - if (gpdev) { - rc = opal_npu_map_lpar(phb->opal_id, - PCI_DEVID(gpdev->bus->number, gpdev->devfn), - 0, 0); - if (rc) - dev_err(&gpdev->dev, - "Error %lld mapping device to LPAR\n", - rc); - } - } + struct npu *npu; + int ret; + + npu = kzalloc(sizeof(*npu), GFP_KERNEL); + if (!npu) + return -ENOMEM; - for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd", - i, &mmio_atsd); i++) - phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32); + npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush"); - pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i); - phb->npu.mmio_atsd_count = i; - phb->npu.mmio_atsd_usage = 0; + for (i = 0; i < ARRAY_SIZE(npu->mmio_atsd_regs) && + !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", + i, &mmio_atsd); i++) + npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32); + + pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i); + npu->mmio_atsd_count = i; + npu->mmio_atsd_usage = 0; npu_index++; - if (WARN_ON(npu_index >= NV_MAX_NPUS)) - return -ENOSPC; + if (WARN_ON(npu_index >= NV_MAX_NPUS)) { + ret = -ENOSPC; + goto fail_exit; + } max_npu2_index = npu_index; - phb->npu.index = npu_index; + npu->index = npu_index; + hose->npu = npu; + + return 0; + +fail_exit: + for (i = 0; i < npu->mmio_atsd_count; ++i) + iounmap(npu->mmio_atsd_regs[i]); + + kfree(npu); + + return ret; +} + +int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, + unsigned long msr) +{ + int ret; + struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); + struct pci_controller *hose; + struct pnv_phb *nphb; + + if (!npdev) + return -ENODEV; + + hose = pci_bus_to_host(npdev->bus); + nphb = hose->private_data; + + dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n", + nphb->opal_id, lparid); + /* + * Currently we only support radix and non-zero LPCR only makes sense + * for hash tables so skiboot expects the LPCR parameter to be a zero. 
+ */ + ret = opal_npu_map_lpar(nphb->opal_id, + PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid, + 0 /* LPCR bits */); + if (ret) { + dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); + return ret; + } + + dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n", + nphb->opal_id, msr); + ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr, + PCI_DEVID(gpdev->bus->number, gpdev->devfn)); + if (ret < 0) + dev_err(&gpdev->dev, "Failed to init context: %d\n", ret); + else + ret = 0; return 0; } +EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev); + +void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr) +{ + struct pci_dev *gpdev; + + list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list) + pnv_npu2_map_lpar_dev(gpdev, 0, msr); +} + +int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev) +{ + int ret; + struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); + struct pci_controller *hose; + struct pnv_phb *nphb; + + if (!npdev) + return -ENODEV; + + hose = pci_bus_to_host(npdev->bus); + nphb = hose->private_data; + + dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n", + nphb->opal_id); + ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/, + PCI_DEVID(gpdev->bus->number, gpdev->devfn)); + if (ret < 0) { + dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret); + return ret; + } + + /* Set LPID to 0 anyway, just to be safe */ + dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id); + ret = opal_npu_map_lpar(nphb->opal_id, + PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/, + 0 /* LPCR bits */); + if (ret) + dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev); diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c index 58dc3308237f..89ab1da57657 100644 --- a/arch/powerpc/platforms/powernv/opal-power.c +++ b/arch/powerpc/platforms/powernv/opal-power.c @@ -138,7 +138,7 @@ static struct notifier_block opal_power_control_nb = { .priority = 0, }; -static int __init opal_power_control_init(void) +int __init opal_power_control_init(void) { int ret, supported = 0; struct device_node *np; @@ -176,4 +176,3 @@ static int __init opal_power_control_init(void) return 0; } -machine_subsys_initcall(powernv, opal_power_control_init); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index beed86f4224b..79586f127521 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -877,7 +877,7 @@ static int __init opal_init(void) consoles = of_find_node_by_path("/ibm,opal/consoles"); if (consoles) { for_each_child_of_node(consoles, np) { - if (strcmp(np->name, "serial")) + if (!of_node_name_eq(np, "serial")) continue; of_platform_device_create(np, NULL, NULL); } @@ -960,6 +960,9 @@ static int __init opal_init(void) /* Initialise OPAL sensor groups */ opal_sensor_groups_init(); + /* Initialise OPAL Power control interface */ + opal_power_control_init(); + return 0; } machine_subsys_initcall(powernv, opal_init); diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index fe9691040f54..697449afb3f7 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, if (alloc_userspace_copy) { offset = 0; uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, 
tce_table_size, &offset, + tmplevels, tce_table_size, &offset, &total_allocated_uas); if (!uas) goto free_tces_exit; @@ -368,6 +368,7 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl, found = false; for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { if (table_group->tables[i] == tbl) { + iommu_tce_table_put(tbl); table_group->tables[i] = NULL; found = true; break; @@ -393,7 +394,7 @@ long pnv_pci_link_table_and_group(int node, int num, tgl->table_group = table_group; list_add_rcu(&tgl->next, &tbl->it_group_list); - table_group->tables[num] = tbl; + table_group->tables[num] = iommu_tce_table_get(tbl); return 0; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index dd807446801e..1d6406a051f1 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -190,7 +190,8 @@ static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) unsigned int pe_num = pe->pe_number; WARN_ON(pe->pdev); - + WARN_ON(pe->npucomp); /* NPUs are not supposed to be freed */ + kfree(pe->npucomp); memset(pe, 0, sizeof(struct pnv_ioda_pe)); clear_bit(pe_num, phb->ioda.pe_alloc); } @@ -517,8 +518,6 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) phb->init_m64 = pnv_ioda1_init_m64; else phb->init_m64 = pnv_ioda2_init_m64; - phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe; - phb->pick_m64_pe = pnv_ioda_pick_m64_pe; } static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) @@ -604,8 +603,8 @@ static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) { struct pnv_ioda_pe *slave, *pe; - u8 fstate, state; - __be16 pcierr; + u8 fstate = 0, state; + __be16 pcierr = 0; s64 rc; /* Sanity check on PE number */ @@ -663,10 +662,6 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) return state; } -/* Currently those 2 are only used when MSIs are enabled, this will change - * but in the meantime, we need to protect them to avoid warnings - */ -#ifdef CONFIG_PCI_MSI struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); @@ -679,7 +674,6 @@ struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) return NULL; return &phb->ioda.pe_array[pdn->pe_number]; } -#endif /* CONFIG_PCI_MSI */ static int pnv_ioda_set_one_peltv(struct pnv_phb *phb, struct pnv_ioda_pe *parent, @@ -1160,8 +1154,8 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; /* Check if PE is determined by M64 */ - if (!pe && phb->pick_m64_pe) - pe = phb->pick_m64_pe(bus, all); + if (!pe) + pe = pnv_ioda_pick_m64_pe(bus, all); /* The PE number isn't pinned by M64 */ if (!pe) @@ -1273,19 +1267,20 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus) static void pnv_pci_ioda_setup_PEs(void) { - struct pci_controller *hose, *tmp; + struct pci_controller *hose; struct pnv_phb *phb; struct pci_bus *bus; struct pci_dev *pdev; + struct pnv_ioda_pe *pe; - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; if (phb->type == PNV_PHB_NPU_NVLINK) { /* PE#0 is needed for error reporting */ pnv_ioda_reserve_pe(phb, 0); pnv_ioda_setup_npu_PEs(hose->bus); if (phb->model == PNV_PHB_MODEL_NPU2) - pnv_npu2_init(phb); + WARN_ON_ONCE(pnv_npu2_init(hose)); } if (phb->type == PNV_PHB_NPU_OCAPI) { bus = hose->bus; @@ -1293,6 +1288,14 @@ static void 
pnv_pci_ioda_setup_PEs(void) pnv_ioda_setup_dev_PE(pdev); } } + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + if (phb->type != PNV_PHB_IODA2) + continue; + + list_for_each_entry(pe, &phb->ioda.pe_list, list) + pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV); + } } #ifdef CONFIG_PCI_IOV @@ -1531,6 +1534,11 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev) static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); +#ifdef CONFIG_IOMMU_API +static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe, + struct iommu_table_group *table_group, struct pci_bus *bus); + +#endif static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) { struct pci_bus *bus; @@ -1584,6 +1592,9 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) mutex_unlock(&phb->ioda.pe_list_mutex); pnv_pci_ioda2_setup_dma_pe(phb, pe); +#ifdef CONFIG_IOMMU_API + pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); +#endif } } @@ -1923,21 +1934,16 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev) return mask; } -static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, - struct pci_bus *bus, - bool add_to_group) +static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); set_dma_offset(&dev->dev, pe->tce_bypass_base); - if (add_to_group) - iommu_add_device(&dev->dev); if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_bus_dma(pe, dev->subordinate, - add_to_group); + pnv_ioda_setup_bus_dma(pe, dev->subordinate); } } @@ -2366,16 +2372,8 @@ found: pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; iommu_init_table(tbl, phb->hose->node); - if (pe->flags & PNV_IODA_PE_DEV) { - /* - * Setting table base here only for carrying iommu_group - * further down to let iommu_add_device() do the job. - * pnv_pci_ioda_dma_dev_setup will override it later anyway. - */ - set_iommu_table_base(&pe->pdev->dev, tbl); - iommu_add_device(&pe->pdev->dev); - } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus, true); + if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) + pnv_ioda_setup_bus_dma(pe, pe->pbus); return; fail: @@ -2527,14 +2525,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) if (!pnv_iommu_bypass_disabled) pnv_pci_ioda2_set_bypass(pe, true); - /* - * Setting table base here only for carrying iommu_group - * further down to let iommu_add_device() do the job. - * pnv_pci_ioda_dma_dev_setup will override it later anyway. 
- */ - if (pe->flags & PNV_IODA_PE_DEV) - set_iommu_table_base(&pe->pdev->dev, tbl); - return 0; } @@ -2565,7 +2555,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, #endif #ifdef CONFIG_IOMMU_API -static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, +unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, __u64 window_size, __u32 levels) { unsigned long bytes = 0; @@ -2616,7 +2606,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) pnv_pci_ioda2_set_bypass(pe, false); pnv_pci_ioda2_unset_window(&pe->table_group, 0); if (pe->pbus) - pnv_ioda_setup_bus_dma(pe, pe->pbus, false); + pnv_ioda_setup_bus_dma(pe, pe->pbus); iommu_tce_table_put(tbl); } @@ -2627,7 +2617,7 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) pnv_pci_ioda2_setup_default_config(pe); if (pe->pbus) - pnv_ioda_setup_bus_dma(pe, pe->pbus, false); + pnv_ioda_setup_bus_dma(pe, pe->pbus); } static struct iommu_table_group_ops pnv_pci_ioda2_ops = { @@ -2639,131 +2629,100 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = { .release_ownership = pnv_ioda2_release_ownership, }; -static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque) +static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe, + struct iommu_table_group *table_group, + struct pci_bus *bus) { - struct pci_controller *hose; - struct pnv_phb *phb; - struct pnv_ioda_pe **ptmppe = opaque; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); - struct pci_dn *pdn = pci_get_pdn(pdev); - - if (!pdn || pdn->pe_number == IODA_INVALID_PE) - return 0; - - hose = pci_bus_to_host(pdev->bus); - phb = hose->private_data; - if (phb->type != PNV_PHB_NPU_NVLINK) - return 0; + struct pci_dev *dev; - *ptmppe = &phb->ioda.pe_array[pdn->pe_number]; + list_for_each_entry(dev, &bus->devices, bus_list) { + iommu_add_device(table_group, &dev->dev); - return 1; + if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) + pnv_ioda_setup_bus_iommu_group_add_devices(pe, + table_group, dev->subordinate); + } } -/* - * This returns PE of associated NPU. - * This assumes that NPU is in the same IOMMU group with GPU and there is - * no other PEs. - */ -static struct pnv_ioda_pe *gpe_table_group_to_npe( - struct iommu_table_group *table_group) +static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe, + struct iommu_table_group *table_group, struct pci_bus *bus) { - struct pnv_ioda_pe *npe = NULL; - int ret = iommu_group_for_each_dev(table_group->group, &npe, - gpe_table_group_to_npe_cb); - BUG_ON(!ret || !npe); + if (pe->flags & PNV_IODA_PE_DEV) + iommu_add_device(table_group, &pe->pdev->dev); - return npe; + if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus) + pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group, + bus); } -static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group, - int num, struct iommu_table *tbl) -{ - struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group); - int num2 = (num == 0) ? 
1 : 0; - long ret = pnv_pci_ioda2_set_window(table_group, num, tbl); - - if (ret) - return ret; - - if (table_group->tables[num2]) - pnv_npu_unset_window(npe, num2); - - ret = pnv_npu_set_window(npe, num, tbl); - if (ret) { - pnv_pci_ioda2_unset_window(table_group, num); - if (table_group->tables[num2]) - pnv_npu_set_window(npe, num2, - table_group->tables[num2]); - } - - return ret; -} +static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb); -static long pnv_pci_ioda2_npu_unset_window( - struct iommu_table_group *table_group, - int num) +static void pnv_pci_ioda_setup_iommu_api(void) { - struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group); - int num2 = (num == 0) ? 1 : 0; - long ret = pnv_pci_ioda2_unset_window(table_group, num); - - if (ret) - return ret; - - if (!npe->table_group.tables[num]) - return 0; - - ret = pnv_npu_unset_window(npe, num); - if (ret) - return ret; - - if (table_group->tables[num2]) - ret = pnv_npu_set_window(npe, num2, table_group->tables[num2]); - - return ret; -} + struct pci_controller *hose; + struct pnv_phb *phb; + struct pnv_ioda_pe *pe; -static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group) -{ /* - * Detach NPU first as pnv_ioda2_take_ownership() will destroy - * the iommu_table if 32bit DMA is enabled. + * There are 4 types of PEs: + * - PNV_IODA_PE_BUS: a downstream port with an adapter, + * created from pnv_pci_setup_bridge(); + * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it, + * created from pnv_pci_setup_bridge(); + * - PNV_IODA_PE_VF: a SRIOV virtual function, + * created from pnv_pcibios_sriov_enable(); + * - PNV_IODA_PE_DEV: an NPU or OCAPI device, + * created from pnv_pci_ioda_fixup(). + * + * Normally a PE is represented by an IOMMU group, however for + * devices with side channels the groups need to be more strict. */ - pnv_npu_take_ownership(gpe_table_group_to_npe(table_group)); - pnv_ioda2_take_ownership(table_group); -} + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; -static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = { - .get_table_size = pnv_pci_ioda2_get_table_size, - .create_table = pnv_pci_ioda2_create_table_userspace, - .set_window = pnv_pci_ioda2_npu_set_window, - .unset_window = pnv_pci_ioda2_npu_unset_window, - .take_ownership = pnv_ioda2_npu_take_ownership, - .release_ownership = pnv_ioda2_release_ownership, -}; + if (phb->type == PNV_PHB_NPU_NVLINK) + continue; -static void pnv_pci_ioda_setup_iommu_api(void) -{ - struct pci_controller *hose, *tmp; - struct pnv_phb *phb; - struct pnv_ioda_pe *pe, *gpe; + list_for_each_entry(pe, &phb->ioda.pe_list, list) { + struct iommu_table_group *table_group; + + table_group = pnv_try_setup_npu_table_group(pe); + if (!table_group) { + if (!pnv_pci_ioda_pe_dma_weight(pe)) + continue; + + table_group = &pe->table_group; + iommu_register_group(&pe->table_group, + pe->phb->hose->global_number, + pe->pe_number); + } + pnv_ioda_setup_bus_iommu_group(pe, table_group, + pe->pbus); + } + } /* * Now we have all PHBs discovered, time to add NPU devices to * the corresponding IOMMU groups. 
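
The rewritten pnv_pci_ioda_setup_iommu_api() now registers one IOMMU group per PE up front, which is what lets the per-device work be deferred to the bus notifier added in pci.c further below. The core of the per-PE decision, condensed from the hunk above (names as in the source, context assumed from pci-ioda.c):

list_for_each_entry(pe, &phb->ioda.pe_list, list) {
	struct iommu_table_group *table_group;

	/* NVLink PEs may join a compound group shared with their GPU */
	table_group = pnv_try_setup_npu_table_group(pe);
	if (!table_group) {
		if (!pnv_pci_ioda_pe_dma_weight(pe))
			continue;	/* PE does no DMA, no group needed */

		table_group = &pe->table_group;
		iommu_register_group(table_group,
				     pe->phb->hose->global_number,
				     pe->pe_number);
	}
	pnv_ioda_setup_bus_iommu_group(pe, table_group, pe->pbus);
}
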
*/ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + list_for_each_entry(hose, &hose_list, list_node) { + unsigned long pgsizes; + phb = hose->private_data; if (phb->type != PNV_PHB_NPU_NVLINK) continue; + pgsizes = pnv_ioda_parse_tce_sizes(phb); list_for_each_entry(pe, &phb->ioda.pe_list, list) { - gpe = pnv_pci_npu_setup_iommu(pe); - if (gpe) - gpe->table_group.ops = &pnv_pci_ioda2_npu_ops; + /* + * IODA2 bridges get this set up from + * pci_controller_ops::setup_bridge but NPU bridges + * do not have this hook defined so we do it here. + */ + pe->table_group.pgsizes = pgsizes; + pnv_npu_compound_attach(pe); } } } @@ -2810,9 +2769,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, /* TVE #1 is selected by PCI address bit 59 */ pe->tce_bypass_base = 1ull << 59; - iommu_register_group(&pe->table_group, phb->hose->global_number, - pe->pe_number); - /* The PE will reserve all possible 32-bits space */ pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", phb->ioda.m32_pci_base); @@ -2833,10 +2789,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, return; if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus, true); + pnv_ioda_setup_bus_dma(pe, pe->pbus); } -#ifdef CONFIG_PCI_MSI int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq) { struct pnv_phb *phb = container_of(chip, struct pnv_phb, @@ -2982,9 +2937,6 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", count, phb->msi_base); } -#else -static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } -#endif /* CONFIG_PCI_MSI */ #ifdef CONFIG_PCI_IOV static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) @@ -3402,8 +3354,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type) return; /* Reserve PEs according to used M64 resources */ - if (phb->reserve_m64_pe) - phb->reserve_m64_pe(bus, NULL, all); + pnv_ioda_reserve_m64_pe(bus, NULL, all); /* * Assign PE. We might run here because of partial hotplug. @@ -3687,6 +3638,15 @@ static void pnv_pci_release_device(struct pci_dev *pdev) pnv_ioda_release_pe(pe); } +static void pnv_npu_disable_device(struct pci_dev *pdev) +{ + struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); + struct eeh_pe *eehpe = edev ? 
edev->pe : NULL; + + if (eehpe && eeh_ops && eeh_ops->reset) + eeh_ops->reset(eehpe, EEH_RESET_HOT); +} + static void pnv_pci_ioda_shutdown(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; @@ -3698,10 +3658,8 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose) static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .dma_dev_setup = pnv_pci_dma_dev_setup, .dma_bus_setup = pnv_pci_dma_bus_setup, -#ifdef CONFIG_PCI_MSI .setup_msi_irqs = pnv_setup_msi_irqs, .teardown_msi_irqs = pnv_teardown_msi_irqs, -#endif .enable_device_hook = pnv_pci_enable_device_hook, .release_device = pnv_pci_release_device, .window_alignment = pnv_pci_window_alignment, @@ -3722,15 +3680,14 @@ static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask) static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { .dma_dev_setup = pnv_pci_dma_dev_setup, -#ifdef CONFIG_PCI_MSI .setup_msi_irqs = pnv_setup_msi_irqs, .teardown_msi_irqs = pnv_teardown_msi_irqs, -#endif .enable_device_hook = pnv_pci_enable_device_hook, .window_alignment = pnv_pci_window_alignment, .reset_secondary_bus = pnv_pci_reset_secondary_bus, .dma_set_mask = pnv_npu_dma_set_mask, .shutdown = pnv_pci_ioda_shutdown, + .disable_device = pnv_npu_disable_device, }; static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 13aef2323bbc..45fb70b4bfa7 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -160,7 +160,6 @@ exit: } EXPORT_SYMBOL_GPL(pnv_pci_set_power_state); -#ifdef CONFIG_PCI_MSI int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); @@ -229,7 +228,6 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev) msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); } } -#endif /* CONFIG_PCI_MSI */ /* Nicely print the contents of the PE State Tables (PEST). 
*/ static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size) @@ -602,8 +600,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) static void pnv_pci_config_check_eeh(struct pci_dn *pdn) { struct pnv_phb *phb = pdn->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; unsigned int pe_no; s64 rc; @@ -1127,4 +1125,45 @@ void __init pnv_pci_init(void) set_pci_dma_ops(&dma_iommu_ops); } -machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); +static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev; + struct pci_dn *pdn; + struct pnv_ioda_pe *pe; + struct pci_controller *hose; + struct pnv_phb *phb; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + pdev = to_pci_dev(dev); + pdn = pci_get_pdn(pdev); + hose = pci_bus_to_host(pdev->bus); + phb = hose->private_data; + + WARN_ON_ONCE(!phb); + if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb) + return 0; + + pe = &phb->ioda.pe_array[pdn->pe_number]; + iommu_add_device(&pe->table_group, dev); + return 0; + case BUS_NOTIFY_DEL_DEVICE: + iommu_del_device(dev); + return 0; + default: + return 0; + } +} + +static struct notifier_block pnv_tce_iommu_bus_nb = { + .notifier_call = pnv_tce_iommu_bus_notifier, +}; + +static int __init pnv_tce_iommu_bus_notifier_init(void) +{ + bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb); + return 0; +} +machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 8b37b28e3831..8e36da379252 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -8,9 +8,6 @@ struct pci_dn; -/* Maximum possible number of ATSD MMIO registers per NPU */ -#define NV_NMMU_ATSD_REGS 8 - enum pnv_phb_type { PNV_PHB_IODA1 = 0, PNV_PHB_IODA2 = 1, @@ -65,6 +62,7 @@ struct pnv_ioda_pe { /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ struct iommu_table_group table_group; + struct npu_comp *npucomp; /* 64-bit TCE bypass region */ bool tce_bypass_enabled; @@ -106,20 +104,14 @@ struct pnv_phb { struct dentry *dbgfs; #endif -#ifdef CONFIG_PCI_MSI unsigned int msi_base; unsigned int msi32_support; struct msi_bitmap msi_bmp; -#endif int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev, unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg); void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); - void (*fixup_phb)(struct pci_controller *hose); int (*init_m64)(struct pnv_phb *phb); - void (*reserve_m64_pe)(struct pci_bus *bus, - unsigned long *pe_bitmap, bool all); - struct pnv_ioda_pe *(*pick_m64_pe)(struct pci_bus *bus, bool all); int (*get_pe_state)(struct pnv_phb *phb, int pe_no); void (*freeze_pe)(struct pnv_phb *phb, int pe_no); int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt); @@ -180,19 +172,6 @@ struct pnv_phb { unsigned int diag_data_size; u8 *diag_data; - /* Nvlink2 data */ - struct npu { - int index; - __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS]; - unsigned int mmio_atsd_count; - - /* Bitmask for MMIO register usage */ - unsigned long mmio_atsd_usage; - - /* Do we need to explicitly flush the nest mmu? 
*/ - bool nmmu_flush; - } npu; - int p2p_target_count; }; @@ -210,6 +189,7 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_init_npu_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); +extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); @@ -220,6 +200,8 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); +extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, + __u64 window_size, __u32 levels); extern int pnv_eeh_post_init(void); extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, @@ -235,12 +217,10 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass); extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm); extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe); -extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num, - struct iommu_table *tbl); -extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num); -extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe); -extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe); -extern int pnv_npu2_init(struct pnv_phb *phb); +extern struct iommu_table_group *pnv_try_setup_npu_table_group( + struct pnv_ioda_pe *pe); +extern struct iommu_table_group *pnv_npu_compound_attach( + struct pnv_ioda_pe *pe); /* pci-ioda-tce.c */ #define POWERNV_IOMMU_DEFAULT_LEVELS 1 diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c index 4f7276ebdf9c..4d3929fbc08f 100644 --- a/arch/powerpc/platforms/powernv/vas-debug.c +++ b/arch/powerpc/platforms/powernv/vas-debug.c @@ -30,7 +30,7 @@ static char *cop_to_str(int cop) } } -static int info_dbg_show(struct seq_file *s, void *private) +static int info_show(struct seq_file *s, void *private) { struct vas_window *window = s->private; @@ -49,17 +49,7 @@ unlock: return 0; } -static int info_dbg_open(struct inode *inode, struct file *file) -{ - return single_open(file, info_dbg_show, inode->i_private); -} - -static const struct file_operations info_fops = { - .open = info_dbg_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(info); static inline void print_reg(struct seq_file *s, struct vas_window *win, char *name, u32 reg) @@ -67,7 +57,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win, seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name); } -static int hvwc_dbg_show(struct seq_file *s, void *private) +static int hvwc_show(struct seq_file *s, void *private) { struct vas_window *window = s->private; @@ -115,17 +105,7 @@ unlock: return 0; } -static int hvwc_dbg_open(struct inode *inode, struct file *file) -{ - return single_open(file, hvwc_dbg_show, inode->i_private); -} - -static const struct file_operations hvwc_fops = { - .open = hvwc_dbg_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(hvwc); void 
vas_window_free_dbgdir(struct vas_window *window) { diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2a983b5a52e1..d291b618a559 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -197,6 +197,7 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb) found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index); + of_node_put(dr_node); dlpar_free_cc_nodes(lmb_node); if (!found) { @@ -313,7 +314,6 @@ out: static int pseries_remove_mem_node(struct device_node *np) { - const char *type; const __be32 *regs; unsigned long base; unsigned int lmb_size; @@ -322,8 +322,7 @@ static int pseries_remove_mem_node(struct device_node *np) /* * Check to see if we are actually removing memory */ - type = of_get_property(np, "device_type", NULL); - if (type == NULL || strcmp(type, "memory") != 0) + if (!of_node_is_type(np, "memory")) return 0; /* @@ -355,8 +354,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb) phys_addr = lmb->base_addr; #ifdef CONFIG_FA_DUMP - /* Don't hot-remove memory that falls in fadump boot memory area */ - if (is_fadump_boot_memory_area(phys_addr, block_sz)) + /* + * Don't hot-remove memory that falls in fadump boot memory area + * and memory that is reserved for capturing old kernel memory. + */ + if (is_fadump_memory_area(phys_addr, block_sz)) return false; #endif @@ -936,7 +938,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) static int pseries_add_mem_node(struct device_node *np) { - const char *type; const __be32 *regs; unsigned long base; unsigned int lmb_size; @@ -945,8 +946,7 @@ static int pseries_add_mem_node(struct device_node *np) /* * Check to see if we are actually adding memory */ - type = of_get_property(np, "device_type", NULL); - if (type == NULL || strcmp(type, "memory") != 0) + if (!of_node_is_type(np, "memory")) return 0; /* diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 06f02960b439..8fc8fe0b9848 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -57,7 +57,6 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; struct iommu_table *tbl; - struct iommu_table_group_link *tgl; table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL, node); @@ -68,22 +67,13 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) if (!tbl) goto free_group; - tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL, - node); - if (!tgl) - goto free_table; - INIT_LIST_HEAD_RCU(&tbl->it_group_list); kref_init(&tbl->it_kref); - tgl->table_group = table_group; - list_add_rcu(&tgl->next, &tbl->it_group_list); table_group->tables[0] = tbl; return table_group; -free_table: - kfree(tbl); free_group: kfree(table_group); return NULL; @@ -93,23 +83,12 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, const char *node_name) { struct iommu_table *tbl; -#ifdef CONFIG_IOMMU_API - struct iommu_table_group_link *tgl; -#endif if (!table_group) return; tbl = table_group->tables[0]; #ifdef CONFIG_IOMMU_API - tgl = list_first_entry_or_null(&tbl->it_group_list, - struct iommu_table_group_link, next); - - WARN_ON_ONCE(!tgl); - if (tgl) { - list_del_rcu(&tgl->next); - kfree(tgl); - } if (table_group->group) { iommu_group_put(table_group->group); BUG_ON(table_group->group); @@ -645,7 +624,6 @@ static void 
pci_dma_bus_setup_pSeries(struct pci_bus *bus) iommu_table_setparms(pci->phb, dn, tbl); tbl->it_ops = &iommu_table_pseries_ops; iommu_init_table(tbl, pci->phb->node); - iommu_register_group(pci->table_group, pci_domain_nr(bus), 0); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; @@ -756,10 +734,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) iommu_table_setparms(phb, dn, tbl); tbl->it_ops = &iommu_table_pseries_ops; iommu_init_table(tbl, phb->node); - iommu_register_group(PCI_DN(dn)->table_group, - pci_domain_nr(phb->bus), 0); set_iommu_table_base(&dev->dev, tbl); - iommu_add_device(&dev->dev); return; } @@ -770,11 +745,10 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL) dn = dn->parent; - if (dn && PCI_DN(dn)) { + if (dn && PCI_DN(dn)) set_iommu_table_base(&dev->dev, PCI_DN(dn)->table_group->tables[0]); - iommu_add_device(&dev->dev); - } else + else printk(KERN_WARNING "iommu: Device %s has no iommu table\n", pci_name(dev)); } @@ -964,6 +938,37 @@ struct failed_ddw_pdn { static LIST_HEAD(failed_ddw_pdn_list); +static phys_addr_t ddw_memory_hotplug_max(void) +{ + phys_addr_t max_addr = memory_hotplug_max(); + struct device_node *memory; + + for_each_node_by_type(memory, "memory") { + unsigned long start, size; + int ranges, n_mem_addr_cells, n_mem_size_cells, len; + const __be32 *memcell_buf; + + memcell_buf = of_get_property(memory, "reg", &len); + if (!memcell_buf || len <= 0) + continue; + + n_mem_addr_cells = of_n_addr_cells(memory); + n_mem_size_cells = of_n_size_cells(memory); + + /* ranges in cell */ + ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); + + start = of_read_number(memcell_buf, n_mem_addr_cells); + memcell_buf += n_mem_addr_cells; + size = of_read_number(memcell_buf, n_mem_size_cells); + memcell_buf += n_mem_size_cells; + + max_addr = max_t(phys_addr_t, max_addr, start + size); + } + + return max_addr; +} + /* * If the PE supports dynamic dma windows, and there is space for a table * that can map all pages in a linear offset, then setup such a table, @@ -1053,7 +1058,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) } /* verify the window * number of ptes will map the partition */ /* check largest block * page size > max memory hotplug addr */ - max_addr = memory_hotplug_max(); + max_addr = ddw_memory_hotplug_max(); if (query.largest_available_block < (max_addr >> page_shift)) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u " "%llu-sized pages\n", max_addr, query.largest_available_block, @@ -1190,7 +1195,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) } set_iommu_table_base(&dev->dev, pci->table_group->tables[0]); - iommu_add_device(&dev->dev); + iommu_add_device(pci->table_group, &dev->dev); } static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) @@ -1395,4 +1400,27 @@ static int __init disable_multitce(char *str) __setup("multitce=", disable_multitce); +static int tce_iommu_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_DEL_DEVICE: + iommu_del_device(dev); + return 0; + default: + return 0; + } +} + +static struct notifier_block tce_iommu_bus_nb = { + .notifier_call = tce_iommu_bus_notifier, +}; + +static int __init tce_iommu_bus_notifier_init(void) +{ + bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); + return 0; +} 
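
Both the pseries hunk here and the powernv hunk above converge on the same mechanism: a PCI bus notifier that attaches or detaches a device from its IOMMU group as it appears and disappears. A self-contained sketch of that pattern; the demo_* names are invented for illustration, only the notifier API itself is real:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/pci.h>

static int demo_bus_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added, would join its IOMMU group here\n");
		return 0;
	case BUS_NOTIFY_DEL_DEVICE:
		dev_info(dev, "removed, would leave its IOMMU group here\n");
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_bus_notifier,
};

static int __init demo_notifier_init(void)
{
	return bus_register_notifier(&pci_bus_type, &demo_nb);
}
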
machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 41d8a4d1d02e..7725825d887d 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -29,6 +29,7 @@ #include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/ppc-pci.h> +#include <asm/pci.h> #include "pseries.h" #if 0 @@ -237,6 +238,8 @@ static void __init pSeries_request_regions(void) void __init pSeries_final_fixup(void) { + struct pci_controller *hose; + pSeries_request_regions(); eeh_probe_devices(); @@ -246,6 +249,25 @@ void __init pSeries_final_fixup(void) ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable; ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable; #endif + list_for_each_entry(hose, &hose_list, list_node) { + struct device_node *dn = hose->dn, *nvdn; + + while (1) { + dn = of_find_all_nodes(dn); + if (!dn) + break; + nvdn = of_parse_phandle(dn, "ibm,nvlink", 0); + if (!nvdn) + continue; + if (!of_device_is_compatible(nvdn, "ibm,npu-link")) + continue; + if (!of_device_is_compatible(nvdn->parent, + "ibm,power9-npu")) + continue; + WARN_ON_ONCE(pnv_npu2_init(hose)); + break; + } + } } /* diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index a27f40eb57b1..27f0a915c8a9 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -52,8 +52,8 @@ static ssize_t pmem_drc_add_node(u32 drc_index) /* NB: The of reconfig notifier creates platform device from the node */ rc = dlpar_attach_node(dn, pmem_node); if (rc) { - pr_err("Failed to attach node %s, rc: %d, drc index: %x\n", - dn->name, rc, drc_index); + pr_err("Failed to attach node %pOF, rc: %d, drc index: %x\n", + dn, rc, drc_index); if (dlpar_release_drc(drc_index)) dlpar_free_cc_nodes(dn); @@ -93,8 +93,8 @@ static ssize_t pmem_drc_remove_node(u32 drc_index) rc = dlpar_release_drc(drc_index); if (rc) { - pr_err("Failed to release drc (%x) for CPU %s, rc: %d\n", - drc_index, dn->name, rc); + pr_err("Failed to release drc (%x) for CPU %pOFn, rc: %d\n", + drc_index, dn, rc); dlpar_attach_node(dn, pmem_node); return rc; } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0f553dcfa548..41f62ca27c63 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -190,7 +190,7 @@ static void __init pseries_setup_i8259_cascade(void) of_node_put(old); if (np == NULL) break; - if (strcmp(np->name, "pci") != 0) + if (!of_node_name_eq(np, "pci")) continue; addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL); if (addrp == NULL) @@ -469,8 +469,8 @@ static void __init find_and_init_phbs(void) struct device_node *root = of_find_node_by_path("/"); for_each_child_of_node(root, node) { - if (node->type == NULL || (strcmp(node->type, "pci") != 0 && - strcmp(node->type, "pciex") != 0)) + if (!of_node_is_type(node, "pci") && + !of_node_is_type(node, "pciex")) continue; phb = pcibios_alloc_controller(node); @@ -978,11 +978,7 @@ static void pseries_power_off(void) static int __init pSeries_probe(void) { - const char *dtype = of_get_property(of_root, "device_type", NULL); - - if (dtype == NULL) - return 0; - if (strcmp(dtype, "chrp")) + if (!of_node_is_type(of_root, "chrp")) return 0; /* Cell blades firmware claims to be chrp while it's not. 
Until this diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 88f1ad1d6309..1fad4649735b 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; - dma_addr_t ret = IOMMU_MAPPING_ERROR; + dma_addr_t ret = DMA_MAPPING_ERROR; tbl = get_iommu_table_base(dev); if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { @@ -625,7 +625,6 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .unmap_page = vio_dma_iommu_unmap_page, .dma_supported = vio_dma_iommu_dma_supported, .get_required_mask = vio_dma_get_required_mask, - .mapping_error = dma_iommu_mapping_error, }; /** @@ -1356,9 +1355,9 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) */ parent_node = of_get_parent(of_node); if (parent_node) { - if (!strcmp(parent_node->type, "ibm,platform-facilities")) + if (of_node_is_type(parent_node, "ibm,platform-facilities")) family = PFO; - else if (!strcmp(parent_node->type, "vdevice")) + else if (of_node_is_type(parent_node, "vdevice")) family = VDEVICE; else { pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n", @@ -1395,9 +1394,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) if (viodev->family == VDEVICE) { unsigned int unit_address; - if (of_node->type != NULL) - viodev->type = of_node->type; - else { + viodev->type = of_node_get_device_type(of_node); + if (!viodev->type) { pr_warn("%s: node %pOFn is missing the 'device_type' " "property.\n", __func__, of_node); goto out; @@ -1672,32 +1670,30 @@ struct vio_dev *vio_find_node(struct device_node *vnode) { char kobj_name[20]; struct device_node *vnode_parent; - const char *dev_type; vnode_parent = of_get_parent(vnode); if (!vnode_parent) return NULL; - dev_type = of_get_property(vnode_parent, "device_type", NULL); - of_node_put(vnode_parent); - if (!dev_type) - return NULL; - /* construct the kobject name from the device node */ - if (!strcmp(dev_type, "vdevice")) { + if (of_node_is_type(vnode_parent, "vdevice")) { const __be32 *prop; prop = of_get_property(vnode, "reg", NULL); if (!prop) - return NULL; + goto out; snprintf(kobj_name, sizeof(kobj_name), "%x", (uint32_t)of_read_number(prop, 1)); - } else if (!strcmp(dev_type, "ibm,platform-facilities")) + } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities")) snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode); else - return NULL; + goto out; + of_node_put(vnode_parent); return vio_find_name(kobj_name); +out: + of_node_put(vnode_parent); + return NULL; } EXPORT_SYMBOL(vio_find_node); diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 2caa4defdfb6..aaf23283ba0c 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -48,7 +48,7 @@ obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o ifdef CONFIG_SUSPEND -obj-$(CONFIG_6xx) += 6xx-suspend.o +obj-$(CONFIG_PPC_BOOK3S_32) += 6xx-suspend.o endif obj-$(CONFIG_PPC_SCOM) += scom.o diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h index 12dd18fd4795..6c13d9a7b7b2 100644 --- a/arch/powerpc/sysdev/fsl_rio.h +++ b/arch/powerpc/sysdev/fsl_rio.h @@ -41,7 +41,7 @@ #define DOORBELL_ROWAR_PCI 0x02000000 /* PCI window */ #define DOORBELL_ROWAR_NREAD 0x00040000 /* NREAD */ #define DOORBELL_ROWAR_MAINTRD 0x00070000 /* maintenance read 
*/ -#define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserverd */ +#define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserved */ #define DOORBELL_ROWAR_MAINTWD 0x00007000 #define DOORBELL_ROWAR_SIZE 0x0000000b /* window size is 4k */ diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index 88b35a3dcdc5..8b0ebf3940d2 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c @@ -756,15 +756,13 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) } /* Initialize outbound message descriptor ring */ - rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, + rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, &rmu->msg_tx_ring.phys, GFP_KERNEL); if (!rmu->msg_tx_ring.virt) { rc = -ENOMEM; goto out_dma; } - memset(rmu->msg_tx_ring.virt, 0, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE); rmu->msg_tx_ring.tx_slot = 0; /* Point dequeue/enqueue pointers at first entry in ring */ diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 6300123ce965..8030a0f55e96 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -771,34 +771,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) return ipic; } -int ipic_set_priority(unsigned int virq, unsigned int priority) -{ - struct ipic *ipic = ipic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp; - - if (priority > 7) - return -EINVAL; - if (src > 127) - return -EINVAL; - if (ipic_info[src].prio == 0) - return -EINVAL; - - temp = ipic_read(ipic->regs, ipic_info[src].prio); - - if (priority < 4) { - temp &= ~(0x7 << (20 + (3 - priority) * 3)); - temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3); - } else { - temp &= ~(0x7 << (4 + (7 - priority) * 3)); - temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3); - } - - ipic_write(ipic->regs, ipic_info[src].prio, temp); - - return 0; -} - void ipic_set_highest_priority(unsigned int virq) { struct ipic *ipic = ipic_from_irq(virq); diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 0f6fd5d04d33..a707b24a7ddb 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -60,7 +60,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index) parent = scom_find_parent(dev); if (parent == NULL) - return 0; + return NULL; /* * We support "scom-reg" properties for adding scom registers @@ -83,7 +83,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index) size >>= 2; if (index >= (size / (2*cells))) - return 0; + return NULL; reg = of_read_number(&prop[index * cells * 2], cells); cnt = of_read_number(&prop[index * cells * 2 + cells], cells); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 9824074ec1b5..94a69a62f5db 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -309,7 +309,7 @@ static void xive_do_queue_eoi(struct xive_cpu *xc) * EOI an interrupt at the source. 
There are several methods * to do this depending on the HW version and source type */ -void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) +static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) { /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh index 1fad3fb90e7c..3ce5c093b19d 100755 --- a/arch/powerpc/tools/checkpatch.sh +++ b/arch/powerpc/tools/checkpatch.sh @@ -19,4 +19,5 @@ exec $script_base/../../../scripts/checkpatch.pl \ --ignore GLOBAL_INITIALISERS \ --ignore LINE_SPACING \ --ignore MULTIPLE_ASSIGNMENTS \ + --ignore DT_SPLIT_BINDING_PATCH \ $@ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 36b8dc47a3c3..757b8499aba2 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -75,6 +75,9 @@ static int xmon_gate; #define xmon_owner 0 #endif /* CONFIG_SMP */ +#ifdef CONFIG_PPC_PSERIES +static int set_indicator_token = RTAS_UNKNOWN_SERVICE; +#endif static unsigned long in_xmon __read_mostly = 0; static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT); @@ -273,7 +276,7 @@ Commands:\n\ X exit monitor and don't recover\n" #if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E) " u dump segment table or SLB\n" -#elif defined(CONFIG_PPC_STD_MMU_32) +#elif defined(CONFIG_PPC_BOOK3S_32) " u dump segment registers\n" #elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E) " u dump TLB\n" @@ -358,7 +361,6 @@ static inline void disable_surveillance(void) #ifdef CONFIG_PPC_PSERIES /* Since this can't be a module, args should end up below 4GB. */ static struct rtas_args args; - int token; /* * At this point we have got all the cpus we can into @@ -367,11 +369,11 @@ static inline void disable_surveillance(void) * If we did try to take rtas.lock there would be a * real possibility of deadlock. 
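
The xmon rework deserves a note: looking up the "set-indicator" token can take rtas.lock, which must never be attempted on the crash path, so the token is now resolved once in xmon_init() and only the lock-free rtas_call_unlocked() runs while crashed. Condensed from the hunks above (SURVEILLANCE_TOKEN is xmon-local, rtas_* come from asm/rtas.h):

static int set_indicator_token = RTAS_UNKNOWN_SERVICE;

static void sketch_xmon_init(void)
{
	/* normal context: looking the token up may take a lock */
	set_indicator_token = rtas_token("set-indicator");
}

static void sketch_disable_surveillance(void)
{
	static struct rtas_args args;	/* static: must sit below 4GB */

	if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
		return;

	/* crash context: no locks may be taken */
	rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
			   SURVEILLANCE_TOKEN, 0, 0);
}
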
*/ - token = rtas_token("set-indicator"); - if (token == RTAS_UNKNOWN_SERVICE) + if (set_indicator_token == RTAS_UNKNOWN_SERVICE) return; - rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); + rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL, + SURVEILLANCE_TOKEN, 0, 0); #endif /* CONFIG_PPC_PSERIES */ } @@ -1058,7 +1060,7 @@ cmds(struct pt_regs *excp) case 'P': show_tasks(); break; -#ifdef CONFIG_PPC_STD_MMU +#ifdef CONFIG_PPC_BOOK3S case 'u': dump_segments(); break; @@ -2793,7 +2795,7 @@ print_address(unsigned long addr) xmon_print_symbol(addr, "\t# ", ""); } -void +static void dump_log_buf(void) { struct kmsg_dumper dumper = { .active = 1 }; @@ -2994,13 +2996,13 @@ static void show_task(struct task_struct *tsk) printf("%px %016lx %6d %6d %c %2d %s\n", tsk, tsk->thread.ksp, - tsk->pid, tsk->parent->pid, + tsk->pid, rcu_dereference(tsk->parent)->pid, state, task_thread_info(tsk)->cpu, tsk->comm); } #ifdef CONFIG_PPC_BOOK3S_64 -void format_pte(void *ptep, unsigned long pte) +static void format_pte(void *ptep, unsigned long pte) { pte_t entry = __pte(pte); @@ -3495,14 +3497,14 @@ void dump_segments(void) } #endif -#ifdef CONFIG_PPC_STD_MMU_32 +#ifdef CONFIG_PPC_BOOK3S_32 void dump_segments(void) { int i; printf("sr0-15 ="); for (i = 0; i < 16; ++i) - printf(" %x", mfsrin(i)); + printf(" %x", mfsrin(i << 28)); printf("\n"); } #endif @@ -3688,6 +3690,14 @@ static void xmon_init(int enable) __debugger_iabr_match = xmon_iabr_match; __debugger_break_match = xmon_break_match; __debugger_fault_handler = xmon_fault_handler; + +#ifdef CONFIG_PPC_PSERIES + /* + * Get the token here to avoid trying to get a lock + * during the crash, causing a deadlock. + */ + set_indicator_token = rtas_token("set-indicator"); +#endif } else { __debugger = NULL; __debugger_ipi = NULL; @@ -4033,6 +4043,7 @@ static int do_spu_cmd(void) subcmd = inchar(); if (isxdigit(subcmd) || subcmd == '\n') termch = subcmd; + /* fall through */ case 'f': scanhex(&num); if (num >= XMON_NUM_SPUS || !spu_info[num].spu) { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 55da93f4e818..106539bb914e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -19,11 +19,11 @@ config RISCV select ARCH_WANT_FRAME_POINTERS select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_SMP_IDLE_THREAD @@ -227,39 +227,48 @@ endmenu menu "Boot options" -config CMDLINE_BOOL - bool "Built-in kernel command line" +config CMDLINE + string "Built-in kernel command line" help - For most platforms, it is firmware or second stage bootloader - that by default specifies the kernel command line options. - However, it might be necessary or advantageous to either override - the default kernel command line or add a few extra options to it. - For such cases, this option allows hardcoding command line options - directly into the kernel. + For most platforms, the arguments for the kernel's command line + are provided at run-time, during boot. However, there are cases + where either no arguments are being provided or the provided + arguments are insufficient or even invalid. - For that, choose 'Y' here and fill in the extra boot parameters - in CONFIG_CMDLINE. + When that occurs, it is possible to define a built-in command + line here and choose how the kernel should use it later on. 
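
Taken together with the choice block defined just below, the new scheme gives three ways to combine a built-in command line with whatever the bootloader passes in. A hypothetical sketch of the resulting behaviour; demo_setup_cmdline() is invented for illustration, only the CONFIG_CMDLINE* symbols come from the Kconfig text:

#include <linux/kconfig.h>
#include <linux/string.h>

static void demo_setup_cmdline(char *dst, size_t len,
			       const char *bootloader_args)
{
	if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		/* always use the built-in line, ignore the bootloader */
		strscpy(dst, CONFIG_CMDLINE, len);
	} else if (IS_ENABLED(CONFIG_CMDLINE_EXTEND)) {
		/* bootloader arguments are appended to the built-in line */
		strscpy(dst, CONFIG_CMDLINE, len);
		strlcat(dst, " ", len);
		strlcat(dst, bootloader_args, len);
	} else {
		/* CMDLINE_FALLBACK: built-in line only if boot gave nothing */
		if (bootloader_args && bootloader_args[0])
			strscpy(dst, bootloader_args, len);
		else
			strscpy(dst, CONFIG_CMDLINE, len);
	}
}
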
- The built-in options will be concatenated to the default command - line if CMDLINE_FORCE is set to 'N'. Otherwise, the default - command line will be ignored and replaced by the built-in string. +choice + prompt "Built-in command line usage" if CMDLINE != "" + default CMDLINE_FALLBACK + help + Choose how the kernel will handle the provided built-in command + line. -config CMDLINE - string "Built-in kernel command string" - depends on CMDLINE_BOOL - default "" +config CMDLINE_FALLBACK + bool "Use bootloader kernel arguments if available" help - Supply command-line options at build time by entering them here. + Use the built-in command line as fallback in case we get nothing + during boot. This is the default behaviour. + +config CMDLINE_EXTEND + bool "Extend bootloader kernel arguments" + help + The command-line arguments provided during boot will be + appended to the built-in command line. This is useful in + cases where the provided arguments are insufficient and + you don't want to or cannot modify them. + config CMDLINE_FORCE - bool "Built-in command line overrides bootloader arguments" - depends on CMDLINE_BOOL + bool "Always use the default kernel command string" help - Set this option to 'Y' to have the kernel ignore the bootloader - or firmware command line. Instead, the built-in command line - will be used exclusively. + Always use the built-in command line, even if we get one during + boot. This is useful in case you need to override the provided + command line on systems where you don't have or want control + over it. - If you don't know what to do here, say N. +endchoice endmenu diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug index c5a72f17c469..e69de29bb2d1 100644 --- a/arch/riscv/Kconfig.debug +++ b/arch/riscv/Kconfig.debug @@ -1,2 +0,0 @@ -config EARLY_PRINTK - def_bool y diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index ef4f15df9adf..f399659d3b8d 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -46,6 +46,7 @@ CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y CONFIG_HVC_RISCV_SBI=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index c452359c9cb8..93826771b616 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -303,6 +303,15 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ ATOMIC_OPS() +#define atomic_xchg_relaxed atomic_xchg_relaxed +#define atomic_xchg_acquire atomic_xchg_acquire +#define atomic_xchg_release atomic_xchg_release +#define atomic_xchg atomic_xchg +#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#define atomic_cmpxchg_release atomic_cmpxchg_release +#define atomic_cmpxchg atomic_cmpxchg + #undef ATOMIC_OPS #undef ATOMIC_OP diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h deleted file mode 100644 index 8facc1c8fa05..000000000000 --- a/arch/riscv/include/asm/dma-mapping.h +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#ifndef _RISCV_ASM_DMA_MAPPING_H -#define _RISCV_ASM_DMA_MAPPING_H 1 - -#ifdef CONFIG_SWIOTLB -#include <linux/swiotlb.h> -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &swiotlb_dma_ops; -} -#else -#include <asm-generic/dma-mapping.h> -#endif /* CONFIG_SWIOTLB */ - -#endif /* 
_RISCV_ASM_DMA_MAPPING_H */ diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index cb35ffd8ec6b..638dee3f7e88 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -28,6 +28,7 @@ static int __init_cache_level(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; int levels = 0, leaves = 0, level; if (of_property_read_bool(np, "cache-size")) @@ -39,7 +40,10 @@ static int __init_cache_level(unsigned int cpu) if (leaves > 0) levels = 1; + prev = np; while ((np = of_find_next_cache_node(np))) { + of_node_put(prev); + prev = np; if (!of_device_is_compatible(np, "cache")) break; if (of_property_read_u32(np, "cache-level", &level)) @@ -55,8 +59,10 @@ static int __init_cache_level(unsigned int cpu) levels = level; } + of_node_put(np); this_cpu_ci->num_levels = levels; this_cpu_ci->num_leaves = leaves; + return 0; } @@ -65,6 +71,7 @@ static int __populate_cache_leaves(unsigned int cpu) struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; int levels = 1, level = 1; if (of_property_read_bool(np, "cache-size")) @@ -74,7 +81,10 @@ static int __populate_cache_leaves(unsigned int cpu) if (of_property_read_bool(np, "d-cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); + prev = np; while ((np = of_find_next_cache_node(np))) { + of_node_put(prev); + prev = np; if (!of_device_is_compatible(np, "cache")) break; if (of_property_read_u32(np, "cache-level", &level)) @@ -89,6 +99,7 @@ static int __populate_cache_leaves(unsigned int cpu) ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); levels = level; } + of_node_put(np); return 0; } diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index b4a7d4427fbb..f8fa2c63aa89 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -158,6 +158,7 @@ static int c_show(struct seq_file *m, void *v) && strcmp(compat, "riscv")) seq_printf(m, "uarch\t\t: %s\n", compat); seq_puts(m, "\n"); + of_node_put(node); return 0; } diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 0339087aa652..a6e369edbbd7 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -56,8 +56,10 @@ void riscv_fill_hwcap(void) if (of_property_read_string(node, "riscv,isa", &isa)) { pr_warning("Unable to find \"riscv,isa\" devicetree entry"); + of_node_put(node); return; } + of_node_put(node); for (i = 0; i < strlen(isa); ++i) elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])]; diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index c433f6d3dd64..a840b7d074f7 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, { unsigned long return_hooker = (unsigned long)&return_to_handler; unsigned long old; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c index a243fae1c1db..667ee70defea 100644 --- a/arch/riscv/kernel/perf_event.c +++ b/arch/riscv/kernel/perf_event.c @@ -476,6 +476,7 @@ int __init init_hw_perf_events(void) if (of_id) riscv_pmu = of_id->data; + of_node_put(node); } perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW); diff --git a/arch/riscv/kernel/setup.c 
b/arch/riscv/kernel/setup.c index 2c290e6aaa6e..fc8006a042eb 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -35,31 +35,9 @@ #include <asm/sections.h> #include <asm/pgtable.h> #include <asm/smp.h> -#include <asm/sbi.h> #include <asm/tlbflush.h> #include <asm/thread_info.h> -#ifdef CONFIG_EARLY_PRINTK -static void sbi_console_write(struct console *co, const char *buf, - unsigned int n) -{ - int i; - - for (i = 0; i < n; ++i) { - if (buf[i] == '\n') - sbi_console_putchar('\r'); - sbi_console_putchar(buf[i]); - } -} - -struct console riscv_sbi_early_console_dev __initdata = { - .name = "early", - .write = sbi_console_write, - .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, - .index = -1 -}; -#endif - #ifdef CONFIG_DUMMY_CONSOLE struct screen_info screen_info = { .orig_video_lines = 30, @@ -219,12 +197,6 @@ static void __init setup_bootmem(void) void __init setup_arch(char **cmdline_p) { -#if defined(CONFIG_EARLY_PRINTK) - if (likely(early_console == NULL)) { - early_console = &riscv_sbi_early_console_dev; - register_console(early_console); - } -#endif *cmdline_p = boot_command_line; parse_early_param(); diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 18cda0e8cf94..fc185ecabb0a 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -57,12 +57,15 @@ void __init setup_smp(void) while ((dn = of_find_node_by_type(dn, "cpu"))) { hart = riscv_of_processor_hartid(dn); - if (hart < 0) + if (hart < 0) { + of_node_put(dn); continue; + } if (hart == cpuid_to_hartid_map(0)) { BUG_ON(found_boot_cpu); found_boot_cpu = 1; + of_node_put(dn); continue; } @@ -70,6 +73,7 @@ void __init setup_smp(void) set_cpu_possible(cpuid, true); set_cpu_present(cpuid, true); cpuid++; + of_node_put(dn); } BUG_ON(!found_boot_cpu); diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 1911c8f6b8a6..40470e669a35 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -26,6 +26,7 @@ void __init time_init(void) cpu = of_find_node_by_path("/cpus"); if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop)) panic(KERN_WARNING "RISC-V system with no 'timebase-frequency' in DTS\n"); + of_node_put(cpu); riscv_timebase = prop; lpj_fine = riscv_timebase / HZ; diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S index 69abb1277234..237bc9fd0763 100644 --- a/arch/riscv/lib/tishift.S +++ b/arch/riscv/lib/tishift.S @@ -10,33 +10,36 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
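
A recurring fix across these RISC-V hunks (cacheinfo, cpu, cpufeature, smpboot, time): every node returned by the of_find_*() helpers carries a device-node reference that must be dropped with of_node_put(), including on early continue/break paths. The loop shape from the cacheinfo hunk, isolated as a sketch:

#include <linux/of.h>

static void sketch_walk_caches(struct device_node *np)
{
	struct device_node *prev = np;

	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);	/* done with the node we just left */
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			break;		/* np is still held here */
		/* ... read cache-level / *-cache-size properties ... */
	}
	of_node_put(np);	/* drops np after a break; no-op when NULL */
}
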
*/ - .globl __lshrti3 -__lshrti3: - beqz a2, .L1 - li a5,64 - sub a5,a5,a2 - addi sp,sp,-16 - sext.w a4,a5 - blez a5, .L2 - sext.w a2,a2 - sll a4,a1,a4 - srl a0,a0,a2 - srl a1,a1,a2 - or a0,a0,a4 - sd a1,8(sp) - sd a0,0(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 - ret + +#include <linux/linkage.h> + +ENTRY(__lshrti3) + beqz a2, .L1 + li a5,64 + sub a5,a5,a2 + addi sp,sp,-16 + sext.w a4,a5 + blez a5, .L2 + sext.w a2,a2 + sll a4,a1,a4 + srl a0,a0,a2 + srl a1,a1,a2 + or a0,a0,a4 + sd a1,8(sp) + sd a0,0(sp) + ld a0,0(sp) + ld a1,8(sp) + addi sp,sp,16 + ret .L1: - ret + ret .L2: - negw a4,a4 - srl a1,a1,a4 - sd a1,0(sp) - sd zero,8(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 - ret + negw a4,a4 + srl a1,a1,a4 + sd a1,0(sp) + sd zero,8(sp) + ld a0,0(sp) + ld a1,8(sp) + addi sp,sp,16 + ret +ENDPROC(__lshrti3) diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S index cb01ae5b181a..7f1c0af182a3 100644 --- a/arch/riscv/lib/udivdi3.S +++ b/arch/riscv/lib/udivdi3.S @@ -11,28 +11,30 @@ * GNU General Public License for more details. */ - .globl __udivdi3 -__udivdi3: - mv a2, a1 - mv a1, a0 - li a0, -1 - beqz a2, .L5 - li a3, 1 - bgeu a2, a1, .L2 +#include <linux/linkage.h> + +ENTRY(__udivdi3) + mv a2, a1 + mv a1, a0 + li a0, -1 + beqz a2, .L5 + li a3, 1 + bgeu a2, a1, .L2 .L1: - blez a2, .L2 - slli a2, a2, 1 - slli a3, a3, 1 - bgtu a1, a2, .L1 + blez a2, .L2 + slli a2, a2, 1 + slli a3, a3, 1 + bgtu a1, a2, .L1 .L2: - li a0, 0 + li a0, 0 .L3: - bltu a1, a2, .L4 - sub a1, a1, a2 - or a0, a0, a3 + bltu a1, a2, .L4 + sub a1, a1, a2 + or a0, a0, a3 .L4: - srli a3, a3, 1 - srli a2, a2, 1 - bnez a3, .L3 + srli a3, a3, 1 + srli a2, a2, 1 + bnez a3, .L3 .L5: - ret + ret +ENDPROC(__udivdi3) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5173366af8f3..21d271d04ca6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -73,7 +73,6 @@ config S390 select ARCH_HAS_KCOV select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL @@ -140,7 +139,6 @@ config S390 select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS - select DMA_DIRECT_OPS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 812d9498d97b..dd456725189f 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -137,7 +137,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); sctx->fallback.cip = crypto_alloc_cipher(name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(sctx->fallback.cip)) { pr_err("Allocating AES fallback algorithm %s failed\n", diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index 23a14d187fb1..b5ea9e14c017 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -8,6 +8,8 @@ #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +/* We use the MSB mostly because its available */ +#define PREEMPT_NEED_RESCHED 0x80000000 #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) static inline int preempt_count(void) diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index f413f57f8d20..32023b4f9dc0 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -134,16 +134,6 @@ int kexec_file_add_initrd(struct 
kimage *image, struct s390_load_data *data, return ret; } -/* - * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole - * and provide kbuf->mem by hand. - */ -int arch_kexec_walk_mem(struct kexec_buf *kbuf, - int (*func)(struct resource *, void *)) -{ - return 1; -} - int arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fe24150ff666..7f4bc58a53b9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -11,6 +11,9 @@ * Jason J. Herne <jjherne@us.ibm.com> */ +#define KMSG_COMPONENT "kvm-s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/compiler.h> #include <linux/err.h> #include <linux/fs.h> @@ -44,10 +47,6 @@ #include "kvm-s390.h" #include "gaccess.h" -#define KMSG_COMPONENT "kvm-s390" -#undef pr_fmt -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - #define CREATE_TRACE_POINTS #include "trace.h" #include "trace-s390.h" @@ -417,19 +416,30 @@ static void kvm_s390_cpu_feat_init(void) int kvm_arch_init(void *opaque) { + int rc; + kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); if (!kvm_s390_dbf) return -ENOMEM; if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { - debug_unregister(kvm_s390_dbf); - return -ENOMEM; + rc = -ENOMEM; + goto out_debug_unreg; } kvm_s390_cpu_feat_init(); /* Register floating interrupt controller interface. */ - return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); + rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); + if (rc) { + pr_err("Failed to register FLIC rc=%d\n", rc); + goto out_debug_unreg; + } + return 0; + +out_debug_unreg: + debug_unregister(kvm_s390_dbf); + return rc; } void kvm_arch_exit(void) @@ -464,7 +474,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_CSS_SUPPORT: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_S390_IRQCHIP: case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: @@ -607,7 +616,7 @@ static void icpt_operexc_on_all_vcpus(struct kvm *kvm) } } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r; @@ -1933,14 +1942,6 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_s390_inject_vm(kvm, &s390int); break; } - case KVM_ENABLE_CAP: { - struct kvm_enable_cap cap; - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - break; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } case KVM_CREATE_IRQCHIP: { struct kvm_irq_routing_entry routing; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 6791562779ee..db6bb2f97a2c 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -352,7 +352,7 @@ void tlb_table_flush(struct mmu_gather *tlb) struct mmu_table_batch **batch = &tlb->batch; if (*batch) { - call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); + call_rcu(&(*batch)->rcu, tlb_remove_table_rcu); *batch = NULL; } } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d7052cbe984f..3ff758eeb71d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -821,10 +821,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* * BPF_ARSH */ + case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */ + /* sra %dst,%dst,0(%src) */ + EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0); + EMIT_ZERO(dst_reg); + break; case BPF_ALU64 
| BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */ /* srag %dst,%dst,0(%src) */ EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0); break; + case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */ + if (imm == 0) + break; + /* sra %dst,imm(%r0) */ + EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm); + EMIT_ZERO(dst_reg); + break; case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */ if (imm == 0) break; diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index d387a0fbdd7e..9e52d1527f71 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -15,8 +15,6 @@ #include <linux/pci.h> #include <asm/pci_dma.h> -#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0) - static struct kmem_cache *dma_region_table_cache; static struct kmem_cache *dma_page_table_cache; static int s390_iommu_strict; @@ -301,7 +299,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size) out_error: spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); - return S390_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size) @@ -349,7 +347,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); dma_addr = dma_alloc_address(dev, nr_pages); - if (dma_addr == S390_MAPPING_ERROR) { + if (dma_addr == DMA_MAPPING_ERROR) { ret = -ENOSPC; goto out_err; } @@ -372,7 +370,7 @@ out_free: out_err: zpci_err("map error:\n"); zpci_err_dma(ret, pa); - return S390_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, @@ -406,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, dma_addr_t map; size = PAGE_ALIGN(size); - page = alloc_pages(flag, get_order(size)); + page = alloc_pages(flag | __GFP_ZERO, get_order(size)); if (!page) return NULL; @@ -449,7 +447,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, int ret; dma_addr_base = dma_alloc_address(dev, nr_pages); - if (dma_addr_base == S390_MAPPING_ERROR) + if (dma_addr_base == DMA_MAPPING_ERROR) return -ENOMEM; dma_addr = dma_addr_base; @@ -496,7 +494,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, for (i = 1; i < nr_elements; i++) { s = sg_next(s); - s->dma_address = S390_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || @@ -546,11 +544,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, } } -static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == S390_MAPPING_ERROR; -} - int zpci_dma_init_device(struct zpci_dev *zdev) { int rc; @@ -675,7 +668,6 @@ const struct dma_map_ops s390_pci_dma_ops = { .unmap_sg = s390_dma_unmap_sg, .map_page = s390_dma_map_pages, .unmap_page = s390_dma_unmap_pages, - .mapping_error = s390_mapping_error, /* dma_supported is unconditionally true without a callback */ }; EXPORT_SYMBOL_GPL(s390_pci_dma_ops); diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f82a4da7adf3..10fd4e9c454b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,7 +7,6 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP - select DMA_DIRECT_OPS select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK_NODE_MAP select ARCH_DISCARD_MEMBLOCK diff --git a/arch/sh/Makefile b/arch/sh/Makefile index c521ade2557c..4009bef62fe9 100644 --- 
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f82a4da7adf3..10fd4e9c454b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,7 +7,6 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP - select DMA_DIRECT_OPS select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK_NODE_MAP select ARCH_DISCARD_MEMBLOCK diff --git a/arch/sh/Makefile b/arch/sh/Makefile index c521ade2557c..4009bef62fe9 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -228,6 +228,9 @@ archclean: $(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall +archheaders: + $(Q)$(MAKE) $(build)=arch/sh/kernel/syscalls all + define archhelp @echo ' zImage - Compressed kernel image' @echo ' romImage - Compressed ROM image, if supported' diff --git a/arch/sh/boards/mach-dreamcast/Makefile b/arch/sh/boards/mach-dreamcast/Makefile index 7b97546c7e5f..62b024bc2a3e 100644 --- a/arch/sh/boards/mach-dreamcast/Makefile +++ b/arch/sh/boards/mach-dreamcast/Makefile @@ -2,5 +2,5 @@ # Makefile for the Sega Dreamcast specific parts of the kernel # -obj-y := setup.o irq.o rtc.o - +obj-y := setup.o irq.o +obj-$(CONFIG_RTC_DRV_GENERIC) += rtc.o diff --git a/arch/sh/boards/mach-dreamcast/rtc.c b/arch/sh/boards/mach-dreamcast/rtc.c index 061d65714fcc..0eb12c45fa59 100644 --- a/arch/sh/boards/mach-dreamcast/rtc.c +++ b/arch/sh/boards/mach-dreamcast/rtc.c @@ -11,8 +11,9 @@ */ #include <linux/time.h> -#include <asm/rtc.h> -#include <asm/io.h> +#include <linux/rtc.h> +#include <linux/io.h> +#include <linux/platform_device.h> /* The AICA RTC has an Epoch of 1/1/1950, so we must subtract 20 years (in seconds) to get the standard Unix Epoch when getting the time, and add @@ -26,13 +27,15 @@ /** * aica_rtc_gettimeofday - Get the time from the AICA RTC - * @ts: pointer to resulting timespec + * @dev: the RTC device (ignored) + * @tm: pointer to resulting RTC time structure * * Grabs the current RTC seconds counter and adjusts it to the Unix Epoch. */ -static void aica_rtc_gettimeofday(struct timespec *ts) +static int aica_rtc_gettimeofday(struct device *dev, struct rtc_time *tm) { unsigned long val1, val2; + time64_t t; do { val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) | (__raw_readl(AICA_RTC_SECS_L) & 0xffff); val2 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) | @@ -42,22 +45,26 @@ static void aica_rtc_gettimeofday(struct timespec *ts) (__raw_readl(AICA_RTC_SECS_L) & 0xffff); } while (val1 != val2); - ts->tv_sec = val1 - TWENTY_YEARS; + /* normalize to 1970..2106 time range */ + t = (u32)(val1 - TWENTY_YEARS); - /* Can't get nanoseconds with just a seconds counter. */ - ts->tv_nsec = 0; + rtc_time64_to_tm(t, tm); + + return 0; } /** * aica_rtc_settimeofday - Set the AICA RTC to the current time - * @secs: contains the time_t to set + * @dev: the RTC device (ignored) + * @tm: pointer to new RTC time structure * * Adjusts the given @tm to the AICA Epoch and sets the RTC seconds counter. */ -static int aica_rtc_settimeofday(const time_t secs) +static int aica_rtc_settimeofday(struct device *dev, struct rtc_time *tm) { unsigned long val1, val2; - unsigned long adj = secs + TWENTY_YEARS; + time64_t secs = rtc_tm_to_time64(tm); + u32 adj = secs + TWENTY_YEARS; do { __raw_writel((adj & 0xffff0000) >> 16, AICA_RTC_SECS_H); @@ -73,9 +80,19 @@ static int aica_rtc_settimeofday(const time_t secs) return 0; } -void aica_time_init(void) +static const struct rtc_class_ops rtc_generic_ops = { + .read_time = aica_rtc_gettimeofday, + .set_time = aica_rtc_settimeofday, +}; + +static int __init aica_time_init(void) { - rtc_sh_get_time = aica_rtc_gettimeofday; - rtc_sh_set_time = aica_rtc_settimeofday; -} + struct platform_device *pdev; + + pdev = platform_device_register_data(NULL, "rtc-generic", -1, + &rtc_generic_ops, + sizeof(rtc_generic_ops)); + return PTR_ERR_OR_ZERO(pdev); +} +arch_initcall(aica_time_init);
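The Dreamcast conversion just above, and the SH03 conversion further below, follow the same pattern: instead of assigning the global rtc_sh_get_time/rtc_sh_set_time hooks, the board registers an "rtc-generic" platform device whose platform data carries its rtc_class_ops. A condensed sketch of the pattern for a hypothetical board; board_read_time(), board_rtc_ops, and board_rtc_init() are placeholders, not code from this diff:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

/* Placeholder read_time: a real board would read its RTC hardware here. */
static int board_read_time(struct device *dev, struct rtc_time *tm)
{
	rtc_time64_to_tm(0, tm);
	return 0;
}

static const struct rtc_class_ops board_rtc_ops = {
	.read_time = board_read_time,
};

static int __init board_rtc_init(void)
{
	struct platform_device *pdev;

	/* The rtc-generic driver picks the ops up as platform data. */
	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &board_rtc_ops,
					     sizeof(board_rtc_ops));
	return PTR_ERR_OR_ZERO(pdev);
}
arch_initcall(board_rtc_init);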
diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c index ad1a4db72e04..672c2ad8f8d5 100644 --- a/arch/sh/boards/mach-dreamcast/setup.c +++ b/arch/sh/boards/mach-dreamcast/setup.c @@ -30,7 +30,6 @@ static void __init dreamcast_setup(char **cmdline_p) { - board_time_init = aica_time_init; } static struct sh_machine_vector mv_dreamcast __initmv = { diff --git a/arch/sh/boards/mach-sh03/Makefile b/arch/sh/boards/mach-sh03/Makefile index 400306a796ec..47007a3a2fc8 100644 --- a/arch/sh/boards/mach-sh03/Makefile +++ b/arch/sh/boards/mach-sh03/Makefile @@ -2,4 +2,5 @@ # Makefile for the Interface (CTP/PCI-SH03) specific parts of the kernel # -obj-y := setup.o rtc.o +obj-y := setup.o +obj-$(CONFIG_RTC_DRV_GENERIC) += rtc.o diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c index dc3d50e3b7a2..8b23ed7c201c 100644 --- a/arch/sh/boards/mach-sh03/rtc.c +++ b/arch/sh/boards/mach-sh03/rtc.c @@ -13,8 +13,9 @@ #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/spinlock.h> -#include <asm/io.h> -#include <asm/rtc.h> +#include <linux/io.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> #define RTC_BASE 0xb0000000 #define RTC_SEC1 (RTC_BASE + 0) @@ -38,7 +39,7 @@ static DEFINE_SPINLOCK(sh03_rtc_lock); -unsigned long get_cmos_time(void) +static int sh03_rtc_gettimeofday(struct device *dev, struct rtc_time *tm) { unsigned int year, mon, day, hour, min, sec; @@ -75,17 +76,18 @@ unsigned long get_cmos_time(void) } spin_unlock(&sh03_rtc_lock); - return mktime(year, mon, day, hour, min, sec); -} -void sh03_rtc_gettimeofday(struct timespec *tv) -{ + tm->tm_sec = sec; + tm->tm_min = min; + tm->tm_hour = hour; + tm->tm_mday = day; + tm->tm_mon = mon; + tm->tm_year = year - 1900; - tv->tv_sec = get_cmos_time(); - tv->tv_nsec = 0; + return 0; } -static int set_rtc_mmss(unsigned long nowtime) +static int set_rtc_mmss(struct rtc_time *tm) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; @@ -97,8 +99,8 @@ static int set_rtc_mmss(unsigned long nowtime) if (!(__raw_readb(RTC_CTL) & RTC_BUSY)) break; cmos_minutes = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10; - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; + real_seconds = tm->tm_sec; + real_minutes = tm->tm_min; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; @@ -112,22 +114,31 @@ static int set_rtc_mmss(unsigned long nowtime) printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); - 
retval = -EINVAL; } spin_unlock(&sh03_rtc_lock); return retval; } -int sh03_rtc_settimeofday(const time_t secs) +int sh03_rtc_settimeofday(struct device *dev, struct rtc_time *tm) { - unsigned long nowtime = secs; - - return set_rtc_mmss(nowtime); + return set_rtc_mmss(tm); } -void sh03_time_init(void) +static const struct rtc_class_ops rtc_generic_ops = { + .read_time = sh03_rtc_gettimeofday, + .set_time = sh03_rtc_settimeofday, +}; + +static int __init sh03_time_init(void) { - rtc_sh_get_time = sh03_rtc_gettimeofday; - rtc_sh_set_time = sh03_rtc_settimeofday; + struct platform_device *pdev; + + pdev = platform_device_register_data(NULL, "rtc-generic", -1, + &rtc_generic_ops, + sizeof(rtc_generic_ops)); + + return PTR_ERR_OR_ZERO(pdev); } +arch_initcall(sh03_time_init); diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c index 85e7059a77e9..3901b6031ad5 100644 --- a/arch/sh/boards/mach-sh03/setup.c +++ b/arch/sh/boards/mach-sh03/setup.c @@ -22,14 +22,6 @@ static void __init init_sh03_IRQ(void) plat_irq_setup_pins(IRQ_MODE_IRQ); } -/* arch/sh/boards/sh03/rtc.c */ -void sh03_time_init(void); - -static void __init sh03_setup(char **cmdline_p) -{ - board_time_init = sh03_time_init; -} - static struct resource cf_ide_resources[] = { [0] = { .start = 0x1f0, @@ -101,6 +93,5 @@ device_initcall(sh03_devices_setup); static struct sh_machine_vector mv_sh03 __initmv = { .mv_name = "Interface (CTP/PCI-SH03)", - .mv_setup = sh03_setup, .mv_init_irq = init_sh03_IRQ, }; diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index cde370cad4ae..6e9786548ac6 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -117,18 +117,10 @@ static void __init sh_of_mem_reserve(void) early_init_fdt_scan_reserved_mem(); } -static void __init sh_of_time_init(void) -{ - pr_info("SH generic board support: scanning for clocksource devices\n"); - timer_probe(); -} - static void __init sh_of_setup(char **cmdline_p) { struct device_node *root; - board_time_init = sh_of_time_init; - sh_mv.mv_name = "Unknown SH model"; root = of_find_node_by_path("/"); if (root) { diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index 3f08dc54480b..1d27666c029f 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -70,3 +70,5 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_GENERIC=y diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 2156223405a1..489ffdfb1517 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -130,3 +130,5 @@ CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_CCITT=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_GENERIC=y diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 6a5609a55965..b15caf34813a 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,3 +1,4 @@ +generated-y += syscall_table.h generic-y += compat.h generic-y += current.h generic-y += delay.h diff --git a/arch/sh/include/asm/rtc.h b/arch/sh/include/asm/rtc.h index c63555ee1255..69dbae2949b0 100644 --- a/arch/sh/include/asm/rtc.h +++ b/arch/sh/include/asm/rtc.h @@ -3,9 +3,6 @@ #define _ASM_RTC_H void time_init(void); -extern void (*board_time_init)(void); -extern void (*rtc_sh_get_time)(struct timespec *); -extern int (*rtc_sh_set_time)(const time_t); #define RTC_CAP_4_DIGIT_YEAR (1 << 0) diff 
--git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h index a99234b61051..a97f93ca3bd7 100644 --- a/arch/sh/include/asm/unistd.h +++ b/arch/sh/include/asm/unistd.h @@ -5,6 +5,8 @@ # include <asm/unistd_64.h> # endif +#define NR_syscalls __NR_syscalls + # define __ARCH_WANT_NEW_STAT # define __ARCH_WANT_OLD_READDIR # define __ARCH_WANT_OLD_STAT diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h index 58f710e1ebc2..59effd1ed3e1 100644 --- a/arch/sh/include/mach-dreamcast/mach/sysasic.h +++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h @@ -42,7 +42,6 @@ /* arch/sh/boards/mach-dreamcast/irq.c */ extern int systemasic_irq_demux(int); extern void systemasic_irq_init(void); -extern void aica_time_init(void); #endif /* __ASM_SH_DREAMCAST_SYSASIC_H */ diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index ba4d39cb321d..a55e317c1ef2 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ b/arch/sh/include/uapi/asm/Kbuild @@ -1,6 +1,7 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h generic-y += bitsperlong.h generic-y += bpf_perf_event.h generic-y += errno.h diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h index 58f04cf3d1d9..31c85aa251ab 100644 --- a/arch/sh/include/uapi/asm/unistd_32.h +++ b/arch/sh/include/uapi/asm/unistd_32.h @@ -396,6 +396,8 @@ #define __NR_preadv2 381 #define __NR_pwritev2 382 -#define NR_syscalls 383 +#ifdef __KERNEL__ +#define __NR_syscalls 383 +#endif #endif /* __ASM_SH_UNISTD_32_H */ diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h index 6f809a53aa24..75da54851f02 100644 --- a/arch/sh/include/uapi/asm/unistd_64.h +++ b/arch/sh/include/uapi/asm/unistd_64.h @@ -416,6 +416,8 @@ #define __NR_preadv2 392 #define __NR_pwritev2 393 -#define NR_syscalls 394 +#ifdef __KERNEL__ +#define __NR_syscalls 394 +#endif #endif /* __ASM_SH_UNISTD_64_H */ diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile new file mode 100644 index 000000000000..659faefdcb1d --- /dev/null +++ b/arch/sh/kernel/syscalls/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h +kapisyshdr-y += syscall_table.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + +PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..21ec75288562 --- /dev/null +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -0,0 
+1,392 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sh +# +# The format is: +# <number> <abi> <name> <entry point> +# +# The <abi> is always "common" for this file +# +0 common restart_syscall sys_restart_syscall +1 common exit sys_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 common execve sys_execve +12 common chdir sys_chdir +13 common time sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown16 +# 17 was break +18 common oldstat sys_stat +19 common lseek sys_lseek +20 common getpid sys_getpid +21 common mount sys_mount +22 common umount sys_oldumount +23 common setuid sys_setuid16 +24 common getuid sys_getuid16 +25 common stime sys_stime +26 common ptrace sys_ptrace +27 common alarm sys_alarm +28 common oldfstat sys_fstat +29 common pause sys_pause +30 common utime sys_utime +# 31 was stty +# 32 was gtty +33 common access sys_access +34 common nice sys_nice +# 35 was ftime +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_sh_pipe +43 common times sys_times +# 44 was prof +45 common brk sys_brk +46 common setgid sys_setgid16 +47 common getgid sys_getgid16 +48 common signal sys_signal +49 common geteuid sys_geteuid16 +50 common getegid sys_getegid16 +51 common acct sys_acct +52 common umount2 sys_umount +# 53 was lock +54 common ioctl sys_ioctl +55 common fcntl sys_fcntl +# 56 was mpx +57 common setpgid sys_setpgid +# 58 was ulimit +# 59 was olduname +60 common umask sys_umask +61 common chroot sys_chroot +62 common ustat sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 common sigaction sys_sigaction +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid16 +71 common setregid sys_setregid16 +72 common sigsuspend sys_sigsuspend +73 common sigpending sys_sigpending +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit +76 common getrlimit sys_old_getrlimit +77 common getrusage sys_getrusage +78 common gettimeofday sys_gettimeofday +79 common settimeofday sys_settimeofday +80 common getgroups sys_getgroups16 +81 common setgroups sys_setgroups16 +# 82 was select +83 common symlink sys_symlink +84 common oldlstat sys_lstat +85 common readlink sys_readlink +86 common uselib sys_uselib +87 common swapon sys_swapon +88 common reboot sys_reboot +89 common readdir sys_old_readdir +90 common mmap old_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate +93 common ftruncate sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown16 +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +# 98 was profil +99 common statfs sys_statfs +100 common fstatfs sys_fstatfs +# 101 was ioperm +102 common socketcall sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer +105 common getitimer sys_getitimer +106 common stat sys_newstat +107 common lstat sys_newlstat +108 common fstat sys_newfstat +109 common olduname sys_uname +# 110 was iopl +111 common vhangup sys_vhangup +# 112 was idle +# 113 was vm86old +114 common wait4 sys_wait4 +115 common swapoff sys_swapoff +116 common 
sysinfo sys_sysinfo +117 common ipc sys_ipc +118 common fsync sys_fsync +119 common sigreturn sys_sigreturn +120 common clone sys_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common cacheflush sys_cacheflush +124 common adjtimex sys_adjtimex +125 common mprotect sys_mprotect +126 common sigprocmask sys_sigprocmask +# 127 was create_module +128 common init_module sys_init_module +129 common delete_module sys_delete_module +# 130 was get_kernel_syms +131 common quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 common personality sys_personality +# 137 was afs_syscall +138 common setfsuid sys_setfsuid16 +139 common setfsgid sys_setfsgid16 +140 common _llseek sys_llseek +141 common getdents sys_getdents +142 common _newselect sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv +146 common writev sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 common _sysctl sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid16 +165 common getresuid sys_getresuid16 +# 166 was vm86 +# 167 was query_module +168 common poll sys_poll +169 common nfsservctl sys_ni_syscall +170 common setresgid sys_setresgid16 +171 common getresgid sys_getresgid16 +172 common prctl sys_prctl +173 common rt_sigreturn sys_rt_sigreturn +174 common rt_sigaction sys_rt_sigaction +175 common rt_sigprocmask sys_rt_sigprocmask +176 common rt_sigpending sys_rt_sigpending +177 common rt_sigtimedwait sys_rt_sigtimedwait +178 common rt_sigqueueinfo sys_rt_sigqueueinfo +179 common rt_sigsuspend sys_rt_sigsuspend +180 common pread64 sys_pread_wrapper +181 common pwrite64 sys_pwrite_wrapper +182 common chown sys_chown16 +183 common getcwd sys_getcwd +184 common capget sys_capget +185 common capset sys_capset +186 common sigaltstack sys_sigaltstack +187 common sendfile sys_sendfile +# 188 is reserved for getpmsg +# 189 is reserved for putpmsg +190 common vfork sys_vfork +191 common ugetrlimit sys_getrlimit +192 common mmap2 sys_mmap2 +193 common truncate64 sys_truncate64 +194 common ftruncate64 sys_ftruncate64 +195 common stat64 sys_stat64 +196 common lstat64 sys_lstat64 +197 common fstat64 sys_fstat64 +198 common lchown32 sys_lchown +199 common getuid32 sys_getuid +200 common getgid32 sys_getgid +201 common geteuid32 sys_geteuid +202 common getegid32 sys_getegid +203 common setreuid32 sys_setreuid +204 common setregid32 sys_setregid +205 common getgroups32 sys_getgroups +206 common setgroups32 sys_setgroups +207 common fchown32 sys_fchown +208 common setresuid32 sys_setresuid +209 common getresuid32 sys_getresuid +210 common setresgid32 sys_setresgid +211 common getresgid32 sys_getresgid +212 common chown32 sys_chown +213 common setuid32 sys_setuid +214 common setgid32 sys_setgid +215 common setfsuid32 sys_setfsuid +216 common 
setfsgid32 sys_setfsgid +217 common pivot_root sys_pivot_root +218 common mincore sys_mincore +219 common madvise sys_madvise +220 common getdents64 sys_getdents64 +221 common fcntl64 sys_fcntl64 +# 222 is reserved for tux +# 223 is unused +224 common gettid sys_gettid +225 common readahead sys_readahead +226 common setxattr sys_setxattr +227 common lsetxattr sys_lsetxattr +228 common fsetxattr sys_fsetxattr +229 common getxattr sys_getxattr +230 common lgetxattr sys_lgetxattr +231 common fgetxattr sys_fgetxattr +232 common listxattr sys_listxattr +233 common llistxattr sys_llistxattr +234 common flistxattr sys_flistxattr +235 common removexattr sys_removexattr +236 common lremovexattr sys_lremovexattr +237 common fremovexattr sys_fremovexattr +238 common tkill sys_tkill +239 common sendfile64 sys_sendfile64 +240 common futex sys_futex +241 common sched_setaffinity sys_sched_setaffinity +242 common sched_getaffinity sys_sched_getaffinity +# 243 is reserved for set_thread_area +# 244 is reserved for get_thread_area +245 common io_setup sys_io_setup +246 common io_destroy sys_io_destroy +247 common io_getevents sys_io_getevents +248 common io_submit sys_io_submit +249 common io_cancel sys_io_cancel +250 common fadvise64 sys_fadvise64 +# 251 is unused +252 common exit_group sys_exit_group +253 common lookup_dcookie sys_lookup_dcookie +254 common epoll_create sys_epoll_create +255 common epoll_ctl sys_epoll_ctl +256 common epoll_wait sys_epoll_wait +257 common remap_file_pages sys_remap_file_pages +258 common set_tid_address sys_set_tid_address +259 common timer_create sys_timer_create +260 common timer_settime sys_timer_settime +261 common timer_gettime sys_timer_gettime +262 common timer_getoverrun sys_timer_getoverrun +263 common timer_delete sys_timer_delete +264 common clock_settime sys_clock_settime +265 common clock_gettime sys_clock_gettime +266 common clock_getres sys_clock_getres +267 common clock_nanosleep sys_clock_nanosleep +268 common statfs64 sys_statfs64 +269 common fstatfs64 sys_fstatfs64 +270 common tgkill sys_tgkill +271 common utimes sys_utimes +272 common fadvise64_64 sys_fadvise64_64_wrapper +# 273 is reserved for vserver +274 common mbind sys_mbind +275 common get_mempolicy sys_get_mempolicy +276 common set_mempolicy sys_set_mempolicy +277 common mq_open sys_mq_open +278 common mq_unlink sys_mq_unlink +279 common mq_timedsend sys_mq_timedsend +280 common mq_timedreceive sys_mq_timedreceive +281 common mq_notify sys_mq_notify +282 common mq_getsetattr sys_mq_getsetattr +283 common kexec_load sys_kexec_load +284 common waitid sys_waitid +285 common add_key sys_add_key +286 common request_key sys_request_key +287 common keyctl sys_keyctl +288 common ioprio_set sys_ioprio_set +289 common ioprio_get sys_ioprio_get +290 common inotify_init sys_inotify_init +291 common inotify_add_watch sys_inotify_add_watch +292 common inotify_rm_watch sys_inotify_rm_watch +# 293 is unused +294 common migrate_pages sys_migrate_pages +295 common openat sys_openat +296 common mkdirat sys_mkdirat +297 common mknodat sys_mknodat +298 common fchownat sys_fchownat +299 common futimesat sys_futimesat +300 common fstatat64 sys_fstatat64 +301 common unlinkat sys_unlinkat +302 common renameat sys_renameat +303 common linkat sys_linkat +304 common symlinkat sys_symlinkat +305 common readlinkat sys_readlinkat +306 common fchmodat sys_fchmodat +307 common faccessat sys_faccessat +308 common pselect6 sys_pselect6 +309 common ppoll sys_ppoll +310 common unshare sys_unshare +311 common set_robust_list 
sys_set_robust_list +312 common get_robust_list sys_get_robust_list +313 common splice sys_splice +314 common sync_file_range sys_sync_file_range +315 common tee sys_tee +316 common vmsplice sys_vmsplice +317 common move_pages sys_move_pages +318 common getcpu sys_getcpu +319 common epoll_pwait sys_epoll_pwait +320 common utimensat sys_utimensat +321 common signalfd sys_signalfd +322 common timerfd_create sys_timerfd_create +323 common eventfd sys_eventfd +324 common fallocate sys_fallocate +325 common timerfd_settime sys_timerfd_settime +326 common timerfd_gettime sys_timerfd_gettime +327 common signalfd4 sys_signalfd4 +328 common eventfd2 sys_eventfd2 +329 common epoll_create1 sys_epoll_create1 +330 common dup3 sys_dup3 +331 common pipe2 sys_pipe2 +332 common inotify_init1 sys_inotify_init1 +333 common preadv sys_preadv +334 common pwritev sys_pwritev +335 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +336 common perf_event_open sys_perf_event_open +337 common fanotify_init sys_fanotify_init +338 common fanotify_mark sys_fanotify_mark +339 common prlimit64 sys_prlimit64 +340 common socket sys_socket +341 common bind sys_bind +342 common connect sys_connect +343 common listen sys_listen +344 common accept sys_accept +345 common getsockname sys_getsockname +346 common getpeername sys_getpeername +347 common socketpair sys_socketpair +348 common send sys_send +349 common sendto sys_sendto +350 common recv sys_recv +351 common recvfrom sys_recvfrom +352 common shutdown sys_shutdown +353 common setsockopt sys_setsockopt +354 common getsockopt sys_getsockopt +355 common sendmsg sys_sendmsg +356 common recvmsg sys_recvmsg +357 common recvmmsg sys_recvmmsg +358 common accept4 sys_accept4 +359 common name_to_handle_at sys_name_to_handle_at +360 common open_by_handle_at sys_open_by_handle_at +361 common clock_adjtime sys_clock_adjtime +362 common syncfs sys_syncfs +363 common sendmmsg sys_sendmmsg +364 common setns sys_setns +365 common process_vm_readv sys_process_vm_readv +366 common process_vm_writev sys_process_vm_writev +367 common kcmp sys_kcmp +368 common finit_module sys_finit_module +369 common sched_getattr sys_sched_getattr +370 common sched_setattr sys_sched_setattr +371 common renameat2 sys_renameat2 +372 common seccomp sys_seccomp +373 common getrandom sys_getrandom +374 common memfd_create sys_memfd_create +375 common bpf sys_bpf +376 common execveat sys_execveat +377 common userfaultfd sys_userfaultfd +378 common membarrier sys_membarrier +379 common mlock2 sys_mlock2 +380 common copy_file_range sys_copy_file_range +381 common preadv2 sys_preadv2 +382 common pwritev2 sys_pwritev2 diff --git a/arch/sh/kernel/syscalls/syscallhdr.sh b/arch/sh/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..1de0334e577f --- /dev/null +++ b/arch/sh/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_SH_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + 
nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..85d78d9309ad --- /dev/null +++ b/arch/sh/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry ; do + emit $((nxt+offset)) $((nr+offset)) $entry + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 254bc22ee57d..54978e01bf94 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -14,389 +14,8 @@ #include <linux/sys.h> #include <linux/linkage.h> +#define __SYSCALL(nr, entry, nargs) .long entry .data ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call*/ - .long sys_exit - .long sys_fork - .long sys_read - .long sys_write - .long sys_open /* 5 */ - .long sys_close - .long sys_waitpid - .long sys_creat - .long sys_link - .long sys_unlink /* 10 */ - .long sys_execve - .long sys_chdir - .long sys_time - .long sys_mknod - .long sys_chmod /* 15 */ - .long sys_lchown16 - .long sys_ni_syscall /* old break syscall holder */ - .long sys_stat - .long sys_lseek - .long sys_getpid /* 20 */ - .long sys_mount - .long sys_oldumount - .long sys_setuid16 - .long sys_getuid16 - .long sys_stime /* 25 */ - .long sys_ptrace - .long sys_alarm - .long sys_fstat - .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ - .long sys_access - .long sys_nice - .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ - .long sys_sync - .long sys_kill - .long sys_rename - .long sys_mkdir - .long sys_rmdir /* 40 */ - .long sys_dup - .long sys_sh_pipe - .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ - .long sys_setgid16 - .long sys_getgid16 - .long sys_signal - .long sys_geteuid16 - .long sys_getegid16 /* 50 */ - .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ - .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ - .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ - .long sys_ni_syscall /* sys_olduname */ - .long sys_umask /* 60 */ - .long sys_chroot - .long sys_ustat - .long sys_dup2 - .long sys_getppid - .long sys_getpgrp /* 65 */ - .long sys_setsid - .long sys_sigaction - .long sys_sgetmask - .long sys_ssetmask - .long sys_setreuid16 /* 70 */ - .long sys_setregid16 - .long sys_sigsuspend - .long sys_sigpending - .long sys_sethostname - .long sys_setrlimit /* 75 */ - .long sys_old_getrlimit - .long sys_getrusage - .long sys_gettimeofday - .long sys_settimeofday - .long sys_getgroups16 /* 80 */ - .long sys_setgroups16 - .long sys_ni_syscall /* sys_oldselect */ - .long sys_symlink - .long sys_lstat - 
.long sys_readlink /* 85 */ - .long sys_uselib - .long sys_swapon - .long sys_reboot - .long sys_old_readdir - .long old_mmap /* 90 */ - .long sys_munmap - .long sys_truncate - .long sys_ftruncate - .long sys_fchmod - .long sys_fchown16 /* 95 */ - .long sys_getpriority - .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ - .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm */ - .long sys_socketcall - .long sys_syslog - .long sys_setitimer - .long sys_getitimer /* 105 */ - .long sys_newstat - .long sys_newlstat - .long sys_newfstat - .long sys_uname - .long sys_ni_syscall /* 110 */ /* iopl */ - .long sys_vhangup - .long sys_ni_syscall /* idle */ - .long sys_ni_syscall /* vm86old */ - .long sys_wait4 - .long sys_swapoff /* 115 */ - .long sys_sysinfo - .long sys_ipc - .long sys_fsync - .long sys_sigreturn - .long sys_clone /* 120 */ - .long sys_setdomainname - .long sys_newuname - .long sys_cacheflush /* x86: sys_modify_ldt */ - .long sys_adjtimex - .long sys_mprotect /* 125 */ - .long sys_sigprocmask - .long sys_ni_syscall /* old "create_module" */ - .long sys_init_module - .long sys_delete_module - .long sys_ni_syscall /* 130: old "get_kernel_syms" */ - .long sys_quotactl - .long sys_getpgid - .long sys_fchdir - .long sys_bdflush - .long sys_sysfs /* 135 */ - .long sys_personality - .long sys_ni_syscall /* for afs_syscall */ - .long sys_setfsuid16 - .long sys_setfsgid16 - .long sys_llseek /* 140 */ - .long sys_getdents - .long sys_select - .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ - .long sys_writev - .long sys_getsid - .long sys_fdatasync - .long sys_sysctl - .long sys_mlock /* 150 */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall - .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ - .long sys_sched_setscheduler - .long sys_sched_getscheduler - .long sys_sched_yield - .long sys_sched_get_priority_max - .long sys_sched_get_priority_min /* 160 */ - .long sys_sched_rr_get_interval - .long sys_nanosleep - .long sys_mremap - .long sys_setresuid16 - .long sys_getresuid16 /* 165 */ - .long sys_ni_syscall /* vm86 */ - .long sys_ni_syscall /* old "query_module" */ - .long sys_poll - .long sys_ni_syscall /* was nfsservctl */ - .long sys_setresgid16 /* 170 */ - .long sys_getresgid16 - .long sys_prctl - .long sys_rt_sigreturn - .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ - .long sys_rt_sigpending - .long sys_rt_sigtimedwait - .long sys_rt_sigqueueinfo - .long sys_rt_sigsuspend - .long sys_pread_wrapper /* 180 */ - .long sys_pwrite_wrapper - .long sys_chown16 - .long sys_getcwd - .long sys_capget - .long sys_capset /* 185 */ - .long sys_sigaltstack - .long sys_sendfile - .long sys_ni_syscall /* getpmsg */ - .long sys_ni_syscall /* putpmsg */ - .long sys_vfork /* 190 */ - .long sys_getrlimit - .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ - .long sys_lstat64 - .long sys_fstat64 - .long sys_lchown - .long sys_getuid - .long sys_getgid /* 200 */ - .long sys_geteuid - .long sys_getegid - .long sys_setreuid - .long sys_setregid - .long sys_getgroups /* 205 */ - .long sys_setgroups - .long sys_fchown - .long sys_setresuid - .long sys_getresuid - .long sys_setresgid /* 210 */ - .long sys_getresgid - .long sys_chown - .long sys_setuid - .long sys_setgid - .long sys_setfsuid /* 215 */ - .long sys_setfsgid - .long sys_pivot_root - .long sys_mincore - .long sys_madvise - .long sys_getdents64 /* 220 */ - .long sys_fcntl64 - .long sys_ni_syscall /* 
reserved for TUX */ - .long sys_ni_syscall /* Reserved for Security */ - .long sys_gettid - .long sys_readahead /* 225 */ - .long sys_setxattr - .long sys_lsetxattr - .long sys_fsetxattr - .long sys_getxattr - .long sys_lgetxattr /* 230 */ - .long sys_fgetxattr - .long sys_listxattr - .long sys_llistxattr - .long sys_flistxattr - .long sys_removexattr /* 235 */ - .long sys_lremovexattr - .long sys_fremovexattr - .long sys_tkill - .long sys_sendfile64 - .long sys_futex /* 240 */ - .long sys_sched_setaffinity - .long sys_sched_getaffinity - .long sys_ni_syscall /* reserved for set_thread_area */ - .long sys_ni_syscall /* reserved for get_thread_area */ - .long sys_io_setup /* 245 */ - .long sys_io_destroy - .long sys_io_getevents - .long sys_io_submit - .long sys_io_cancel - .long sys_fadvise64 /* 250 */ - .long sys_ni_syscall - .long sys_exit_group - .long sys_lookup_dcookie - .long sys_epoll_create - .long sys_epoll_ctl /* 255 */ - .long sys_epoll_wait - .long sys_remap_file_pages - .long sys_set_tid_address - .long sys_timer_create - .long sys_timer_settime /* 260 */ - .long sys_timer_gettime - .long sys_timer_getoverrun - .long sys_timer_delete - .long sys_clock_settime - .long sys_clock_gettime /* 265 */ - .long sys_clock_getres - .long sys_clock_nanosleep - .long sys_statfs64 - .long sys_fstatfs64 - .long sys_tgkill /* 270 */ - .long sys_utimes - .long sys_fadvise64_64_wrapper - .long sys_ni_syscall /* Reserved for vserver */ - .long sys_mbind - .long sys_get_mempolicy /* 275 */ - .long sys_set_mempolicy - .long sys_mq_open - .long sys_mq_unlink - .long sys_mq_timedsend - .long sys_mq_timedreceive /* 280 */ - .long sys_mq_notify - .long sys_mq_getsetattr - .long sys_kexec_load - .long sys_waitid - .long sys_add_key /* 285 */ - .long sys_request_key - .long sys_keyctl - .long sys_ioprio_set - .long sys_ioprio_get - .long sys_inotify_init /* 290 */ - .long sys_inotify_add_watch - .long sys_inotify_rm_watch - .long sys_ni_syscall - .long sys_migrate_pages - .long sys_openat /* 295 */ - .long sys_mkdirat - .long sys_mknodat - .long sys_fchownat - .long sys_futimesat - .long sys_fstatat64 /* 300 */ - .long sys_unlinkat - .long sys_renameat - .long sys_linkat - .long sys_symlinkat - .long sys_readlinkat /* 305 */ - .long sys_fchmodat - .long sys_faccessat - .long sys_pselect6 - .long sys_ppoll - .long sys_unshare /* 310 */ - .long sys_set_robust_list - .long sys_get_robust_list - .long sys_splice - .long sys_sync_file_range - .long sys_tee /* 315 */ - .long sys_vmsplice - .long sys_move_pages - .long sys_getcpu - .long sys_epoll_pwait - .long sys_utimensat /* 320 */ - .long sys_signalfd - .long sys_timerfd_create - .long sys_eventfd - .long sys_fallocate - .long sys_timerfd_settime /* 325 */ - .long sys_timerfd_gettime - .long sys_signalfd4 - .long sys_eventfd2 - .long sys_epoll_create1 - .long sys_dup3 /* 330 */ - .long sys_pipe2 - .long sys_inotify_init1 - .long sys_preadv - .long sys_pwritev - .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_event_open - .long sys_fanotify_init - .long sys_fanotify_mark - .long sys_prlimit64 - /* Broken-out socket family */ - .long sys_socket /* 340 */ - .long sys_bind - .long sys_connect - .long sys_listen - .long sys_accept - .long sys_getsockname /* 345 */ - .long sys_getpeername - .long sys_socketpair - .long sys_send - .long sys_sendto - .long sys_recv /* 350 */ - .long sys_recvfrom - .long sys_shutdown - .long sys_setsockopt - .long sys_getsockopt - .long sys_sendmsg /* 355 */ - .long sys_recvmsg - .long sys_recvmmsg - .long sys_accept4 - 
.long sys_name_to_handle_at - .long sys_open_by_handle_at /* 360 */ - .long sys_clock_adjtime - .long sys_syncfs - .long sys_sendmmsg - .long sys_setns - .long sys_process_vm_readv /* 365 */ - .long sys_process_vm_writev - .long sys_kcmp - .long sys_finit_module - .long sys_sched_getattr - .long sys_sched_setattr /* 370 */ - .long sys_renameat2 - .long sys_seccomp - .long sys_getrandom - .long sys_memfd_create - .long sys_bpf /* 375 */ - .long sys_execveat - .long sys_userfaultfd - .long sys_membarrier - .long sys_mlock2 - .long sys_copy_file_range /* 380 */ - .long sys_preadv2 - .long sys_pwritev2 +#include <asm/syscall_table.h> +#undef __SYSCALL diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index fcd5e41977d1..8a1c6c8ab4ec 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -22,77 +22,6 @@ #include <asm/clock.h> #include <asm/rtc.h> -/* Dummy RTC ops */ -static void null_rtc_get_time(struct timespec *tv) -{ - tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0); - tv->tv_nsec = 0; -} - -static int null_rtc_set_time(const time_t secs) -{ - return 0; -} - -void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; -int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; - -void read_persistent_clock(struct timespec *ts) -{ - rtc_sh_get_time(ts); -} - -#ifdef CONFIG_GENERIC_CMOS_UPDATE -int update_persistent_clock(struct timespec now) -{ - return rtc_sh_set_time(now.tv_sec); -} -#endif - -static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) -{ - struct timespec tv; - - rtc_sh_get_time(&tv); - rtc_time_to_tm(tv.tv_sec, tm); - return 0; -} - -static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm) -{ - unsigned long secs; - - rtc_tm_to_time(tm, &secs); - if ((rtc_sh_set_time == null_rtc_set_time) || - (rtc_sh_set_time(secs) < 0)) - return -EOPNOTSUPP; - - return 0; -} - -static const struct rtc_class_ops rtc_generic_ops = { - .read_time = rtc_generic_get_time, - .set_time = rtc_generic_set_time, -}; - -static int __init rtc_generic_init(void) -{ - struct platform_device *pdev; - - if (rtc_sh_get_time == null_rtc_get_time) - return -ENODEV; - - pdev = platform_device_register_data(NULL, "rtc-generic", -1, - &rtc_generic_ops, - sizeof(rtc_generic_ops)); - - - return PTR_ERR_OR_ZERO(pdev); -} -device_initcall(rtc_generic_init); - -void (*board_time_init)(void); - static void __init sh_late_time_init(void) { /* @@ -110,8 +39,7 @@ static void __init sh_late_time_init(void) void __init time_init(void) { - if (board_time_init) - board_time_init(); + timer_probe(); clk_init(); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 490b2c95c212..f5bb9ded1d18 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -40,7 +40,6 @@ config SPARC select MODULES_USE_ELF_RELA select ODD_RT_SIGACTION select OLD_SIGSUSPEND - select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select LOCKDEP_SMALL if LOCKDEP select NEED_DMA_MAP_STATE @@ -49,7 +48,6 @@ config SPARC config SPARC32 def_bool !64BIT select ARCH_HAS_SYNC_DMA_FOR_CPU - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select CLZ_TAB select HAVE_UID16 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 048a033d6102..4a0919581697 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -81,6 +81,9 @@ install: archclean: $(Q)$(MAKE) $(clean)=$(boot) +archheaders: + $(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all + PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/sparc/vdso $@ diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 
3cd4f6b198b6..a9b8b0b94a8d 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -476,11 +476,6 @@ static bool __init sparc64_has_aes_opcode(void) static int __init aes_sparc64_mod_init(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(algs); i++) - INIT_LIST_HEAD(&algs[i].cra_list); - if (sparc64_has_aes_opcode()) { pr_info("Using sparc64 aes opcodes optimized AES implementation\n"); return crypto_register_algs(algs, ARRAY_SIZE(algs)); diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 561a84d93cf6..900d5c617e83 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -299,11 +299,6 @@ static bool __init sparc64_has_camellia_opcode(void) static int __init camellia_sparc64_mod_init(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(algs); i++) - INIT_LIST_HEAD(&algs[i].cra_list); - if (sparc64_has_camellia_opcode()) { pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); return crypto_register_algs(algs, ARRAY_SIZE(algs)); diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index 61af794aa2d3..56499ea39fd3 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -510,11 +510,6 @@ static bool __init sparc64_has_des_opcode(void) static int __init des_sparc64_mod_init(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(algs); i++) - INIT_LIST_HEAD(&algs[i].cra_list); - if (sparc64_has_des_opcode()) { pr_info("Using sparc64 des opcodes optimized DES implementation\n"); return crypto_register_algs(algs, ARRAY_SIZE(algs)); diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 410b263ef5c8..b82f64e28f55 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -1,6 +1,8 @@ # User exported sparc header files - +generated-y += syscall_table_32.h +generated-y += syscall_table_64.h +generated-y += syscall_table_c32.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index b0bb2fcaf1c9..ed32845bd2d2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -2,9 +2,7 @@ #ifndef ___ASM_SPARC_DMA_MAPPING_H #define ___ASM_SPARC_DMA_MAPPING_H -#include <linux/scatterlist.h> -#include <linux/mm.h> -#include <linux/dma-debug.h> +#include <asm/cpu_type.h> extern const struct dma_map_ops *dma_ops; @@ -14,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) - return &dma_direct_ops; + return NULL; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (bus == &pci_bus_type) - return &dma_direct_ops; + return NULL; #endif return dma_ops; } diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h index a1d7c86917c6..462e7c794a09 100644 --- a/arch/sparc/include/asm/dma.h +++ b/arch/sparc/include/asm/dma.h @@ -91,54 +91,10 @@ extern int isa_dma_bridge_buggy; #endif #ifdef CONFIG_SPARC32 - -/* Routines for data transfer buffers. 
*/ struct device; -struct scatterlist; - -struct sparc32_dma_ops { - __u32 (*get_scsi_one)(struct device *, char *, unsigned long); - void (*get_scsi_sgl)(struct device *, struct scatterlist *, int); - void (*release_scsi_one)(struct device *, __u32, unsigned long); - void (*release_scsi_sgl)(struct device *, struct scatterlist *,int); -#ifdef CONFIG_SBUS - int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int); - void (*unmap_dma_area)(struct device *, unsigned long, int); -#endif -}; -extern const struct sparc32_dma_ops *sparc32_dma_ops; - -#define mmu_get_scsi_one(dev,vaddr,len) \ - sparc32_dma_ops->get_scsi_one(dev, vaddr, len) -#define mmu_get_scsi_sgl(dev,sg,sz) \ - sparc32_dma_ops->get_scsi_sgl(dev, sg, sz) -#define mmu_release_scsi_one(dev,vaddr,len) \ - sparc32_dma_ops->release_scsi_one(dev, vaddr,len) -#define mmu_release_scsi_sgl(dev,sg,sz) \ - sparc32_dma_ops->release_scsi_sgl(dev, sg, sz) - -#ifdef CONFIG_SBUS -/* - * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep. - * - * The mmu_map_dma_area establishes two mappings in one go. - * These mappings point to pages normally mapped at 'va' (linear address). - * First mapping is for CPU visible address at 'a', uncached. - * This is an alias, but it works because it is an uncached mapping. - * Second mapping is for device visible address, or "bus" address. - * The bus address is returned at '*pba'. - * - * These functions seem distinct, but are hard to split. - * On sun4m, page attributes depend on the CPU type, so we have to - * know if we are mapping RAM or I/O, so it has to be an additional argument - * to a separate mapping function for CPU visible mappings. - */ -#define sbus_map_dma_area(dev,pba,va,a,len) \ - sparc32_dma_ops->map_dma_area(dev, pba, va, a, len) -#define sbus_unmap_dma_area(dev,ba,len) \ - sparc32_dma_ops->unmap_dma_area(dev, ba, len) -#endif /* CONFIG_SBUS */ +unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len); +bool sparc_dma_free_resource(void *cpu_addr, size_t size); #endif #endif /* !(_ASM_SPARC_DMA_H) */ diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h index 2a050eab69a0..3729fc35ba83 100644 --- a/arch/sparc/include/asm/floppy_64.h +++ b/arch/sparc/include/asm/floppy_64.h @@ -528,9 +528,9 @@ static int sun_pci_fd_test_drive(unsigned long port, int drive) static int __init ebus_fdthree_p(struct device_node *dp) { - if (!strcmp(dp->name, "fdthree")) + if (of_node_name_eq(dp, "fdthree")) return 1; - if (!strcmp(dp->name, "floppy")) { + if (of_node_name_eq(dp, "floppy")) { const char *compat; compat = of_get_property(dp, "compatible", NULL); @@ -555,7 +555,7 @@ static unsigned long __init sun_floppy_init(void) op = NULL; for_each_node_by_name(dp, "SUNW,fdtwo") { - if (strcmp(dp->parent->name, "sbus")) + if (!of_node_name_eq(dp->parent, "sbus")) continue; op = of_find_device_by_node(dp); if (op) @@ -656,7 +656,7 @@ static unsigned long __init sun_floppy_init(void) */ config = 0; for (dp = ebus_dp->child; dp; dp = dp->sibling) { - if (!strcmp(dp->name, "ecpp")) { + if (of_node_name_eq(dp, "ecpp")) { struct platform_device *ecpp_op; ecpp_op = of_find_device_by_node(dp);
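The strcmp(dp->name, ...) conversions in floppy_64.h above (and in parport.h below) use of_node_name_eq(), which matches only the node-name portion of the path, stops at any '@unit-address' suffix, and is NULL-safe. A rough sketch of those semantics, for reference only; node_name_eq() is an illustrative stand-in, not necessarily the kernel's exact implementation:

#include <linux/of.h>
#include <linux/string.h>

/* Illustrative stand-in: compare a node's name up to the '@' separator. */
static bool node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return strlen(name) == len && !strncmp(node_name, name, len);
}

Note that the sun_floppy_init() hunk flips the test: strcmp() returns zero on a match, while of_node_name_eq() returns true, hence the added '!'.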
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index c68bb5b76e3d..c1e05e4ab9e3 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -225,7 +225,6 @@ void leon_update_virq_handling(unsigned int virq, irq_flow_handler_t flow_handler, const char *name, int do_ack); void leon_init_timers(void); -void leon_trans_init(struct device_node *dp); void leon_node_init(struct device_node *dp, struct device_node ***nextp); void init_leon(void); void poke_leonsparc(void); @@ -255,4 +254,13 @@ extern int leon_ipi_irq; #define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base))) #define _SRMMU_PTE_PMASK_LEON 0xffffffff +/* + * On LEON, PCI memory space is mapped 1:1 with physical address space. + * + * I/O space occupies the low 64Kbytes of PCI I/O space. I/O addresses + * are converted into virtual CPU addresses that the MMU maps onto the PCI + * host controller's I/O window; the host controller translates them back + * to the low 64Kbytes. + */ + #endif diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index 3c5a1c620f0f..03b27090c0c8 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -117,7 +117,7 @@ static int ecpp_probe(struct platform_device *op) int slot, err; parent = op->dev.of_node->parent; - if (!strcmp(parent->name, "dma")) { + if (of_node_name_eq(parent, "dma")) { p = parport_pc_probe_port(base, base + 0x400, op->archdata.irqs[0], PARPORT_DMA_NOFIFO, op->dev.parent->parent, 0); diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index cad79a6ce0e4..cfec79bb1831 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -1,9 +1,54 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_PCI_H #define ___ASM_SPARC_PCI_H -#if defined(__sparc__) && defined(__arch64__) -#include <asm/pci_64.h> + + +/* Can be used to override the logic in pci_scan_bus for skipping + * already-configured bus numbers - to be used for buggy BIOSes + * or architectures with incomplete PCI setup by the loader. + */ +#define pcibios_assign_all_busses() 0 + +#define PCIBIOS_MIN_IO 0UL +#define PCIBIOS_MIN_MEM 0UL + +#define PCI_IRQ_NONE 0xffffffff + + +#ifdef CONFIG_SPARC64 + +/* PCI IOMMU mapping bypass support. */ + +/* PCI 64-bit addressing works for all slots on all controller + * types on sparc64. However, it requires that the device + * can drive enough of the 64 bits. + */ +#define PCI64_REQUIRED_MASK (~(u64)0) +#define PCI64_ADDR_BASE 0xfffc000000000000UL + +/* Return the index of the PCI controller for device PDEV. */ +int pci_domain_nr(struct pci_bus *bus); +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return 1; +} + +/* Platform support for /proc/bus/pci/X/Y mmap()s. */ +#define HAVE_PCI_MMAP +#define arch_can_pci_mmap_io() 1 +#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA +#define get_pci_unmapped_area get_fb_unmapped_area + +#define HAVE_ARCH_PCI_RESOURCE_TO_USER +#endif /* CONFIG_SPARC64 */ +
+#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI) +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return PCI_IRQ_NONE; +} #else -#include <asm/pci_32.h> -#endif +#include <asm-generic/pci.h> #endif + +#endif /* ___ASM_SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h deleted file mode 100644 index cfc0ee9476c6..000000000000 --- a/arch/sparc/include/asm/pci_32.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SPARC_PCI_H -#define __SPARC_PCI_H - -#ifdef __KERNEL__ - -#include <linux/dma-mapping.h> - -/* Can be used to override the logic in pci_scan_bus for skipping - * already-configured bus numbers - to be used for buggy BIOSes - * or architectures with incomplete PCI setup by the loader. 
- */ -#define pcibios_assign_all_busses() 0 - -#define PCIBIOS_MIN_IO 0UL -#define PCIBIOS_MIN_MEM 0UL - -#define PCI_IRQ_NONE 0xffffffff - -#endif /* __KERNEL__ */ - -#ifndef CONFIG_LEON_PCI -/* generic pci stuff */ -#include <asm-generic/pci.h> -#else -/* - * On LEON PCI Memory space is mapped 1:1 with physical address space. - * - * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses - * are converted into CPU addresses to virtual addresses that are mapped with - * MMU to the PCI Host PCI I/O space window which are translated to the low - * 64Kbytes by the Host controller. - */ - -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return PCI_IRQ_NONE; -} -#endif - -#endif /* __SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h deleted file mode 100644 index fac77813402c..000000000000 --- a/arch/sparc/include/asm/pci_64.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SPARC64_PCI_H -#define __SPARC64_PCI_H - -#ifdef __KERNEL__ - -#include <linux/dma-mapping.h> - -/* Can be used to override the logic in pci_scan_bus for skipping - * already-configured bus numbers - to be used for buggy BIOSes - * or architectures with incomplete PCI setup by the loader. - */ -#define pcibios_assign_all_busses() 0 - -#define PCIBIOS_MIN_IO 0UL -#define PCIBIOS_MIN_MEM 0UL - -#define PCI_IRQ_NONE 0xffffffff - -/* PCI IOMMU mapping bypass support. */ - -/* PCI 64-bit addressing works for all slots on all controller - * types on sparc64. However, it requires that the device - * can drive enough of the 64 bits. - */ -#define PCI64_REQUIRED_MASK (~(u64)0) -#define PCI64_ADDR_BASE 0xfffc000000000000UL - -/* Return the index of the PCI controller for device PDEV. */ - -int pci_domain_nr(struct pci_bus *bus); -static inline int pci_proc_domain(struct pci_bus *bus) -{ - return 1; -} - -/* Platform support for /proc/bus/pci/X/Y mmap()s. */ - -#define HAVE_PCI_MMAP -#define arch_can_pci_mmap_io() 1 -#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA -#define get_pci_unmapped_area get_fb_unmapped_area - -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return PCI_IRQ_NONE; -} - -#define HAVE_ARCH_PCI_RESOURCE_TO_USER -#endif /* __KERNEL__ */ - -#endif /* __SPARC64_PCI_H */ diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 00f87dbd0b17..5194d86ef72d 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -17,6 +17,8 @@ #include <uapi/asm/unistd.h> +#define NR_syscalls __NR_syscalls + #ifdef __32bit_syscall_numbers__ #else #define __NR_time 231 /* Linux sparc32 */ @@ -46,4 +48,20 @@ #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif +#ifdef __32bit_syscall_numbers__ +/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, + * it never had the plain ones and there is no value to adding those + * old versions into the syscall table. + */ +#define __IGNORE_setresuid +#define __IGNORE_getresuid +#define __IGNORE_setresgid +#define __IGNORE_getresgid +#endif + +/* Sparc doesn't have protection keys. 
*/ +#define __IGNORE_pkey_mprotect +#define __IGNORE_pkey_alloc +#define __IGNORE_pkey_free + #endif /* _SPARC_UNISTD_H */ diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index 4680ba246b55..ae72977287e3 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -1,5 +1,7 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generated-y += unistd_32.h +generated-y += unistd_64.h generic-y += bpf_perf_event.h generic-y += types.h diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index 45b4bf1875e6..7f5d773b8cfc 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -21,433 +21,13 @@ #endif #endif -#define __NR_restart_syscall 0 /* Linux Specific */ -#define __NR_exit 1 /* Common */ -#define __NR_fork 2 /* Common */ -#define __NR_read 3 /* Common */ -#define __NR_write 4 /* Common */ -#define __NR_open 5 /* Common */ -#define __NR_close 6 /* Common */ -#define __NR_wait4 7 /* Common */ -#define __NR_creat 8 /* Common */ -#define __NR_link 9 /* Common */ -#define __NR_unlink 10 /* Common */ -#define __NR_execv 11 /* SunOS Specific */ -#define __NR_chdir 12 /* Common */ -#define __NR_chown 13 /* Common */ -#define __NR_mknod 14 /* Common */ -#define __NR_chmod 15 /* Common */ -#define __NR_lchown 16 /* Common */ -#define __NR_brk 17 /* Common */ -#define __NR_perfctr 18 /* Performance counter operations */ -#define __NR_lseek 19 /* Common */ -#define __NR_getpid 20 /* Common */ -#define __NR_capget 21 /* Linux Specific */ -#define __NR_capset 22 /* Linux Specific */ -#define __NR_setuid 23 /* Implemented via setreuid in SunOS */ -#define __NR_getuid 24 /* Common */ -#define __NR_vmsplice 25 /* ENOSYS under SunOS */ -#define __NR_ptrace 26 /* Common */ -#define __NR_alarm 27 /* Implemented via setitimer in SunOS */ -#define __NR_sigaltstack 28 /* Common */ -#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */ -#define __NR_utime 30 /* Implemented via utimes() under SunOS */ -#ifdef __32bit_syscall_numbers__ -#define __NR_lchown32 31 /* Linux sparc32 specific */ -#define __NR_fchown32 32 /* Linux sparc32 specific */ -#endif -#define __NR_access 33 /* Common */ -#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */ -#ifdef __32bit_syscall_numbers__ -#define __NR_chown32 35 /* Linux sparc32 specific */ -#endif -#define __NR_sync 36 /* Common */ -#define __NR_kill 37 /* Common */ -#define __NR_stat 38 /* Common */ -#define __NR_sendfile 39 /* Linux Specific */ -#define __NR_lstat 40 /* Common */ -#define __NR_dup 41 /* Common */ -#define __NR_pipe 42 /* Common */ -#define __NR_times 43 /* Implemented via getrusage() in SunOS */ -#ifdef __32bit_syscall_numbers__ -#define __NR_getuid32 44 /* Linux sparc32 specific */ -#endif -#define __NR_umount2 45 /* Linux Specific */ -#define __NR_setgid 46 /* Implemented via setregid() in SunOS */ -#define __NR_getgid 47 /* Common */ -#define __NR_signal 48 /* Implemented via sigvec() in SunOS */ -#define __NR_geteuid 49 /* SunOS calls getuid() */ -#define __NR_getegid 50 /* SunOS calls getgid() */ -#define __NR_acct 51 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_getgid32 53 /* Linux sparc32 specific */ -#else -#define __NR_memory_ordering 52 /* Linux Specific */ -#endif -#define __NR_ioctl 54 /* Common */ -#define __NR_reboot 55 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_mmap2 56 /* Linux sparc32 Specific */ -#endif -#define 
__NR_symlink 57 /* Common */ -#define __NR_readlink 58 /* Common */ -#define __NR_execve 59 /* Common */ -#define __NR_umask 60 /* Common */ -#define __NR_chroot 61 /* Common */ -#define __NR_fstat 62 /* Common */ -#define __NR_fstat64 63 /* Linux Specific */ -#define __NR_getpagesize 64 /* Common */ -#define __NR_msync 65 /* Common in newer 1.3.x revs... */ -#define __NR_vfork 66 /* Common */ -#define __NR_pread64 67 /* Linux Specific */ -#define __NR_pwrite64 68 /* Linux Specific */ -#ifdef __32bit_syscall_numbers__ -#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */ -#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */ -#endif -#define __NR_mmap 71 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */ -#endif -#define __NR_munmap 73 /* Common */ -#define __NR_mprotect 74 /* Common */ -#define __NR_madvise 75 /* Common */ -#define __NR_vhangup 76 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_truncate64 77 /* Linux sparc32 Specific */ -#endif -#define __NR_mincore 78 /* Common */ -#define __NR_getgroups 79 /* Common */ -#define __NR_setgroups 80 /* Common */ -#define __NR_getpgrp 81 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */ -#endif -#define __NR_setitimer 83 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_ftruncate64 84 /* Linux sparc32 Specific */ -#endif -#define __NR_swapon 85 /* Common */ -#define __NR_getitimer 86 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */ -#endif -#define __NR_sethostname 88 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */ -#endif -#define __NR_dup2 90 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */ -#endif -#define __NR_fcntl 92 /* Common */ -#define __NR_select 93 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */ -#endif -#define __NR_fsync 95 /* Common */ -#define __NR_setpriority 96 /* Common */ -#define __NR_socket 97 /* Common */ -#define __NR_connect 98 /* Common */ -#define __NR_accept 99 /* Common */ -#define __NR_getpriority 100 /* Common */ -#define __NR_rt_sigreturn 101 /* Linux Specific */ -#define __NR_rt_sigaction 102 /* Linux Specific */ -#define __NR_rt_sigprocmask 103 /* Linux Specific */ -#define __NR_rt_sigpending 104 /* Linux Specific */ -#define __NR_rt_sigtimedwait 105 /* Linux Specific */ -#define __NR_rt_sigqueueinfo 106 /* Linux Specific */ -#define __NR_rt_sigsuspend 107 /* Linux Specific */ -#ifdef __32bit_syscall_numbers__ -#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */ -#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */ -#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */ -#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */ -#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */ +#ifdef __arch64__ +#include <asm/unistd_64.h> #else -#define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */ -#define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */ -#define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */ -#define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */ -#endif -#define __NR_recvmsg 113 /* Common */ -#define __NR_sendmsg 
114 /* Common */ -#ifdef __32bit_syscall_numbers__ -#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */ -#endif -#define __NR_gettimeofday 116 /* Common */ -#define __NR_getrusage 117 /* Common */ -#define __NR_getsockopt 118 /* Common */ -#define __NR_getcwd 119 /* Linux Specific */ -#define __NR_readv 120 /* Common */ -#define __NR_writev 121 /* Common */ -#define __NR_settimeofday 122 /* Common */ -#define __NR_fchown 123 /* Common */ -#define __NR_fchmod 124 /* Common */ -#define __NR_recvfrom 125 /* Common */ -#define __NR_setreuid 126 /* Common */ -#define __NR_setregid 127 /* Common */ -#define __NR_rename 128 /* Common */ -#define __NR_truncate 129 /* Common */ -#define __NR_ftruncate 130 /* Common */ -#define __NR_flock 131 /* Common */ -#define __NR_lstat64 132 /* Linux Specific */ -#define __NR_sendto 133 /* Common */ -#define __NR_shutdown 134 /* Common */ -#define __NR_socketpair 135 /* Common */ -#define __NR_mkdir 136 /* Common */ -#define __NR_rmdir 137 /* Common */ -#define __NR_utimes 138 /* SunOS Specific */ -#define __NR_stat64 139 /* Linux Specific */ -#define __NR_sendfile64 140 /* adjtime under SunOS */ -#define __NR_getpeername 141 /* Common */ -#define __NR_futex 142 /* gethostid under SunOS */ -#define __NR_gettid 143 /* ENOSYS under SunOS */ -#define __NR_getrlimit 144 /* Common */ -#define __NR_setrlimit 145 /* Common */ -#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */ -#define __NR_prctl 147 /* ENOSYS under SunOS */ -#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */ -#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */ -#define __NR_getsockname 150 /* Common */ -#define __NR_inotify_init 151 /* Linux specific */ -#define __NR_inotify_add_watch 152 /* Linux specific */ -#define __NR_poll 153 /* Common */ -#define __NR_getdents64 154 /* Linux specific */ -#ifdef __32bit_syscall_numbers__ -#define __NR_fcntl64 155 /* Linux sparc32 Specific */ +#include <asm/unistd_32.h> #endif -#define __NR_inotify_rm_watch 156 /* Linux specific */ -#define __NR_statfs 157 /* Common */ -#define __NR_fstatfs 158 /* Common */ -#define __NR_umount 159 /* Common */ -#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */ -#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */ -#define __NR_getdomainname 162 /* SunOS Specific */ -#define __NR_setdomainname 163 /* Common */ -#ifndef __32bit_syscall_numbers__ -#define __NR_utrap_install 164 /* SYSV ABI/v9 required */ -#endif -#define __NR_quotactl 165 /* Common */ -#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */ -#define __NR_mount 167 /* Common */ -#define __NR_ustat 168 /* Common */ -#define __NR_setxattr 169 /* SunOS: semsys */ -#define __NR_lsetxattr 170 /* SunOS: msgsys */ -#define __NR_fsetxattr 171 /* SunOS: shmsys */ -#define __NR_getxattr 172 /* SunOS: auditsys */ -#define __NR_lgetxattr 173 /* SunOS: rfssys */ -#define __NR_getdents 174 /* Common */ -#define __NR_setsid 175 /* Common */ -#define __NR_fchdir 176 /* Common */ -#define __NR_fgetxattr 177 /* SunOS: fchroot */ -#define __NR_listxattr 178 /* SunOS: vpixsys */ -#define __NR_llistxattr 179 /* SunOS: aioread */ -#define __NR_flistxattr 180 /* SunOS: aiowrite */ -#define __NR_removexattr 181 /* SunOS: aiowait */ -#define __NR_lremovexattr 182 /* SunOS: aiocancel */ -#define __NR_sigpending 183 /* Common */ -#define __NR_query_module 184 /* Linux Specific */ -#define __NR_setpgid 185 /* Common */ -#define __NR_fremovexattr 186 /* SunOS: pathconf */ 
-#define __NR_tkill 187 /* SunOS: fpathconf */ -#define __NR_exit_group 188 /* Linux specific, sysconf undef SunOS */ -#define __NR_uname 189 /* Linux Specific */ -#define __NR_init_module 190 /* Linux Specific */ -#define __NR_personality 191 /* Linux Specific */ -#define __NR_remap_file_pages 192 /* Linux Specific */ -#define __NR_epoll_create 193 /* Linux Specific */ -#define __NR_epoll_ctl 194 /* Linux Specific */ -#define __NR_epoll_wait 195 /* Linux Specific */ -#define __NR_ioprio_set 196 /* Linux Specific */ -#define __NR_getppid 197 /* Linux Specific */ -#define __NR_sigaction 198 /* Linux Specific */ -#define __NR_sgetmask 199 /* Linux Specific */ -#define __NR_ssetmask 200 /* Linux Specific */ -#define __NR_sigsuspend 201 /* Linux Specific */ -#define __NR_oldlstat 202 /* Linux Specific */ -#define __NR_uselib 203 /* Linux Specific */ -#define __NR_readdir 204 /* Linux Specific */ -#define __NR_readahead 205 /* Linux Specific */ -#define __NR_socketcall 206 /* Linux Specific */ -#define __NR_syslog 207 /* Linux Specific */ -#define __NR_lookup_dcookie 208 /* Linux Specific */ -#define __NR_fadvise64 209 /* Linux Specific */ -#define __NR_fadvise64_64 210 /* Linux Specific */ -#define __NR_tgkill 211 /* Linux Specific */ -#define __NR_waitpid 212 /* Linux Specific */ -#define __NR_swapoff 213 /* Linux Specific */ -#define __NR_sysinfo 214 /* Linux Specific */ -#define __NR_ipc 215 /* Linux Specific */ -#define __NR_sigreturn 216 /* Linux Specific */ -#define __NR_clone 217 /* Linux Specific */ -#define __NR_ioprio_get 218 /* Linux Specific */ -#define __NR_adjtimex 219 /* Linux Specific */ -#define __NR_sigprocmask 220 /* Linux Specific */ -#define __NR_create_module 221 /* Linux Specific */ -#define __NR_delete_module 222 /* Linux Specific */ -#define __NR_get_kernel_syms 223 /* Linux Specific */ -#define __NR_getpgid 224 /* Linux Specific */ -#define __NR_bdflush 225 /* Linux Specific */ -#define __NR_sysfs 226 /* Linux Specific */ -#define __NR_afs_syscall 227 /* Linux Specific */ -#define __NR_setfsuid 228 /* Linux Specific */ -#define __NR_setfsgid 229 /* Linux Specific */ -#define __NR__newselect 230 /* Linux Specific */ -#ifdef __32bit_syscall_numbers__ -#define __NR_time 231 /* Linux Specific */ -#else -#endif -#define __NR_splice 232 /* Linux Specific */ -#define __NR_stime 233 /* Linux Specific */ -#define __NR_statfs64 234 /* Linux Specific */ -#define __NR_fstatfs64 235 /* Linux Specific */ -#define __NR__llseek 236 /* Linux Specific */ -#define __NR_mlock 237 -#define __NR_munlock 238 -#define __NR_mlockall 239 -#define __NR_munlockall 240 -#define __NR_sched_setparam 241 -#define __NR_sched_getparam 242 -#define __NR_sched_setscheduler 243 -#define __NR_sched_getscheduler 244 -#define __NR_sched_yield 245 -#define __NR_sched_get_priority_max 246 -#define __NR_sched_get_priority_min 247 -#define __NR_sched_rr_get_interval 248 -#define __NR_nanosleep 249 -#define __NR_mremap 250 -#define __NR__sysctl 251 -#define __NR_getsid 252 -#define __NR_fdatasync 253 -#define __NR_nfsservctl 254 -#define __NR_sync_file_range 255 -#define __NR_clock_settime 256 -#define __NR_clock_gettime 257 -#define __NR_clock_getres 258 -#define __NR_clock_nanosleep 259 -#define __NR_sched_getaffinity 260 -#define __NR_sched_setaffinity 261 -#define __NR_timer_settime 262 -#define __NR_timer_gettime 263 -#define __NR_timer_getoverrun 264 -#define __NR_timer_delete 265 -#define __NR_timer_create 266 -/* #define __NR_vserver 267 Reserved for VSERVER */ -#define __NR_io_setup 268 -#define 
__NR_io_destroy 269 -#define __NR_io_submit 270 -#define __NR_io_cancel 271 -#define __NR_io_getevents 272 -#define __NR_mq_open 273 -#define __NR_mq_unlink 274 -#define __NR_mq_timedsend 275 -#define __NR_mq_timedreceive 276 -#define __NR_mq_notify 277 -#define __NR_mq_getsetattr 278 -#define __NR_waitid 279 -#define __NR_tee 280 -#define __NR_add_key 281 -#define __NR_request_key 282 -#define __NR_keyctl 283 -#define __NR_openat 284 -#define __NR_mkdirat 285 -#define __NR_mknodat 286 -#define __NR_fchownat 287 -#define __NR_futimesat 288 -#define __NR_fstatat64 289 -#define __NR_unlinkat 290 -#define __NR_renameat 291 -#define __NR_linkat 292 -#define __NR_symlinkat 293 -#define __NR_readlinkat 294 -#define __NR_fchmodat 295 -#define __NR_faccessat 296 -#define __NR_pselect6 297 -#define __NR_ppoll 298 -#define __NR_unshare 299 -#define __NR_set_robust_list 300 -#define __NR_get_robust_list 301 -#define __NR_migrate_pages 302 -#define __NR_mbind 303 -#define __NR_get_mempolicy 304 -#define __NR_set_mempolicy 305 -#define __NR_kexec_load 306 -#define __NR_move_pages 307 -#define __NR_getcpu 308 -#define __NR_epoll_pwait 309 -#define __NR_utimensat 310 -#define __NR_signalfd 311 -#define __NR_timerfd_create 312 -#define __NR_eventfd 313 -#define __NR_fallocate 314 -#define __NR_timerfd_settime 315 -#define __NR_timerfd_gettime 316 -#define __NR_signalfd4 317 -#define __NR_eventfd2 318 -#define __NR_epoll_create1 319 -#define __NR_dup3 320 -#define __NR_pipe2 321 -#define __NR_inotify_init1 322 -#define __NR_accept4 323 -#define __NR_preadv 324 -#define __NR_pwritev 325 -#define __NR_rt_tgsigqueueinfo 326 -#define __NR_perf_event_open 327 -#define __NR_recvmmsg 328 -#define __NR_fanotify_init 329 -#define __NR_fanotify_mark 330 -#define __NR_prlimit64 331 -#define __NR_name_to_handle_at 332 -#define __NR_open_by_handle_at 333 -#define __NR_clock_adjtime 334 -#define __NR_syncfs 335 -#define __NR_sendmmsg 336 -#define __NR_setns 337 -#define __NR_process_vm_readv 338 -#define __NR_process_vm_writev 339 -#define __NR_kern_features 340 -#define __NR_kcmp 341 -#define __NR_finit_module 342 -#define __NR_sched_setattr 343 -#define __NR_sched_getattr 344 -#define __NR_renameat2 345 -#define __NR_seccomp 346 -#define __NR_getrandom 347 -#define __NR_memfd_create 348 -#define __NR_bpf 349 -#define __NR_execveat 350 -#define __NR_membarrier 351 -#define __NR_userfaultfd 352 -#define __NR_bind 353 -#define __NR_listen 354 -#define __NR_setsockopt 355 -#define __NR_mlock2 356 -#define __NR_copy_file_range 357 -#define __NR_preadv2 358 -#define __NR_pwritev2 359 -#define __NR_statx 360 -#define __NR_io_pgetevents 361 - -#define NR_syscalls 362 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 -#ifdef __32bit_syscall_numbers__ -/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, - * it never had the plain ones and there is no value to adding those - * old versions into the syscall table. - */ -#define __IGNORE_setresuid -#define __IGNORE_getresuid -#define __IGNORE_setresgid -#define __IGNORE_getresgid -#endif - -/* Sparc doesn't have protection keys. 
*/ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #endif /* _UAPI_SPARC_UNISTD_H */ diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 4e8f56c3793c..4843f48bfe85 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -108,23 +108,22 @@ static int auxio_probe(struct platform_device *dev) struct device_node *dp = dev->dev.of_node; unsigned long size; - if (!strcmp(dp->parent->name, "ebus")) { + if (of_node_name_eq(dp->parent, "ebus")) { auxio_devtype = AUXIO_TYPE_EBUS; size = sizeof(u32); - } else if (!strcmp(dp->parent->name, "sbus")) { + } else if (of_node_name_eq(dp->parent, "sbus")) { auxio_devtype = AUXIO_TYPE_SBUS; size = 1; } else { - printk("auxio: Unknown parent bus type [%s]\n", - dp->parent->name); + printk("auxio: Unknown parent bus type [%pOFn]\n", + dp->parent); return -ENODEV; } auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); if (!auxio_register) return -ENODEV; - printk(KERN_INFO "AUXIO: Found device at %s\n", - dp->full_name); + printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp); if (auxio_devtype == AUXIO_TYPE_EBUS) auxio_set_led(AUXIO_LED_ON); diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 38ae4fdc9eb4..bfae98ab8638 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -168,7 +168,7 @@ static int fhc_probe(struct platform_device *op) goto out; } - if (!strcmp(op->dev.of_node->parent->name, "central")) + if (of_node_name_eq(op->dev.of_node->parent, "central")) p->central = true; p->pregs = of_ioremap(&op->resource[0], 0, diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 0de4bcb8261f..61fe1b951ba3 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -464,8 +464,8 @@ static int jbusmc_probe(struct platform_device *op) mc_list_add(&p->list); - printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n", - op->dev.of_node->full_name); + printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %pOF\n", + op->dev.of_node); dev_set_drvdata(&op->dev, p); @@ -747,8 +747,8 @@ static int chmc_probe(struct platform_device *op) mc_list_add(&p->list); - printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n", - dp->full_name, + printk(KERN_INFO PFX "UltraSPARC-III memory controller at %pOF [%s]\n", + dp, (p->layout_size ? 
"ACTIVE" : "INACTIVE")); dev_set_drvdata(&op->dev, p); diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 05eb016fc41b..b1a09080e8da 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -314,7 +314,7 @@ bad: bad_no_ctx: if (printk_ratelimit()) WARN_ON(1); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, @@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = SPARC_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; } @@ -573,7 +573,7 @@ iommu_map_failed: iommu_tbl_range_free(&iommu->tbl, vaddr, npages, IOMMU_ERROR_CODE); - s->dma_address = SPARC_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -741,11 +741,6 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == SPARC_MAPPING_ERROR; -} - static int dma_4u_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; @@ -771,7 +766,6 @@ static const struct dma_map_ops sun4u_dma_ops = { .sync_single_for_cpu = dma_4u_sync_single_for_cpu, .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, .dma_supported = dma_4u_supported, - .mapping_error = dma_4u_mapping_error, }; const struct dma_map_ops *dma_ops = &sun4u_dma_ops; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index e3c02ba32500..d62ed9c5682d 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -48,6 +48,4 @@ static inline int is_span_boundary(unsigned long entry, return iommu_is_span_boundary(entry, nr, shift, boundary_size); } -#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0) - #endif /* _IOMMU_COMMON_H */ diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 6799c93c9f27..f89603855f1e 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -52,8 +52,6 @@ #include <asm/io-unit.h> #include <asm/leon.h> -const struct sparc32_dma_ops *sparc32_dma_ops; - /* This function must make sure that caches and memory are coherent after DMA * On LEON systems without cache snooping it flushes the entire D-CACHE. */ @@ -247,178 +245,60 @@ static void _sparc_free_io(struct resource *res) release_resource(res); } -#ifdef CONFIG_SBUS - -void sbus_set_sbus64(struct device *dev, int x) -{ - printk("sbus_set_sbus64: unsupported\n"); -} -EXPORT_SYMBOL(sbus_set_sbus64); - -/* - * Allocate a chunk of memory suitable for DMA. - * Typically devices use them for control blocks. - * CPU may access them without any explicit flushing. - */ -static void *sbus_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *dma_addrp, gfp_t gfp, - unsigned long attrs) +unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len) { - struct platform_device *op = to_platform_device(dev); - unsigned long len_total = PAGE_ALIGN(len); - unsigned long va; struct resource *res; - int order; - - /* XXX why are some lengths signed, others unsigned? */ - if (len <= 0) { - return NULL; - } - /* XXX So what is maxphys for us and how do drivers know it? 
*/ - if (len > 256*1024) { /* __get_free_pages() limit */ - return NULL; - } - - order = get_order(len_total); - va = __get_free_pages(gfp, order); - if (va == 0) - goto err_nopages; - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) - goto err_nomem; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return 0; + res->name = dev->of_node->full_name; - if (allocate_resource(&_sparc_dvma, res, len_total, - _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { - printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); - goto err_nova; + if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start, + _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { + printk("%s: cannot occupy 0x%zx", __func__, len); + kfree(res); + return 0; } - // XXX The sbus_map_dma_area does this for us below, see comments. - // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); - /* - * XXX That's where sdev would be used. Currently we load - * all iommu tables with the same translations. - */ - if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) - goto err_noiommu; - - res->name = op->dev.of_node->name; - - return (void *)(unsigned long)res->start; - -err_noiommu: - release_resource(res); -err_nova: - kfree(res); -err_nomem: - free_pages(va, order); -err_nopages: - return NULL; + return res->start; } -static void sbus_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba, unsigned long attrs) +bool sparc_dma_free_resource(void *cpu_addr, size_t size) { + unsigned long addr = (unsigned long)cpu_addr; struct resource *res; - struct page *pgv; - if ((res = lookup_resource(&_sparc_dvma, - (unsigned long)p)) == NULL) { - printk("sbus_free_consistent: cannot free %p\n", p); - return; + res = lookup_resource(&_sparc_dvma, addr); + if (!res) { + printk("%s: cannot free %p\n", __func__, cpu_addr); + return false; } - if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { - printk("sbus_free_consistent: unaligned va %p\n", p); - return; + if ((addr & (PAGE_SIZE - 1)) != 0) { + printk("%s: unaligned va %p\n", __func__, cpu_addr); + return false; } - n = PAGE_ALIGN(n); - if (resource_size(res) != n) { - printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", - (long)resource_size(res), n); - return; + size = PAGE_ALIGN(size); + if (resource_size(res) != size) { + printk("%s: region 0x%lx asked 0x%zx\n", + __func__, (long)resource_size(res), size); + return false; } release_resource(res); kfree(res); - - pgv = virt_to_page(p); - sbus_unmap_dma_area(dev, ba, n); - - __free_pages(pgv, get_order(n)); -} - -/* - * Map a chunk of memory so that devices can see it. - * CPU view of this memory may be inconsistent with - * a device view and explicit flushing is necessary. - */ -static dma_addr_t sbus_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t len, - enum dma_data_direction dir, - unsigned long attrs) -{ - void *va = page_address(page) + offset; - - /* XXX why are some lengths signed, others unsigned? */ - if (len <= 0) { - return 0; - } - /* XXX So what is maxphys for us and how do drivers know it? 
*/ - if (len > 256*1024) { /* __get_free_pages() limit */ - return 0; - } - return mmu_get_scsi_one(dev, va, len); -} - -static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_release_scsi_one(dev, ba, n); -} - -static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_get_scsi_sgl(dev, sg, n); - return n; -} - -static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_release_scsi_sgl(dev, sg, n); -} - -static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int n, enum dma_data_direction dir) -{ - BUG(); + return true; } -static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int n, enum dma_data_direction dir) -{ - BUG(); -} +#ifdef CONFIG_SBUS -static int sbus_dma_supported(struct device *dev, u64 mask) +void sbus_set_sbus64(struct device *dev, int x) { - return 0; + printk("sbus_set_sbus64: unsupported\n"); } - -static const struct dma_map_ops sbus_dma_ops = { - .alloc = sbus_alloc_coherent, - .free = sbus_free_coherent, - .map_page = sbus_map_page, - .unmap_page = sbus_unmap_page, - .map_sg = sbus_map_sg, - .unmap_sg = sbus_unmap_sg, - .sync_sg_for_cpu = sbus_sync_sg_for_cpu, - .sync_sg_for_device = sbus_sync_sg_for_device, - .dma_supported = sbus_dma_supported, -}; +EXPORT_SYMBOL(sbus_set_sbus64); static int __init sparc_register_ioport(void) { @@ -438,45 +318,30 @@ arch_initcall(sparc_register_ioport); void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { - unsigned long len_total = PAGE_ALIGN(size); + unsigned long addr; void *va; - struct resource *res; - int order; - if (size == 0) { + if (!size || size > 256 * 1024) /* __get_free_pages() limit */ return NULL; - } - if (size > 256*1024) { /* __get_free_pages() limit */ - return NULL; - } - order = get_order(len_total); - va = (void *) __get_free_pages(gfp, order); - if (va == NULL) { - printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT); - goto err_nopages; + size = PAGE_ALIGN(size); + va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size)); + if (!va) { + printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT); + return NULL; } - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { - printk("%s: no core\n", __func__); + addr = sparc_dma_alloc_resource(dev, size); + if (!addr) goto err_nomem; - } - if (allocate_resource(&_sparc_dvma, res, len_total, - _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { - printk("%s: cannot occupy 0x%lx", __func__, len_total); - goto err_nova; - } - srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); + srmmu_mapiorange(0, virt_to_phys(va), addr, size); *dma_handle = virt_to_phys(va); - return (void *) res->start; + return (void *)addr; -err_nova: - kfree(res); err_nomem: - free_pages((unsigned long)va, order); -err_nopages: + free_pages((unsigned long)va, get_order(size)); return NULL; } @@ -491,31 +356,11 @@ err_nopages: void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - struct resource *res; - - if ((res = lookup_resource(&_sparc_dvma, - (unsigned long)cpu_addr)) == NULL) { - printk("%s: cannot free %p\n", __func__, cpu_addr); + if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size))) return; - } - - if (((unsigned long)cpu_addr & 
(PAGE_SIZE-1)) != 0) { - printk("%s: unaligned va %p\n", __func__, cpu_addr); - return; - } - - size = PAGE_ALIGN(size); - if (resource_size(res) != size) { - printk("%s: region 0x%lx asked 0x%zx\n", __func__, - (long)resource_size(res), size); - return; - } dma_make_coherent(dma_addr, size); srmmu_unmapiorange((unsigned long)cpu_addr, size); - - release_resource(res); - kfree(res); free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size)); } @@ -528,7 +373,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, dma_make_coherent(paddr, PAGE_ALIGN(size)); } -const struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #ifdef CONFIG_PROC_FS diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 713670e6d13d..3ec9f1402aad 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -915,7 +915,7 @@ static void map_prom_timers(void) dp = of_find_node_by_path("/"); dp = dp->child; while (dp) { - if (!strcmp(dp->name, "counter-timer")) + if (of_node_name_eq(dp, "counter-timer")) break; dp = dp->sibling; } diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 84b233752f28..39229940d725 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -484,20 +484,6 @@ static void leon_load_profile_irq(int cpu, unsigned int limit) { } -void __init leon_trans_init(struct device_node *dp) -{ - if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { - struct property *p; - p = of_find_property(dp, "mid", (void *)0); - if (p) { - int mid; - dp->name = prom_early_alloc(5 + 1); - memcpy(&mid, p->value, p->length); - sprintf((char *)dp->name, "cpu%.2d", mid); - } - } -} - #ifdef CONFIG_SMP void leon_clear_profile_irq(int cpu) { diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index e4abe9b8f97a..4ebf51e6e78e 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -22,7 +22,7 @@ static int of_bus_pci_match(struct device_node *np) { - if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { + if (of_node_is_type(np, "pci") || of_node_is_type(np, "pciex")) { /* Do not do PCI specific frobbing if the * PCI bridge lacks a ranges property. We * want to pass it through up to the next @@ -107,7 +107,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) static int of_bus_ambapp_match(struct device_node *np) { - return !strcmp(np->type, "ambapp"); + return of_node_is_type(np, "ambapp"); } static void of_bus_ambapp_count_cells(struct device_node *child, @@ -232,10 +232,10 @@ static int __init use_1to1_mapping(struct device_node *pp) * But, we should still pass the translation work up * to the SBUS itself. 
*/ - if (!strcmp(pp->name, "dma") || - !strcmp(pp->name, "espdma") || - !strcmp(pp->name, "ledma") || - !strcmp(pp->name, "lebuffer")) + if (of_node_name_eq(pp, "dma") || + of_node_name_eq(pp, "espdma") || + of_node_name_eq(pp, "ledma") || + of_node_name_eq(pp, "lebuffer")) return 0; return 1; @@ -324,8 +324,8 @@ static void __init build_device_resources(struct platform_device *op, memset(r, 0, sizeof(*r)); if (of_resource_verbose) - printk("%s reg[%d] -> %llx\n", - op->dev.of_node->full_name, index, + printk("%pOF reg[%d] -> %llx\n", + op->dev.of_node, index, result); if (result != OF_BAD_ADDR) { @@ -333,7 +333,7 @@ static void __init build_device_resources(struct platform_device *op, r->end = result + size - 1; r->flags = flags | ((result >> 32ULL) & 0xffUL); } - r->name = op->dev.of_node->name; + r->name = op->dev.of_node->full_name; } } @@ -386,8 +386,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, op->dev.dma_mask = &op->dev.coherent_dma_mask; if (of_device_register(op)) { - printk("%s: Could not register of device.\n", - dp->full_name); + printk("%pOF: Could not register of device.\n", dp); kfree(op); op = NULL; } diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 6df6086968c6..5a9f86b1d4e7 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -46,7 +46,7 @@ EXPORT_SYMBOL(of_iounmap); static int of_bus_pci_match(struct device_node *np) { - if (!strcmp(np->name, "pci")) { + if (of_node_name_eq(np, "pci")) { const char *model = of_get_property(np, "model", NULL); if (model && !strcmp(model, "SUNW,simba")) @@ -77,7 +77,7 @@ static int of_bus_simba_match(struct device_node *np) /* Treat PCI busses lacking ranges property just like * simba. */ - if (!strcmp(np->name, "pci")) { + if (of_node_name_eq(np, "pci")) { if (!of_find_property(np, "ranges", NULL)) return 1; } @@ -170,8 +170,8 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) */ static int of_bus_fhc_match(struct device_node *np) { - return !strcmp(np->name, "fhc") || - !strcmp(np->name, "central"); + return of_node_name_eq(np, "fhc") || + of_node_name_eq(np, "central"); } #define of_bus_fhc_count_cells of_bus_sbus_count_cells @@ -295,17 +295,17 @@ static int __init use_1to1_mapping(struct device_node *pp) * But, we should still pass the translation work up * to the SBUS itself. */ - if (!strcmp(pp->name, "dma") || - !strcmp(pp->name, "espdma") || - !strcmp(pp->name, "ledma") || - !strcmp(pp->name, "lebuffer")) + if (of_node_name_eq(pp, "dma") || + of_node_name_eq(pp, "espdma") || + of_node_name_eq(pp, "ledma") || + of_node_name_eq(pp, "lebuffer")) return 0; /* Similarly for all PCI bridges, if we get this far * it lacks a ranges property, and this will include * cases like Simba. */ - if (!strcmp(pp->name, "pci")) + if (of_node_name_eq(pp, "pci")) return 0; return 1; @@ -341,9 +341,9 @@ static void __init build_device_resources(struct platform_device *op, /* Prevent overrunning the op->resources[] array. 
*/ if (num_reg > PROMREG_MAX) { - printk(KERN_WARNING "%s: Too many regs (%d), " + printk(KERN_WARNING "%pOF: Too many regs (%d), " "limiting to %d.\n", - op->dev.of_node->full_name, num_reg, PROMREG_MAX); + op->dev.of_node, num_reg, PROMREG_MAX); num_reg = PROMREG_MAX; } @@ -401,8 +401,8 @@ static void __init build_device_resources(struct platform_device *op, memset(r, 0, sizeof(*r)); if (of_resource_verbose) - printk("%s reg[%d] -> %llx\n", - op->dev.of_node->full_name, index, + printk("%pOF reg[%d] -> %llx\n", + op->dev.of_node, index, result); if (result != OF_BAD_ADDR) { @@ -413,7 +413,7 @@ static void __init build_device_resources(struct platform_device *op, r->end = result + size - 1; r->flags = flags; } - r->name = op->dev.of_node->name; + r->name = op->dev.of_node->full_name; } } @@ -548,8 +548,8 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, dp->irq_trans->data); if (of_irq_verbose) - printk("%s: direct translate %x --> %x\n", - dp->full_name, orig_irq, irq); + printk("%pOF: direct translate %x --> %x\n", + dp, orig_irq, irq); goto out; } @@ -579,10 +579,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, &irq); if (of_irq_verbose) - printk("%s: Apply [%s:%x] imap --> [%s:%x]\n", - op->dev.of_node->full_name, - pp->full_name, this_orig_irq, - of_node_full_name(iret), irq); + printk("%pOF: Apply [%pOF:%x] imap --> [%pOF:%x]\n", + op->dev.of_node, + pp, this_orig_irq, iret, irq); if (!iret) break; @@ -592,15 +591,15 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, break; } } else { - if (!strcmp(pp->name, "pci")) { + if (of_node_name_eq(pp, "pci")) { unsigned int this_orig_irq = irq; irq = pci_irq_swizzle(dp, pp, irq); if (of_irq_verbose) - printk("%s: PCI swizzle [%s] " + printk("%pOF: PCI swizzle [%pOF] " "%x --> %x\n", - op->dev.of_node->full_name, - pp->full_name, this_orig_irq, + op->dev.of_node, + pp, this_orig_irq, irq); } @@ -619,8 +618,8 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, irq = ip->irq_trans->irq_build(op->dev.of_node, irq, ip->irq_trans->data); if (of_irq_verbose) - printk("%s: Apply IRQ trans [%s] %x --> %x\n", - op->dev.of_node->full_name, ip->full_name, orig_irq, irq); + printk("%pOF: Apply IRQ trans [%pOF] %x --> %x\n", + op->dev.of_node, ip, orig_irq, irq); out: nid = of_node_to_nid(dp); @@ -656,9 +655,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, /* Prevent overrunning the op->irqs[] array. 
*/ if (op->archdata.num_irqs > PROMINTR_MAX) { - printk(KERN_WARNING "%s: Too many irqs (%d), " + printk(KERN_WARNING "%pOF: Too many irqs (%d), " "limiting to %d.\n", - dp->full_name, op->archdata.num_irqs, PROMINTR_MAX); + dp, op->archdata.num_irqs, PROMINTR_MAX); op->archdata.num_irqs = PROMINTR_MAX; } memcpy(op->archdata.irqs, irq, op->archdata.num_irqs * 4); @@ -680,8 +679,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, op->dev.dma_mask = &op->dev.coherent_dma_mask; if (of_device_register(op)) { - printk("%s: Could not register of device.\n", - dp->full_name); + printk("%pOF: Could not register of device.\n", dp); kfree(op); op = NULL; } diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c index de0ee3971f00..b186b7f0f6c4 100644 --- a/arch/sparc/kernel/of_device_common.c +++ b/arch/sparc/kernel/of_device_common.c @@ -151,8 +151,8 @@ int of_bus_sbus_match(struct device_node *np) struct device_node *dp = np; while (dp) { - if (!strcmp(dp->name, "sbus") || - !strcmp(dp->name, "sbi")) + if (of_node_name_eq(dp, "sbus") || + of_node_name_eq(dp, "sbi")) return 1; /* Have a look at use_1to1_mapping(). We're trying diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 17ea16a1337c..bcfec6a85d23 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -267,7 +267,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, struct dev_archdata *sd; struct platform_device *op; struct pci_dev *dev; - const char *type; u32 class; dev = pci_alloc_dev(bus); @@ -283,16 +282,12 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, sd->stc = &pbm->stc; sd->numa_node = pbm->numa_node; - if (!strcmp(node->name, "ebus")) + if (of_node_name_eq(node, "ebus")) of_propagate_archdata(op); - type = of_get_property(node, "device_type", NULL); - if (type == NULL) - type = ""; - if (ofpci_verbose) pci_info(bus," create device, devfn: %x, type: %s\n", - devfn, type); + devfn, of_node_get_device_type(node)); dev->sysdata = node; dev->dev.parent = bus->bridge; @@ -336,11 +331,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->error_state = pci_channel_io_normal; dev->dma_mask = 0xffffffff; - if (!strcmp(node->name, "pci")) { + if (of_node_name_eq(node, "pci")) { /* a PCI-PCI bridge */ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; dev->rom_base_reg = PCI_ROM_ADDRESS1; - } else if (!strcmp(type, "cardbus")) { + } else if (of_node_is_type(node, "cardbus")) { dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; } else { dev->hdr_type = PCI_HEADER_TYPE_NORMAL; @@ -431,13 +426,13 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, u64 size; if (ofpci_verbose) - pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name); + pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); if (busrange == NULL || len != 8) { - pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n", - node->full_name); + pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n", + node); return; } @@ -455,8 +450,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, bus = pci_add_new_bus(dev->bus, dev, busrange[0]); if (!bus) { - pci_err(dev, "Failed to create pci bus for %s\n", - node->full_name); + pci_err(dev, "Failed to create pci bus for %pOF\n", + node); return; } @@ -512,13 +507,13 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, res = bus->resource[0]; if (res->flags) { pci_err(dev, "ignoring extra 
I/O range" - " for bridge %s\n", node->full_name); + " for bridge %pOF\n", node); continue; } } else { if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { pci_err(dev, "too many memory ranges" - " for bridge %s\n", node->full_name); + " for bridge %pOF\n", node); continue; } res = bus->resource[i]; @@ -554,14 +549,14 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct pci_dev *dev; if (ofpci_verbose) - pci_info(bus, "scan_bus[%s] bus no %d\n", - node->full_name, bus->number); + pci_info(bus, "scan_bus[%pOF] bus no %d\n", + node, bus->number); child = NULL; prev_devfn = -1; while ((child = of_get_next_child(node, child)) != NULL) { if (ofpci_verbose) - pci_info(bus, " * %s\n", child->full_name); + pci_info(bus, " * %pOF\n", child); reg = of_get_property(child, "reg", ®len); if (reg == NULL || reglen < 20) continue; @@ -598,7 +593,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * pdev = to_pci_dev(dev); dp = pdev->dev.of_node; - return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); + return snprintf (buf, PAGE_SIZE, "%pOF\n", dp); } static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); @@ -698,7 +693,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, struct device_node *node = pbm->op->dev.of_node; struct pci_bus *bus; - printk("PCI: Scanning PBM %s\n", node->full_name); + printk("PCI: Scanning PBM %pOF\n", node); pci_add_resource_offset(&resources, &pbm->io_space, pbm->io_offset); @@ -714,8 +709,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops, pbm, &resources); if (!bus) { - printk(KERN_ERR "Failed to create bus for %s\n", - node->full_name); + printk(KERN_ERR "Failed to create bus for %pOF\n", node); pci_free_resource_list(&resources); return NULL; } @@ -1111,8 +1105,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) sp = prop->names; if (ofpci_verbose) - pci_info(bus, "Making slots for [%s] mask[0x%02x]\n", - node->full_name, mask); + pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n", + node, mask); i = 0; while (mask) { diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 8107286be9ab..3c38ca40a22b 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c @@ -475,7 +475,7 @@ static int sabre_probe(struct platform_device *op) * different ways, inconsistently. 
*/ for_each_node_by_type(cpu_dp, "cpu") { - if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe")) + if (of_node_name_eq(cpu_dp, "SUNW,UltraSPARC-IIe")) hummingbird_p = 1; } } diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 565d9ac883d0..fa0e42b4cbfb 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -414,12 +414,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, bad: if (printk_ratelimit()) WARN_ON(1); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; iommu_map_fail: local_irq_restore(flags); iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, @@ -592,7 +592,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = SPARC_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; } @@ -609,7 +609,7 @@ iommu_map_failed: iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? XXX */ - s->dma_address = SPARC_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -688,11 +688,6 @@ static int dma_4v_supported(struct device *dev, u64 device_mask) return pci64_dma_supported(to_pci_dev(dev), device_mask); } -static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == SPARC_MAPPING_ERROR; -} - static const struct dma_map_ops sun4v_dma_ops = { .alloc = dma_4v_alloc_coherent, .free = dma_4v_free_coherent, @@ -701,7 +696,6 @@ static const struct dma_map_ops sun4v_dma_ops = { .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, .dma_supported = dma_4v_supported, - .mapping_error = dma_4v_mapping_error, }; static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent) diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index 92627abce311..d941875dd718 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c @@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op) power_reg = of_ioremap(res, 0, 0x4, "power"); - printk(KERN_INFO "%s: Control reg at %llx\n", - op->dev.of_node->name, res->start); + printk(KERN_INFO "%pOFn: Control reg at %llx\n", + op->dev.of_node, res->start); if (has_button_interrupt(irq, op->dev.of_node)) { if (request_irq(irq, diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index d9662cf7e648..26cca65e9246 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -110,7 +110,7 @@ void machine_restart(char * cmd) void machine_power_off(void) { if (auxio_power_register && - (strcmp(of_console_device->type, "serial") || scons_pwroff)) { + (!of_node_is_type(of_console_device, "serial") || scons_pwroff)) { u8 power_register = sbus_readb(auxio_power_register); power_register |= AUXIO_POWER_OFF; sbus_writeb(power_register, auxio_power_register); diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index d41e2a749c5d..42d7f2a7da6d 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -60,6 +60,7 @@ void * __init prom_early_alloc(unsigned long size) */ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom_registers *regs; struct property *rprop; @@ -69,13 +70,14 @@ static void __init 
sparc32_path_component(struct device_node *dp, char *tmp_buf) regs = rprop->value; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, regs->which_io, regs->phys_addr); } /* "name@slot,offset" */ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom_registers *regs; struct property *prop; @@ -85,7 +87,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, regs->which_io, regs->phys_addr); } @@ -93,6 +95,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) /* "name@devnum[,func]" */ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom_pci_registers *regs; struct property *prop; unsigned int devfn; @@ -105,12 +108,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) devfn = (regs->phys_hi >> 8) & 0xff; if (devfn & 0x07) { sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, devfn >> 3, devfn & 0x07); } else { sprintf(tmp_buf, "%s@%x", - dp->name, + name, devfn >> 3); } } @@ -118,6 +121,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) /* "name@addrhi,addrlo" */ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom_registers *regs; struct property *prop; @@ -128,13 +132,14 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, regs->which_io, regs->phys_addr); } /* "name:vendor:device@irq,addrlo" */ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct amba_prom_registers *regs; unsigned int *intr, *device, *vendor, reg0; struct property *prop; @@ -168,7 +173,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) device = prop->value; sprintf(tmp_buf, "%s:%d:%d@%x,%x", - dp->name, *vendor, *device, + name, *vendor, *device, *intr, reg0); } @@ -177,14 +182,14 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) struct device_node *parent = dp->parent; if (parent != NULL) { - if (!strcmp(parent->type, "pci") || - !strcmp(parent->type, "pciex")) + if (of_node_is_type(parent, "pci") || + of_node_is_type(parent, "pciex")) return pci_path_component(dp, tmp_buf); - if (!strcmp(parent->type, "sbus")) + if (of_node_is_type(parent, "sbus")) return sbus_path_component(dp, tmp_buf); - if (!strcmp(parent->type, "ebus")) + if (of_node_is_type(parent, "ebus")) return ebus_path_component(dp, tmp_buf); - if (!strcmp(parent->type, "ambapp")) + if (of_node_is_type(parent, "ambapp")) return ambapp_path_component(dp, tmp_buf); /* "isa" is handled with platform naming */ @@ -196,12 +201,13 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) char * __init build_path_component(struct device_node *dp) { + const char *name = of_get_property(dp, "name", NULL); char tmp_buf[64], *n; tmp_buf[0] = '\0'; __build_path_component(dp, tmp_buf); if (tmp_buf[0] == '\0') - strcpy(tmp_buf, dp->name); + strcpy(tmp_buf, name); n = prom_early_alloc(strlen(tmp_buf) + 1); strcpy(n, tmp_buf); @@ -255,7 +261,7 @@ void __init of_console_init(void) } of_console_device = dp; - 
strcpy(of_console_path, dp->full_name); + sprintf(of_console_path, "%pOF", dp); if (!strcmp(type, "serial")) { strcat(of_console_path, (skip ? ":b" : ":a")); @@ -278,15 +284,9 @@ void __init of_console_init(void) prom_halt(); } dp = of_find_node_by_phandle(node); - type = of_get_property(dp, "device_type", NULL); - if (!type) { - prom_printf("Console stdout lacks " - "device_type property.\n"); - prom_halt(); - } - - if (strcmp(type, "display") && strcmp(type, "serial")) { + if (!of_node_is_type(dp, "display") && + !of_node_is_type(dp, "serial")) { prom_printf("Console device_type is neither display " "nor serial.\n"); prom_halt(); @@ -295,7 +295,7 @@ void __init of_console_init(void) of_console_device = dp; if (prom_vers == PROM_V2) { - strcpy(of_console_path, dp->full_name); + sprintf(of_console_path, "%pOF", dp); switch (*romvec->pv_stdout) { case PROMDEV_TTYA: strcat(of_console_path, ":a"); diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index c37955d127fe..e897a4ded3a1 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -72,6 +72,7 @@ void * __init prom_early_alloc(unsigned long size) */ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom64_registers *regs; struct property *rprop; u32 high_bits, low_bits, type; @@ -83,7 +84,7 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) regs = rprop->value; if (!of_node_is_root(dp->parent)) { sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, (unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr & 0xffffffffUL)); return; @@ -98,21 +99,22 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) if (low_bits) sprintf(tmp_buf, "%s@%s%x,%x", - dp->name, prefix, + name, prefix, high_bits, low_bits); else sprintf(tmp_buf, "%s@%s%x", - dp->name, + name, prefix, high_bits); } else if (type == 12) { sprintf(tmp_buf, "%s@%x", - dp->name, high_bits); + name, high_bits); } } static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom64_registers *regs; struct property *prop; @@ -123,7 +125,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; if (!of_node_is_root(dp->parent)) { sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, (unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr & 0xffffffffUL)); return; @@ -139,7 +141,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) mask = 0x7fffff; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, *(u32 *)prop->value, (unsigned int) (regs->phys_addr & mask)); } @@ -148,6 +150,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) /* "name@slot,offset" */ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom_registers *regs; struct property *prop; @@ -157,7 +160,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, regs->which_io, regs->phys_addr); } @@ -165,6 +168,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) /* "name@devnum[,func]" */ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = 
of_get_property(dp, "name", NULL); struct linux_prom_pci_registers *regs; struct property *prop; unsigned int devfn; @@ -177,12 +181,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) devfn = (regs->phys_hi >> 8) & 0xff; if (devfn & 0x07) { sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, devfn >> 3, devfn & 0x07); } else { sprintf(tmp_buf, "%s@%x", - dp->name, + name, devfn >> 3); } } @@ -190,6 +194,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) /* "name@UPA_PORTID,offset" */ static void __init upa_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom64_registers *regs; struct property *prop; @@ -204,7 +209,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf) return; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, *(u32 *) prop->value, (unsigned int) (regs->phys_addr & 0xffffffffUL)); } @@ -212,6 +217,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf) /* "name@reg" */ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct property *prop; u32 *regs; @@ -221,12 +227,13 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; - sprintf(tmp_buf, "%s@%x", dp->name, *regs); + sprintf(tmp_buf, "%s@%x", name, *regs); } /* "name@addrhi,addrlo" */ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct linux_prom64_registers *regs; struct property *prop; @@ -237,7 +244,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) regs = prop->value; sprintf(tmp_buf, "%s@%x,%x", - dp->name, + name, (unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr & 0xffffffffUL)); } @@ -245,6 +252,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) /* "name@bus,addr" */ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct property *prop; u32 *regs; @@ -258,12 +266,13 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) * property of the i2c bus node etc. etc. 
*/ sprintf(tmp_buf, "%s@%x,%x", - dp->name, regs[0], regs[1]); + name, regs[0], regs[1]); } /* "name@reg0[,reg1]" */ static void __init usb_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct property *prop; u32 *regs; @@ -275,16 +284,17 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf) if (prop->length == sizeof(u32) || regs[1] == 1) { sprintf(tmp_buf, "%s@%x", - dp->name, regs[0]); + name, regs[0]); } else { sprintf(tmp_buf, "%s@%x,%x", - dp->name, regs[0], regs[1]); + name, regs[0], regs[1]); } } /* "name@reg0reg1[,reg2reg3]" */ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf) { + const char *name = of_get_property(dp, "name", NULL); struct property *prop; u32 *regs; @@ -296,10 +306,10 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf if (regs[2] || regs[3]) { sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", - dp->name, regs[0], regs[1], regs[2], regs[3]); + name, regs[0], regs[1], regs[2], regs[3]); } else { sprintf(tmp_buf, "%s@%08x%08x", - dp->name, regs[0], regs[1]); + name, regs[0], regs[1]); } } @@ -308,37 +318,37 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) struct device_node *parent = dp->parent; if (parent != NULL) { - if (!strcmp(parent->type, "pci") || - !strcmp(parent->type, "pciex")) { + if (of_node_is_type(parent, "pci") || + of_node_is_type(parent, "pciex")) { pci_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "sbus")) { + if (of_node_is_type(parent, "sbus")) { sbus_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "upa")) { + if (of_node_is_type(parent, "upa")) { upa_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "ebus")) { + if (of_node_is_type(parent, "ebus")) { ebus_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->name, "usb") || - !strcmp(parent->name, "hub")) { + if (of_node_name_eq(parent, "usb") || + of_node_name_eq(parent, "hub")) { usb_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "i2c")) { + if (of_node_is_type(parent, "i2c")) { i2c_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "firewire")) { + if (of_node_is_type(parent, "firewire")) { ieee1394_path_component(dp, tmp_buf); return; } - if (!strcmp(parent->type, "virtual-devices")) { + if (of_node_is_type(parent, "virtual-devices")) { vdev_path_component(dp, tmp_buf); return; } @@ -356,12 +366,13 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) char * __init build_path_component(struct device_node *dp) { + const char *name = of_get_property(dp, "name", NULL); char tmp_buf[64], *n; tmp_buf[0] = '\0'; __build_path_component(dp, tmp_buf); if (tmp_buf[0] == '\0') - strcpy(tmp_buf, dp->name); + strcpy(tmp_buf, name); n = prom_early_alloc(strlen(tmp_buf) + 1); strcpy(n, tmp_buf); @@ -594,7 +605,6 @@ void __init of_console_init(void) { char *msg = "OF stdout device is: %s\n"; struct device_node *dp; - const char *type; phandle node; of_console_path = prom_early_alloc(256); @@ -617,13 +627,8 @@ void __init of_console_init(void) } dp = of_find_node_by_phandle(node); - type = of_get_property(dp, "device_type", NULL); - if (!type) { - prom_printf("Console stdout lacks device_type property.\n"); - prom_halt(); - } - if (strcmp(type, "display") && strcmp(type, "serial")) { + if (!of_node_is_type(dp, "display") && !of_node_is_type(dp, "serial")) { prom_printf("Console device_type is neither 
display " "nor serial.\n"); prom_halt(); diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c index f3fecac7facb..28aff1c524b5 100644 --- a/arch/sparc/kernel/prom_irqtrans.c +++ b/arch/sparc/kernel/prom_irqtrans.c @@ -193,7 +193,7 @@ static int sabre_device_needs_wsync(struct device_node *dp) * the DMA synchronization handling */ while (parent) { - if (!strcmp(parent->type, "pci")) + if (of_node_is_type(parent, "pci")) break; parent = parent->parent; } @@ -725,11 +725,11 @@ static unsigned int central_build_irq(struct device_node *dp, unsigned long imap, iclr; u32 tmp; - if (!strcmp(dp->name, "eeprom")) { + if (of_node_name_eq(dp, "eeprom")) { res = ¢ral_op->resource[5]; - } else if (!strcmp(dp->name, "zs")) { + } else if (of_node_name_eq(dp, "zs")) { res = ¢ral_op->resource[4]; - } else if (!strcmp(dp->name, "clock-board")) { + } else if (of_node_name_eq(dp, "clock-board")) { res = ¢ral_op->resource[3]; } else { return ino; @@ -824,19 +824,19 @@ void __init irq_trans_init(struct device_node *dp) } #endif #ifdef CONFIG_SBUS - if (!strcmp(dp->name, "sbus") || - !strcmp(dp->name, "sbi")) { + if (of_node_name_eq(dp, "sbus") || + of_node_name_eq(dp, "sbi")) { sbus_irq_trans_init(dp); return; } #endif - if (!strcmp(dp->name, "fhc") && - !strcmp(dp->parent->name, "central")) { + if (of_node_name_eq(dp, "fhc") && + of_node_name_eq(dp->parent, "central")) { central_irq_trans_init(dp); return; } - if (!strcmp(dp->name, "virtual-devices") || - !strcmp(dp->name, "niu")) { + if (of_node_name_eq(dp, "virtual-devices") || + of_node_name_eq(dp, "niu")) { sun4v_vdev_irq_trans_init(dp); return; } diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c index 7933ee365207..69c1b6c047d5 100644 --- a/arch/sparc/kernel/reboot.c +++ b/arch/sparc/kernel/reboot.c @@ -7,6 +7,7 @@ #include <linux/reboot.h> #include <linux/export.h> #include <linux/pm.h> +#include <linux/of.h> #include <asm/oplib.h> #include <asm/prom.h> @@ -25,7 +26,7 @@ EXPORT_SYMBOL(pm_power_off); void machine_power_off(void) { - if (strcmp(of_console_device->type, "serial") || scons_pwroff) + if (!of_node_is_type(of_console_device, "serial") || scons_pwroff) prom_halt_power_off(); prom_halt(); diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index c133dfc37c5c..41c5deb581b8 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -67,8 +67,8 @@ void sbus_set_sbus64(struct device *dev, int bursts) regs = of_get_property(op->dev.of_node, "reg", NULL); if (!regs) { - printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n", - op->dev.of_node->full_name); + printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n", + op->dev.of_node); return; } slot = regs->which_io; diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 13664c377196..3fd238e54af9 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -310,25 +310,24 @@ void __init setup_arch(char **cmdline_p) register_console(&prom_early_console); - printk("ARCH: "); switch(sparc_cpu_model) { case sun4m: - printk("SUN4M\n"); + pr_info("ARCH: SUN4M\n"); break; case sun4d: - printk("SUN4D\n"); + pr_info("ARCH: SUN4D\n"); break; case sun4e: - printk("SUN4E\n"); + pr_info("ARCH: SUN4E\n"); break; case sun4u: - printk("SUN4U\n"); + pr_info("ARCH: SUN4U\n"); break; case sparc_leon: - printk("LEON\n"); + pr_info("ARCH: LEON\n"); break; default: - printk("UNKNOWN!\n"); + pr_info("ARCH: UNKNOWN!\n"); break; } diff --git a/arch/sparc/kernel/setup_64.c 
b/arch/sparc/kernel/setup_64.c index cd2825cb8420..ecc788aa07bd 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -642,9 +642,9 @@ void __init setup_arch(char **cmdline_p) register_console(&prom_early_console); if (tlb_type == hypervisor) - printk("ARCH: SUN4V\n"); + pr_info("ARCH: SUN4V\n"); else - printk("ARCH: SUN4U\n"); + pr_info("ARCH: SUN4U\n"); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index d869d409fce6..9a137c70e8d1 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -335,12 +335,12 @@ static unsigned int sun4d_build_device_irq(struct platform_device *op, irq = real_irq; while (bus) { - if (!strcmp(bus->name, "sbi")) { + if (of_node_name_eq(bus, "sbi")) { bus_connection = "io-unit"; break; } - if (!strcmp(bus->name, "bootbus")) { + if (of_node_name_eq(bus, "bootbus")) { bus_connection = "cpu-unit"; break; } @@ -360,16 +360,16 @@ static unsigned int sun4d_build_device_irq(struct platform_device *op, * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit * lacks a "board#" property, something is very wrong. */ - if (!bus->parent || strcmp(bus->parent->name, bus_connection)) { - printk(KERN_ERR "%s: Error, parent is not %s.\n", - bus->full_name, bus_connection); + if (!of_node_name_eq(bus->parent, bus_connection)) { + printk(KERN_ERR "%pOF: Error, parent is not %s.\n", + bus, bus_connection); goto err_out; } board_parent = bus->parent; board = of_getintprop_default(board_parent, "board#", -1); if (board == -1) { - printk(KERN_ERR "%s: Error, lacks board# property.\n", - board_parent->full_name); + printk(KERN_ERR "%pOF: Error, lacks board# property.\n", + board_parent); goto err_out; } diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile new file mode 100644 index 000000000000..c22a21c39f30 --- /dev/null +++ b/arch/sparc/kernel/syscalls/Makefile @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ + $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') + +syscall := $(srctree)/$(src)/syscall.tbl +syshdr := $(srctree)/$(src)/syscallhdr.sh +systbl := $(srctree)/$(src)/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ + '$(syshdr_abis_$(basetarget))' \ + '$(syshdr_pfx_$(basetarget))' \ + '$(syshdr_offset_$(basetarget))' + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ + '$(systbl_abis_$(basetarget))' \ + '$(systbl_abi_$(basetarget))' \ + '$(systbl_offset_$(basetarget))' + +syshdr_abis_unistd_32 := common,32 +$(uapi)/unistd_32.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +syshdr_abis_unistd_64 := common,64 +$(uapi)/unistd_64.h: $(syscall) $(syshdr) + $(call if_changed,syshdr) + +systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +systbl_abis_syscall_table_c32 := common,32 +systbl_abi_syscall_table_c32 := c32 +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h unistd_64.h +kapisyshdr-y += syscall_table_32.h \ + syscall_table_64.h \ + syscall_table_c32.h + +targets += $(uapisyshdr-y) $(kapisyshdr-y) + 
+PHONY += all +all: $(addprefix $(uapi)/,$(uapisyshdr-y)) +all: $(addprefix $(kapi)/,$(kapisyshdr-y)) + @: diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..c8c77c05ea97 --- /dev/null +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -0,0 +1,409 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sparc +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> can be common, 64, or 32 for this file. +# +0 common restart_syscall sys_restart_syscall +1 32 exit sys_exit sparc_exit +1 64 exit sparc_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open compat_sys_open +6 common close sys_close +7 common wait4 sys_wait4 compat_sys_wait4 +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 32 execv sunos_execv +11 64 execv sys_nis_syscall +12 common chdir sys_chdir +13 32 chown sys_chown16 +13 64 chown sys_chown +14 common mknod sys_mknod +15 common chmod sys_chmod +16 32 lchown sys_lchown16 +16 64 lchown sys_lchown +17 common brk sys_brk +18 common perfctr sys_nis_syscall +19 common lseek sys_lseek compat_sys_lseek +20 common getpid sys_getpid +21 common capget sys_capget +22 common capset sys_capset +23 32 setuid sys_setuid16 +23 64 setuid sys_setuid +24 32 getuid sys_getuid16 +24 64 getuid sys_getuid +25 common vmsplice sys_vmsplice compat_sys_vmsplice +26 common ptrace sys_ptrace compat_sys_ptrace +27 common alarm sys_alarm +28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +29 32 pause sys_pause +29 64 pause sys_nis_syscall +30 common utime sys_utime compat_sys_utime +31 32 lchown32 sys_lchown +32 32 fchown32 sys_fchown +33 common access sys_access +34 common nice sys_nice +35 32 chown32 sys_chown +36 common sync sys_sync +37 common kill sys_kill +38 common stat sys_newstat compat_sys_newstat +39 32 sendfile sys_sendfile compat_sys_sendfile +39 64 sendfile sys_sendfile64 +40 common lstat sys_newlstat compat_sys_newlstat +41 common dup sys_dup +42 common pipe sys_sparc_pipe +43 common times sys_times compat_sys_times +44 32 getuid32 sys_getuid +45 common umount2 sys_umount +46 32 setgid sys_setgid16 +46 64 setgid sys_setgid +47 32 getgid sys_getgid16 +47 64 getgid sys_getgid +48 common signal sys_signal +49 32 geteuid sys_geteuid16 +49 64 geteuid sys_geteuid +50 32 getegid sys_getegid16 +50 64 getegid sys_getegid +51 common acct sys_acct +52 64 memory_ordering sys_memory_ordering +53 32 getgid32 sys_getgid +54 common ioctl sys_ioctl compat_sys_ioctl +55 common reboot sys_reboot +56 32 mmap2 sys_mmap2 sys32_mmap2 +57 common symlink sys_symlink +58 common readlink sys_readlink +59 32 execve sys_execve sys32_execve +59 64 execve sys64_execve +60 common umask sys_umask +61 common chroot sys_chroot +62 common fstat sys_newfstat compat_sys_newfstat +63 common fstat64 sys_fstat64 compat_sys_fstat64 +64 common getpagesize sys_getpagesize +65 common msync sys_msync +66 common vfork sys_vfork +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 32 geteuid32 sys_geteuid +70 32 getegid32 sys_getegid +71 common mmap sys_mmap +72 32 setreuid32 sys_setreuid +73 32 munmap sys_munmap +73 64 munmap sys_64_munmap +74 common mprotect sys_mprotect +75 common madvise sys_madvise +76 common vhangup sys_vhangup +77 32 truncate64 sys_truncate64 compat_sys_truncate64 +78 common mincore sys_mincore +79 
32 getgroups sys_getgroups16 +79 64 getgroups sys_getgroups +80 32 setgroups sys_setgroups16 +80 64 setgroups sys_setgroups +81 common getpgrp sys_getpgrp +82 32 setgroups32 sys_setgroups +83 common setitimer sys_setitimer compat_sys_setitimer +84 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +85 common swapon sys_swapon +86 common getitimer sys_getitimer compat_sys_getitimer +87 32 setuid32 sys_setuid +88 common sethostname sys_sethostname +89 32 setgid32 sys_setgid +90 common dup2 sys_dup2 +91 32 setfsuid32 sys_setfsuid +92 common fcntl sys_fcntl compat_sys_fcntl +93 common select sys_select +94 32 setfsgid32 sys_setfsgid +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common getpriority sys_getpriority +101 common rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn +102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +105 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +108 32 setresuid32 sys_setresuid +108 64 setresuid sys_setresuid +109 32 getresuid32 sys_getresuid +109 64 getresuid sys_getresuid +110 32 setresgid32 sys_setresgid +110 64 setresgid sys_setresgid +111 32 getresgid32 sys_getresgid +111 64 getresgid sys_getresgid +112 32 setregid32 sys_setregid +113 common recvmsg sys_recvmsg compat_sys_recvmsg +114 common sendmsg sys_sendmsg compat_sys_sendmsg +115 32 getgroups32 sys_getgroups +116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday +117 common getrusage sys_getrusage compat_sys_getrusage +118 common getsockopt sys_getsockopt compat_sys_getsockopt +119 common getcwd sys_getcwd +120 common readv sys_readv compat_sys_readv +121 common writev sys_writev compat_sys_writev +122 common settimeofday sys_settimeofday compat_sys_settimeofday +123 32 fchown sys_fchown16 +123 64 fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 32 setreuid sys_setreuid16 +126 64 setreuid sys_setreuid +127 32 setregid sys_setregid16 +127 64 setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate compat_sys_truncate +130 common ftruncate sys_ftruncate compat_sys_ftruncate +131 common flock sys_flock +132 common lstat64 sys_lstat64 compat_sys_lstat64 +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +138 common utimes sys_utimes compat_sys_utimes +139 common stat64 sys_stat64 compat_sys_stat64 +140 common sendfile64 sys_sendfile64 +141 common getpeername sys_getpeername +142 common futex sys_futex compat_sys_futex +143 common gettid sys_gettid +144 common getrlimit sys_getrlimit compat_sys_getrlimit +145 common setrlimit sys_setrlimit compat_sys_setrlimit +146 common pivot_root sys_pivot_root +147 common prctl sys_prctl +148 common pciconfig_read sys_pciconfig_read +149 common pciconfig_write sys_pciconfig_write +150 common getsockname sys_getsockname +151 common inotify_init sys_inotify_init +152 common inotify_add_watch sys_inotify_add_watch +153 common poll sys_poll +154 common getdents64 sys_getdents64 +155 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +156 common inotify_rm_watch 
sys_inotify_rm_watch +157 common statfs sys_statfs compat_sys_statfs +158 common fstatfs sys_fstatfs compat_sys_fstatfs +159 common umount sys_oldumount +160 common sched_set_affinity sys_sched_setaffinity compat_sys_sched_setaffinity +161 common sched_get_affinity sys_sched_getaffinity compat_sys_sched_getaffinity +162 common getdomainname sys_getdomainname +163 common setdomainname sys_setdomainname +164 64 utrap_install sys_utrap_install +165 common quotactl sys_quotactl +166 common set_tid_address sys_set_tid_address +167 common mount sys_mount compat_sys_mount +168 common ustat sys_ustat compat_sys_ustat +169 common setxattr sys_setxattr +170 common lsetxattr sys_lsetxattr +171 common fsetxattr sys_fsetxattr +172 common getxattr sys_getxattr +173 common lgetxattr sys_lgetxattr +174 common getdents sys_getdents compat_sys_getdents +175 common setsid sys_setsid +176 common fchdir sys_fchdir +177 common fgetxattr sys_fgetxattr +178 common listxattr sys_listxattr +179 common llistxattr sys_llistxattr +180 common flistxattr sys_flistxattr +181 common removexattr sys_removexattr +182 common lremovexattr sys_lremovexattr +183 32 sigpending sys_sigpending compat_sys_sigpending +183 64 sigpending sys_nis_syscall +184 common query_module sys_ni_syscall +185 common setpgid sys_setpgid +186 common fremovexattr sys_fremovexattr +187 common tkill sys_tkill +188 32 exit_group sys_exit_group sparc_exit_group +188 64 exit_group sparc_exit_group +189 common uname sys_newuname +190 common init_module sys_init_module +191 32 personality sys_personality sys_sparc64_personality +191 64 personality sys_sparc64_personality +192 32 remap_file_pages sys_sparc_remap_file_pages sys_remap_file_pages +192 64 remap_file_pages sys_remap_file_pages +193 common epoll_create sys_epoll_create +194 common epoll_ctl sys_epoll_ctl +195 common epoll_wait sys_epoll_wait +196 common ioprio_set sys_ioprio_set +197 common getppid sys_getppid +198 32 sigaction sys_sparc_sigaction compat_sys_sparc_sigaction +198 64 sigaction sys_nis_syscall +199 common sgetmask sys_sgetmask +200 common ssetmask sys_ssetmask +201 32 sigsuspend sys_sigsuspend +201 64 sigsuspend sys_nis_syscall +202 common oldlstat sys_newlstat compat_sys_newlstat +203 common uselib sys_uselib +204 32 readdir sys_old_readdir compat_sys_old_readdir +204 64 readdir sys_nis_syscall +205 common readahead sys_readahead compat_sys_readahead +206 common socketcall sys_socketcall sys32_socketcall +207 common syslog sys_syslog +208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +209 common fadvise64 sys_fadvise64 compat_sys_fadvise64 +210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +211 common tgkill sys_tgkill +212 common waitpid sys_waitpid +213 common swapoff sys_swapoff +214 common sysinfo sys_sysinfo compat_sys_sysinfo +215 32 ipc sys_ipc compat_sys_ipc +215 64 ipc sys_sparc_ipc +216 32 sigreturn sys_sigreturn sys32_sigreturn +216 64 sigreturn sys_nis_syscall +217 common clone sys_clone +218 common ioprio_get sys_ioprio_get +219 common adjtimex sys_adjtimex compat_sys_adjtimex +220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask +220 64 sigprocmask sys_nis_syscall +221 common create_module sys_ni_syscall +222 common delete_module sys_delete_module +223 common get_kernel_syms sys_ni_syscall +224 common getpgid sys_getpgid +225 common bdflush sys_bdflush +226 common sysfs sys_sysfs +227 common afs_syscall sys_nis_syscall +228 common setfsuid sys_setfsuid16 +229 common setfsgid sys_setfsgid16 +230 common _newselect sys_select 
compat_sys_select +231 32 time sys_time compat_sys_time +232 common splice sys_splice +233 common stime sys_stime compat_sys_stime +234 common statfs64 sys_statfs64 compat_sys_statfs64 +235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +236 common _llseek sys_llseek +237 common mlock sys_mlock +238 common munlock sys_munlock +239 common mlockall sys_mlockall +240 common munlockall sys_munlockall +241 common sched_setparam sys_sched_setparam +242 common sched_getparam sys_sched_getparam +243 common sched_setscheduler sys_sched_setscheduler +244 common sched_getscheduler sys_sched_getscheduler +245 common sched_yield sys_sched_yield +246 common sched_get_priority_max sys_sched_get_priority_max +247 common sched_get_priority_min sys_sched_get_priority_min +248 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval +249 common nanosleep sys_nanosleep compat_sys_nanosleep +250 32 mremap sys_mremap +250 64 mremap sys_64_mremap +251 common _sysctl sys_sysctl compat_sys_sysctl +252 common getsid sys_getsid +253 common fdatasync sys_fdatasync +254 32 nfsservctl sys_ni_syscall sys_nis_syscall +254 64 nfsservctl sys_nis_syscall +255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range +256 common clock_settime sys_clock_settime compat_sys_clock_settime +257 common clock_gettime sys_clock_gettime compat_sys_clock_gettime +258 common clock_getres sys_clock_getres compat_sys_clock_getres +259 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +262 common timer_settime sys_timer_settime compat_sys_timer_settime +263 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +264 common timer_getoverrun sys_timer_getoverrun +265 common timer_delete sys_timer_delete +266 common timer_create sys_timer_create compat_sys_timer_create +# 267 was vserver +267 common vserver sys_nis_syscall +268 common io_setup sys_io_setup compat_sys_io_setup +269 common io_destroy sys_io_destroy +270 common io_submit sys_io_submit compat_sys_io_submit +271 common io_cancel sys_io_cancel +272 common io_getevents sys_io_getevents compat_sys_io_getevents +273 common mq_open sys_mq_open compat_sys_mq_open +274 common mq_unlink sys_mq_unlink +275 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend +276 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +277 common mq_notify sys_mq_notify compat_sys_mq_notify +278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +279 common waitid sys_waitid compat_sys_waitid +280 common tee sys_tee +281 common add_key sys_add_key +282 common request_key sys_request_key +283 common keyctl sys_keyctl compat_sys_keyctl +284 common openat sys_openat compat_sys_openat +285 common mkdirat sys_mkdirat +286 common mknodat sys_mknodat +287 common fchownat sys_fchownat +288 common futimesat sys_futimesat compat_sys_futimesat +289 common fstatat64 sys_fstatat64 compat_sys_fstatat64 +290 common unlinkat sys_unlinkat +291 common renameat sys_renameat +292 common linkat sys_linkat +293 common symlinkat sys_symlinkat +294 common readlinkat sys_readlinkat +295 common fchmodat sys_fchmodat +296 common faccessat sys_faccessat +297 common pselect6 sys_pselect6 compat_sys_pselect6 +298 common ppoll sys_ppoll compat_sys_ppoll +299 common unshare sys_unshare +300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +301 
common get_robust_list sys_get_robust_list compat_sys_get_robust_list +302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages +303 common mbind sys_mbind compat_sys_mbind +304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +306 common kexec_load sys_kexec_load compat_sys_kexec_load +307 common move_pages sys_move_pages compat_sys_move_pages +308 common getcpu sys_getcpu +309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +310 common utimensat sys_utimensat compat_sys_utimensat +311 common signalfd sys_signalfd compat_sys_signalfd +312 common timerfd_create sys_timerfd_create +313 common eventfd sys_eventfd +314 common fallocate sys_fallocate compat_sys_fallocate +315 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +316 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +317 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +318 common eventfd2 sys_eventfd2 +319 common epoll_create1 sys_epoll_create1 +320 common dup3 sys_dup3 +321 common pipe2 sys_pipe2 +322 common inotify_init1 sys_inotify_init1 +323 common accept4 sys_accept4 +324 common preadv sys_preadv compat_sys_preadv +325 common pwritev sys_pwritev compat_sys_pwritev +326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +327 common perf_event_open sys_perf_event_open +328 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +329 common fanotify_init sys_fanotify_init +330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +331 common prlimit64 sys_prlimit64 +332 common name_to_handle_at sys_name_to_handle_at +333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +334 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +335 common syncfs sys_syncfs +336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +337 common setns sys_setns +338 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv +339 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +340 32 kern_features sys_ni_syscall sys_kern_features +340 64 kern_features sys_kern_features +341 common kcmp sys_kcmp +342 common finit_module sys_finit_module +343 common sched_setattr sys_sched_setattr +344 common sched_getattr sys_sched_getattr +345 common renameat2 sys_renameat2 +346 common seccomp sys_seccomp +347 common getrandom sys_getrandom +348 common memfd_create sys_memfd_create +349 common bpf sys_bpf +350 32 execveat sys_execveat sys32_execveat +350 64 execveat sys64_execveat +351 common membarrier sys_membarrier +352 common userfaultfd sys_userfaultfd +353 common bind sys_bind +354 common listen sys_listen +355 common setsockopt sys_setsockopt compat_sys_setsockopt +356 common mlock2 sys_mlock2 +357 common copy_file_range sys_copy_file_range +358 common preadv2 sys_preadv2 compat_sys_preadv2 +359 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +360 common statx sys_statx +361 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents diff --git a/arch/sparc/kernel/syscalls/syscallhdr.sh b/arch/sparc/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000000..626b5740a9f1 --- /dev/null +++ b/arch/sparc/kernel/syscalls/syscallhdr.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=_UAPI_ASM_SPARC_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + 
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + printf "#define __NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define __NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#ifdef __KERNEL__\n" + printf "#define __NR_syscalls\t%s\n" "${nxt}" + printf "#endif\n" + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/arch/sparc/kernel/syscalls/syscalltbl.sh b/arch/sparc/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000000..77cf0143ba19 --- /dev/null +++ b/arch/sparc/kernel/syscalls/syscalltbl.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +my_abi="$4" +offset="$5" + +emit() { + t_nxt="$1" + t_nr="$2" + t_entry="$3" + + while [ $t_nxt -lt $t_nr ]; do + printf "__SYSCALL(%s, sys_nis_syscall, )\n" "${t_nxt}" + t_nxt=$((t_nxt+1)) + done + printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" +} + +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + nxt=0 + if [ -z "$offset" ]; then + offset=0 + fi + + while read nr abi name entry compat ; do + if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then + emit $((nxt+offset)) $((nr+offset)) $compat + else + emit $((nxt+offset)) $((nr+offset)) $entry + fi + nxt=$((nr+1)) + done +) > "$out" diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 621a363098ec..ab9e4d57685a 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -9,85 +9,10 @@ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) */ - +#define __SYSCALL(nr, entry, nargs) .long entry .data .align 4 - - /* First, the Linux native syscall table. 
*/ - .globl sys_call_table sys_call_table: -/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write -/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link -/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod -/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek -/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 -/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause -/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice -/*35*/ .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile -/*40*/ .long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid -/*45*/ .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16 -/*50*/ .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl -/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve -/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize -/*65*/ .long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid -/*70*/ .long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect -/*75*/ .long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16 -/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64 -/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid -/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid -/*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept -/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending -/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid -/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg -/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd -/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod -/*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate -/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown -/*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 -/*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit -/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 -/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount -/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall -/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr -/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents -/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr -/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall -/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname -/*190*/ .long sys_init_module, sys_personality, sys_sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl -/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_sparc_sigaction, sys_sgetmask -/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir -/*205*/ .long sys_readahead, sys_socketcall, sys_syslog, 
sys_lookup_dcookie, sys_fadvise64 -/*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo -/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex -/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid -/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 -/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64 - /* "We are the Knights of the Forest of Ni!!" */ -/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall -/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler -/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep -/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall -/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep -/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun -/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy -/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink -/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid -/*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat -/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 -/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat -/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare -/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy -/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait -/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate -/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv -/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init -/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime -/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev -/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr -/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen -/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 -/*360*/ .long sys_statx, sys_io_pgetevents +#include <asm/syscall_table_32.h> /* 32-bit native syscalls */ +#undef __SYSCALL diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index ff9389a1c9f3..a27394bf7d7f 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -10,167 +10,18 @@ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) */ - +#define __SYSCALL(nr, entry, nargs) .word entry .text .align 4 - #ifdef CONFIG_COMPAT - /* First, the 32-bit Linux native syscall table. 
*/ - .globl sys_call_table32 sys_call_table32: -/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write -/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link -/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod -/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek -/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 -/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause -/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice - .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile -/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid - .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16 -/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl - .word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve -/*60*/ .word sys_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize - .word sys_msync, sys_vfork, compat_sys_pread64, compat_sys_pwrite64, sys_geteuid -/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect - .word sys_madvise, sys_vhangup, compat_sys_truncate64, sys_mincore, sys_getgroups16 -/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, compat_sys_ftruncate64 - .word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid -/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, compat_sys_select, sys_setfsgid - .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept -/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending - .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid -/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg - .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd -/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod - .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate -/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown - .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 -/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit - .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 - .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount -/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall - .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr -/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents - .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr -/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall - .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname -/*190*/ .word sys_init_module, sys_sparc64_personality, 
sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl - .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask -/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir - .word compat_sys_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, compat_sys_fadvise64 -/*210*/ .word compat_sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo - .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex -/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid - .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 -/*230*/ .word compat_sys_select, compat_sys_time, sys_splice, compat_sys_stime, compat_sys_statfs64 - .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall -/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler - .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, compat_sys_sched_rr_get_interval, compat_sys_nanosleep -/*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall - .word compat_sys_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, compat_sys_clock_nanosleep -/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, compat_sys_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy -/*270*/ .word compat_sys_io_submit, sys_io_cancel, compat_sys_io_getevents, compat_sys_mq_open, sys_mq_unlink - .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid -/*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat - .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 -/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat - .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare -/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy - .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait -/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate - .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init -/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime - .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev -/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen - .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2 -/*360*/ .word sys_statx, 
compat_sys_io_pgetevents - +#include <asm/syscall_table_c32.h> /* Compat syscalls */ #endif /* CONFIG_COMPAT */ - /* Now the 64-bit native Linux syscall table. */ - .align 4 .globl sys_call_table64, sys_call_table sys_call_table64: sys_call_table: -/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write -/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link -/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod -/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek -/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid -/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall -/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice - .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64 -/*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall - .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid -/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl - .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve -/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize - .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall -/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect - .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups -/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall - .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall -/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall - .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept -/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending - .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid -/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg - .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd -/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod - .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate -/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown - .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 -/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit - .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 - .word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount -/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install - .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr -/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents - .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr -/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall - .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname -/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl - .word sys_epoll_wait, sys_ioprio_set, sys_getppid, 
sys_nis_syscall, sys_sgetmask -/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall - .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 -/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo - .word sys_sparc_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex -/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid - .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid -/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64 - .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall -/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler - .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep -/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall - .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep -/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy -/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink - .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid -/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat - .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 -/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat - .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare -/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy - .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait -/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate - .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv - .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init -/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime - .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev -/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen - .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 -/*360*/ .word sys_statx, sys_io_pgetevents +#include <asm/syscall_table_64.h> /* 64-bit native syscalls */ +#undef __SYSCALL diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 5f356dc8e178..3eb77943ce12 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -445,8 +445,8 @@ static int rtc_probe(struct platform_device *op) { struct resource *r; - printk(KERN_INFO "%s: RTC regs at 0x%llx\n", - op->dev.of_node->full_name, op->resource[0].start); + printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n", + op->dev.of_node, op->resource[0].start); /* The CMOS RTC driver only accepts 
IORESOURCE_IO, so cons * up a fake resource so that the probe works for all cases. @@ -501,8 +501,8 @@ static struct platform_device rtc_bq4802_device = { static int bq4802_probe(struct platform_device *op) { - printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", - op->dev.of_node->full_name, op->resource[0].start); + printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n", + op->dev.of_node, op->resource[0].start); rtc_bq4802_device.resource = &op->resource[0]; return platform_device_register(&rtc_bq4802_device); @@ -561,12 +561,12 @@ static int mostek_probe(struct platform_device *op) /* On an Enterprise system there can be multiple mostek clocks. * We should only match the one that is on the central FHC bus. */ - if (!strcmp(dp->parent->name, "fhc") && - strcmp(dp->parent->parent->name, "central") != 0) + if (of_node_name_eq(dp->parent, "fhc") && + !of_node_name_eq(dp->parent->parent, "central")) return -ENODEV; - printk(KERN_INFO "%s: Mostek regs at 0x%llx\n", - dp->full_name, op->resource[0].start); + printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n", + dp, op->resource[0].start); m48t59_rtc.resource = &op->resource[0]; return platform_device_register(&m48t59_rtc); diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 32bae68e34c1..c7cad9b7bba7 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -193,7 +193,7 @@ show_pciobppath_attr(struct device *dev, struct device_attribute *attr, vdev = to_vio_dev(dev); dp = vdev->dp; - return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); + return snprintf (buf, PAGE_SIZE, "%pOF\n", dp); } static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, @@ -366,12 +366,9 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (parent == NULL) { dp = cdev_node; } else if (to_vio_dev(parent) == root_vdev) { - dp = of_get_next_child(cdev_node, NULL); - while (dp) { - if (!strcmp(dp->type, type)) + for_each_child_of_node(cdev_node, dp) { + if (of_node_is_type(dp, type)) break; - - dp = of_get_next_child(cdev_node, dp); } } else { dp = to_vio_dev(parent)->dp; diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index c8cb27d3ea75..f770ee7229d8 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -12,7 +12,7 @@ #include <linux/mm.h> #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ #include <linux/bitops.h> -#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> @@ -140,34 +140,44 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); return vaddr; } -static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t iounit_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len, enum dma_data_direction dir, + unsigned long attrs) { + void *vaddr = page_address(page) + offset; struct iounit_struct *iounit = dev->archdata.iommu; unsigned long ret, flags; + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return DMA_MAPPING_ERROR; + spin_lock_irqsave(&iounit->lock, flags); ret = iounit_get_area(iounit, (unsigned long)vaddr, len); spin_unlock_irqrestore(&iounit->lock, flags); return ret; } -static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; + struct scatterlist *sg; unsigned long flags; + int i; /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ spin_lock_irqsave(&iounit->lock, flags); - while (sz != 0) { - --sz; + for_each_sg(sgl, sg, nents, i) { sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); sg->dma_length = sg->length; - sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); + return nents; } -static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) +static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len, + enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; @@ -181,34 +191,47 @@ static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned lo spin_unlock_irqrestore(&iounit->lock, flags); } -static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; - unsigned long flags; - unsigned long vaddr, len; + unsigned long flags, vaddr, len; + struct scatterlist *sg; + int i; spin_lock_irqsave(&iounit->lock, flags); - while (sz != 0) { - --sz; + for_each_sg(sgl, sg, nents, i) { len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); - sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } #ifdef CONFIG_SBUS -static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len) +static void *iounit_alloc(struct device *dev, size_t len, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; - unsigned long page, end; + unsigned long va, addr, page, end, ret; pgprot_t dvma_prot; iopte_t __iomem *iopte; - *pba = addr; + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return NULL; + + len = PAGE_ALIGN(len); + va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); + if (!va) + return NULL; + + addr = ret = sparc_dma_alloc_resource(dev, len); + if (!addr) + goto out_free_pages; + *dma_handle = addr; dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); end = PAGE_ALIGN((addr + len)); @@ -237,27 +260,32 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon flush_cache_all(); flush_tlb_all(); - return 0; + return (void *)ret; + +out_free_pages: + free_pages(va, get_order(len)); + return NULL; } -static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len) +static void iounit_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) { /* XXX Somebody please fill this in */ } #endif -static const struct sparc32_dma_ops iounit_dma_ops = { - .get_scsi_one = iounit_get_scsi_one, - .get_scsi_sgl = iounit_get_scsi_sgl, - .release_scsi_one = iounit_release_scsi_one, - .release_scsi_sgl = iounit_release_scsi_sgl, +static const struct dma_map_ops iounit_dma_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iounit_map_dma_area, - .unmap_dma_area = iounit_unmap_dma_area, + .alloc = iounit_alloc, + .free = iounit_free, #endif + .map_page = iounit_map_page, + .unmap_page = iounit_unmap_page, + .map_sg = iounit_map_sg, + .unmap_sg = iounit_unmap_sg, }; void __init ld_mmu_iounit(void) { - sparc32_dma_ops = &iounit_dma_ops; + dma_ops = &iounit_dma_ops; } diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 2c5f8a648f8c..e8d5d73ca40d 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -13,7 +13,7 @@ #include <linux/mm.h> #include <linux/slab.h> #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ -#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> @@ -205,59 +205,67 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages) return busa0; } -static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len) +static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len) { - unsigned long off; - int npages; - struct page *page; - u32 busa; - - off = (unsigned long)vaddr & ~PAGE_MASK; - npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; - page = virt_to_page((unsigned long)vaddr & PAGE_MASK); - busa = iommu_get_one(dev, page, npages); - return busa + off; + void *vaddr = page_address(page) + offset; + unsigned long off = (unsigned long)vaddr & ~PAGE_MASK; + unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return DMA_MAPPING_ERROR; + return iommu_get_one(dev, virt_to_page(vaddr), npages) + off; } -static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev, + struct page *page, unsigned long offset, size_t len, + enum dma_data_direction dir, unsigned long attrs) { flush_page_for_dma(0); - return iommu_get_scsi_one(dev, vaddr, len); + return __sbus_iommu_map_page(dev, page, offset, len); } -static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev, + struct page *page, unsigned long offset, size_t len, + enum dma_data_direction dir, unsigned long attrs) { - unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; + void *vaddr = page_address(page) + offset; + unsigned long p = ((unsigned long)vaddr) & PAGE_MASK; - while(page < ((unsigned long)(vaddr + len))) { - flush_page_for_dma(page); - page += PAGE_SIZE; + while (p < (unsigned long)vaddr + len) { + flush_page_for_dma(p); + p += PAGE_SIZE; } - return iommu_get_scsi_one(dev, vaddr, len); + + return __sbus_iommu_map_page(dev, page, offset, len); } -static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz) +static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - int n; + struct scatterlist *sg; + int i, n; flush_page_for_dma(0); - while (sz != 0) { - --sz; + + for_each_sg(sgl, sg, nents, i) { n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; sg->dma_length = sg->length; - sg = sg_next(sg); } + + return nents; } -static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz) +static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { unsigned long page, oldpage = 0; - int n, i; - - while(sz != 0) { - --sz; + struct scatterlist *sg; + int i, j, n; + for_each_sg(sgl, sg, nents, j) { n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; /* @@ -277,8 +285,9 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; sg->dma_length = sg->length; - sg = sg_next(sg); } + + return nents; } static void iommu_release_one(struct device *dev, u32 busa, int npages) @@ -297,40 +306,52 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages) bit_map_clear(&iommu->usemap, ioptex, npages); } -static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) +static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t len, enum dma_data_direction dir, unsigned long attrs) { - unsigned long off; + unsigned long off = dma_addr & ~PAGE_MASK; int npages; - off = vaddr & ~PAGE_MASK; npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; - iommu_release_one(dev, vaddr & PAGE_MASK, npages); + iommu_release_one(dev, dma_addr & PAGE_MASK, npages); } -static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - int n; - - while(sz != 0) { - --sz; + struct scatterlist *sg; + int i, n; + for_each_sg(sgl, sg, nents, i) 
{ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_one(dev, sg->dma_address & PAGE_MASK, n); sg->dma_address = 0x21212121; - sg = sg_next(sg); } } #ifdef CONFIG_SBUS -static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, - unsigned long addr, int len) +static void *sbus_iommu_alloc(struct device *dev, size_t len, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; - unsigned long page, end; + unsigned long va, addr, page, end, ret; iopte_t *iopte = iommu->page_table; iopte_t *first; int ioptex; + /* XXX So what is maxphys for us and how do drivers know it? */ + if (!len || len > 256 * 1024) + return NULL; + + len = PAGE_ALIGN(len); + va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); + if (va == 0) + return NULL; + + addr = ret = sparc_dma_alloc_resource(dev, len); + if (!addr) + goto out_free_pages; + BUG_ON((va & ~PAGE_MASK) != 0); BUG_ON((addr & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); @@ -385,16 +406,25 @@ static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long flush_tlb_all(); iommu_invalidate(iommu->regs); - *pba = iommu->start + (ioptex << PAGE_SHIFT); - return 0; + *dma_handle = iommu->start + (ioptex << PAGE_SHIFT); + return (void *)ret; + +out_free_pages: + free_pages(va, get_order(len)); + return NULL; } -static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len) +static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr, + dma_addr_t busa, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; iopte_t *iopte = iommu->page_table; - unsigned long end; + struct page *page = virt_to_page(cpu_addr); int ioptex = (busa - iommu->start) >> PAGE_SHIFT; + unsigned long end; + + if (!sparc_dma_free_resource(cpu_addr, len)) + return; BUG_ON((busa & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); @@ -408,38 +438,40 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len flush_tlb_all(); iommu_invalidate(iommu->regs); bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT); + + __free_pages(page, get_order(len)); } #endif -static const struct sparc32_dma_ops iommu_dma_gflush_ops = { - .get_scsi_one = iommu_get_scsi_one_gflush, - .get_scsi_sgl = iommu_get_scsi_sgl_gflush, - .release_scsi_one = iommu_release_scsi_one, - .release_scsi_sgl = iommu_release_scsi_sgl, +static const struct dma_map_ops sbus_iommu_dma_gflush_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iommu_map_dma_area, - .unmap_dma_area = iommu_unmap_dma_area, + .alloc = sbus_iommu_alloc, + .free = sbus_iommu_free, #endif + .map_page = sbus_iommu_map_page_gflush, + .unmap_page = sbus_iommu_unmap_page, + .map_sg = sbus_iommu_map_sg_gflush, + .unmap_sg = sbus_iommu_unmap_sg, }; -static const struct sparc32_dma_ops iommu_dma_pflush_ops = { - .get_scsi_one = iommu_get_scsi_one_pflush, - .get_scsi_sgl = iommu_get_scsi_sgl_pflush, - .release_scsi_one = iommu_release_scsi_one, - .release_scsi_sgl = iommu_release_scsi_sgl, +static const struct dma_map_ops sbus_iommu_dma_pflush_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iommu_map_dma_area, - .unmap_dma_area = iommu_unmap_dma_area, + .alloc = sbus_iommu_alloc, + .free = sbus_iommu_free, #endif + .map_page = sbus_iommu_map_page_pflush, + .unmap_page = sbus_iommu_unmap_page, + .map_sg = sbus_iommu_map_sg_pflush, + .unmap_sg = sbus_iommu_unmap_sg, }; void __init ld_mmu_iommu(void) { if (flush_page_for_dma_global) { /* flush_page_for_dma flushes 
everything, no matter what page it is */
-		sparc32_dma_ops = &iommu_dma_gflush_ops;
+		dma_ops = &sbus_iommu_dma_gflush_ops;
 	} else {
-		sparc32_dma_ops = &iommu_dma_pflush_ops;
+		dma_ops = &sbus_iommu_dma_pflush_ops;
 	}
 
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index a5ff88643d5c..84cc8f7f83e9 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -552,15 +552,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
 				emit_skb_load32(hash, r_A);
 				break;
 			case BPF_ANC | SKF_AD_VLAN_TAG:
-			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 				emit_skb_load16(vlan_tci, r_A);
-				if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
-					emit_alu_K(SRL, 12);
+				break;
+			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+				__emit_skb_load8(__pkt_vlan_present_offset, r_A);
+				if (PKT_VLAN_PRESENT_BIT)
+					emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
+				if (PKT_VLAN_PRESENT_BIT < 7)
 					emit_andi(r_A, 1, r_A);
-				} else {
-					emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
-					emit_and(r_A, r_TMP, r_A);
-				}
 				break;
 			case BPF_LD | BPF_W | BPF_LEN:
 				emit_skb_load32(len, r_A);
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 5fda4f7bf15d..65428e79b2f3 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1575,6 +1575,7 @@ skip_init_ctx:
 	prog->jited_len = image_size;
 
 	if (!prog->is_func || extra_pass) {
+		bpf_prog_fill_jited_linfo(prog, ctx.offset);
 out_off:
 		kfree(ctx.offset);
 		kfree(jit_data);
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f9024bccff16..43730c9b1c86 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -53,7 +53,7 @@ static void timer_stop(void)
 {
 	nmi_adjust_hz(1);
 	unregister_die_notifier(&profile_timer_exceptions_nb);
-	synchronize_sched();  /* Allow already-started NMIs to complete. */
+	synchronize_rcu();  /* Allow already-started NMIs to complete.
*/ } static int op_nmi_timer_init(struct oprofile_operations *ops) diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index a6e18ca4cc18..74e97f77e23b 100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -34,7 +34,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) CPPFLAGS_vdso.lds += -P -C VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \ - -z max-page-size=8192 -z common-page-size=8192 + -z max-page-size=8192 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index a4c05159dca5..2681027d7bff 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -4,7 +4,6 @@ config UNICORE32 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO - select DMA_DIRECT_OPS select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 0038a2d10a7a..c625f57472f7 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/ # Xen paravirtualization support obj-$(CONFIG_XEN) += xen/ +obj-$(CONFIG_PVH) += platform/pvh/ + # Hyper-V paravirtualization support obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8689e794a43c..57552f2b37eb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -66,7 +66,6 @@ config X86 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE @@ -90,7 +89,6 @@ config X86 select CLOCKSOURCE_VALIDATE_LAST_CYCLE select CLOCKSOURCE_WATCHDOG select DCACHE_WORD_ACCESS - select DMA_DIRECT_OPS select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT select GENERIC_CLOCKEVENTS @@ -444,15 +442,23 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. -config INTEL_RDT - bool "Intel Resource Director Technology support" - depends on X86 && CPU_SUP_INTEL +config RESCTRL + bool "Resource Control support" + depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS help - Select to enable resource allocation and monitoring which are - sub-features of Intel Resource Director Technology(RDT). More - information about RDT can be found in the Intel x86 - Architecture Software Developer Manual. + Enable Resource Control support. + + Provide support for the allocation and monitoring of system resources + usage by the CPU. + + Intel calls this Intel Resource Director Technology + (Intel(R) RDT). More information about RDT can be found in the + Intel x86 Architecture Software Developer Manual. + + AMD calls this AMD Platform Quality of Service (AMD QoS). + More information about AMD QoS can be found in the AMD64 Technology + Platform Quality of Service Extensions manual. Say N if unsure. @@ -796,6 +802,12 @@ config KVM_GUEST underlying device model, the host provides the guest with timing infrastructure such as time of day, and system time +config PVH + bool "Support for running PVH guests" + ---help--- + This option enables the PVH entry point for guest virtual machines + as specified in the x86/HVM direct boot ABI. 
+ config KVM_DEBUG_FS bool "Enable debug information for KVM Guests in debugfs" depends on KVM_GUEST && DEBUG_FS diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 75ef499a66e2..16c3145c0a5f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -130,10 +130,6 @@ else KBUILD_CFLAGS += -mno-red-zone KBUILD_CFLAGS += -mcmodel=kernel - - # -funit-at-a-time shrinks the kernel .text considerably - # unfortunately it makes reading oopses harder. - KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) endif ifdef CONFIG_X86_X32 @@ -232,13 +228,6 @@ archscripts: scripts_basic archheaders: $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all -archmacros: - $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s - -ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -export ASM_MACRO_FLAGS -KBUILD_CFLAGS += $(ASM_MACRO_FLAGS) - ### # Kernel objects diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 91085a08de6c..1db7913795f5 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -26,13 +26,6 @@ cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) # an unresolved reference. cflags-y += -ffreestanding -# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use -# a lot more stack due to the lack of sharing of stacklots. Also, gcc -# 4.3.0 needs -funit-at-a-time for extern inline functions. -KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \ - echo $(call cc-option,-fno-unit-at-a-time); \ - else echo $(call cc-option,-funit-at-a-time); fi ;) - KBUILD_CFLAGS += $(cflags-y) else @@ -54,6 +47,4 @@ ELF_FORMAT := elf64-x86-64 LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64 LINK-y += -m64 -# Do unit-at-a-time unconditionally on x86_64, following the host -KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) endif diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ef5a9cc66fb8..32a09eb5c101 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -309,7 +309,7 @@ void query_edd(void); void __attribute__((noreturn)) die(void); /* memory.c */ -int detect_memory(void); +void detect_memory(void); /* pm.c */ void __attribute__((noreturn)) go_to_protected_mode(void); diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index 7df2b28207be..f06c147b5140 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -17,7 +17,7 @@ #define SMAP 0x534d4150 /* ASCII "SMAP" */ -static int detect_memory_e820(void) +static void detect_memory_e820(void) { int count = 0; struct biosregs ireg, oreg; @@ -68,10 +68,10 @@ static int detect_memory_e820(void) count++; } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table)); - return boot_params.e820_entries = count; + boot_params.e820_entries = count; } -static int detect_memory_e801(void) +static void detect_memory_e801(void) { struct biosregs ireg, oreg; @@ -80,7 +80,7 @@ static int detect_memory_e801(void) intcall(0x15, &ireg, &oreg); if (oreg.eflags & X86_EFLAGS_CF) - return -1; + return; /* Do we really need to do this? */ if (oreg.cx || oreg.dx) { @@ -89,7 +89,7 @@ static int detect_memory_e801(void) } if (oreg.ax > 15*1024) { - return -1; /* Bogus! */ + return; /* Bogus! 
*/ } else if (oreg.ax == 15*1024) { boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax; } else { @@ -102,11 +102,9 @@ static int detect_memory_e801(void) */ boot_params.alt_mem_k = oreg.ax; } - - return 0; } -static int detect_memory_88(void) +static void detect_memory_88(void) { struct biosregs ireg, oreg; @@ -115,22 +113,13 @@ static int detect_memory_88(void) intcall(0x15, &ireg, &oreg); boot_params.screen_info.ext_mem_k = oreg.ax; - - return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ } -int detect_memory(void) +void detect_memory(void) { - int err = -1; - - if (detect_memory_e820() > 0) - err = 0; - - if (!detect_memory_e801()) - err = 0; + detect_memory_e820(); - if (!detect_memory_88()) - err = 0; + detect_memory_e801(); - return err; + detect_memory_88(); } diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index bf0e82400358..a93d44e58f9c 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -132,6 +132,7 @@ static void die(const char * str, ...) va_list args; va_start(args, str); vfprintf(stderr, str, args); + va_end(args); fputc('\n', stderr); exit(1); } diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a4b0007a54e1..45734e1cf967 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -8,6 +8,7 @@ OBJECT_FILES_NON_STANDARD := y avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ $(comma)4)$(comma)%ymm2,yes,no) +avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no) sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no) sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no) @@ -23,7 +24,7 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o -obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o +obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o @@ -46,6 +47,9 @@ obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o obj-$(CONFIG_CRYPTO_MORUS640_SSE2) += morus640-sse2.o obj-$(CONFIG_CRYPTO_MORUS1280_SSE2) += morus1280-sse2.o +obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o +obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o + # These modules require assembler to support AVX. 
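# (Aside, illustrative only:) the *_supported flags above come from as-instr,
# which assembles a one-line probe and expands to its second or third
# argument depending on whether the assembler accepted the instruction; a
# hypothetical feature check would follow the same pattern:
#
#   myfeat_supported := $(call as-instr,vmyinsn %xmm0$(comma)%xmm1,yes,no)
#   ifeq ($(myfeat_supported),yes)
#     obj-$(CONFIG_CRYPTO_MYFEAT) += myfeat-x86_64.o
#   endif
#
# $(comma) keeps the probe's commas from splitting the $(call) arguments.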
ifeq ($(avx_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += \ @@ -74,7 +78,7 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o -chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o +chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o @@ -84,6 +88,8 @@ aegis256-aesni-y := aegis256-aesni-asm.o aegis256-aesni-glue.o morus640-sse2-y := morus640-sse2-asm.o morus640-sse2-glue.o morus1280-sse2-y := morus1280-sse2-asm.o morus1280-sse2-glue.o +nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o + ifeq ($(avx_supported),yes) camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ camellia_aesni_avx_glue.o @@ -97,10 +103,16 @@ endif ifeq ($(avx2_supported),yes) camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o - chacha20-x86_64-y += chacha20-avx2-x86_64.o + chacha-x86_64-y += chacha-avx2-x86_64.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o + + nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o +endif + +ifeq ($(avx512_supported),yes) + chacha-x86_64-y += chacha-avx512vl-x86_64.o endif aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 1985ea0b551b..91c039ab5699 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -182,43 +182,30 @@ aad_shift_arr: .text -##define the fields of the gcm aes context -#{ -# u8 expanded_keys[16*11] store expanded keys -# u8 shifted_hkey_1[16] store HashKey <<1 mod poly here -# u8 shifted_hkey_2[16] store HashKey^2 <<1 mod poly here -# u8 shifted_hkey_3[16] store HashKey^3 <<1 mod poly here -# u8 shifted_hkey_4[16] store HashKey^4 <<1 mod poly here -# u8 shifted_hkey_5[16] store HashKey^5 <<1 mod poly here -# u8 shifted_hkey_6[16] store HashKey^6 <<1 mod poly here -# u8 shifted_hkey_7[16] store HashKey^7 <<1 mod poly here -# u8 shifted_hkey_8[16] store HashKey^8 <<1 mod poly here -# u8 shifted_hkey_1_k[16] store XOR HashKey <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_2_k[16] store XOR HashKey^2 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_3_k[16] store XOR HashKey^3 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_4_k[16] store XOR HashKey^4 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_5_k[16] store XOR HashKey^5 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_6_k[16] store XOR HashKey^6 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_7_k[16] store XOR HashKey^7 <<1 mod poly here (for Karatsuba purposes) -# u8 shifted_hkey_8_k[16] store XOR HashKey^8 <<1 mod poly here (for Karatsuba purposes) -#} gcm_ctx# - -HashKey = 16*11 # store HashKey <<1 mod poly here -HashKey_2 = 16*12 # store HashKey^2 <<1 mod poly here -HashKey_3 = 16*13 # store HashKey^3 <<1 mod poly here -HashKey_4 = 16*14 # store HashKey^4 <<1 mod poly here -HashKey_5 = 16*15 # store HashKey^5 <<1 mod poly here -HashKey_6 = 16*16 # store HashKey^6 <<1 mod poly here -HashKey_7 = 16*17 # store HashKey^7 <<1 mod poly here -HashKey_8 = 16*18 # store HashKey^8 <<1 mod 
poly here -HashKey_k = 16*19 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes) -HashKey_2_k = 16*20 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes) -HashKey_3_k = 16*21 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes) -HashKey_4_k = 16*22 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes) -HashKey_5_k = 16*23 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes) -HashKey_6_k = 16*24 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes) -HashKey_7_k = 16*25 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes) -HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes) +#define AadHash 16*0 +#define AadLen 16*1 +#define InLen (16*1)+8 +#define PBlockEncKey 16*2 +#define OrigIV 16*3 +#define CurCount 16*4 +#define PBlockLen 16*5 + +HashKey = 16*6 # store HashKey <<1 mod poly here +HashKey_2 = 16*7 # store HashKey^2 <<1 mod poly here +HashKey_3 = 16*8 # store HashKey^3 <<1 mod poly here +HashKey_4 = 16*9 # store HashKey^4 <<1 mod poly here +HashKey_5 = 16*10 # store HashKey^5 <<1 mod poly here +HashKey_6 = 16*11 # store HashKey^6 <<1 mod poly here +HashKey_7 = 16*12 # store HashKey^7 <<1 mod poly here +HashKey_8 = 16*13 # store HashKey^8 <<1 mod poly here +HashKey_k = 16*14 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes) +HashKey_2_k = 16*15 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes) +HashKey_3_k = 16*16 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes) +HashKey_4_k = 16*17 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes) +HashKey_5_k = 16*18 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes) +HashKey_6_k = 16*19 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes) +HashKey_7_k = 16*20 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes) +HashKey_8_k = 16*21 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes) #define arg1 %rdi #define arg2 %rsi @@ -229,6 +216,8 @@ HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsu #define arg7 STACK_OFFSET+8*1(%r14) #define arg8 STACK_OFFSET+8*2(%r14) #define arg9 STACK_OFFSET+8*3(%r14) +#define arg10 STACK_OFFSET+8*4(%r14) +#define keysize 2*15*16(arg1) i = 0 j = 0 @@ -267,19 +256,636 @@ VARIABLE_OFFSET = 16*8 # Utility Macros ################################ +.macro FUNC_SAVE + #the number of pushes must equal STACK_OFFSET + push %r12 + push %r13 + push %r14 + push %r15 + + mov %rsp, %r14 + + + + sub $VARIABLE_OFFSET, %rsp + and $~63, %rsp # align rsp to 64 bytes +.endm + +.macro FUNC_RESTORE + mov %r14, %rsp + + pop %r15 + pop %r14 + pop %r13 + pop %r12 +.endm + # Encryption of a single block -.macro ENCRYPT_SINGLE_BLOCK XMM0 +.macro ENCRYPT_SINGLE_BLOCK REP XMM0 vpxor (arg1), \XMM0, \XMM0 - i = 1 - setreg -.rep 9 + i = 1 + setreg +.rep \REP vaesenc 16*i(arg1), \XMM0, \XMM0 - i = (i+1) - setreg + i = (i+1) + setreg .endr - vaesenclast 16*10(arg1), \XMM0, \XMM0 + vaesenclast 16*i(arg1), \XMM0, \XMM0 .endm +# combined for GCM encrypt and decrypt functions +# clobbering all xmm registers +# clobbering r10, r11, r12, r13, r14, r15 +.macro GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC REP + vmovdqu AadHash(arg2), %xmm8 + vmovdqu HashKey(arg2), %xmm13 # xmm13 = HashKey + add arg5, InLen(arg2) + + # initialize the data pointer offset as zero + xor %r11d, %r11d + + PARTIAL_BLOCK \GHASH_MUL, arg3, arg4, arg5, %r11, %xmm8, 
\ENC_DEC + sub %r11, arg5 + + mov arg5, %r13 # save the number of bytes of plaintext/ciphertext + and $-16, %r13 # r13 = r13 - (r13 mod 16) + + mov %r13, %r12 + shr $4, %r12 + and $7, %r12 + jz _initial_num_blocks_is_0\@ + + cmp $7, %r12 + je _initial_num_blocks_is_7\@ + cmp $6, %r12 + je _initial_num_blocks_is_6\@ + cmp $5, %r12 + je _initial_num_blocks_is_5\@ + cmp $4, %r12 + je _initial_num_blocks_is_4\@ + cmp $3, %r12 + je _initial_num_blocks_is_3\@ + cmp $2, %r12 + je _initial_num_blocks_is_2\@ + + jmp _initial_num_blocks_is_1\@ + +_initial_num_blocks_is_7\@: + \INITIAL_BLOCKS \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*7, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_6\@: + \INITIAL_BLOCKS \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*6, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_5\@: + \INITIAL_BLOCKS \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*5, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_4\@: + \INITIAL_BLOCKS \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*4, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_3\@: + \INITIAL_BLOCKS \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*3, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_2\@: + \INITIAL_BLOCKS \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*2, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_1\@: + \INITIAL_BLOCKS \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + sub $16*1, %r13 + jmp _initial_blocks_encrypted\@ + +_initial_num_blocks_is_0\@: + \INITIAL_BLOCKS \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC + + +_initial_blocks_encrypted\@: + cmp $0, %r13 + je _zero_cipher_left\@ + + sub $128, %r13 + je _eight_cipher_left\@ + + + + + vmovd %xmm9, %r15d + and $255, %r15d + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + + +_encrypt_by_8_new\@: + cmp $(255-8), %r15d + jg _encrypt_by_8\@ + + + + add $8, %r15b + \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC + add $128, %r11 + sub $128, %r13 + jne _encrypt_by_8_new\@ + + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + jmp _eight_cipher_left\@ + +_encrypt_by_8\@: + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + add $8, %r15b + \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + add $128, %r11 + sub $128, %r13 + jne _encrypt_by_8_new\@ + + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + + + + +_eight_cipher_left\@: + \GHASH_LAST_8 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 + + 
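+        # Worked example of the block accounting above (illustrative): for a
+        # 200 byte update, r13 = 200 & ~15 = 192, i.e. 12 full blocks; 12 mod
+        # 8 = 4, so INITIAL_BLOCKS consumes 4 blocks plus the first 8-block
+        # batch, the sub $128 then hits zero, and the remaining
+        # 200 - 192 = 8 bytes fall through to the partial-block path below.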
+_zero_cipher_left\@: + vmovdqu %xmm14, AadHash(arg2) + vmovdqu %xmm9, CurCount(arg2) + + # check for 0 length + mov arg5, %r13 + and $15, %r13 # r13 = (arg5 mod 16) + + je _multiple_of_16_bytes\@ + + # handle the last <16 Byte block separately + + mov %r13, PBlockLen(arg2) + + vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn + vmovdqu %xmm9, CurCount(arg2) + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + + ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Yn) + vmovdqu %xmm9, PBlockEncKey(arg2) + + cmp $16, arg5 + jge _large_enough_update\@ + + lea (arg4,%r11,1), %r10 + mov %r13, %r12 + + READ_PARTIAL_BLOCK %r10 %r12 %xmm1 + + lea SHIFT_MASK+16(%rip), %r12 + sub %r13, %r12 # adjust the shuffle mask pointer to be + # able to shift 16-r13 bytes (r13 is the + # number of bytes in plaintext mod 16) + + jmp _final_ghash_mul\@ + +_large_enough_update\@: + sub $16, %r11 + add %r13, %r11 + + # receive the last <16 Byte block + vmovdqu (arg4, %r11, 1), %xmm1 + + sub %r13, %r11 + add $16, %r11 + + lea SHIFT_MASK+16(%rip), %r12 + # adjust the shuffle mask pointer to be able to shift 16-r13 bytes + # (r13 is the number of bytes in plaintext mod 16) + sub %r13, %r12 + # get the appropriate shuffle mask + vmovdqu (%r12), %xmm2 + # shift right 16-r13 bytes + vpshufb %xmm2, %xmm1, %xmm1 + +_final_ghash_mul\@: + .if \ENC_DEC == DEC + vmovdqa %xmm1, %xmm2 + vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) + vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to + # mask out top 16-r13 bytes of xmm9 + vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 + vpand %xmm1, %xmm2, %xmm2 + vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 + vpxor %xmm2, %xmm14, %xmm14 + + vmovdqu %xmm14, AadHash(arg2) + .else + vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) + vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to + # mask out top 16-r13 bytes of xmm9 + vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 + vpxor %xmm9, %xmm14, %xmm14 + + vmovdqu %xmm14, AadHash(arg2) + vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext + .endif + + + ############################# + # output r13 Bytes + vmovq %xmm9, %rax + cmp $8, %r13 + jle _less_than_8_bytes_left\@ + + mov %rax, (arg3 , %r11) + add $8, %r11 + vpsrldq $8, %xmm9, %xmm9 + vmovq %xmm9, %rax + sub $8, %r13 + +_less_than_8_bytes_left\@: + movb %al, (arg3 , %r11) + add $1, %r11 + shr $8, %rax + sub $1, %r13 + jne _less_than_8_bytes_left\@ + ############################# + +_multiple_of_16_bytes\@: +.endm + + +# GCM_COMPLETE Finishes update of tag of last partial block +# Output: Authorization Tag (AUTH_TAG) +# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15 +.macro GCM_COMPLETE GHASH_MUL REP AUTH_TAG AUTH_TAG_LEN + vmovdqu AadHash(arg2), %xmm14 + vmovdqu HashKey(arg2), %xmm13 + + mov PBlockLen(arg2), %r12 + cmp $0, %r12 + je _partial_done\@ + + #GHASH computation for the last <16 Byte block + \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 + +_partial_done\@: + mov AadLen(arg2), %r12 # r12 = aadLen (number of bytes) + shl $3, %r12 # convert into number of bits + vmovd %r12d, %xmm15 # len(A) in xmm15 + + mov InLen(arg2), %r12 + shl $3, %r12 # len(C) in bits (*128) + vmovq %r12, %xmm1 + vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 + vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) + + vpxor %xmm15, %xmm14, %xmm14 + \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation + vpshufb SHUF_MASK(%rip), %xmm14, 
%xmm14 # perform a 16Byte swap + + vmovdqu OrigIV(arg2), %xmm9 + + ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Y0) + + vpxor %xmm14, %xmm9, %xmm9 + + + +_return_T\@: + mov \AUTH_TAG, %r10 # r10 = authTag + mov \AUTH_TAG_LEN, %r11 # r11 = auth_tag_len + + cmp $16, %r11 + je _T_16\@ + + cmp $8, %r11 + jl _T_4\@ + +_T_8\@: + vmovq %xmm9, %rax + mov %rax, (%r10) + add $8, %r10 + sub $8, %r11 + vpsrldq $8, %xmm9, %xmm9 + cmp $0, %r11 + je _return_T_done\@ +_T_4\@: + vmovd %xmm9, %eax + mov %eax, (%r10) + add $4, %r10 + sub $4, %r11 + vpsrldq $4, %xmm9, %xmm9 + cmp $0, %r11 + je _return_T_done\@ +_T_123\@: + vmovd %xmm9, %eax + cmp $2, %r11 + jl _T_1\@ + mov %ax, (%r10) + cmp $2, %r11 + je _return_T_done\@ + add $2, %r10 + sar $16, %eax +_T_1\@: + mov %al, (%r10) + jmp _return_T_done\@ + +_T_16\@: + vmovdqu %xmm9, (%r10) + +_return_T_done\@: +.endm + +.macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8 + + mov \AAD, %r10 # r10 = AAD + mov \AADLEN, %r12 # r12 = aadLen + + + mov %r12, %r11 + + vpxor \T8, \T8, \T8 + vpxor \T7, \T7, \T7 + cmp $16, %r11 + jl _get_AAD_rest8\@ +_get_AAD_blocks\@: + vmovdqu (%r10), \T7 + vpshufb SHUF_MASK(%rip), \T7, \T7 + vpxor \T7, \T8, \T8 + \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6 + add $16, %r10 + sub $16, %r12 + sub $16, %r11 + cmp $16, %r11 + jge _get_AAD_blocks\@ + vmovdqu \T8, \T7 + cmp $0, %r11 + je _get_AAD_done\@ + + vpxor \T7, \T7, \T7 + + /* read the last <16B of AAD. since we have at least 4B of + data right after the AAD (the ICV, and maybe some CT), we can + read 4B/8B blocks safely, and then get rid of the extra stuff */ +_get_AAD_rest8\@: + cmp $4, %r11 + jle _get_AAD_rest4\@ + movq (%r10), \T1 + add $8, %r10 + sub $8, %r11 + vpslldq $8, \T1, \T1 + vpsrldq $8, \T7, \T7 + vpxor \T1, \T7, \T7 + jmp _get_AAD_rest8\@ +_get_AAD_rest4\@: + cmp $0, %r11 + jle _get_AAD_rest0\@ + mov (%r10), %eax + movq %rax, \T1 + add $4, %r10 + sub $4, %r11 + vpslldq $12, \T1, \T1 + vpsrldq $4, \T7, \T7 + vpxor \T1, \T7, \T7 +_get_AAD_rest0\@: + /* finalize: shift out the extra bytes we read, and align + left. 
since pslldq can only shift by an immediate, we use + vpshufb and an array of shuffle masks */ + movq %r12, %r11 + salq $4, %r11 + vmovdqu aad_shift_arr(%r11), \T1 + vpshufb \T1, \T7, \T7 +_get_AAD_rest_final\@: + vpshufb SHUF_MASK(%rip), \T7, \T7 + vpxor \T8, \T7, \T7 + \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6 + +_get_AAD_done\@: + vmovdqu \T7, AadHash(arg2) +.endm + +.macro INIT GHASH_MUL PRECOMPUTE + mov arg6, %r11 + mov %r11, AadLen(arg2) # ctx_data.aad_length = aad_length + xor %r11d, %r11d + mov %r11, InLen(arg2) # ctx_data.in_length = 0 + + mov %r11, PBlockLen(arg2) # ctx_data.partial_block_length = 0 + mov %r11, PBlockEncKey(arg2) # ctx_data.partial_block_enc_key = 0 + mov arg3, %rax + movdqu (%rax), %xmm0 + movdqu %xmm0, OrigIV(arg2) # ctx_data.orig_IV = iv + + vpshufb SHUF_MASK(%rip), %xmm0, %xmm0 + movdqu %xmm0, CurCount(arg2) # ctx_data.current_counter = iv + + vmovdqu (arg4), %xmm6 # xmm6 = HashKey + + vpshufb SHUF_MASK(%rip), %xmm6, %xmm6 + ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey + vmovdqa %xmm6, %xmm2 + vpsllq $1, %xmm6, %xmm6 + vpsrlq $63, %xmm2, %xmm2 + vmovdqa %xmm2, %xmm1 + vpslldq $8, %xmm2, %xmm2 + vpsrldq $8, %xmm1, %xmm1 + vpor %xmm2, %xmm6, %xmm6 + #reduction + vpshufd $0b00100100, %xmm1, %xmm2 + vpcmpeqd TWOONE(%rip), %xmm2, %xmm2 + vpand POLY(%rip), %xmm2, %xmm2 + vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly + ####################################################################### + vmovdqu %xmm6, HashKey(arg2) # store HashKey<<1 mod poly + + CALC_AAD_HASH \GHASH_MUL, arg5, arg6, %xmm2, %xmm6, %xmm3, %xmm4, %xmm5, %xmm7, %xmm1, %xmm0 + + \PRECOMPUTE %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5 +.endm + + +# Reads DLEN bytes starting at DPTR and stores in XMMDst +# where 0 < DLEN < 16 +# Clobbers %rax, DLEN +.macro READ_PARTIAL_BLOCK DPTR DLEN XMMDst + vpxor \XMMDst, \XMMDst, \XMMDst + + cmp $8, \DLEN + jl _read_lt8_\@ + mov (\DPTR), %rax + vpinsrq $0, %rax, \XMMDst, \XMMDst + sub $8, \DLEN + jz _done_read_partial_block_\@ + xor %eax, %eax +_read_next_byte_\@: + shl $8, %rax + mov 7(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_\@ + vpinsrq $1, %rax, \XMMDst, \XMMDst + jmp _done_read_partial_block_\@ +_read_lt8_\@: + xor %eax, %eax +_read_next_byte_lt8_\@: + shl $8, %rax + mov -1(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_lt8_\@ + vpinsrq $0, %rax, \XMMDst, \XMMDst +_done_read_partial_block_\@: +.endm + +# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks +# between update calls. 
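+# Background: a partial block is the <16 byte tail left over by a previous
+# update call; its length is kept in PBlockLen(arg2) and the keystream block
+# E(K, Yn) that was computed for it in PBlockEncKey(arg2), so this call can
+# finish that block before resuming full-block processing.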
+# Requires the input data to be at least 1 byte long due to READ_PARTIAL_BLOCK
+# Outputs encrypted bytes, and updates hash and partial info in gcm_context_data
+# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
+.macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
+        AAD_HASH ENC_DEC
+        mov     PBlockLen(arg2), %r13
+        cmp     $0, %r13
+        je      _partial_block_done_\@  # Leave Macro if no partial blocks
+        # Read in input data without over reading
+        cmp     $16, \PLAIN_CYPH_LEN
+        jl      _fewer_than_16_bytes_\@
+        vmovdqu (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
+        jmp     _data_read_\@
+
+_fewer_than_16_bytes_\@:
+        lea     (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
+        mov     \PLAIN_CYPH_LEN, %r12
+        READ_PARTIAL_BLOCK %r10 %r12 %xmm1
+
+        mov     PBlockLen(arg2), %r13
+
+_data_read_\@:                          # Finished reading in data
+
+        vmovdqu PBlockEncKey(arg2), %xmm9
+        vmovdqu HashKey(arg2), %xmm13
+
+        lea     SHIFT_MASK(%rip), %r12
+
+        # adjust the shuffle mask pointer to be able to shift r13 bytes
+        # (16-r13 is the number of bytes in plaintext mod 16)
+        add     %r13, %r12
+        vmovdqu (%r12), %xmm2           # get the appropriate shuffle mask
+        vpshufb %xmm2, %xmm9, %xmm9     # shift right r13 bytes
+
+.if \ENC_DEC == DEC
+        vmovdqa %xmm1, %xmm3
+        pxor    %xmm1, %xmm9            # Cyphertext XOR E(K, Yn)
+
+        mov     \PLAIN_CYPH_LEN, %r10
+        add     %r13, %r10
+        # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+        sub     $16, %r10
+        # Determine if the partial block is not being filled and
+        # shift mask accordingly
+        jge     _no_extra_mask_1_\@
+        sub     %r10, %r12
+_no_extra_mask_1_\@:
+
+        vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+        # get the appropriate mask to mask out bottom r13 bytes of xmm9
+        vpand   %xmm1, %xmm9, %xmm9     # mask out bottom r13 bytes of xmm9
+
+        vpand   %xmm1, %xmm3, %xmm3
+        vmovdqa SHUF_MASK(%rip), %xmm10
+        vpshufb %xmm10, %xmm3, %xmm3
+        vpshufb %xmm2, %xmm3, %xmm3
+        vpxor   %xmm3, \AAD_HASH, \AAD_HASH
+
+        cmp     $0, %r10
+        jl      _partial_incomplete_1_\@
+
+        # GHASH computation for the last <16 Byte block
+        \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+        xor     %eax,%eax
+
+        mov     %rax, PBlockLen(arg2)
+        jmp     _dec_done_\@
+_partial_incomplete_1_\@:
+        add     \PLAIN_CYPH_LEN, PBlockLen(arg2)
+_dec_done_\@:
+        vmovdqu \AAD_HASH, AadHash(arg2)
+.else
+        vpxor   %xmm1, %xmm9, %xmm9     # Plaintext XOR E(K, Yn)
+
+        mov     \PLAIN_CYPH_LEN, %r10
+        add     %r13, %r10
+        # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+        sub     $16, %r10
+        # Determine if the partial block is not being filled and
+        # shift mask accordingly
+        jge     _no_extra_mask_2_\@
+        sub     %r10, %r12
+_no_extra_mask_2_\@:
+
+        vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+        # get the appropriate mask to mask out bottom r13 bytes of xmm9
+        vpand   %xmm1, %xmm9, %xmm9
+
+        vmovdqa SHUF_MASK(%rip), %xmm1
+        vpshufb %xmm1, %xmm9, %xmm9
+        vpshufb %xmm2, %xmm9, %xmm9
+        vpxor   %xmm9, \AAD_HASH, \AAD_HASH
+
+        cmp     $0, %r10
+        jl      _partial_incomplete_2_\@
+
+        # GHASH computation for the last <16 Byte block
+        \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+        xor     %eax,%eax
+
+        mov     %rax, PBlockLen(arg2)
+        jmp     _encode_done_\@
+_partial_incomplete_2_\@:
+        add     \PLAIN_CYPH_LEN, PBlockLen(arg2)
+_encode_done_\@:
+        vmovdqu \AAD_HASH, AadHash(arg2)
+
+        vmovdqa SHUF_MASK(%rip), %xmm10
+        # shuffle xmm9 back to output as ciphertext
+        vpshufb %xmm10, %xmm9, %xmm9
+        vpshufb %xmm2, %xmm9, %xmm9
+.endif
+        # output encrypted Bytes
+        cmp     $0, %r10
+        jl      _partial_fill_\@
+        mov     %r13, %r12
+        mov     $16, %r13
+        # Set r13 to be the number of bytes to write out
+        sub     %r12, %r13
+        jmp     _count_set_\@
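+        # Below: _count_set_ stores %r13 bytes of the output block %xmm9 to
+        # CYPH_PLAIN_OUT at DATA_OFFSET, eight bytes at a time while
+        # possible, then byte by byte.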
+_partial_fill_\@: + mov \PLAIN_CYPH_LEN, %r13 +_count_set_\@: + vmovdqa %xmm9, %xmm0 + vmovq %xmm0, %rax + cmp $8, %r13 + jle _less_than_8_bytes_left_\@ + + mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) + add $8, \DATA_OFFSET + psrldq $8, %xmm0 + vmovq %xmm0, %rax + sub $8, %r13 +_less_than_8_bytes_left_\@: + movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) + add $1, \DATA_OFFSET + shr $8, %rax + sub $1, %r13 + jne _less_than_8_bytes_left_\@ +_partial_block_done_\@: +.endm # PARTIAL_BLOCK + #ifdef CONFIG_AS_AVX ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) @@ -341,49 +947,49 @@ VARIABLE_OFFSET = 16*8 vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_k(arg1) + vmovdqu \T1, HashKey_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly - vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly + vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_2_k(arg1) + vmovdqu \T1, HashKey_2_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly - vmovdqa \T5, HashKey_3(arg1) + vmovdqu \T5, HashKey_3(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_3_k(arg1) + vmovdqu \T1, HashKey_3_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly - vmovdqa \T5, HashKey_4(arg1) + vmovdqu \T5, HashKey_4(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_4_k(arg1) + vmovdqu \T1, HashKey_4_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly - vmovdqa \T5, HashKey_5(arg1) + vmovdqu \T5, HashKey_5(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_5_k(arg1) + vmovdqu \T1, HashKey_5_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly - vmovdqa \T5, HashKey_6(arg1) + vmovdqu \T5, HashKey_6(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_6_k(arg1) + vmovdqu \T1, HashKey_6_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly - vmovdqa \T5, HashKey_7(arg1) + vmovdqu \T5, HashKey_7(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_7_k(arg1) + vmovdqu \T1, HashKey_7_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly - vmovdqa \T5, HashKey_8(arg1) + vmovdqu \T5, HashKey_8(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 - vmovdqa \T1, HashKey_8_k(arg1) + vmovdqu \T1, HashKey_8_k(arg2) .endm @@ -392,84 +998,15 @@ VARIABLE_OFFSET = 16*8 ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered -## arg1, arg2, arg3, r14 are used as a pointer only, not modified +## arg1, arg3, arg4, r14 are used as a pointer only, not modified -.macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC +.macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC i = (8-\num_initial_blocks) - j = 0 setreg - - mov arg6, %r10 # r10 = AAD - mov arg7, %r12 # r12 = aadLen - - - mov %r12, %r11 - - vpxor reg_j, reg_j, reg_j - vpxor reg_i, reg_i, reg_i - cmp $16, %r11 - jl _get_AAD_rest8\@ -_get_AAD_blocks\@: - vmovdqu (%r10), reg_i - vpshufb 
SHUF_MASK(%rip), reg_i, reg_i - vpxor reg_i, reg_j, reg_j - GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 - add $16, %r10 - sub $16, %r12 - sub $16, %r11 - cmp $16, %r11 - jge _get_AAD_blocks\@ - vmovdqu reg_j, reg_i - cmp $0, %r11 - je _get_AAD_done\@ - - vpxor reg_i, reg_i, reg_i - - /* read the last <16B of AAD. since we have at least 4B of - data right after the AAD (the ICV, and maybe some CT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\@: - cmp $4, %r11 - jle _get_AAD_rest4\@ - movq (%r10), \T1 - add $8, %r10 - sub $8, %r11 - vpslldq $8, \T1, \T1 - vpsrldq $8, reg_i, reg_i - vpxor \T1, reg_i, reg_i - jmp _get_AAD_rest8\@ -_get_AAD_rest4\@: - cmp $0, %r11 - jle _get_AAD_rest0\@ - mov (%r10), %eax - movq %rax, \T1 - add $4, %r10 - sub $4, %r11 - vpslldq $12, \T1, \T1 - vpsrldq $4, reg_i, reg_i - vpxor \T1, reg_i, reg_i -_get_AAD_rest0\@: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \T1 - vpshufb \T1, reg_i, reg_i -_get_AAD_rest_final\@: - vpshufb SHUF_MASK(%rip), reg_i, reg_i - vpxor reg_j, reg_i, reg_i - GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6 - -_get_AAD_done\@: - # initialize the data pointer offset as zero - xor %r11d, %r11d + vmovdqu AadHash(arg2), reg_i # start AES for num_initial_blocks blocks - mov arg5, %rax # rax = *Y0 - vmovdqu (%rax), \CTR # CTR = Y0 - vpshufb SHUF_MASK(%rip), \CTR, \CTR - + vmovdqu CurCount(arg2), \CTR i = (9-\num_initial_blocks) setreg @@ -490,10 +1027,10 @@ _get_AAD_done\@: setreg .endr - j = 1 - setreg -.rep 9 - vmovdqa 16*j(arg1), \T_key + j = 1 + setreg +.rep \REP + vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks @@ -502,12 +1039,11 @@ _get_AAD_done\@: setreg .endr - j = (j+1) - setreg + j = (j+1) + setreg .endr - - vmovdqa 16*10(arg1), \T_key + vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks @@ -519,9 +1055,9 @@ _get_AAD_done\@: i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks - vmovdqu (arg3, %r11), \T1 + vmovdqu (arg4, %r11), \T1 vpxor \T1, reg_i, reg_i - vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for num_initial_blocks blocks + vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for num_initial_blocks blocks add $16, %r11 .if \ENC_DEC == DEC vmovdqa \T1, reg_i @@ -595,9 +1131,9 @@ _get_AAD_done\@: vpxor \T_key, \XMM7, \XMM7 vpxor \T_key, \XMM8, \XMM8 - i = 1 - setreg -.rep 9 # do 9 rounds + i = 1 + setreg +.rep \REP # do REP rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, \XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 @@ -607,11 +1143,10 @@ _get_AAD_done\@: vaesenc \T_key, \XMM6, \XMM6 vaesenc \T_key, \XMM7, \XMM7 vaesenc \T_key, \XMM8, \XMM8 - i = (i+1) - setreg + i = (i+1) + setreg .endr - vmovdqa 16*i(arg1), \T_key vaesenclast \T_key, \XMM1, \XMM1 vaesenclast \T_key, \XMM2, \XMM2 @@ -622,58 +1157,58 @@ _get_AAD_done\@: vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 - vmovdqu (arg3, %r11), \T1 + vmovdqu (arg4, %r11), \T1 vpxor \T1, \XMM1, \XMM1 - vmovdqu \XMM1, (arg2 , %r11) + vmovdqu \XMM1, (arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif - vmovdqu 16*1(arg3, %r11), \T1 + vmovdqu 16*1(arg4, %r11), \T1 vpxor \T1, \XMM2, \XMM2 - vmovdqu \XMM2, 16*1(arg2 , %r11) + vmovdqu \XMM2, 16*1(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif - vmovdqu 16*2(arg3, %r11), \T1 + vmovdqu 
16*2(arg4, %r11), \T1 vpxor \T1, \XMM3, \XMM3 - vmovdqu \XMM3, 16*2(arg2 , %r11) + vmovdqu \XMM3, 16*2(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif - vmovdqu 16*3(arg3, %r11), \T1 + vmovdqu 16*3(arg4, %r11), \T1 vpxor \T1, \XMM4, \XMM4 - vmovdqu \XMM4, 16*3(arg2 , %r11) + vmovdqu \XMM4, 16*3(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif - vmovdqu 16*4(arg3, %r11), \T1 + vmovdqu 16*4(arg4, %r11), \T1 vpxor \T1, \XMM5, \XMM5 - vmovdqu \XMM5, 16*4(arg2 , %r11) + vmovdqu \XMM5, 16*4(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif - vmovdqu 16*5(arg3, %r11), \T1 + vmovdqu 16*5(arg4, %r11), \T1 vpxor \T1, \XMM6, \XMM6 - vmovdqu \XMM6, 16*5(arg2 , %r11) + vmovdqu \XMM6, 16*5(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif - vmovdqu 16*6(arg3, %r11), \T1 + vmovdqu 16*6(arg4, %r11), \T1 vpxor \T1, \XMM7, \XMM7 - vmovdqu \XMM7, 16*6(arg2 , %r11) + vmovdqu \XMM7, 16*6(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif - vmovdqu 16*7(arg3, %r11), \T1 + vmovdqu 16*7(arg4, %r11), \T1 vpxor \T1, \XMM8, \XMM8 - vmovdqu \XMM8, 16*7(arg2 , %r11) + vmovdqu \XMM8, 16*7(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM8 .endif @@ -698,9 +1233,9 @@ _initial_blocks_done\@: # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks -# arg1, arg2, arg3 are used as pointers only, not modified +# arg1, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value -.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC +.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) @@ -784,14 +1319,14 @@ _initial_blocks_done\@: ####################################################################### - vmovdqa HashKey_8(arg1), \T5 + vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpshufd $0b01001110, \T2, \T6 vpxor \T2, \T6, \T6 - vmovdqa HashKey_8_k(arg1), \T5 + vmovdqu HashKey_8_k(arg2), \T5 vpclmulqdq $0x00, \T5, \T6, \T6 vmovdqu 16*3(arg1), \T1 @@ -805,7 +1340,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 - vmovdqa HashKey_7(arg1), \T5 + vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -813,7 +1348,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_7_k(arg1), \T5 + vmovdqu HashKey_7_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -830,7 +1365,7 @@ _initial_blocks_done\@: ####################################################################### vmovdqa TMP3(%rsp), \T1 - vmovdqa HashKey_6(arg1), \T5 + vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -838,7 +1373,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_6_k(arg1), \T5 + vmovdqu HashKey_6_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -853,7 +1388,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 - vmovdqa HashKey_5(arg1), \T5 + vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -861,7 +1396,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_5_k(arg1), \T5 + vmovdqu HashKey_5_k(arg2), \T5 
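+	# (note) each block is paired with a descending power of the hash key,
+	# so the eight Karatsuba products can simply be accumulated: this pass
+	# computes X1*H^8 + X2*H^7 + ... + X8*H^1 over GF(2^128).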
vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -877,7 +1412,7 @@ _initial_blocks_done\@: vmovdqa TMP5(%rsp), \T1 - vmovdqa HashKey_4(arg1), \T5 + vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -885,7 +1420,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_4_k(arg1), \T5 + vmovdqu HashKey_4_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -900,7 +1435,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 - vmovdqa HashKey_3(arg1), \T5 + vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -908,7 +1443,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_3_k(arg1), \T5 + vmovdqu HashKey_3_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -924,7 +1459,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 - vmovdqa HashKey_2(arg1), \T5 + vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -932,7 +1467,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_2_k(arg1), \T5 + vmovdqu HashKey_2_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -949,7 +1484,7 @@ _initial_blocks_done\@: vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 - vmovdqa HashKey(arg1), \T5 + vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 @@ -957,7 +1492,7 @@ _initial_blocks_done\@: vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 - vmovdqa HashKey_k(arg1), \T5 + vmovdqu HashKey_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 @@ -966,17 +1501,35 @@ _initial_blocks_done\@: vmovdqu 16*10(arg1), \T5 + i = 11 + setreg +.rep (\REP-9) + + vaesenc \T5, \XMM1, \XMM1 + vaesenc \T5, \XMM2, \XMM2 + vaesenc \T5, \XMM3, \XMM3 + vaesenc \T5, \XMM4, \XMM4 + vaesenc \T5, \XMM5, \XMM5 + vaesenc \T5, \XMM6, \XMM6 + vaesenc \T5, \XMM7, \XMM7 + vaesenc \T5, \XMM8, \XMM8 + + vmovdqu 16*i(arg1), \T5 + i = i + 1 + setreg +.endr + i = 0 j = 1 setreg .rep 8 - vpxor 16*i(arg3, %r11), \T5, \T2 + vpxor 16*i(arg4, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 - vmovdqu 16*i(arg3, %r11), reg_j - vmovdqu \T3, 16*i(arg2, %r11) + vmovdqu 16*i(arg4, %r11), reg_j + vmovdqu \T3, 16*i(arg3, %r11) .endif i = (i+1) j = (j+1) @@ -1008,14 +1561,14 @@ _initial_blocks_done\@: vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### .if \ENC_DEC == ENC - vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer 
+ vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer .endif ####################################################################### @@ -1056,25 +1609,25 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM1, \T2 vpxor \XMM1, \T2, \T2 - vmovdqa HashKey_8(arg1), \T5 + vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM1, \T6 vpclmulqdq $0x00, \T5, \XMM1, \T7 - vmovdqa HashKey_8_k(arg1), \T3 + vmovdqu HashKey_8_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \XMM1 ###################### vpshufd $0b01001110, \XMM2, \T2 vpxor \XMM2, \T2, \T2 - vmovdqa HashKey_7(arg1), \T5 + vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM2, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM2, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_7_k(arg1), \T3 + vmovdqu HashKey_7_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1082,14 +1635,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM3, \T2 vpxor \XMM3, \T2, \T2 - vmovdqa HashKey_6(arg1), \T5 + vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM3, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM3, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_6_k(arg1), \T3 + vmovdqu HashKey_6_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1097,14 +1650,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM4, \T2 vpxor \XMM4, \T2, \T2 - vmovdqa HashKey_5(arg1), \T5 + vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM4, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM4, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_5_k(arg1), \T3 + vmovdqu HashKey_5_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1112,14 +1665,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM5, \T2 vpxor \XMM5, \T2, \T2 - vmovdqa HashKey_4(arg1), \T5 + vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM5, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM5, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_4_k(arg1), \T3 + vmovdqu HashKey_4_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1127,14 +1680,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM6, \T2 vpxor \XMM6, \T2, \T2 - vmovdqa HashKey_3(arg1), \T5 + vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM6, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM6, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_3_k(arg1), \T3 + vmovdqu HashKey_3_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1142,14 +1695,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM7, \T2 vpxor \XMM7, \T2, \T2 - vmovdqa HashKey_2(arg1), \T5 + vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM7, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM7, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_2_k(arg1), \T3 + vmovdqu HashKey_2_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1157,14 +1710,14 @@ _initial_blocks_done\@: vpshufd $0b01001110, \XMM8, \T2 vpxor \XMM8, \T2, \T2 - vmovdqa HashKey(arg1), \T5 + vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM8, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM8, \T4 vpxor \T4, \T7, \T7 - vmovdqa HashKey_k(arg1), \T3 + vmovdqu HashKey_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 @@ -1210,413 +1763,112 @@ _initial_blocks_done\@: .endm - -# combined for GCM encrypt and decrypt functions -# clobbering all xmm registers -# clobbering 
r10, r11, r12, r13, r14, r15 -.macro GCM_ENC_DEC_AVX ENC_DEC - - #the number of pushes must equal STACK_OFFSET - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, %r14 - - - - - sub $VARIABLE_OFFSET, %rsp - and $~63, %rsp # align rsp to 64 bytes - - - vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey - - mov arg4, %r13 # save the number of bytes of plaintext/ciphertext - and $-16, %r13 # r13 = r13 - (r13 mod 16) - - mov %r13, %r12 - shr $4, %r12 - and $7, %r12 - jz _initial_num_blocks_is_0\@ - - cmp $7, %r12 - je _initial_num_blocks_is_7\@ - cmp $6, %r12 - je _initial_num_blocks_is_6\@ - cmp $5, %r12 - je _initial_num_blocks_is_5\@ - cmp $4, %r12 - je _initial_num_blocks_is_4\@ - cmp $3, %r12 - je _initial_num_blocks_is_3\@ - cmp $2, %r12 - je _initial_num_blocks_is_2\@ - - jmp _initial_num_blocks_is_1\@ - -_initial_num_blocks_is_7\@: - INITIAL_BLOCKS_AVX 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*7, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_6\@: - INITIAL_BLOCKS_AVX 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*6, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_5\@: - INITIAL_BLOCKS_AVX 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*5, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_4\@: - INITIAL_BLOCKS_AVX 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*4, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_3\@: - INITIAL_BLOCKS_AVX 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*3, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_2\@: - INITIAL_BLOCKS_AVX 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*2, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_1\@: - INITIAL_BLOCKS_AVX 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*1, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_0\@: - INITIAL_BLOCKS_AVX 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - - -_initial_blocks_encrypted\@: - cmp $0, %r13 - je _zero_cipher_left\@ - - sub $128, %r13 - je _eight_cipher_left\@ - - - - - vmovd %xmm9, %r15d - and $255, %r15d - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - - -_encrypt_by_8_new\@: - cmp $(255-8), %r15d - jg _encrypt_by_8\@ - - - - add $8, %r15b - GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC - add $128, %r11 - sub $128, %r13 - jne _encrypt_by_8_new\@ - - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - jmp _eight_cipher_left\@ - -_encrypt_by_8\@: - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - add $8, %r15b - GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - add $128, %r11 - sub 
$128, %r13 - jne _encrypt_by_8_new\@ - - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - - - - -_eight_cipher_left\@: - GHASH_LAST_8_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 - - -_zero_cipher_left\@: - cmp $16, arg4 - jl _only_less_than_16\@ - - mov arg4, %r13 - and $15, %r13 # r13 = (arg4 mod 16) - - je _multiple_of_16_bytes\@ - - # handle the last <16 Byte block seperately - - - vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) - - sub $16, %r11 - add %r13, %r11 - vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block - - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 # adjust the shuffle mask pointer to be - # able to shift 16-r13 bytes (r13 is the - # number of bytes in plaintext mod 16) - vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask - vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes - jmp _final_ghash_mul\@ - -_only_less_than_16\@: - # check for 0 length - mov arg4, %r13 - and $15, %r13 # r13 = (arg4 mod 16) - - je _multiple_of_16_bytes\@ - - # handle the last <16 Byte block seperately - - - vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) - - - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 # adjust the shuffle mask pointer to be - # able to shift 16-r13 bytes (r13 is the - # number of bytes in plaintext mod 16) - -_get_last_16_byte_loop\@: - movb (arg3, %r11), %al - movb %al, TMP1 (%rsp , %r11) - add $1, %r11 - cmp %r13, %r11 - jne _get_last_16_byte_loop\@ - - vmovdqu TMP1(%rsp), %xmm1 - - sub $16, %r11 - -_final_ghash_mul\@: - .if \ENC_DEC == DEC - vmovdqa %xmm1, %xmm2 - vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) - vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to - # mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm2, %xmm2 - vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 - vpxor %xmm2, %xmm14, %xmm14 - #GHASH computation for the last <16 Byte block - GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 - sub %r13, %r11 - add $16, %r11 - .else - vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) - vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to - # mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - vpxor %xmm9, %xmm14, %xmm14 - #GHASH computation for the last <16 Byte block - GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 - sub %r13, %r11 - add $16, %r11 - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext - .endif - - - ############################# - # output r13 Bytes - vmovq %xmm9, %rax - cmp $8, %r13 - jle _less_than_8_bytes_left\@ - - mov %rax, (arg2 , %r11) - add $8, %r11 - vpsrldq $8, %xmm9, %xmm9 - vmovq %xmm9, %rax - sub $8, %r13 - -_less_than_8_bytes_left\@: - movb %al, (arg2 , %r11) - add $1, %r11 - shr $8, %rax - sub $1, %r13 - jne _less_than_8_bytes_left\@ - ############################# - -_multiple_of_16_bytes\@: - mov arg7, %r12 # r12 = aadLen (number of bytes) - shl $3, %r12 # convert into number of bits - vmovd %r12d, %xmm15 # len(A) in xmm15 - - shl $3, arg4 # len(C) in bits (*128) - vmovq arg4, %xmm1 - vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 - vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) - - vpxor %xmm15, %xmm14, %xmm14 - 
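At this point the removed _multiple_of_16_bytes path has packed len(A)||len(C) into xmm15 and XORed it into the running hash; the GHASH_MUL_AVX that follows is the final multiply, after which the result is XORed with E(K, Y0) to form the tag. A minimal C sketch of that length block, assuming the GCM spec's big-endian bit-length encoding (the assembly instead keeps a byte-reflected state and corrects it with SHUF_MASK); illustrative only, not kernel code:

	#include <stdint.h>

	/*
	 * Build the final GHASH length block: len(A) and len(C), both in
	 * bits, as two big-endian 64-bit fields. Mirrors the shl $3 /
	 * vpslldq $8 / vpxor sequence in the assembly above.
	 */
	static void gcm_len_block(uint8_t blk[16], uint64_t aad_bytes,
				  uint64_t text_bytes)
	{
		uint64_t a_bits = aad_bytes * 8;	/* shl $3, %r12 */
		uint64_t c_bits = text_bytes * 8;	/* shl $3, arg4 */
		int i;

		for (i = 0; i < 8; i++) {
			blk[7 - i]  = (uint8_t)(a_bits >> (8 * i));
			blk[15 - i] = (uint8_t)(c_bits >> (8 * i));
		}
	}
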
GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation - vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap - - mov arg5, %rax # rax = *Y0 - vmovdqu (%rax), %xmm9 # xmm9 = Y0 - - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0) - - vpxor %xmm14, %xmm9, %xmm9 - - - -_return_T\@: - mov arg8, %r10 # r10 = authTag - mov arg9, %r11 # r11 = auth_tag_len - - cmp $16, %r11 - je _T_16\@ - - cmp $8, %r11 - jl _T_4\@ - -_T_8\@: - vmovq %xmm9, %rax - mov %rax, (%r10) - add $8, %r10 - sub $8, %r11 - vpsrldq $8, %xmm9, %xmm9 - cmp $0, %r11 - je _return_T_done\@ -_T_4\@: - vmovd %xmm9, %eax - mov %eax, (%r10) - add $4, %r10 - sub $4, %r11 - vpsrldq $4, %xmm9, %xmm9 - cmp $0, %r11 - je _return_T_done\@ -_T_123\@: - vmovd %xmm9, %eax - cmp $2, %r11 - jl _T_1\@ - mov %ax, (%r10) - cmp $2, %r11 - je _return_T_done\@ - add $2, %r10 - sar $16, %eax -_T_1\@: - mov %al, (%r10) - jmp _return_T_done\@ - -_T_16\@: - vmovdqu %xmm9, (%r10) - -_return_T_done\@: - mov %r14, %rsp - - pop %r15 - pop %r14 - pop %r13 - pop %r12 -.endm - - ############################################################# #void aesni_gcm_precomp_avx_gen2 # (gcm_data *my_ctx_data, -# u8 *hash_subkey)# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */ +# gcm_context_data *data, +# u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */ +# u8 *iv, /* Pre-counter block j0: 4 byte salt +# (from Security Association) concatenated with 8 byte +# Initialisation Vector (from IPSec ESP Payload) +# concatenated with 0x00000001. 16-byte aligned pointer. */ +# const u8 *aad, /* Additional Authentication Data (AAD)*/ +# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_precomp_avx_gen2) - #the number of pushes must equal STACK_OFFSET - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, %r14 - - - - sub $VARIABLE_OFFSET, %rsp - and $~63, %rsp # align rsp to 64 bytes - - vmovdqu (arg2), %xmm6 # xmm6 = HashKey - - vpshufb SHUF_MASK(%rip), %xmm6, %xmm6 - ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey - vmovdqa %xmm6, %xmm2 - vpsllq $1, %xmm6, %xmm6 - vpsrlq $63, %xmm2, %xmm2 - vmovdqa %xmm2, %xmm1 - vpslldq $8, %xmm2, %xmm2 - vpsrldq $8, %xmm1, %xmm1 - vpor %xmm2, %xmm6, %xmm6 - #reduction - vpshufd $0b00100100, %xmm1, %xmm2 - vpcmpeqd TWOONE(%rip), %xmm2, %xmm2 - vpand POLY(%rip), %xmm2, %xmm2 - vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly - ####################################################################### - vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly - - - PRECOMPUTE_AVX %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5 - - mov %r14, %rsp - - pop %r15 - pop %r14 - pop %r13 - pop %r12 +ENTRY(aesni_gcm_init_avx_gen2) + FUNC_SAVE + INIT GHASH_MUL_AVX, PRECOMPUTE_AVX + FUNC_RESTORE ret -ENDPROC(aesni_gcm_precomp_avx_gen2) +ENDPROC(aesni_gcm_init_avx_gen2) ############################################################################### -#void aesni_gcm_enc_avx_gen2( +#void aesni_gcm_enc_update_avx_gen2( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */ # const u8 *in, /* Plaintext input */ -# u64 plaintext_len, /* Length of data in Bytes for encryption. 
*/ -# u8 *iv, /* Pre-counter block j0: 4 byte salt -# (from Security Association) concatenated with 8 byte -# Initialisation Vector (from IPSec ESP Payload) -# concatenated with 0x00000001. 16-byte aligned pointer. */ -# const u8 *aad, /* Additional Authentication Data (AAD)*/ -# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ -# u8 *auth_tag, /* Authenticated Tag output. */ -# u64 auth_tag_len)# /* Authenticated Tag Length in bytes. -# Valid values are 16 (most likely), 12 or 8. */ +# u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_enc_avx_gen2) - GCM_ENC_DEC_AVX ENC - ret -ENDPROC(aesni_gcm_enc_avx_gen2) +ENTRY(aesni_gcm_enc_update_avx_gen2) + FUNC_SAVE + mov keysize, %eax + cmp $32, %eax + je key_256_enc_update + cmp $16, %eax + je key_128_enc_update + # must be 192 + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11 + FUNC_RESTORE + ret +key_128_enc_update: + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9 + FUNC_RESTORE + ret +key_256_enc_update: + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 + FUNC_RESTORE + ret +ENDPROC(aesni_gcm_enc_update_avx_gen2) ############################################################################### -#void aesni_gcm_dec_avx_gen2( +#void aesni_gcm_dec_update_avx_gen2( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *out, /* Plaintext output. Decrypt in-place is allowed. */ # const u8 *in, /* Ciphertext input */ -# u64 plaintext_len, /* Length of data in Bytes for encryption. */ -# u8 *iv, /* Pre-counter block j0: 4 byte salt -# (from Security Association) concatenated with 8 byte -# Initialisation Vector (from IPSec ESP Payload) -# concatenated with 0x00000001. 16-byte aligned pointer. */ -# const u8 *aad, /* Additional Authentication Data (AAD)*/ -# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ +# u64 plaintext_len) /* Length of data in Bytes for encryption. */ +############################################################################### +ENTRY(aesni_gcm_dec_update_avx_gen2) + FUNC_SAVE + mov keysize,%eax + cmp $32, %eax + je key_256_dec_update + cmp $16, %eax + je key_128_dec_update + # must be 192 + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11 + FUNC_RESTORE + ret +key_128_dec_update: + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9 + FUNC_RESTORE + ret +key_256_dec_update: + GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 + FUNC_RESTORE + ret +ENDPROC(aesni_gcm_dec_update_avx_gen2) + +############################################################################### +#void aesni_gcm_finalize_avx_gen2( +# gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. 
*/ ############################################################################### -ENTRY(aesni_gcm_dec_avx_gen2) - GCM_ENC_DEC_AVX DEC - ret -ENDPROC(aesni_gcm_dec_avx_gen2) +ENTRY(aesni_gcm_finalize_avx_gen2) + FUNC_SAVE + mov keysize,%eax + cmp $32, %eax + je key_256_finalize + cmp $16, %eax + je key_128_finalize + # must be 192 + GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4 + FUNC_RESTORE + ret +key_128_finalize: + GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4 + FUNC_RESTORE + ret +key_256_finalize: + GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 + FUNC_RESTORE + ret +ENDPROC(aesni_gcm_finalize_avx_gen2) + #endif /* CONFIG_AS_AVX */ #ifdef CONFIG_AS_AVX2 @@ -1670,113 +1922,42 @@ ENDPROC(aesni_gcm_dec_avx_gen2) # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vmovdqa \HK, \T5 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly - vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly + vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly - vmovdqa \T5, HashKey_3(arg1) + vmovdqu \T5, HashKey_3(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly - vmovdqa \T5, HashKey_4(arg1) + vmovdqu \T5, HashKey_4(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly - vmovdqa \T5, HashKey_5(arg1) + vmovdqu \T5, HashKey_5(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly - vmovdqa \T5, HashKey_6(arg1) + vmovdqu \T5, HashKey_6(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly - vmovdqa \T5, HashKey_7(arg1) + vmovdqu \T5, HashKey_7(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly - vmovdqa \T5, HashKey_8(arg1) + vmovdqu \T5, HashKey_8(arg2) .endm - ## if a = number of total plaintext bytes ## b = floor(a/16) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered -## arg1, arg2, arg3, r14 are used as a pointer only, not modified +## arg1, arg3, arg4, r14 are used as a pointer only, not modified -.macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER +.macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER i = (8-\num_initial_blocks) - j = 0 setreg - - mov arg6, %r10 # r10 = AAD - mov arg7, %r12 # r12 = aadLen - - - mov %r12, %r11 - - vpxor reg_j, reg_j, reg_j - vpxor reg_i, reg_i, reg_i - - cmp $16, %r11 - jl _get_AAD_rest8\@ -_get_AAD_blocks\@: - vmovdqu (%r10), reg_i - vpshufb SHUF_MASK(%rip), reg_i, reg_i - vpxor reg_i, reg_j, reg_j - GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 - add $16, %r10 - sub $16, %r12 - sub $16, %r11 - cmp $16, %r11 - jge _get_AAD_blocks\@ - vmovdqu reg_j, reg_i - cmp $0, %r11 - je _get_AAD_done\@ - - vpxor reg_i, reg_i, reg_i - - /* read the last <16B of AAD. 
since we have at least 4B of - data right after the AAD (the ICV, and maybe some CT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\@: - cmp $4, %r11 - jle _get_AAD_rest4\@ - movq (%r10), \T1 - add $8, %r10 - sub $8, %r11 - vpslldq $8, \T1, \T1 - vpsrldq $8, reg_i, reg_i - vpxor \T1, reg_i, reg_i - jmp _get_AAD_rest8\@ -_get_AAD_rest4\@: - cmp $0, %r11 - jle _get_AAD_rest0\@ - mov (%r10), %eax - movq %rax, \T1 - add $4, %r10 - sub $4, %r11 - vpslldq $12, \T1, \T1 - vpsrldq $4, reg_i, reg_i - vpxor \T1, reg_i, reg_i -_get_AAD_rest0\@: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \T1 - vpshufb \T1, reg_i, reg_i -_get_AAD_rest_final\@: - vpshufb SHUF_MASK(%rip), reg_i, reg_i - vpxor reg_j, reg_i, reg_i - GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6 - -_get_AAD_done\@: - # initialize the data pointer offset as zero - xor %r11d, %r11d + vmovdqu AadHash(arg2), reg_i # start AES for num_initial_blocks blocks - mov arg5, %rax # rax = *Y0 - vmovdqu (%rax), \CTR # CTR = Y0 - vpshufb SHUF_MASK(%rip), \CTR, \CTR - + vmovdqu CurCount(arg2), \CTR i = (9-\num_initial_blocks) setreg @@ -1799,7 +1980,7 @@ _get_AAD_done\@: j = 1 setreg -.rep 9 +.rep \REP vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg @@ -1814,7 +1995,7 @@ _get_AAD_done\@: .endr - vmovdqa 16*10(arg1), \T_key + vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks @@ -1826,9 +2007,9 @@ _get_AAD_done\@: i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks - vmovdqu (arg3, %r11), \T1 + vmovdqu (arg4, %r11), \T1 vpxor \T1, reg_i, reg_i - vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for + vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for # num_initial_blocks blocks add $16, %r11 .if \ENC_DEC == DEC @@ -1905,7 +2086,7 @@ _get_AAD_done\@: i = 1 setreg -.rep 9 # do 9 rounds +.rep \REP # do REP rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, \XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 @@ -1930,58 +2111,58 @@ _get_AAD_done\@: vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 - vmovdqu (arg3, %r11), \T1 + vmovdqu (arg4, %r11), \T1 vpxor \T1, \XMM1, \XMM1 - vmovdqu \XMM1, (arg2 , %r11) + vmovdqu \XMM1, (arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif - vmovdqu 16*1(arg3, %r11), \T1 + vmovdqu 16*1(arg4, %r11), \T1 vpxor \T1, \XMM2, \XMM2 - vmovdqu \XMM2, 16*1(arg2 , %r11) + vmovdqu \XMM2, 16*1(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif - vmovdqu 16*2(arg3, %r11), \T1 + vmovdqu 16*2(arg4, %r11), \T1 vpxor \T1, \XMM3, \XMM3 - vmovdqu \XMM3, 16*2(arg2 , %r11) + vmovdqu \XMM3, 16*2(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif - vmovdqu 16*3(arg3, %r11), \T1 + vmovdqu 16*3(arg4, %r11), \T1 vpxor \T1, \XMM4, \XMM4 - vmovdqu \XMM4, 16*3(arg2 , %r11) + vmovdqu \XMM4, 16*3(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif - vmovdqu 16*4(arg3, %r11), \T1 + vmovdqu 16*4(arg4, %r11), \T1 vpxor \T1, \XMM5, \XMM5 - vmovdqu \XMM5, 16*4(arg2 , %r11) + vmovdqu \XMM5, 16*4(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif - vmovdqu 16*5(arg3, %r11), \T1 + vmovdqu 16*5(arg4, %r11), \T1 vpxor \T1, \XMM6, \XMM6 - vmovdqu \XMM6, 16*5(arg2 , %r11) + vmovdqu \XMM6, 16*5(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif - vmovdqu 16*6(arg3, %r11), \T1 + vmovdqu 16*6(arg4, %r11), \T1 vpxor \T1, \XMM7, 
\XMM7 - vmovdqu \XMM7, 16*6(arg2 , %r11) + vmovdqu \XMM7, 16*6(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif - vmovdqu 16*7(arg3, %r11), \T1 + vmovdqu 16*7(arg4, %r11), \T1 vpxor \T1, \XMM8, \XMM8 - vmovdqu \XMM8, 16*7(arg2 , %r11) + vmovdqu \XMM8, 16*7(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM8 .endif @@ -2010,9 +2191,9 @@ _initial_blocks_done\@: # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks -# arg1, arg2, arg3 are used as pointers only, not modified +# arg1, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value -.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC +.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) @@ -2096,7 +2277,7 @@ _initial_blocks_done\@: ####################################################################### - vmovdqa HashKey_8(arg1), \T5 + vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpclmulqdq $0x01, \T5, \T2, \T6 # T6 = a1*b0 @@ -2114,7 +2295,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 - vmovdqa HashKey_7(arg1), \T5 + vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2140,7 +2321,7 @@ _initial_blocks_done\@: ####################################################################### vmovdqa TMP3(%rsp), \T1 - vmovdqa HashKey_6(arg1), \T5 + vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2164,7 +2345,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 - vmovdqa HashKey_5(arg1), \T5 + vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2189,7 +2370,7 @@ _initial_blocks_done\@: vmovdqa TMP5(%rsp), \T1 - vmovdqa HashKey_4(arg1), \T5 + vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2213,7 +2394,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 - vmovdqa HashKey_3(arg1), \T5 + vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2237,7 +2418,7 @@ _initial_blocks_done\@: vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 - vmovdqa HashKey_2(arg1), \T5 + vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 @@ -2264,7 +2445,7 @@ _initial_blocks_done\@: vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 - vmovdqa HashKey(arg1), \T5 + vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 @@ -2281,17 +2462,34 @@ _initial_blocks_done\@: vmovdqu 16*10(arg1), \T5 + i = 11 + setreg +.rep (\REP-9) + vaesenc \T5, \XMM1, \XMM1 + vaesenc \T5, \XMM2, \XMM2 + vaesenc \T5, \XMM3, \XMM3 + vaesenc \T5, \XMM4, \XMM4 + vaesenc \T5, \XMM5, \XMM5 + vaesenc \T5, \XMM6, \XMM6 + vaesenc \T5, \XMM7, \XMM7 + vaesenc \T5, \XMM8, \XMM8 + + vmovdqu 16*i(arg1), \T5 + i = i + 1 + setreg +.endr + i = 0 j = 1 setreg .rep 8 - vpxor 16*i(arg3, %r11), \T5, \T2 + vpxor 16*i(arg4, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 - vmovdqu 16*i(arg3, %r11), reg_j - vmovdqu \T3, 16*i(arg2, %r11) + vmovdqu 16*i(arg4, %r11), reg_j + vmovdqu \T3, 16*i(arg3, %r11) .endif i = (i+1) j = (j+1) @@ -2317,14 +2515,14 @@ _initial_blocks_done\@: vpxor \T2, \T7, \T7 # first phase of the reduction complete 
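The `.rep (\REP-9)` block added just above is what lets one macro body serve every key size: it issues the vaesenc rounds beyond AES-128's nine before the closing vaesenclast. The rewritten entry points select REP from the key length, and a sketch of that mapping, assuming the usual AES round counts (the helper name is illustrative):

	/*
	 * AES-128/192/256 run 10/12/14 rounds; the macros take
	 * REP = rounds - 1 (9/11/13) because the last round is
	 * issued separately with vaesenclast.
	 */
	static int gcm_rep_for_keylen(unsigned int key_len)
	{
		switch (key_len) {
		case 16: return 9;	/* AES-128 */
		case 24: return 11;	/* AES-192, the "must be 192" case */
		case 32: return 13;	/* AES-256 */
		}
		return -1;		/* rejected earlier by set_key */
	}
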
####################################################################### .if \ENC_DEC == ENC - vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer - vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer + vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer .endif ####################################################################### @@ -2361,7 +2559,7 @@ _initial_blocks_done\@: ## Karatsuba Method - vmovdqa HashKey_8(arg1), \T5 + vmovdqu HashKey_8(arg2), \T5 vpshufd $0b01001110, \XMM1, \T2 vpshufd $0b01001110, \T5, \T3 @@ -2375,7 +2573,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_7(arg1), \T5 + vmovdqu HashKey_7(arg2), \T5 vpshufd $0b01001110, \XMM2, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM2, \T2, \T2 @@ -2393,7 +2591,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_6(arg1), \T5 + vmovdqu HashKey_6(arg2), \T5 vpshufd $0b01001110, \XMM3, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM3, \T2, \T2 @@ -2411,7 +2609,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_5(arg1), \T5 + vmovdqu HashKey_5(arg2), \T5 vpshufd $0b01001110, \XMM4, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM4, \T2, \T2 @@ -2429,7 +2627,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_4(arg1), \T5 + vmovdqu HashKey_4(arg2), \T5 vpshufd $0b01001110, \XMM5, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM5, \T2, \T2 @@ -2447,7 +2645,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_3(arg1), \T5 + vmovdqu HashKey_3(arg2), \T5 vpshufd $0b01001110, \XMM6, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM6, \T2, \T2 @@ -2465,7 +2663,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey_2(arg1), \T5 + vmovdqu HashKey_2(arg2), \T5 vpshufd $0b01001110, \XMM7, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM7, \T2, \T2 @@ -2483,7 +2681,7 @@ _initial_blocks_done\@: ###################### - vmovdqa HashKey(arg1), \T5 + vmovdqu HashKey(arg2), \T5 vpshufd $0b01001110, \XMM8, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM8, \T2, \T2 @@ -2536,411 +2734,110 @@ _initial_blocks_done\@: -# combined for GCM encrypt and decrypt functions -# clobbering all xmm registers -# clobbering r10, r11, r12, r13, r14, r15 -.macro GCM_ENC_DEC_AVX2 ENC_DEC - - #the number of pushes must equal STACK_OFFSET - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, %r14 - - - - - sub $VARIABLE_OFFSET, %rsp - and $~63, %rsp # align rsp to 64 bytes - - - vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey - - mov arg4, %r13 # save the number of bytes of plaintext/ciphertext - and $-16, %r13 # r13 = r13 - (r13 mod 16) - - mov %r13, %r12 - shr $4, %r12 - and $7, %r12 - jz 
_initial_num_blocks_is_0\@ - - cmp $7, %r12 - je _initial_num_blocks_is_7\@ - cmp $6, %r12 - je _initial_num_blocks_is_6\@ - cmp $5, %r12 - je _initial_num_blocks_is_5\@ - cmp $4, %r12 - je _initial_num_blocks_is_4\@ - cmp $3, %r12 - je _initial_num_blocks_is_3\@ - cmp $2, %r12 - je _initial_num_blocks_is_2\@ - - jmp _initial_num_blocks_is_1\@ - -_initial_num_blocks_is_7\@: - INITIAL_BLOCKS_AVX2 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*7, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_6\@: - INITIAL_BLOCKS_AVX2 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*6, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_5\@: - INITIAL_BLOCKS_AVX2 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*5, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_4\@: - INITIAL_BLOCKS_AVX2 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*4, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_3\@: - INITIAL_BLOCKS_AVX2 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*3, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_2\@: - INITIAL_BLOCKS_AVX2 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*2, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_1\@: - INITIAL_BLOCKS_AVX2 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - sub $16*1, %r13 - jmp _initial_blocks_encrypted\@ - -_initial_num_blocks_is_0\@: - INITIAL_BLOCKS_AVX2 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC - - -_initial_blocks_encrypted\@: - cmp $0, %r13 - je _zero_cipher_left\@ - - sub $128, %r13 - je _eight_cipher_left\@ - - - - - vmovd %xmm9, %r15d - and $255, %r15d - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - - -_encrypt_by_8_new\@: - cmp $(255-8), %r15d - jg _encrypt_by_8\@ - - - - add $8, %r15b - GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC - add $128, %r11 - sub $128, %r13 - jne _encrypt_by_8_new\@ - - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - jmp _eight_cipher_left\@ - -_encrypt_by_8\@: - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - add $8, %r15b - GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - add $128, %r11 - sub $128, %r13 - jne _encrypt_by_8_new\@ - - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - - - - -_eight_cipher_left\@: - GHASH_LAST_8_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 - - -_zero_cipher_left\@: - cmp $16, arg4 - jl _only_less_than_16\@ - - mov arg4, %r13 - and $15, %r13 # r13 = (arg4 mod 16) - - je _multiple_of_16_bytes\@ - - # handle the last <16 Byte block seperately - - - vpaddd ONE(%rip), 
%xmm9, %xmm9 # INCR CNT to get Yn - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) - - sub $16, %r11 - add %r13, %r11 - vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block - - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 # adjust the shuffle mask pointer - # to be able to shift 16-r13 bytes - # (r13 is the number of bytes in plaintext mod 16) - vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask - vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes - jmp _final_ghash_mul\@ - -_only_less_than_16\@: - # check for 0 length - mov arg4, %r13 - and $15, %r13 # r13 = (arg4 mod 16) - - je _multiple_of_16_bytes\@ - - # handle the last <16 Byte block seperately - - - vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) - - - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 # adjust the shuffle mask pointer to be - # able to shift 16-r13 bytes (r13 is the - # number of bytes in plaintext mod 16) - -_get_last_16_byte_loop\@: - movb (arg3, %r11), %al - movb %al, TMP1 (%rsp , %r11) - add $1, %r11 - cmp %r13, %r11 - jne _get_last_16_byte_loop\@ - - vmovdqu TMP1(%rsp), %xmm1 - - sub $16, %r11 - -_final_ghash_mul\@: - .if \ENC_DEC == DEC - vmovdqa %xmm1, %xmm2 - vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) - vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm2, %xmm2 - vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 - vpxor %xmm2, %xmm14, %xmm14 - #GHASH computation for the last <16 Byte block - GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 - sub %r13, %r11 - add $16, %r11 - .else - vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) - vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9 - vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 - vpxor %xmm9, %xmm14, %xmm14 - #GHASH computation for the last <16 Byte block - GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 - sub %r13, %r11 - add $16, %r11 - vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext - .endif - - - ############################# - # output r13 Bytes - vmovq %xmm9, %rax - cmp $8, %r13 - jle _less_than_8_bytes_left\@ - - mov %rax, (arg2 , %r11) - add $8, %r11 - vpsrldq $8, %xmm9, %xmm9 - vmovq %xmm9, %rax - sub $8, %r13 - -_less_than_8_bytes_left\@: - movb %al, (arg2 , %r11) - add $1, %r11 - shr $8, %rax - sub $1, %r13 - jne _less_than_8_bytes_left\@ - ############################# - -_multiple_of_16_bytes\@: - mov arg7, %r12 # r12 = aadLen (number of bytes) - shl $3, %r12 # convert into number of bits - vmovd %r12d, %xmm15 # len(A) in xmm15 - - shl $3, arg4 # len(C) in bits (*128) - vmovq arg4, %xmm1 - vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 - vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) - - vpxor %xmm15, %xmm14, %xmm14 - GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation - vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap - - mov arg5, %rax # rax = *Y0 - vmovdqu (%rax), %xmm9 # xmm9 = Y0 - - ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0) - - vpxor %xmm14, %xmm9, %xmm9 - - - -_return_T\@: - mov arg8, %r10 # r10 = authTag - mov arg9, %r11 # r11 = auth_tag_len - - cmp $16, %r11 - je _T_16\@ - - cmp $8, %r11 - jl _T_4\@ - -_T_8\@: - vmovq %xmm9, 
%rax - mov %rax, (%r10) - add $8, %r10 - sub $8, %r11 - vpsrldq $8, %xmm9, %xmm9 - cmp $0, %r11 - je _return_T_done\@ -_T_4\@: - vmovd %xmm9, %eax - mov %eax, (%r10) - add $4, %r10 - sub $4, %r11 - vpsrldq $4, %xmm9, %xmm9 - cmp $0, %r11 - je _return_T_done\@ -_T_123\@: - vmovd %xmm9, %eax - cmp $2, %r11 - jl _T_1\@ - mov %ax, (%r10) - cmp $2, %r11 - je _return_T_done\@ - add $2, %r10 - sar $16, %eax -_T_1\@: - mov %al, (%r10) - jmp _return_T_done\@ - -_T_16\@: - vmovdqu %xmm9, (%r10) - -_return_T_done\@: - mov %r14, %rsp - - pop %r15 - pop %r14 - pop %r13 - pop %r12 -.endm - - ############################################################# -#void aesni_gcm_precomp_avx_gen4 +#void aesni_gcm_init_avx_gen4 # (gcm_data *my_ctx_data, -# u8 *hash_subkey)# /* H, the Hash sub key input. -# Data starts on a 16-byte boundary. */ +# gcm_context_data *data, +# u8 *iv, /* Pre-counter block j0: 4 byte salt +# (from Security Association) concatenated with 8 byte +# Initialisation Vector (from IPSec ESP Payload) +# concatenated with 0x00000001. 16-byte aligned pointer. */ +# u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */ +# const u8 *aad, /* Additional Authentication Data (AAD)*/ +# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_precomp_avx_gen4) - #the number of pushes must equal STACK_OFFSET - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, %r14 - - - - sub $VARIABLE_OFFSET, %rsp - and $~63, %rsp # align rsp to 64 bytes - - vmovdqu (arg2), %xmm6 # xmm6 = HashKey - - vpshufb SHUF_MASK(%rip), %xmm6, %xmm6 - ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey - vmovdqa %xmm6, %xmm2 - vpsllq $1, %xmm6, %xmm6 - vpsrlq $63, %xmm2, %xmm2 - vmovdqa %xmm2, %xmm1 - vpslldq $8, %xmm2, %xmm2 - vpsrldq $8, %xmm1, %xmm1 - vpor %xmm2, %xmm6, %xmm6 - #reduction - vpshufd $0b00100100, %xmm1, %xmm2 - vpcmpeqd TWOONE(%rip), %xmm2, %xmm2 - vpand POLY(%rip), %xmm2, %xmm2 - vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly - ####################################################################### - vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly - - - PRECOMPUTE_AVX2 %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5 - - mov %r14, %rsp - - pop %r15 - pop %r14 - pop %r13 - pop %r12 +ENTRY(aesni_gcm_init_avx_gen4) + FUNC_SAVE + INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 + FUNC_RESTORE ret -ENDPROC(aesni_gcm_precomp_avx_gen4) - +ENDPROC(aesni_gcm_init_avx_gen4) ############################################################################### #void aesni_gcm_enc_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */ # const u8 *in, /* Plaintext input */ -# u64 plaintext_len, /* Length of data in Bytes for encryption. */ -# u8 *iv, /* Pre-counter block j0: 4 byte salt -# (from Security Association) concatenated with 8 byte -# Initialisation Vector (from IPSec ESP Payload) -# concatenated with 0x00000001. 16-byte aligned pointer. */ -# const u8 *aad, /* Additional Authentication Data (AAD)*/ -# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ -# u8 *auth_tag, /* Authenticated Tag output. */ -# u64 auth_tag_len)# /* Authenticated Tag Length in bytes. -# Valid values are 16 (most likely), 12 or 8. */ +# u64 plaintext_len) /* Length of data in Bytes for encryption. 
*/ ############################################################################### -ENTRY(aesni_gcm_enc_avx_gen4) - GCM_ENC_DEC_AVX2 ENC +ENTRY(aesni_gcm_enc_update_avx_gen4) + FUNC_SAVE + mov keysize,%eax + cmp $32, %eax + je key_256_enc_update4 + cmp $16, %eax + je key_128_enc_update4 + # must be 192 + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11 + FUNC_RESTORE + ret +key_128_enc_update4: + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9 + FUNC_RESTORE + ret +key_256_enc_update4: + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 + FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_avx_gen4) +ENDPROC(aesni_gcm_enc_update_avx_gen4) ############################################################################### -#void aesni_gcm_dec_avx_gen4( +#void aesni_gcm_dec_update_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *out, /* Plaintext output. Decrypt in-place is allowed. */ # const u8 *in, /* Ciphertext input */ -# u64 plaintext_len, /* Length of data in Bytes for encryption. */ -# u8 *iv, /* Pre-counter block j0: 4 byte salt -# (from Security Association) concatenated with 8 byte -# Initialisation Vector (from IPSec ESP Payload) -# concatenated with 0x00000001. 16-byte aligned pointer. */ -# const u8 *aad, /* Additional Authentication Data (AAD)*/ -# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ +# u64 plaintext_len) /* Length of data in Bytes for encryption. */ +############################################################################### +ENTRY(aesni_gcm_dec_update_avx_gen4) + FUNC_SAVE + mov keysize,%eax + cmp $32, %eax + je key_256_dec_update4 + cmp $16, %eax + je key_128_dec_update4 + # must be 192 + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11 + FUNC_RESTORE + ret +key_128_dec_update4: + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9 + FUNC_RESTORE + ret +key_256_dec_update4: + GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 + FUNC_RESTORE + ret +ENDPROC(aesni_gcm_dec_update_avx_gen4) + +############################################################################### +#void aesni_gcm_finalize_avx_gen4( +# gcm_data *my_ctx_data, /* aligned to 16 Bytes */ +# gcm_context_data *data, # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. -# Valid values are 16 (most likely), 12 or 8. */ +# Valid values are 16 (most likely), 12 or 8. 
*/ ############################################################################### -ENTRY(aesni_gcm_dec_avx_gen4) - GCM_ENC_DEC_AVX2 DEC - ret -ENDPROC(aesni_gcm_dec_avx_gen4) +ENTRY(aesni_gcm_finalize_avx_gen4) + FUNC_SAVE + mov keysize,%eax + cmp $32, %eax + je key_256_finalize4 + cmp $16, %eax + je key_128_finalize4 + # must be 192 + GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4 + FUNC_RESTORE + ret +key_128_finalize4: + GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4 + FUNC_RESTORE + ret +key_256_finalize4: + GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 + FUNC_RESTORE + ret +ENDPROC(aesni_gcm_finalize_avx_gen4) #endif /* CONFIG_AS_AVX2 */ diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 661f7daf43da..1321700d6647 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -84,7 +84,7 @@ struct gcm_context_data { u8 current_counter[GCM_BLOCK_LEN]; u64 partial_block_len; u64 unused; - u8 hash_keys[GCM_BLOCK_LEN * 8]; + u8 hash_keys[GCM_BLOCK_LEN * 16]; }; asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, @@ -175,6 +175,32 @@ asmlinkage void aesni_gcm_finalize(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); +static struct aesni_gcm_tfm_s { +void (*init)(void *ctx, + struct gcm_context_data *gdata, + u8 *iv, + u8 *hash_subkey, const u8 *aad, + unsigned long aad_len); +void (*enc_update)(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, + unsigned long plaintext_len); +void (*dec_update)(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, + unsigned long ciphertext_len); +void (*finalize)(void *ctx, + struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); +} *aesni_gcm_tfm; + +struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { + .init = &aesni_gcm_init, + .enc_update = &aesni_gcm_enc_update, + .dec_update = &aesni_gcm_dec_update, + .finalize = &aesni_gcm_finalize, +}; + #ifdef CONFIG_AS_AVX asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); @@ -183,136 +209,94 @@ asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); /* - * asmlinkage void aesni_gcm_precomp_avx_gen2() + * asmlinkage void aesni_gcm_init_avx_gen2() * gcm_data *my_ctx_data, context data * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 
*/ -asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey); +asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data, + struct gcm_context_data *gdata, + u8 *iv, + u8 *hash_subkey, + const u8 *aad, + unsigned long aad_len); + +asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long plaintext_len); +asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, + unsigned long ciphertext_len); +asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx, + struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out, +asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, + struct gcm_context_data *gdata, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out, +asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, + struct gcm_context_data *gdata, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -static void aesni_gcm_enc_avx(void *ctx, - struct gcm_context_data *data, u8 *out, - const u8 *in, unsigned long plaintext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len) -{ - struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; - if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){ - aesni_gcm_enc(ctx, data, out, in, - plaintext_len, iv, hash_subkey, aad, - aad_len, auth_tag, auth_tag_len); - } else { - aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); - aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } -} +struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { + .init = &aesni_gcm_init_avx_gen2, + .enc_update = &aesni_gcm_enc_update_avx_gen2, + .dec_update = &aesni_gcm_dec_update_avx_gen2, + .finalize = &aesni_gcm_finalize_avx_gen2, +}; -static void aesni_gcm_dec_avx(void *ctx, - struct gcm_context_data *data, u8 *out, - const u8 *in, unsigned long ciphertext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len) -{ - struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; - if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { - aesni_gcm_dec(ctx, data, out, in, - ciphertext_len, iv, hash_subkey, aad, - aad_len, auth_tag, auth_tag_len); - } else { - aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); - aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } -} #endif #ifdef CONFIG_AS_AVX2 /* - * asmlinkage void aesni_gcm_precomp_avx_gen4() + * asmlinkage void aesni_gcm_init_avx_gen4() * gcm_data *my_ctx_data, context data * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 
*/ -asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey); +asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data, + struct gcm_context_data *gdata, + u8 *iv, + u8 *hash_subkey, + const u8 *aad, + unsigned long aad_len); + +asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long plaintext_len); +asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx, + struct gcm_context_data *gdata, u8 *out, + const u8 *in, + unsigned long ciphertext_len); +asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx, + struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out, +asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, + struct gcm_context_data *gdata, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out, +asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, + struct gcm_context_data *gdata, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -static void aesni_gcm_enc_avx2(void *ctx, - struct gcm_context_data *data, u8 *out, - const u8 *in, unsigned long plaintext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len) -{ - struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; - if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { - aesni_gcm_enc(ctx, data, out, in, - plaintext_len, iv, hash_subkey, aad, - aad_len, auth_tag, auth_tag_len); - } else if (plaintext_len < AVX_GEN4_OPTSIZE) { - aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); - aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } else { - aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); - aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } -} +struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { + .init = &aesni_gcm_init_avx_gen4, + .enc_update = &aesni_gcm_enc_update_avx_gen4, + .dec_update = &aesni_gcm_dec_update_avx_gen4, + .finalize = &aesni_gcm_finalize_avx_gen4, +}; -static void aesni_gcm_dec_avx2(void *ctx, - struct gcm_context_data *data, u8 *out, - const u8 *in, unsigned long ciphertext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len) -{ - struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; - if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { - aesni_gcm_dec(ctx, data, out, in, - ciphertext_len, iv, hash_subkey, - aad, aad_len, auth_tag, auth_tag_len); - } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { - aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); - aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } else { - aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); - aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad, - aad_len, auth_tag, auth_tag_len); - } -} #endif -static void (*aesni_gcm_enc_tfm)(void *ctx, - struct gcm_context_data *data, u8 *out, - const u8 *in, unsigned long plaintext_len, - u8 *iv, u8 *hash_subkey, const u8 *aad, - unsigned long aad_len, u8 *auth_tag, - unsigned long auth_tag_len); - -static void (*aesni_gcm_dec_tfm)(void *ctx, - struct gcm_context_data 
*data, u8 *out, - const u8 *in, unsigned long ciphertext_len, - u8 *iv, u8 *hash_subkey, const u8 *aad, - unsigned long aad_len, u8 *auth_tag, - unsigned long auth_tag_len); - static inline struct aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) { @@ -794,6 +778,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned long auth_tag_len = crypto_aead_authsize(tfm); + struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; struct gcm_context_data data AESNI_ALIGN_ATTR; struct scatter_walk dst_sg_walk = {}; unsigned long left = req->cryptlen; @@ -811,6 +796,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, if (!enc) left -= auth_tag_len; +#ifdef CONFIG_AS_AVX2 + if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4) + gcm_tfm = &aesni_gcm_tfm_avx_gen2; +#endif +#ifdef CONFIG_AS_AVX + if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2) + gcm_tfm = &aesni_gcm_tfm_sse; +#endif + /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length && (!PageHighMem(sg_page(req->src)) || @@ -835,7 +829,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, } kernel_fpu_begin(); - aesni_gcm_init(aes_ctx, &data, iv, + gcm_tfm->init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen); if (req->src != req->dst) { while (left) { @@ -846,10 +840,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, len = min(srclen, dstlen); if (len) { if (enc) - aesni_gcm_enc_update(aes_ctx, &data, + gcm_tfm->enc_update(aes_ctx, &data, dst, src, len); else - aesni_gcm_dec_update(aes_ctx, &data, + gcm_tfm->dec_update(aes_ctx, &data, dst, src, len); } left -= len; @@ -867,10 +861,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, len = scatterwalk_clamp(&src_sg_walk, left); if (len) { if (enc) - aesni_gcm_enc_update(aes_ctx, &data, + gcm_tfm->enc_update(aes_ctx, &data, src, src, len); else - aesni_gcm_dec_update(aes_ctx, &data, + gcm_tfm->dec_update(aes_ctx, &data, src, src, len); } left -= len; @@ -879,7 +873,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_done(&src_sg_walk, 1, left); } } - aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len); + gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len); kernel_fpu_end(); if (!assocmem) @@ -912,147 +906,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, u8 *hash_subkey, u8 *iv, void *aes_ctx) { - u8 one_entry_in_sg = 0; - u8 *src, *dst, *assoc; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - unsigned long auth_tag_len = crypto_aead_authsize(tfm); - struct scatter_walk src_sg_walk; - struct scatter_walk dst_sg_walk = {}; - struct gcm_context_data data AESNI_ALIGN_ATTR; - - if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 || - aesni_gcm_enc_tfm == aesni_gcm_enc || - req->cryptlen < AVX_GEN2_OPTSIZE) { - return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, - aes_ctx); - } - if (sg_is_last(req->src) && - (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length <= PAGE_SIZE) && - sg_is_last(req->dst) && - (!PageHighMem(sg_page(req->dst)) || - req->dst->offset + req->dst->length <= PAGE_SIZE)) { - one_entry_in_sg = 1; - scatterwalk_start(&src_sg_walk, req->src); - assoc = scatterwalk_map(&src_sg_walk); - src = assoc + req->assoclen; - dst = src; - if (unlikely(req->src 
!= req->dst)) { - scatterwalk_start(&dst_sg_walk, req->dst); - dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; - } - } else { - /* Allocate memory for src, dst, assoc */ - assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, - GFP_ATOMIC); - if (unlikely(!assoc)) - return -ENOMEM; - scatterwalk_map_and_copy(assoc, req->src, 0, - req->assoclen + req->cryptlen, 0); - src = assoc + req->assoclen; - dst = src; - } - - kernel_fpu_begin(); - aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv, - hash_subkey, assoc, assoclen, - dst + req->cryptlen, auth_tag_len); - kernel_fpu_end(); - - /* The authTag (aka the Integrity Check Value) needs to be written - * back to the packet. */ - if (one_entry_in_sg) { - if (unlikely(req->src != req->dst)) { - scatterwalk_unmap(dst - req->assoclen); - scatterwalk_advance(&dst_sg_walk, req->dst->length); - scatterwalk_done(&dst_sg_walk, 1, 0); - } - scatterwalk_unmap(assoc); - scatterwalk_advance(&src_sg_walk, req->src->length); - scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); - } else { - scatterwalk_map_and_copy(dst, req->dst, req->assoclen, - req->cryptlen + auth_tag_len, 1); - kfree(assoc); - } - return 0; + return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, + aes_ctx); } static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, u8 *hash_subkey, u8 *iv, void *aes_ctx) { - u8 one_entry_in_sg = 0; - u8 *src, *dst, *assoc; - unsigned long tempCipherLen = 0; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - unsigned long auth_tag_len = crypto_aead_authsize(tfm); - u8 authTag[16]; - struct scatter_walk src_sg_walk; - struct scatter_walk dst_sg_walk = {}; - struct gcm_context_data data AESNI_ALIGN_ATTR; - int retval = 0; - - if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 || - aesni_gcm_enc_tfm == aesni_gcm_enc || - req->cryptlen < AVX_GEN2_OPTSIZE) { - return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, - aes_ctx); - } - tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); - - if (sg_is_last(req->src) && - (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length <= PAGE_SIZE) && - sg_is_last(req->dst) && req->dst->length && - (!PageHighMem(sg_page(req->dst)) || - req->dst->offset + req->dst->length <= PAGE_SIZE)) { - one_entry_in_sg = 1; - scatterwalk_start(&src_sg_walk, req->src); - assoc = scatterwalk_map(&src_sg_walk); - src = assoc + req->assoclen; - dst = src; - if (unlikely(req->src != req->dst)) { - scatterwalk_start(&dst_sg_walk, req->dst); - dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; - } - } else { - /* Allocate memory for src, dst, assoc */ - assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); - if (!assoc) - return -ENOMEM; - scatterwalk_map_and_copy(assoc, req->src, 0, - req->assoclen + req->cryptlen, 0); - src = assoc + req->assoclen; - dst = src; - } - - - kernel_fpu_begin(); - aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv, - hash_subkey, assoc, assoclen, - authTag, auth_tag_len); - kernel_fpu_end(); - - /* Compare generated tag with passed in tag. */ - retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ? 
- -EBADMSG : 0; - - if (one_entry_in_sg) { - if (unlikely(req->src != req->dst)) { - scatterwalk_unmap(dst - req->assoclen); - scatterwalk_advance(&dst_sg_walk, req->dst->length); - scatterwalk_done(&dst_sg_walk, 1, 0); - } - scatterwalk_unmap(assoc); - scatterwalk_advance(&src_sg_walk, req->src->length); - scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); - } else { - scatterwalk_map_and_copy(dst, req->dst, req->assoclen, - tempCipherLen, 1); - kfree(assoc); - } - return retval; - + return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, + aes_ctx); } static int helper_rfc4106_encrypt(struct aead_request *req) @@ -1420,21 +1282,18 @@ static int __init aesni_init(void) #ifdef CONFIG_AS_AVX2 if (boot_cpu_has(X86_FEATURE_AVX2)) { pr_info("AVX2 version of gcm_enc/dec engaged.\n"); - aesni_gcm_enc_tfm = aesni_gcm_enc_avx2; - aesni_gcm_dec_tfm = aesni_gcm_dec_avx2; + aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4; } else #endif #ifdef CONFIG_AS_AVX if (boot_cpu_has(X86_FEATURE_AVX)) { pr_info("AVX version of gcm_enc/dec engaged.\n"); - aesni_gcm_enc_tfm = aesni_gcm_enc_avx; - aesni_gcm_dec_tfm = aesni_gcm_dec_avx; + aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2; } else #endif { pr_info("SSE version of gcm_enc/dec engaged.\n"); - aesni_gcm_enc_tfm = aesni_gcm_enc; - aesni_gcm_dec_tfm = aesni_gcm_dec; + aesni_gcm_tfm = &aesni_gcm_tfm_sse; } aesni_ctr_enc_tfm = aesni_ctr_enc; #ifdef CONFIG_AS_AVX diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 41034745d6a2..d1ce49119da8 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -1,5 +1,5 @@ /* - * Glue Code for the AVX assembler implemention of the Cast5 Cipher + * Glue Code for the AVX assembler implementation of the Cast5 Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 9fb66b5e94b2..18965c39305e 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -1,5 +1,5 @@ /* - * Glue Code for the AVX assembler implemention of the Cast6 Cipher + * Glue Code for the AVX assembler implementation of the Cast6 Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S new file mode 100644 index 000000000000..32903fd450af --- /dev/null +++ b/arch/x86/crypto/chacha-avx2-x86_64.S @@ -0,0 +1,1025 @@ +/* + * ChaCha 256-bit cipher algorithm, x64 AVX2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/linkage.h> + +.section .rodata.cst32.ROT8, "aM", @progbits, 32 +.align 32 +ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 + .octa 0x0e0d0c0f0a09080b0605040702010003 + +.section .rodata.cst32.ROT16, "aM", @progbits, 32 +.align 32 +ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 + .octa 0x0d0c0f0e09080b0a0504070601000302 + +.section .rodata.cst32.CTRINC, "aM", @progbits, 32 +.align 32 +CTRINC: .octa 0x00000003000000020000000100000000 + .octa 0x00000007000000060000000500000004 + +.section .rodata.cst32.CTR2BL, "aM", @progbits, 32 +.align 32 +CTR2BL: .octa 0x00000000000000000000000000000000 + .octa 0x00000000000000000000000000000001 + +.section .rodata.cst32.CTR4BL, "aM", @progbits, 32 +.align 32 +CTR4BL: .octa 0x00000000000000000000000000000002 + .octa 0x00000000000000000000000000000003 + +.text + +ENTRY(chacha_2block_xor_avx2) + # %rdi: Input state matrix, s + # %rsi: up to 2 data blocks output, o + # %rdx: up to 2 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts two ChaCha blocks by loading the state + # matrix twice across four AVX registers. It performs matrix operations + # on four words in each matrix in parallel, but requires shuffling to + # rearrange the words after each round. + + vzeroupper + + # x0..3[0-2] = s0..3 + vbroadcasti128 0x00(%rdi),%ymm0 + vbroadcasti128 0x10(%rdi),%ymm1 + vbroadcasti128 0x20(%rdi),%ymm2 + vbroadcasti128 0x30(%rdi),%ymm3 + + vpaddd CTR2BL(%rip),%ymm3,%ymm3 + + vmovdqa %ymm0,%ymm8 + vmovdqa %ymm1,%ymm9 + vmovdqa %ymm2,%ymm10 + vmovdqa %ymm3,%ymm11 + + vmovdqa ROT8(%rip),%ymm4 + vmovdqa ROT16(%rip),%ymm5 + + mov %rcx,%rax + +.Ldoubleround: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm5,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm6 + vpslld $12,%ymm6,%ymm6 + vpsrld $20,%ymm1,%ymm1 + vpor %ymm6,%ymm1,%ymm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm4,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm7 + vpslld $7,%ymm7,%ymm7 + vpsrld $25,%ymm1,%ymm1 + vpor %ymm7,%ymm1,%ymm1 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm1,%ymm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm3,%ymm3 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm5,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm6 + vpslld $12,%ymm6,%ymm6 + vpsrld $20,%ymm1,%ymm1 + vpor %ymm6,%ymm1,%ymm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm4,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm7 + vpslld $7,%ymm7,%ymm7 + vpsrld $25,%ymm1,%ymm1 + vpor %ymm7,%ymm1,%ymm1 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm1,%ymm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm3,%ymm3 + + sub $2,%r8d + jnz .Ldoubleround + + # o0 = i0 ^ (x0 + s0) + vpaddd %ymm8,%ymm0,%ymm7 + cmp $0x10,%rax + jl .Lxorpart2 + vpxor 0x00(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x00(%rsi) + vextracti128 
$1,%ymm7,%xmm0 + # o1 = i1 ^ (x1 + s1) + vpaddd %ymm9,%ymm1,%ymm7 + cmp $0x20,%rax + jl .Lxorpart2 + vpxor 0x10(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x10(%rsi) + vextracti128 $1,%ymm7,%xmm1 + # o2 = i2 ^ (x2 + s2) + vpaddd %ymm10,%ymm2,%ymm7 + cmp $0x30,%rax + jl .Lxorpart2 + vpxor 0x20(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x20(%rsi) + vextracti128 $1,%ymm7,%xmm2 + # o3 = i3 ^ (x3 + s3) + vpaddd %ymm11,%ymm3,%ymm7 + cmp $0x40,%rax + jl .Lxorpart2 + vpxor 0x30(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x30(%rsi) + vextracti128 $1,%ymm7,%xmm3 + + # xor and write second block + vmovdqa %xmm0,%xmm7 + cmp $0x50,%rax + jl .Lxorpart2 + vpxor 0x40(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x40(%rsi) + + vmovdqa %xmm1,%xmm7 + cmp $0x60,%rax + jl .Lxorpart2 + vpxor 0x50(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x50(%rsi) + + vmovdqa %xmm2,%xmm7 + cmp $0x70,%rax + jl .Lxorpart2 + vpxor 0x60(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x60(%rsi) + + vmovdqa %xmm3,%xmm7 + cmp $0x80,%rax + jl .Lxorpart2 + vpxor 0x70(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x70(%rsi) + +.Ldone2: + vzeroupper + ret + +.Lxorpart2: + # xor remaining bytes from partial register into output + mov %rax,%r9 + and $0x0f,%r9 + jz .Ldone2 + and $~0x0f,%rax + + mov %rsi,%r11 + + lea 8(%rsp),%r10 + sub $0x10,%rsp + and $~31,%rsp + + lea (%rdx,%rax),%rsi + mov %rsp,%rdi + mov %r9,%rcx + rep movsb + + vpxor 0x00(%rsp),%xmm7,%xmm7 + vmovdqa %xmm7,0x00(%rsp) + + mov %rsp,%rsi + lea (%r11,%rax),%rdi + mov %r9,%rcx + rep movsb + + lea -8(%r10),%rsp + jmp .Ldone2 + +ENDPROC(chacha_2block_xor_avx2) + +ENTRY(chacha_4block_xor_avx2) + # %rdi: Input state matrix, s + # %rsi: up to 4 data blocks output, o + # %rdx: up to 4 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts four ChaCha blocks by loading the state + # matrix four times across eight AVX registers. It performs matrix + # operations on four words in two matrices in parallel, sequentially + # to the operations on the four words of the other two matrices. The + # required word shuffling has a rather high latency, we can do the + # arithmetic on two matrix-pairs without much slowdown. 
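For reference while reading the vector code below: each group of vpaddd/vpxor plus a vpshufb or shift+OR pair implements one line of the standard ChaCha quarter-round, applied to four 32-bit words per matrix at once. A minimal scalar C sketch of the quarter-round (illustrative only, not part of the patch):

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t v, unsigned int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha quarter-round: the add/xor/rotate pattern that the
     * "# x0 += x1, x3 = rotl32(x3 ^ x0, 16)" comments describe. */
    static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d = rotl32(*d ^ *a, 16);
            *c += *d; *b = rotl32(*b ^ *c, 12);
            *a += *b; *d = rotl32(*d ^ *a, 8);
            *c += *d; *b = rotl32(*b ^ *c, 7);
    }

The vpshufd $0x39/$0x4e/$0x93 shuffles between the two quarter-round halves rotate rows 1-3 of the state so that the same four operations cover first the columns and then the diagonals of each matrix.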
+ + vzeroupper + + # x0..3[0-4] = s0..3 + vbroadcasti128 0x00(%rdi),%ymm0 + vbroadcasti128 0x10(%rdi),%ymm1 + vbroadcasti128 0x20(%rdi),%ymm2 + vbroadcasti128 0x30(%rdi),%ymm3 + + vmovdqa %ymm0,%ymm4 + vmovdqa %ymm1,%ymm5 + vmovdqa %ymm2,%ymm6 + vmovdqa %ymm3,%ymm7 + + vpaddd CTR2BL(%rip),%ymm3,%ymm3 + vpaddd CTR4BL(%rip),%ymm7,%ymm7 + + vmovdqa %ymm0,%ymm11 + vmovdqa %ymm1,%ymm12 + vmovdqa %ymm2,%ymm13 + vmovdqa %ymm3,%ymm14 + vmovdqa %ymm7,%ymm15 + + vmovdqa ROT8(%rip),%ymm8 + vmovdqa ROT16(%rip),%ymm9 + + mov %rcx,%rax + +.Ldoubleround4: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm9,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxor %ymm4,%ymm7,%ymm7 + vpshufb %ymm9,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm10 + vpslld $12,%ymm10,%ymm10 + vpsrld $20,%ymm1,%ymm1 + vpor %ymm10,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxor %ymm6,%ymm5,%ymm5 + vmovdqa %ymm5,%ymm10 + vpslld $12,%ymm10,%ymm10 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm10,%ymm5,%ymm5 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm8,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxor %ymm4,%ymm7,%ymm7 + vpshufb %ymm8,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm10 + vpslld $7,%ymm10,%ymm10 + vpsrld $25,%ymm1,%ymm1 + vpor %ymm10,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxor %ymm6,%ymm5,%ymm5 + vmovdqa %ymm5,%ymm10 + vpslld $7,%ymm10,%ymm10 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm10,%ymm5,%ymm5 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm1,%ymm1 + vpshufd $0x39,%ymm5,%ymm5 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + vpshufd $0x4e,%ymm6,%ymm6 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm3,%ymm3 + vpshufd $0x93,%ymm7,%ymm7 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm9,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxor %ymm4,%ymm7,%ymm7 + vpshufb %ymm9,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm10 + vpslld $12,%ymm10,%ymm10 + vpsrld $20,%ymm1,%ymm1 + vpor %ymm10,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxor %ymm6,%ymm5,%ymm5 + vmovdqa %ymm5,%ymm10 + vpslld $12,%ymm10,%ymm10 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm10,%ymm5,%ymm5 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxor %ymm0,%ymm3,%ymm3 + vpshufb %ymm8,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxor %ymm4,%ymm7,%ymm7 + vpshufb %ymm8,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxor %ymm2,%ymm1,%ymm1 + vmovdqa %ymm1,%ymm10 + vpslld $7,%ymm10,%ymm10 + vpsrld $25,%ymm1,%ymm1 + vpor %ymm10,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxor %ymm6,%ymm5,%ymm5 + vmovdqa %ymm5,%ymm10 + vpslld $7,%ymm10,%ymm10 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm10,%ymm5,%ymm5 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm1,%ymm1 + vpshufd $0x93,%ymm5,%ymm5 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + vpshufd $0x4e,%ymm6,%ymm6 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm3,%ymm3 + vpshufd $0x39,%ymm7,%ymm7 + + sub $2,%r8d + jnz .Ldoubleround4 + + # o0 = i0 ^ (x0 + s0), first block + vpaddd %ymm11,%ymm0,%ymm10 + cmp $0x10,%rax + jl .Lxorpart4 + vpxor 0x00(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x00(%rsi) + 
vextracti128 $1,%ymm10,%xmm0 + # o1 = i1 ^ (x1 + s1), first block + vpaddd %ymm12,%ymm1,%ymm10 + cmp $0x20,%rax + jl .Lxorpart4 + vpxor 0x10(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x10(%rsi) + vextracti128 $1,%ymm10,%xmm1 + # o2 = i2 ^ (x2 + s2), first block + vpaddd %ymm13,%ymm2,%ymm10 + cmp $0x30,%rax + jl .Lxorpart4 + vpxor 0x20(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x20(%rsi) + vextracti128 $1,%ymm10,%xmm2 + # o3 = i3 ^ (x3 + s3), first block + vpaddd %ymm14,%ymm3,%ymm10 + cmp $0x40,%rax + jl .Lxorpart4 + vpxor 0x30(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x30(%rsi) + vextracti128 $1,%ymm10,%xmm3 + + # xor and write second block + vmovdqa %xmm0,%xmm10 + cmp $0x50,%rax + jl .Lxorpart4 + vpxor 0x40(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x40(%rsi) + + vmovdqa %xmm1,%xmm10 + cmp $0x60,%rax + jl .Lxorpart4 + vpxor 0x50(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x50(%rsi) + + vmovdqa %xmm2,%xmm10 + cmp $0x70,%rax + jl .Lxorpart4 + vpxor 0x60(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x60(%rsi) + + vmovdqa %xmm3,%xmm10 + cmp $0x80,%rax + jl .Lxorpart4 + vpxor 0x70(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x70(%rsi) + + # o0 = i0 ^ (x0 + s0), third block + vpaddd %ymm11,%ymm4,%ymm10 + cmp $0x90,%rax + jl .Lxorpart4 + vpxor 0x80(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x80(%rsi) + vextracti128 $1,%ymm10,%xmm4 + # o1 = i1 ^ (x1 + s1), third block + vpaddd %ymm12,%ymm5,%ymm10 + cmp $0xa0,%rax + jl .Lxorpart4 + vpxor 0x90(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x90(%rsi) + vextracti128 $1,%ymm10,%xmm5 + # o2 = i2 ^ (x2 + s2), third block + vpaddd %ymm13,%ymm6,%ymm10 + cmp $0xb0,%rax + jl .Lxorpart4 + vpxor 0xa0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xa0(%rsi) + vextracti128 $1,%ymm10,%xmm6 + # o3 = i3 ^ (x3 + s3), third block + vpaddd %ymm15,%ymm7,%ymm10 + cmp $0xc0,%rax + jl .Lxorpart4 + vpxor 0xb0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xb0(%rsi) + vextracti128 $1,%ymm10,%xmm7 + + # xor and write fourth block + vmovdqa %xmm4,%xmm10 + cmp $0xd0,%rax + jl .Lxorpart4 + vpxor 0xc0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xc0(%rsi) + + vmovdqa %xmm5,%xmm10 + cmp $0xe0,%rax + jl .Lxorpart4 + vpxor 0xd0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xd0(%rsi) + + vmovdqa %xmm6,%xmm10 + cmp $0xf0,%rax + jl .Lxorpart4 + vpxor 0xe0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xe0(%rsi) + + vmovdqa %xmm7,%xmm10 + cmp $0x100,%rax + jl .Lxorpart4 + vpxor 0xf0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xf0(%rsi) + +.Ldone4: + vzeroupper + ret + +.Lxorpart4: + # xor remaining bytes from partial register into output + mov %rax,%r9 + and $0x0f,%r9 + jz .Ldone4 + and $~0x0f,%rax + + mov %rsi,%r11 + + lea 8(%rsp),%r10 + sub $0x10,%rsp + and $~31,%rsp + + lea (%rdx,%rax),%rsi + mov %rsp,%rdi + mov %r9,%rcx + rep movsb + + vpxor 0x00(%rsp),%xmm10,%xmm10 + vmovdqa %xmm10,0x00(%rsp) + + mov %rsp,%rsi + lea (%r11,%rax),%rdi + mov %r9,%rcx + rep movsb + + lea -8(%r10),%rsp + jmp .Ldone4 + +ENDPROC(chacha_4block_xor_avx2) + +ENTRY(chacha_8block_xor_avx2) + # %rdi: Input state matrix, s + # %rsi: up to 8 data blocks output, o + # %rdx: up to 8 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts eight consecutive ChaCha blocks by loading + # the state matrix in AVX registers eight times. As we need some + # scratch registers, we save the first four registers on the stack. The + # algorithm performs each operation on the corresponding word of each + # state matrix, hence requires no word shuffling. For final XORing step + # we transpose the matrix by interleaving 32-, 64- and then 128-bit + # words, which allows us to do XOR in AVX registers. 
8/16-bit word + # rotation is done with the slightly better performing byte shuffling, + # 7/12-bit word rotation uses traditional shift+OR. + + vzeroupper + # 4 * 32 byte stack, 32-byte aligned + lea 8(%rsp),%r10 + and $~31, %rsp + sub $0x80, %rsp + mov %rcx,%rax + + # x0..15[0-7] = s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpbroadcastd 0x04(%rdi),%ymm1 + vpbroadcastd 0x08(%rdi),%ymm2 + vpbroadcastd 0x0c(%rdi),%ymm3 + vpbroadcastd 0x10(%rdi),%ymm4 + vpbroadcastd 0x14(%rdi),%ymm5 + vpbroadcastd 0x18(%rdi),%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm7 + vpbroadcastd 0x20(%rdi),%ymm8 + vpbroadcastd 0x24(%rdi),%ymm9 + vpbroadcastd 0x28(%rdi),%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm11 + vpbroadcastd 0x30(%rdi),%ymm12 + vpbroadcastd 0x34(%rdi),%ymm13 + vpbroadcastd 0x38(%rdi),%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm15 + # x0..3 on stack + vmovdqa %ymm0,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm3,0x60(%rsp) + + vmovdqa CTRINC(%rip),%ymm1 + vmovdqa ROT8(%rip),%ymm2 + vmovdqa ROT16(%rip),%ymm3 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + +.Ldoubleround8: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor 
%ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld $25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 12) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld $25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + sub $2,%r8d + jnz .Ldoubleround8 + + # x0..15[0-3] += s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpaddd 0x00(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpbroadcastd 0x04(%rdi),%ymm0 + vpaddd 0x20(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpbroadcastd 0x08(%rdi),%ymm0 + vpaddd 0x40(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpbroadcastd 0x0c(%rdi),%ymm0 + vpaddd 0x60(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpbroadcastd 0x10(%rdi),%ymm0 + vpaddd %ymm0,%ymm4,%ymm4 + vpbroadcastd 0x14(%rdi),%ymm0 + vpaddd %ymm0,%ymm5,%ymm5 + vpbroadcastd 0x18(%rdi),%ymm0 + vpaddd %ymm0,%ymm6,%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm0 + vpaddd %ymm0,%ymm7,%ymm7 + vpbroadcastd 0x20(%rdi),%ymm0 + vpaddd %ymm0,%ymm8,%ymm8 + 
vpbroadcastd 0x24(%rdi),%ymm0 + vpaddd %ymm0,%ymm9,%ymm9 + vpbroadcastd 0x28(%rdi),%ymm0 + vpaddd %ymm0,%ymm10,%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm0 + vpaddd %ymm0,%ymm11,%ymm11 + vpbroadcastd 0x30(%rdi),%ymm0 + vpaddd %ymm0,%ymm12,%ymm12 + vpbroadcastd 0x34(%rdi),%ymm0 + vpaddd %ymm0,%ymm13,%ymm13 + vpbroadcastd 0x38(%rdi),%ymm0 + vpaddd %ymm0,%ymm14,%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm0 + vpaddd %ymm0,%ymm15,%ymm15 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + + # interleave 32-bit words in state n, n+1 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x20(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa 0x40(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm1,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpckldq %ymm5,%ymm0,%ymm4 + vpunpckhdq %ymm5,%ymm0,%ymm5 + vmovdqa %ymm6,%ymm0 + vpunpckldq %ymm7,%ymm0,%ymm6 + vpunpckhdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpckldq %ymm9,%ymm0,%ymm8 + vpunpckhdq %ymm9,%ymm0,%ymm9 + vmovdqa %ymm10,%ymm0 + vpunpckldq %ymm11,%ymm0,%ymm10 + vpunpckhdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpckldq %ymm13,%ymm0,%ymm12 + vpunpckhdq %ymm13,%ymm0,%ymm13 + vmovdqa %ymm14,%ymm0 + vpunpckldq %ymm15,%ymm0,%ymm14 + vpunpckhdq %ymm15,%ymm0,%ymm15 + + # interleave 64-bit words in state n, n+2 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x40(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x00(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa 0x20(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpcklqdq %ymm6,%ymm0,%ymm4 + vpunpckhqdq %ymm6,%ymm0,%ymm6 + vmovdqa %ymm5,%ymm0 + vpunpcklqdq %ymm7,%ymm0,%ymm5 + vpunpckhqdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpcklqdq %ymm10,%ymm0,%ymm8 + vpunpckhqdq %ymm10,%ymm0,%ymm10 + vmovdqa %ymm9,%ymm0 + vpunpcklqdq %ymm11,%ymm0,%ymm9 + vpunpckhqdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpcklqdq %ymm14,%ymm0,%ymm12 + vpunpckhqdq %ymm14,%ymm0,%ymm14 + vmovdqa %ymm13,%ymm0 + vpunpcklqdq %ymm15,%ymm0,%ymm13 + vpunpckhqdq %ymm15,%ymm0,%ymm15 + + # interleave 128-bit words in state n, n+4 + # xor/write first four blocks + vmovdqa 0x00(%rsp),%ymm1 + vperm2i128 $0x20,%ymm4,%ymm1,%ymm0 + cmp $0x0020,%rax + jl .Lxorpart8 + vpxor 0x0000(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0000(%rsi) + vperm2i128 $0x31,%ymm4,%ymm1,%ymm4 + + vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 + cmp $0x0040,%rax + jl .Lxorpart8 + vpxor 0x0020(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0020(%rsi) + vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 + + vmovdqa 0x40(%rsp),%ymm1 + vperm2i128 $0x20,%ymm6,%ymm1,%ymm0 + cmp $0x0060,%rax + jl .Lxorpart8 + vpxor 0x0040(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0040(%rsi) + vperm2i128 $0x31,%ymm6,%ymm1,%ymm6 + + vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 + cmp $0x0080,%rax + jl .Lxorpart8 + vpxor 0x0060(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0060(%rsi) + vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 + + vmovdqa 0x20(%rsp),%ymm1 + vperm2i128 $0x20,%ymm5,%ymm1,%ymm0 + cmp $0x00a0,%rax + jl .Lxorpart8 + vpxor 0x0080(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0080(%rsi) + vperm2i128 $0x31,%ymm5,%ymm1,%ymm5 + + vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 + cmp $0x00c0,%rax + jl .Lxorpart8 + vpxor 0x00a0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x00a0(%rsi) + vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 + + vmovdqa 0x60(%rsp),%ymm1 + 
vperm2i128 $0x20,%ymm7,%ymm1,%ymm0 + cmp $0x00e0,%rax + jl .Lxorpart8 + vpxor 0x00c0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x00c0(%rsi) + vperm2i128 $0x31,%ymm7,%ymm1,%ymm7 + + vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 + cmp $0x0100,%rax + jl .Lxorpart8 + vpxor 0x00e0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x00e0(%rsi) + vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 + + # xor remaining blocks, write to output + vmovdqa %ymm4,%ymm0 + cmp $0x0120,%rax + jl .Lxorpart8 + vpxor 0x0100(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0100(%rsi) + + vmovdqa %ymm12,%ymm0 + cmp $0x0140,%rax + jl .Lxorpart8 + vpxor 0x0120(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0120(%rsi) + + vmovdqa %ymm6,%ymm0 + cmp $0x0160,%rax + jl .Lxorpart8 + vpxor 0x0140(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0140(%rsi) + + vmovdqa %ymm14,%ymm0 + cmp $0x0180,%rax + jl .Lxorpart8 + vpxor 0x0160(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0160(%rsi) + + vmovdqa %ymm5,%ymm0 + cmp $0x01a0,%rax + jl .Lxorpart8 + vpxor 0x0180(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0180(%rsi) + + vmovdqa %ymm13,%ymm0 + cmp $0x01c0,%rax + jl .Lxorpart8 + vpxor 0x01a0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x01a0(%rsi) + + vmovdqa %ymm7,%ymm0 + cmp $0x01e0,%rax + jl .Lxorpart8 + vpxor 0x01c0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x01c0(%rsi) + + vmovdqa %ymm15,%ymm0 + cmp $0x0200,%rax + jl .Lxorpart8 + vpxor 0x01e0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x01e0(%rsi) + +.Ldone8: + vzeroupper + lea -8(%r10),%rsp + ret + +.Lxorpart8: + # xor remaining bytes from partial register into output + mov %rax,%r9 + and $0x1f,%r9 + jz .Ldone8 + and $~0x1f,%rax + + mov %rsi,%r11 + + lea (%rdx,%rax),%rsi + mov %rsp,%rdi + mov %r9,%rcx + rep movsb + + vpxor 0x00(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + + mov %rsp,%rsi + lea (%r11,%rax),%rdi + mov %r9,%rcx + rep movsb + + jmp .Ldone8 + +ENDPROC(chacha_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S new file mode 100644 index 000000000000..848f9c75fd4f --- /dev/null +++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S @@ -0,0 +1,836 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ChaCha 256-bit cipher algorithm, x64 AVX-512VL functions + * + * Copyright (C) 2018 Martin Willi + */ + +#include <linux/linkage.h> + +.section .rodata.cst32.CTR2BL, "aM", @progbits, 32 +.align 32 +CTR2BL: .octa 0x00000000000000000000000000000000 + .octa 0x00000000000000000000000000000001 + +.section .rodata.cst32.CTR4BL, "aM", @progbits, 32 +.align 32 +CTR4BL: .octa 0x00000000000000000000000000000002 + .octa 0x00000000000000000000000000000003 + +.section .rodata.cst32.CTR8BL, "aM", @progbits, 32 +.align 32 +CTR8BL: .octa 0x00000003000000020000000100000000 + .octa 0x00000007000000060000000500000004 + +.text + +ENTRY(chacha_2block_xor_avx512vl) + # %rdi: Input state matrix, s + # %rsi: up to 2 data blocks output, o + # %rdx: up to 2 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts two ChaCha blocks by loading the state + # matrix twice across four AVX registers. It performs matrix operations + # on four words in each matrix in parallel, but requires shuffling to + # rearrange the words after each round. 
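The difference from the AVX2 version is visible immediately below: vpxord/vprold collapse every rotate into a single instruction, where the AVX2 code needs either a vpshufb (8/16-bit rotates) or a vpslld/vpsrld/vpor triple (7/12-bit rotates). A rough intrinsics sketch of that contrast, illustrative only and not part of the patch:

    #include <immintrin.h>

    /* AVX-512VL: one vprold rotates all eight 32-bit lanes left by 12. */
    static __m256i rotl12_vl(__m256i v)
    {
            return _mm256_rol_epi32(v, 12);
    }

    /* AVX2: the same rotate takes two shifts and an OR. */
    static __m256i rotl12_avx2(__m256i v)
    {
            return _mm256_or_si256(_mm256_slli_epi32(v, 12),
                                   _mm256_srli_epi32(v, 20));
    }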
+ + vzeroupper + + # x0..3[0-2] = s0..3 + vbroadcasti128 0x00(%rdi),%ymm0 + vbroadcasti128 0x10(%rdi),%ymm1 + vbroadcasti128 0x20(%rdi),%ymm2 + vbroadcasti128 0x30(%rdi),%ymm3 + + vpaddd CTR2BL(%rip),%ymm3,%ymm3 + + vmovdqa %ymm0,%ymm8 + vmovdqa %ymm1,%ymm9 + vmovdqa %ymm2,%ymm10 + vmovdqa %ymm3,%ymm11 + +.Ldoubleround: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $16,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $12,%ymm1,%ymm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $8,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $7,%ymm1,%ymm1 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm1,%ymm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm3,%ymm3 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $16,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $12,%ymm1,%ymm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $8,%ymm3,%ymm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $7,%ymm1,%ymm1 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm1,%ymm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm3,%ymm3 + + sub $2,%r8d + jnz .Ldoubleround + + # o0 = i0 ^ (x0 + s0) + vpaddd %ymm8,%ymm0,%ymm7 + cmp $0x10,%rcx + jl .Lxorpart2 + vpxord 0x00(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x00(%rsi) + vextracti128 $1,%ymm7,%xmm0 + # o1 = i1 ^ (x1 + s1) + vpaddd %ymm9,%ymm1,%ymm7 + cmp $0x20,%rcx + jl .Lxorpart2 + vpxord 0x10(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x10(%rsi) + vextracti128 $1,%ymm7,%xmm1 + # o2 = i2 ^ (x2 + s2) + vpaddd %ymm10,%ymm2,%ymm7 + cmp $0x30,%rcx + jl .Lxorpart2 + vpxord 0x20(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x20(%rsi) + vextracti128 $1,%ymm7,%xmm2 + # o3 = i3 ^ (x3 + s3) + vpaddd %ymm11,%ymm3,%ymm7 + cmp $0x40,%rcx + jl .Lxorpart2 + vpxord 0x30(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x30(%rsi) + vextracti128 $1,%ymm7,%xmm3 + + # xor and write second block + vmovdqa %xmm0,%xmm7 + cmp $0x50,%rcx + jl .Lxorpart2 + vpxord 0x40(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x40(%rsi) + + vmovdqa %xmm1,%xmm7 + cmp $0x60,%rcx + jl .Lxorpart2 + vpxord 0x50(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x50(%rsi) + + vmovdqa %xmm2,%xmm7 + cmp $0x70,%rcx + jl .Lxorpart2 + vpxord 0x60(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x60(%rsi) + + vmovdqa %xmm3,%xmm7 + cmp $0x80,%rcx + jl .Lxorpart2 + vpxord 0x70(%rdx),%xmm7,%xmm6 + vmovdqu %xmm6,0x70(%rsi) + +.Ldone2: + vzeroupper + ret + +.Lxorpart2: + # xor remaining bytes from partial register into output + mov %rcx,%rax + and $0xf,%rcx + jz .Ldone8 + mov %rax,%r9 + and $~0xf,%r9 + + mov $1,%rax + shld %cl,%rax,%rax + sub $1,%rax + kmovq %rax,%k1 + + vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z} + vpxord %xmm7,%xmm1,%xmm1 + vmovdqu8 %xmm1,(%rsi,%r9){%k1} + + jmp .Ldone2 + +ENDPROC(chacha_2block_xor_avx512vl) + +ENTRY(chacha_4block_xor_avx512vl) + # %rdi: Input state matrix, s + # %rsi: up to 4 data blocks output, o + # %rdx: up to 4 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts 
four ChaCha blocks by loading the state + # matrix four times across eight AVX registers. It performs matrix + # operations on four words in two matrices in parallel, sequentially + # to the operations on the four words of the other two matrices. The + # required word shuffling has a rather high latency, we can do the + # arithmetic on two matrix-pairs without much slowdown. + + vzeroupper + + # x0..3[0-4] = s0..3 + vbroadcasti128 0x00(%rdi),%ymm0 + vbroadcasti128 0x10(%rdi),%ymm1 + vbroadcasti128 0x20(%rdi),%ymm2 + vbroadcasti128 0x30(%rdi),%ymm3 + + vmovdqa %ymm0,%ymm4 + vmovdqa %ymm1,%ymm5 + vmovdqa %ymm2,%ymm6 + vmovdqa %ymm3,%ymm7 + + vpaddd CTR2BL(%rip),%ymm3,%ymm3 + vpaddd CTR4BL(%rip),%ymm7,%ymm7 + + vmovdqa %ymm0,%ymm11 + vmovdqa %ymm1,%ymm12 + vmovdqa %ymm2,%ymm13 + vmovdqa %ymm3,%ymm14 + vmovdqa %ymm7,%ymm15 + +.Ldoubleround4: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $16,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxord %ymm4,%ymm7,%ymm7 + vprold $16,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $12,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxord %ymm6,%ymm5,%ymm5 + vprold $12,%ymm5,%ymm5 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $8,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxord %ymm4,%ymm7,%ymm7 + vprold $8,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $7,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxord %ymm6,%ymm5,%ymm5 + vprold $7,%ymm5,%ymm5 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm1,%ymm1 + vpshufd $0x39,%ymm5,%ymm5 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + vpshufd $0x4e,%ymm6,%ymm6 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm3,%ymm3 + vpshufd $0x93,%ymm7,%ymm7 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $16,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxord %ymm4,%ymm7,%ymm7 + vprold $16,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $12,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxord %ymm6,%ymm5,%ymm5 + vprold $12,%ymm5,%ymm5 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + vpaddd %ymm1,%ymm0,%ymm0 + vpxord %ymm0,%ymm3,%ymm3 + vprold $8,%ymm3,%ymm3 + + vpaddd %ymm5,%ymm4,%ymm4 + vpxord %ymm4,%ymm7,%ymm7 + vprold $8,%ymm7,%ymm7 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + vpaddd %ymm3,%ymm2,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vprold $7,%ymm1,%ymm1 + + vpaddd %ymm7,%ymm6,%ymm6 + vpxord %ymm6,%ymm5,%ymm5 + vprold $7,%ymm5,%ymm5 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + vpshufd $0x93,%ymm1,%ymm1 + vpshufd $0x93,%ymm5,%ymm5 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + vpshufd $0x4e,%ymm2,%ymm2 + vpshufd $0x4e,%ymm6,%ymm6 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + vpshufd $0x39,%ymm3,%ymm3 + vpshufd $0x39,%ymm7,%ymm7 + + sub $2,%r8d + jnz .Ldoubleround4 + + # o0 = i0 ^ (x0 + s0), first block + vpaddd %ymm11,%ymm0,%ymm10 + cmp $0x10,%rcx + jl .Lxorpart4 + vpxord 0x00(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x00(%rsi) + vextracti128 $1,%ymm10,%xmm0 + # o1 = i1 ^ (x1 + s1), first block + vpaddd %ymm12,%ymm1,%ymm10 + cmp $0x20,%rcx + jl .Lxorpart4 + vpxord 0x10(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x10(%rsi) + vextracti128 $1,%ymm10,%xmm1 + # o2 = i2 ^ (x2 + s2), first block + vpaddd %ymm13,%ymm2,%ymm10 + cmp $0x30,%rcx + jl .Lxorpart4 + 
vpxord 0x20(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x20(%rsi) + vextracti128 $1,%ymm10,%xmm2 + # o3 = i3 ^ (x3 + s3), first block + vpaddd %ymm14,%ymm3,%ymm10 + cmp $0x40,%rcx + jl .Lxorpart4 + vpxord 0x30(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x30(%rsi) + vextracti128 $1,%ymm10,%xmm3 + + # xor and write second block + vmovdqa %xmm0,%xmm10 + cmp $0x50,%rcx + jl .Lxorpart4 + vpxord 0x40(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x40(%rsi) + + vmovdqa %xmm1,%xmm10 + cmp $0x60,%rcx + jl .Lxorpart4 + vpxord 0x50(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x50(%rsi) + + vmovdqa %xmm2,%xmm10 + cmp $0x70,%rcx + jl .Lxorpart4 + vpxord 0x60(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x60(%rsi) + + vmovdqa %xmm3,%xmm10 + cmp $0x80,%rcx + jl .Lxorpart4 + vpxord 0x70(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x70(%rsi) + + # o0 = i0 ^ (x0 + s0), third block + vpaddd %ymm11,%ymm4,%ymm10 + cmp $0x90,%rcx + jl .Lxorpart4 + vpxord 0x80(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x80(%rsi) + vextracti128 $1,%ymm10,%xmm4 + # o1 = i1 ^ (x1 + s1), third block + vpaddd %ymm12,%ymm5,%ymm10 + cmp $0xa0,%rcx + jl .Lxorpart4 + vpxord 0x90(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0x90(%rsi) + vextracti128 $1,%ymm10,%xmm5 + # o2 = i2 ^ (x2 + s2), third block + vpaddd %ymm13,%ymm6,%ymm10 + cmp $0xb0,%rcx + jl .Lxorpart4 + vpxord 0xa0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xa0(%rsi) + vextracti128 $1,%ymm10,%xmm6 + # o3 = i3 ^ (x3 + s3), third block + vpaddd %ymm15,%ymm7,%ymm10 + cmp $0xc0,%rcx + jl .Lxorpart4 + vpxord 0xb0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xb0(%rsi) + vextracti128 $1,%ymm10,%xmm7 + + # xor and write fourth block + vmovdqa %xmm4,%xmm10 + cmp $0xd0,%rcx + jl .Lxorpart4 + vpxord 0xc0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xc0(%rsi) + + vmovdqa %xmm5,%xmm10 + cmp $0xe0,%rcx + jl .Lxorpart4 + vpxord 0xd0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xd0(%rsi) + + vmovdqa %xmm6,%xmm10 + cmp $0xf0,%rcx + jl .Lxorpart4 + vpxord 0xe0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xe0(%rsi) + + vmovdqa %xmm7,%xmm10 + cmp $0x100,%rcx + jl .Lxorpart4 + vpxord 0xf0(%rdx),%xmm10,%xmm9 + vmovdqu %xmm9,0xf0(%rsi) + +.Ldone4: + vzeroupper + ret + +.Lxorpart4: + # xor remaining bytes from partial register into output + mov %rcx,%rax + and $0xf,%rcx + jz .Ldone8 + mov %rax,%r9 + and $~0xf,%r9 + + mov $1,%rax + shld %cl,%rax,%rax + sub $1,%rax + kmovq %rax,%k1 + + vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z} + vpxord %xmm10,%xmm1,%xmm1 + vmovdqu8 %xmm1,(%rsi,%r9){%k1} + + jmp .Ldone4 + +ENDPROC(chacha_4block_xor_avx512vl) + +ENTRY(chacha_8block_xor_avx512vl) + # %rdi: Input state matrix, s + # %rsi: up to 8 data blocks output, o + # %rdx: up to 8 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + + # This function encrypts eight consecutive ChaCha blocks by loading + # the state matrix in AVX registers eight times. Compared to AVX2, this + # mostly benefits from the new rotate instructions in VL and the + # additional registers. 
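Besides the single-instruction rotates, the VL version also handles partial final blocks differently: instead of the AVX2 code's rep-movsb bounce buffer on the stack, the .Lxorpart* paths below build a byte mask with shld/kmovq and use masked vmovdqu8 loads and stores (byte-granular masking additionally requires AVX512BW). A rough C intrinsics sketch of that tail handling, with illustrative names:

    #include <immintrin.h>
    #include <stdint.h>

    /* XOR the 1..31 leftover keystream bytes into the output without
     * touching memory past the end of either buffer. */
    static void xor_tail(uint8_t *dst, const uint8_t *src,
                         __m256i keystream, unsigned int len)
    {
            __mmask32 k = ((uint32_t)1 << len) - 1;  /* the shld/sub pair */
            __m256i in = _mm256_maskz_loadu_epi8(k, src);

            _mm256_mask_storeu_epi8(dst, k, _mm256_xor_si256(in, keystream));
    }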
+ + vzeroupper + + # x0..15[0-7] = s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpbroadcastd 0x04(%rdi),%ymm1 + vpbroadcastd 0x08(%rdi),%ymm2 + vpbroadcastd 0x0c(%rdi),%ymm3 + vpbroadcastd 0x10(%rdi),%ymm4 + vpbroadcastd 0x14(%rdi),%ymm5 + vpbroadcastd 0x18(%rdi),%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm7 + vpbroadcastd 0x20(%rdi),%ymm8 + vpbroadcastd 0x24(%rdi),%ymm9 + vpbroadcastd 0x28(%rdi),%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm11 + vpbroadcastd 0x30(%rdi),%ymm12 + vpbroadcastd 0x34(%rdi),%ymm13 + vpbroadcastd 0x38(%rdi),%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm15 + + # x12 += counter values 0-3 + vpaddd CTR8BL(%rip),%ymm12,%ymm12 + + vmovdqa64 %ymm0,%ymm16 + vmovdqa64 %ymm1,%ymm17 + vmovdqa64 %ymm2,%ymm18 + vmovdqa64 %ymm3,%ymm19 + vmovdqa64 %ymm4,%ymm20 + vmovdqa64 %ymm5,%ymm21 + vmovdqa64 %ymm6,%ymm22 + vmovdqa64 %ymm7,%ymm23 + vmovdqa64 %ymm8,%ymm24 + vmovdqa64 %ymm9,%ymm25 + vmovdqa64 %ymm10,%ymm26 + vmovdqa64 %ymm11,%ymm27 + vmovdqa64 %ymm12,%ymm28 + vmovdqa64 %ymm13,%ymm29 + vmovdqa64 %ymm14,%ymm30 + vmovdqa64 %ymm15,%ymm31 + +.Ldoubleround8: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + vpaddd %ymm0,%ymm4,%ymm0 + vpxord %ymm0,%ymm12,%ymm12 + vprold $16,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + vpaddd %ymm1,%ymm5,%ymm1 + vpxord %ymm1,%ymm13,%ymm13 + vprold $16,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + vpaddd %ymm2,%ymm6,%ymm2 + vpxord %ymm2,%ymm14,%ymm14 + vprold $16,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + vpaddd %ymm3,%ymm7,%ymm3 + vpxord %ymm3,%ymm15,%ymm15 + vprold $16,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + vpaddd %ymm12,%ymm8,%ymm8 + vpxord %ymm8,%ymm4,%ymm4 + vprold $12,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + vpaddd %ymm13,%ymm9,%ymm9 + vpxord %ymm9,%ymm5,%ymm5 + vprold $12,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + vpaddd %ymm14,%ymm10,%ymm10 + vpxord %ymm10,%ymm6,%ymm6 + vprold $12,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + vpaddd %ymm15,%ymm11,%ymm11 + vpxord %ymm11,%ymm7,%ymm7 + vprold $12,%ymm7,%ymm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + vpaddd %ymm0,%ymm4,%ymm0 + vpxord %ymm0,%ymm12,%ymm12 + vprold $8,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + vpaddd %ymm1,%ymm5,%ymm1 + vpxord %ymm1,%ymm13,%ymm13 + vprold $8,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + vpaddd %ymm2,%ymm6,%ymm2 + vpxord %ymm2,%ymm14,%ymm14 + vprold $8,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + vpaddd %ymm3,%ymm7,%ymm3 + vpxord %ymm3,%ymm15,%ymm15 + vprold $8,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + vpaddd %ymm12,%ymm8,%ymm8 + vpxord %ymm8,%ymm4,%ymm4 + vprold $7,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + vpaddd %ymm13,%ymm9,%ymm9 + vpxord %ymm9,%ymm5,%ymm5 + vprold $7,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + vpaddd %ymm14,%ymm10,%ymm10 + vpxord %ymm10,%ymm6,%ymm6 + vprold $7,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + vpaddd %ymm15,%ymm11,%ymm11 + vpxord %ymm11,%ymm7,%ymm7 + vprold $7,%ymm7,%ymm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + vpaddd %ymm0,%ymm5,%ymm0 + vpxord %ymm0,%ymm15,%ymm15 + vprold $16,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16) + vpaddd %ymm1,%ymm6,%ymm1 + vpxord %ymm1,%ymm12,%ymm12 + vprold $16,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + vpaddd %ymm2,%ymm7,%ymm2 + vpxord %ymm2,%ymm13,%ymm13 + vprold $16,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + vpaddd %ymm3,%ymm4,%ymm3 + vpxord %ymm3,%ymm14,%ymm14 + vprold $16,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ 
x10, 12) + vpaddd %ymm15,%ymm10,%ymm10 + vpxord %ymm10,%ymm5,%ymm5 + vprold $12,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + vpaddd %ymm12,%ymm11,%ymm11 + vpxord %ymm11,%ymm6,%ymm6 + vprold $12,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + vpaddd %ymm13,%ymm8,%ymm8 + vpxord %ymm8,%ymm7,%ymm7 + vprold $12,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + vpaddd %ymm14,%ymm9,%ymm9 + vpxord %ymm9,%ymm4,%ymm4 + vprold $12,%ymm4,%ymm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + vpaddd %ymm0,%ymm5,%ymm0 + vpxord %ymm0,%ymm15,%ymm15 + vprold $8,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + vpaddd %ymm1,%ymm6,%ymm1 + vpxord %ymm1,%ymm12,%ymm12 + vprold $8,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + vpaddd %ymm2,%ymm7,%ymm2 + vpxord %ymm2,%ymm13,%ymm13 + vprold $8,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + vpaddd %ymm3,%ymm4,%ymm3 + vpxord %ymm3,%ymm14,%ymm14 + vprold $8,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + vpaddd %ymm15,%ymm10,%ymm10 + vpxord %ymm10,%ymm5,%ymm5 + vprold $7,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + vpaddd %ymm12,%ymm11,%ymm11 + vpxord %ymm11,%ymm6,%ymm6 + vprold $7,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + vpaddd %ymm13,%ymm8,%ymm8 + vpxord %ymm8,%ymm7,%ymm7 + vprold $7,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + vpaddd %ymm14,%ymm9,%ymm9 + vpxord %ymm9,%ymm4,%ymm4 + vprold $7,%ymm4,%ymm4 + + sub $2,%r8d + jnz .Ldoubleround8 + + # x0..15[0-3] += s[0..15] + vpaddd %ymm16,%ymm0,%ymm0 + vpaddd %ymm17,%ymm1,%ymm1 + vpaddd %ymm18,%ymm2,%ymm2 + vpaddd %ymm19,%ymm3,%ymm3 + vpaddd %ymm20,%ymm4,%ymm4 + vpaddd %ymm21,%ymm5,%ymm5 + vpaddd %ymm22,%ymm6,%ymm6 + vpaddd %ymm23,%ymm7,%ymm7 + vpaddd %ymm24,%ymm8,%ymm8 + vpaddd %ymm25,%ymm9,%ymm9 + vpaddd %ymm26,%ymm10,%ymm10 + vpaddd %ymm27,%ymm11,%ymm11 + vpaddd %ymm28,%ymm12,%ymm12 + vpaddd %ymm29,%ymm13,%ymm13 + vpaddd %ymm30,%ymm14,%ymm14 + vpaddd %ymm31,%ymm15,%ymm15 + + # interleave 32-bit words in state n, n+1 + vpunpckldq %ymm1,%ymm0,%ymm16 + vpunpckhdq %ymm1,%ymm0,%ymm17 + vpunpckldq %ymm3,%ymm2,%ymm18 + vpunpckhdq %ymm3,%ymm2,%ymm19 + vpunpckldq %ymm5,%ymm4,%ymm20 + vpunpckhdq %ymm5,%ymm4,%ymm21 + vpunpckldq %ymm7,%ymm6,%ymm22 + vpunpckhdq %ymm7,%ymm6,%ymm23 + vpunpckldq %ymm9,%ymm8,%ymm24 + vpunpckhdq %ymm9,%ymm8,%ymm25 + vpunpckldq %ymm11,%ymm10,%ymm26 + vpunpckhdq %ymm11,%ymm10,%ymm27 + vpunpckldq %ymm13,%ymm12,%ymm28 + vpunpckhdq %ymm13,%ymm12,%ymm29 + vpunpckldq %ymm15,%ymm14,%ymm30 + vpunpckhdq %ymm15,%ymm14,%ymm31 + + # interleave 64-bit words in state n, n+2 + vpunpcklqdq %ymm18,%ymm16,%ymm0 + vpunpcklqdq %ymm19,%ymm17,%ymm1 + vpunpckhqdq %ymm18,%ymm16,%ymm2 + vpunpckhqdq %ymm19,%ymm17,%ymm3 + vpunpcklqdq %ymm22,%ymm20,%ymm4 + vpunpcklqdq %ymm23,%ymm21,%ymm5 + vpunpckhqdq %ymm22,%ymm20,%ymm6 + vpunpckhqdq %ymm23,%ymm21,%ymm7 + vpunpcklqdq %ymm26,%ymm24,%ymm8 + vpunpcklqdq %ymm27,%ymm25,%ymm9 + vpunpckhqdq %ymm26,%ymm24,%ymm10 + vpunpckhqdq %ymm27,%ymm25,%ymm11 + vpunpcklqdq %ymm30,%ymm28,%ymm12 + vpunpcklqdq %ymm31,%ymm29,%ymm13 + vpunpckhqdq %ymm30,%ymm28,%ymm14 + vpunpckhqdq %ymm31,%ymm29,%ymm15 + + # interleave 128-bit words in state n, n+4 + # xor/write first four blocks + vmovdqa64 %ymm0,%ymm16 + vperm2i128 $0x20,%ymm4,%ymm0,%ymm0 + cmp $0x0020,%rcx + jl .Lxorpart8 + vpxord 0x0000(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0000(%rsi) + vmovdqa64 %ymm16,%ymm0 + vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 + + vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 + cmp $0x0040,%rcx + jl .Lxorpart8 + vpxord 0x0020(%rdx),%ymm0,%ymm0 + vmovdqu64 
%ymm0,0x0020(%rsi) + vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 + + vperm2i128 $0x20,%ymm6,%ymm2,%ymm0 + cmp $0x0060,%rcx + jl .Lxorpart8 + vpxord 0x0040(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0040(%rsi) + vperm2i128 $0x31,%ymm6,%ymm2,%ymm6 + + vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 + cmp $0x0080,%rcx + jl .Lxorpart8 + vpxord 0x0060(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0060(%rsi) + vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 + + vperm2i128 $0x20,%ymm5,%ymm1,%ymm0 + cmp $0x00a0,%rcx + jl .Lxorpart8 + vpxord 0x0080(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0080(%rsi) + vperm2i128 $0x31,%ymm5,%ymm1,%ymm5 + + vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 + cmp $0x00c0,%rcx + jl .Lxorpart8 + vpxord 0x00a0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x00a0(%rsi) + vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 + + vperm2i128 $0x20,%ymm7,%ymm3,%ymm0 + cmp $0x00e0,%rcx + jl .Lxorpart8 + vpxord 0x00c0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x00c0(%rsi) + vperm2i128 $0x31,%ymm7,%ymm3,%ymm7 + + vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 + cmp $0x0100,%rcx + jl .Lxorpart8 + vpxord 0x00e0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x00e0(%rsi) + vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 + + # xor remaining blocks, write to output + vmovdqa64 %ymm4,%ymm0 + cmp $0x0120,%rcx + jl .Lxorpart8 + vpxord 0x0100(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0100(%rsi) + + vmovdqa64 %ymm12,%ymm0 + cmp $0x0140,%rcx + jl .Lxorpart8 + vpxord 0x0120(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0120(%rsi) + + vmovdqa64 %ymm6,%ymm0 + cmp $0x0160,%rcx + jl .Lxorpart8 + vpxord 0x0140(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0140(%rsi) + + vmovdqa64 %ymm14,%ymm0 + cmp $0x0180,%rcx + jl .Lxorpart8 + vpxord 0x0160(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0160(%rsi) + + vmovdqa64 %ymm5,%ymm0 + cmp $0x01a0,%rcx + jl .Lxorpart8 + vpxord 0x0180(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x0180(%rsi) + + vmovdqa64 %ymm13,%ymm0 + cmp $0x01c0,%rcx + jl .Lxorpart8 + vpxord 0x01a0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x01a0(%rsi) + + vmovdqa64 %ymm7,%ymm0 + cmp $0x01e0,%rcx + jl .Lxorpart8 + vpxord 0x01c0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x01c0(%rsi) + + vmovdqa64 %ymm15,%ymm0 + cmp $0x0200,%rcx + jl .Lxorpart8 + vpxord 0x01e0(%rdx),%ymm0,%ymm0 + vmovdqu64 %ymm0,0x01e0(%rsi) + +.Ldone8: + vzeroupper + ret + +.Lxorpart8: + # xor remaining bytes from partial register into output + mov %rcx,%rax + and $0x1f,%rcx + jz .Ldone8 + mov %rax,%r9 + and $~0x1f,%r9 + + mov $1,%rax + shld %cl,%rax,%rax + sub $1,%rax + kmovq %rax,%k1 + + vmovdqu8 (%rdx,%r9),%ymm1{%k1}{z} + vpxord %ymm0,%ymm1,%ymm1 + vmovdqu8 %ymm1,(%rsi,%r9){%k1} + + jmp .Ldone8 + +ENDPROC(chacha_8block_xor_avx512vl) diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index 512a2b500fd1..c05a7a963dc3 100644 --- a/arch/x86/crypto/chacha20-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -1,5 +1,5 @@ /* - * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions + * ChaCha 256-bit cipher algorithm, x64 SSSE3 functions * * Copyright (C) 2015 Martin Willi * @@ -10,6 +10,7 @@ */ #include <linux/linkage.h> +#include <asm/frame.h> .section .rodata.cst16.ROT8, "aM", @progbits, 16 .align 16 @@ -23,35 +24,25 @@ CTRINC: .octa 0x00000003000000020000000100000000 .text -ENTRY(chacha20_block_xor_ssse3) - # %rdi: Input state matrix, s - # %rsi: 1 data block output, o - # %rdx: 1 data block input, i - - # This function encrypts one ChaCha20 block by loading the state matrix - # in four SSE registers. 
It performs matrix operation on four words in - # parallel, but requireds shuffling to rearrange the words after each - # round. 8/16-bit word rotation is done with the slightly better - # performing SSSE3 byte shuffling, 7/12-bit word rotation uses - # traditional shift+OR. - - # x0..3 = s0..3 - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 - movdqa %xmm0,%xmm8 - movdqa %xmm1,%xmm9 - movdqa %xmm2,%xmm10 - movdqa %xmm3,%xmm11 +/* + * chacha_permute - permute one block + * + * Permute one 64-byte block where the state matrix is in %xmm0-%xmm3. This + * function performs matrix operations on four words in parallel, but requires + * shuffling to rearrange the words after each round. 8/16-bit word rotation is + * done with the slightly better performing SSSE3 byte shuffling, 7/12-bit word + * rotation uses traditional shift+OR. + * + * The round count is given in %r8d. + * + * Clobbers: %r8d, %xmm4-%xmm7 + */ +chacha_permute: movdqa ROT8(%rip),%xmm4 movdqa ROT16(%rip),%xmm5 - mov $10,%ecx - .Ldoubleround: - # x0 += x1, x3 = rotl32(x3 ^ x0, 16) paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 @@ -118,39 +109,129 @@ ENTRY(chacha20_block_xor_ssse3) # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) pshufd $0x39,%xmm3,%xmm3 - dec %ecx + sub $2,%r8d jnz .Ldoubleround + ret +ENDPROC(chacha_permute) + +ENTRY(chacha_block_xor_ssse3) + # %rdi: Input state matrix, s + # %rsi: up to 1 data block output, o + # %rdx: up to 1 data block input, i + # %rcx: input/output length in bytes + # %r8d: nrounds + FRAME_BEGIN + + # x0..3 = s0..3 + movdqa 0x00(%rdi),%xmm0 + movdqa 0x10(%rdi),%xmm1 + movdqa 0x20(%rdi),%xmm2 + movdqa 0x30(%rdi),%xmm3 + movdqa %xmm0,%xmm8 + movdqa %xmm1,%xmm9 + movdqa %xmm2,%xmm10 + movdqa %xmm3,%xmm11 + + mov %rcx,%rax + call chacha_permute + # o0 = i0 ^ (x0 + s0) - movdqu 0x00(%rdx),%xmm4 paddd %xmm8,%xmm0 + cmp $0x10,%rax + jl .Lxorpart + movdqu 0x00(%rdx),%xmm4 pxor %xmm4,%xmm0 movdqu %xmm0,0x00(%rsi) # o1 = i1 ^ (x1 + s1) - movdqu 0x10(%rdx),%xmm5 paddd %xmm9,%xmm1 - pxor %xmm5,%xmm1 - movdqu %xmm1,0x10(%rsi) + movdqa %xmm1,%xmm0 + cmp $0x20,%rax + jl .Lxorpart + movdqu 0x10(%rdx),%xmm0 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x10(%rsi) # o2 = i2 ^ (x2 + s2) - movdqu 0x20(%rdx),%xmm6 paddd %xmm10,%xmm2 - pxor %xmm6,%xmm2 - movdqu %xmm2,0x20(%rsi) + movdqa %xmm2,%xmm0 + cmp $0x30,%rax + jl .Lxorpart + movdqu 0x20(%rdx),%xmm0 + pxor %xmm2,%xmm0 + movdqu %xmm0,0x20(%rsi) # o3 = i3 ^ (x3 + s3) - movdqu 0x30(%rdx),%xmm7 paddd %xmm11,%xmm3 - pxor %xmm7,%xmm3 - movdqu %xmm3,0x30(%rsi) + movdqa %xmm3,%xmm0 + cmp $0x40,%rax + jl .Lxorpart + movdqu 0x30(%rdx),%xmm0 + pxor %xmm3,%xmm0 + movdqu %xmm0,0x30(%rsi) + +.Ldone: + FRAME_END + ret + +.Lxorpart: + # xor remaining bytes from partial register into output + mov %rax,%r9 + and $0x0f,%r9 + jz .Ldone + and $~0x0f,%rax + + mov %rsi,%r11 + + lea 8(%rsp),%r10 + sub $0x10,%rsp + and $~31,%rsp + + lea (%rdx,%rax),%rsi + mov %rsp,%rdi + mov %r9,%rcx + rep movsb + + pxor 0x00(%rsp),%xmm0 + movdqa %xmm0,0x00(%rsp) + mov %rsp,%rsi + lea (%r11,%rax),%rdi + mov %r9,%rcx + rep movsb + + lea -8(%r10),%rsp + jmp .Ldone + +ENDPROC(chacha_block_xor_ssse3) + +ENTRY(hchacha_block_ssse3) + # %rdi: Input state matrix, s + # %rsi: output (8 32-bit words) + # %edx: nrounds + FRAME_BEGIN + + movdqa 0x00(%rdi),%xmm0 + movdqa 0x10(%rdi),%xmm1 + movdqa 0x20(%rdi),%xmm2 + movdqa 0x30(%rdi),%xmm3 + + mov %edx,%r8d + call chacha_permute + + movdqu %xmm0,0x00(%rsi) + movdqu %xmm3,0x10(%rsi) + + FRAME_END ret -ENDPROC(chacha20_block_xor_ssse3) 
+ENDPROC(hchacha_block_ssse3) -ENTRY(chacha20_4block_xor_ssse3) +ENTRY(chacha_4block_xor_ssse3) # %rdi: Input state matrix, s - # %rsi: 4 data blocks output, o - # %rdx: 4 data blocks input, i + # %rsi: up to 4 data blocks output, o + # %rdx: up to 4 data blocks input, i + # %rcx: input/output length in bytes + # %r8d: nrounds - # This function encrypts four consecutive ChaCha20 blocks by loading the + # This function encrypts four consecutive ChaCha blocks by loading the # the state matrix in SSE registers four times. As we need some scratch # registers, we save the first four registers on the stack. The # algorithm performs each operation on the corresponding word of each @@ -163,6 +244,7 @@ ENTRY(chacha20_4block_xor_ssse3) lea 8(%rsp),%r10 sub $0x80,%rsp and $~63,%rsp + mov %rcx,%rax # x0..15[0-3] = s0..3[0..3] movq 0x00(%rdi),%xmm1 @@ -202,8 +284,6 @@ ENTRY(chacha20_4block_xor_ssse3) # x12 += counter values 0-3 paddd %xmm1,%xmm12 - mov $10,%ecx - .Ldoubleround4: # x0 += x4, x12 = rotl32(x12 ^ x0, 16) movdqa 0x00(%rsp),%xmm0 @@ -421,7 +501,7 @@ ENTRY(chacha20_4block_xor_ssse3) psrld $25,%xmm4 por %xmm0,%xmm4 - dec %ecx + sub $2,%r8d jnz .Ldoubleround4 # x0[0-3] += s0[0] @@ -573,58 +653,143 @@ ENTRY(chacha20_4block_xor_ssse3) # xor with corresponding input, write to output movdqa 0x00(%rsp),%xmm0 + cmp $0x10,%rax + jl .Lxorpart4 movdqu 0x00(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x00(%rsi) - movdqa 0x10(%rsp),%xmm0 - movdqu 0x80(%rdx),%xmm1 + + movdqu %xmm4,%xmm0 + cmp $0x20,%rax + jl .Lxorpart4 + movdqu 0x10(%rdx),%xmm1 pxor %xmm1,%xmm0 - movdqu %xmm0,0x80(%rsi) + movdqu %xmm0,0x10(%rsi) + + movdqu %xmm8,%xmm0 + cmp $0x30,%rax + jl .Lxorpart4 + movdqu 0x20(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x20(%rsi) + + movdqu %xmm12,%xmm0 + cmp $0x40,%rax + jl .Lxorpart4 + movdqu 0x30(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x30(%rsi) + movdqa 0x20(%rsp),%xmm0 + cmp $0x50,%rax + jl .Lxorpart4 movdqu 0x40(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x40(%rsi) + + movdqu %xmm6,%xmm0 + cmp $0x60,%rax + jl .Lxorpart4 + movdqu 0x50(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x50(%rsi) + + movdqu %xmm10,%xmm0 + cmp $0x70,%rax + jl .Lxorpart4 + movdqu 0x60(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x60(%rsi) + + movdqu %xmm14,%xmm0 + cmp $0x80,%rax + jl .Lxorpart4 + movdqu 0x70(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x70(%rsi) + + movdqa 0x10(%rsp),%xmm0 + cmp $0x90,%rax + jl .Lxorpart4 + movdqu 0x80(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x80(%rsi) + + movdqu %xmm5,%xmm0 + cmp $0xa0,%rax + jl .Lxorpart4 + movdqu 0x90(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x90(%rsi) + + movdqu %xmm9,%xmm0 + cmp $0xb0,%rax + jl .Lxorpart4 + movdqu 0xa0(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0xa0(%rsi) + + movdqu %xmm13,%xmm0 + cmp $0xc0,%rax + jl .Lxorpart4 + movdqu 0xb0(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0xb0(%rsi) + movdqa 0x30(%rsp),%xmm0 + cmp $0xd0,%rax + jl .Lxorpart4 movdqu 0xc0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xc0(%rsi) - movdqu 0x10(%rdx),%xmm1 - pxor %xmm1,%xmm4 - movdqu %xmm4,0x10(%rsi) - movdqu 0x90(%rdx),%xmm1 - pxor %xmm1,%xmm5 - movdqu %xmm5,0x90(%rsi) - movdqu 0x50(%rdx),%xmm1 - pxor %xmm1,%xmm6 - movdqu %xmm6,0x50(%rsi) + + movdqu %xmm7,%xmm0 + cmp $0xe0,%rax + jl .Lxorpart4 movdqu 0xd0(%rdx),%xmm1 - pxor %xmm1,%xmm7 - movdqu %xmm7,0xd0(%rsi) - movdqu 0x20(%rdx),%xmm1 - pxor %xmm1,%xmm8 - movdqu %xmm8,0x20(%rsi) - movdqu 0xa0(%rdx),%xmm1 - pxor %xmm1,%xmm9 - movdqu %xmm9,0xa0(%rsi) - movdqu 0x60(%rdx),%xmm1 - pxor %xmm1,%xmm10 - 
movdqu %xmm10,0x60(%rsi) + pxor %xmm1,%xmm0 + movdqu %xmm0,0xd0(%rsi) + + movdqu %xmm11,%xmm0 + cmp $0xf0,%rax + jl .Lxorpart4 movdqu 0xe0(%rdx),%xmm1 - pxor %xmm1,%xmm11 - movdqu %xmm11,0xe0(%rsi) - movdqu 0x30(%rdx),%xmm1 - pxor %xmm1,%xmm12 - movdqu %xmm12,0x30(%rsi) - movdqu 0xb0(%rdx),%xmm1 - pxor %xmm1,%xmm13 - movdqu %xmm13,0xb0(%rsi) - movdqu 0x70(%rdx),%xmm1 - pxor %xmm1,%xmm14 - movdqu %xmm14,0x70(%rsi) + pxor %xmm1,%xmm0 + movdqu %xmm0,0xe0(%rsi) + + movdqu %xmm15,%xmm0 + cmp $0x100,%rax + jl .Lxorpart4 movdqu 0xf0(%rdx),%xmm1 - pxor %xmm1,%xmm15 - movdqu %xmm15,0xf0(%rsi) + pxor %xmm1,%xmm0 + movdqu %xmm0,0xf0(%rsi) +.Ldone4: lea -8(%r10),%rsp ret -ENDPROC(chacha20_4block_xor_ssse3) + +.Lxorpart4: + # xor remaining bytes from partial register into output + mov %rax,%r9 + and $0x0f,%r9 + jz .Ldone4 + and $~0x0f,%rax + + mov %rsi,%r11 + + lea (%rdx,%rax),%rsi + mov %rsp,%rdi + mov %r9,%rcx + rep movsb + + pxor 0x00(%rsp),%xmm0 + movdqa %xmm0,0x00(%rsp) + + mov %rsp,%rsi + lea (%r11,%rax),%rdi + mov %r9,%rcx + rep movsb + + jmp .Ldone4 + +ENDPROC(chacha_4block_xor_ssse3) diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S deleted file mode 100644 index f3cd26f48332..000000000000 --- a/arch/x86/crypto/chacha20-avx2-x86_64.S +++ /dev/null @@ -1,448 +0,0 @@ -/* - * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions - * - * Copyright (C) 2015 Martin Willi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/linkage.h> - -.section .rodata.cst32.ROT8, "aM", @progbits, 32 -.align 32 -ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 - .octa 0x0e0d0c0f0a09080b0605040702010003 - -.section .rodata.cst32.ROT16, "aM", @progbits, 32 -.align 32 -ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 - .octa 0x0d0c0f0e09080b0a0504070601000302 - -.section .rodata.cst32.CTRINC, "aM", @progbits, 32 -.align 32 -CTRINC: .octa 0x00000003000000020000000100000000 - .octa 0x00000007000000060000000500000004 - -.text - -ENTRY(chacha20_8block_xor_avx2) - # %rdi: Input state matrix, s - # %rsi: 8 data blocks output, o - # %rdx: 8 data blocks input, i - - # This function encrypts eight consecutive ChaCha20 blocks by loading - # the state matrix in AVX registers eight times. As we need some - # scratch registers, we save the first four registers on the stack. The - # algorithm performs each operation on the corresponding word of each - # state matrix, hence requires no word shuffling. For final XORing step - # we transpose the matrix by interleaving 32-, 64- and then 128-bit - # words, which allows us to do XOR in AVX registers. 8/16-bit word - # rotation is done with the slightly better performing byte shuffling, - # 7/12-bit word rotation uses traditional shift+OR. 
- - vzeroupper - # 4 * 32 byte stack, 32-byte aligned - lea 8(%rsp),%r10 - and $~31, %rsp - sub $0x80, %rsp - - # x0..15[0-7] = s[0..15] - vpbroadcastd 0x00(%rdi),%ymm0 - vpbroadcastd 0x04(%rdi),%ymm1 - vpbroadcastd 0x08(%rdi),%ymm2 - vpbroadcastd 0x0c(%rdi),%ymm3 - vpbroadcastd 0x10(%rdi),%ymm4 - vpbroadcastd 0x14(%rdi),%ymm5 - vpbroadcastd 0x18(%rdi),%ymm6 - vpbroadcastd 0x1c(%rdi),%ymm7 - vpbroadcastd 0x20(%rdi),%ymm8 - vpbroadcastd 0x24(%rdi),%ymm9 - vpbroadcastd 0x28(%rdi),%ymm10 - vpbroadcastd 0x2c(%rdi),%ymm11 - vpbroadcastd 0x30(%rdi),%ymm12 - vpbroadcastd 0x34(%rdi),%ymm13 - vpbroadcastd 0x38(%rdi),%ymm14 - vpbroadcastd 0x3c(%rdi),%ymm15 - # x0..3 on stack - vmovdqa %ymm0,0x00(%rsp) - vmovdqa %ymm1,0x20(%rsp) - vmovdqa %ymm2,0x40(%rsp) - vmovdqa %ymm3,0x60(%rsp) - - vmovdqa CTRINC(%rip),%ymm1 - vmovdqa ROT8(%rip),%ymm2 - vmovdqa ROT16(%rip),%ymm3 - - # x12 += counter values 0-3 - vpaddd %ymm1,%ymm12,%ymm12 - - mov $10,%ecx - -.Ldoubleround8: - # x0 += x4, x12 = rotl32(x12 ^ x0, 16) - vpaddd 0x00(%rsp),%ymm4,%ymm0 - vmovdqa %ymm0,0x00(%rsp) - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm3,%ymm12,%ymm12 - # x1 += x5, x13 = rotl32(x13 ^ x1, 16) - vpaddd 0x20(%rsp),%ymm5,%ymm0 - vmovdqa %ymm0,0x20(%rsp) - vpxor %ymm0,%ymm13,%ymm13 - vpshufb %ymm3,%ymm13,%ymm13 - # x2 += x6, x14 = rotl32(x14 ^ x2, 16) - vpaddd 0x40(%rsp),%ymm6,%ymm0 - vmovdqa %ymm0,0x40(%rsp) - vpxor %ymm0,%ymm14,%ymm14 - vpshufb %ymm3,%ymm14,%ymm14 - # x3 += x7, x15 = rotl32(x15 ^ x3, 16) - vpaddd 0x60(%rsp),%ymm7,%ymm0 - vmovdqa %ymm0,0x60(%rsp) - vpxor %ymm0,%ymm15,%ymm15 - vpshufb %ymm3,%ymm15,%ymm15 - - # x8 += x12, x4 = rotl32(x4 ^ x8, 12) - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $12,%ymm4,%ymm0 - vpsrld $20,%ymm4,%ymm4 - vpor %ymm0,%ymm4,%ymm4 - # x9 += x13, x5 = rotl32(x5 ^ x9, 12) - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $12,%ymm5,%ymm0 - vpsrld $20,%ymm5,%ymm5 - vpor %ymm0,%ymm5,%ymm5 - # x10 += x14, x6 = rotl32(x6 ^ x10, 12) - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $12,%ymm6,%ymm0 - vpsrld $20,%ymm6,%ymm6 - vpor %ymm0,%ymm6,%ymm6 - # x11 += x15, x7 = rotl32(x7 ^ x11, 12) - vpaddd %ymm15,%ymm11,%ymm11 - vpxor %ymm11,%ymm7,%ymm7 - vpslld $12,%ymm7,%ymm0 - vpsrld $20,%ymm7,%ymm7 - vpor %ymm0,%ymm7,%ymm7 - - # x0 += x4, x12 = rotl32(x12 ^ x0, 8) - vpaddd 0x00(%rsp),%ymm4,%ymm0 - vmovdqa %ymm0,0x00(%rsp) - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm2,%ymm12,%ymm12 - # x1 += x5, x13 = rotl32(x13 ^ x1, 8) - vpaddd 0x20(%rsp),%ymm5,%ymm0 - vmovdqa %ymm0,0x20(%rsp) - vpxor %ymm0,%ymm13,%ymm13 - vpshufb %ymm2,%ymm13,%ymm13 - # x2 += x6, x14 = rotl32(x14 ^ x2, 8) - vpaddd 0x40(%rsp),%ymm6,%ymm0 - vmovdqa %ymm0,0x40(%rsp) - vpxor %ymm0,%ymm14,%ymm14 - vpshufb %ymm2,%ymm14,%ymm14 - # x3 += x7, x15 = rotl32(x15 ^ x3, 8) - vpaddd 0x60(%rsp),%ymm7,%ymm0 - vmovdqa %ymm0,0x60(%rsp) - vpxor %ymm0,%ymm15,%ymm15 - vpshufb %ymm2,%ymm15,%ymm15 - - # x8 += x12, x4 = rotl32(x4 ^ x8, 7) - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm0 - vpsrld $25,%ymm4,%ymm4 - vpor %ymm0,%ymm4,%ymm4 - # x9 += x13, x5 = rotl32(x5 ^ x9, 7) - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm0 - vpsrld $25,%ymm5,%ymm5 - vpor %ymm0,%ymm5,%ymm5 - # x10 += x14, x6 = rotl32(x6 ^ x10, 7) - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm0 - vpsrld $25,%ymm6,%ymm6 - vpor %ymm0,%ymm6,%ymm6 - # x11 += x15, x7 = rotl32(x7 ^ x11, 7) - vpaddd %ymm15,%ymm11,%ymm11 - vpxor %ymm11,%ymm7,%ymm7 - vpslld $7,%ymm7,%ymm0 - vpsrld 
$25,%ymm7,%ymm7 - vpor %ymm0,%ymm7,%ymm7 - - # x0 += x5, x15 = rotl32(x15 ^ x0, 16) - vpaddd 0x00(%rsp),%ymm5,%ymm0 - vmovdqa %ymm0,0x00(%rsp) - vpxor %ymm0,%ymm15,%ymm15 - vpshufb %ymm3,%ymm15,%ymm15 - # x1 += x6, x12 = rotl32(x12 ^ x1, 16) - vpaddd 0x20(%rsp),%ymm6,%ymm0 - vmovdqa %ymm0,0x20(%rsp) - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm3,%ymm12,%ymm12 - # x2 += x7, x13 = rotl32(x13 ^ x2, 16) - vpaddd 0x40(%rsp),%ymm7,%ymm0 - vmovdqa %ymm0,0x40(%rsp) - vpxor %ymm0,%ymm13,%ymm13 - vpshufb %ymm3,%ymm13,%ymm13 - # x3 += x4, x14 = rotl32(x14 ^ x3, 16) - vpaddd 0x60(%rsp),%ymm4,%ymm0 - vmovdqa %ymm0,0x60(%rsp) - vpxor %ymm0,%ymm14,%ymm14 - vpshufb %ymm3,%ymm14,%ymm14 - - # x10 += x15, x5 = rotl32(x5 ^ x10, 12) - vpaddd %ymm15,%ymm10,%ymm10 - vpxor %ymm10,%ymm5,%ymm5 - vpslld $12,%ymm5,%ymm0 - vpsrld $20,%ymm5,%ymm5 - vpor %ymm0,%ymm5,%ymm5 - # x11 += x12, x6 = rotl32(x6 ^ x11, 12) - vpaddd %ymm12,%ymm11,%ymm11 - vpxor %ymm11,%ymm6,%ymm6 - vpslld $12,%ymm6,%ymm0 - vpsrld $20,%ymm6,%ymm6 - vpor %ymm0,%ymm6,%ymm6 - # x8 += x13, x7 = rotl32(x7 ^ x8, 12) - vpaddd %ymm13,%ymm8,%ymm8 - vpxor %ymm8,%ymm7,%ymm7 - vpslld $12,%ymm7,%ymm0 - vpsrld $20,%ymm7,%ymm7 - vpor %ymm0,%ymm7,%ymm7 - # x9 += x14, x4 = rotl32(x4 ^ x9, 12) - vpaddd %ymm14,%ymm9,%ymm9 - vpxor %ymm9,%ymm4,%ymm4 - vpslld $12,%ymm4,%ymm0 - vpsrld $20,%ymm4,%ymm4 - vpor %ymm0,%ymm4,%ymm4 - - # x0 += x5, x15 = rotl32(x15 ^ x0, 8) - vpaddd 0x00(%rsp),%ymm5,%ymm0 - vmovdqa %ymm0,0x00(%rsp) - vpxor %ymm0,%ymm15,%ymm15 - vpshufb %ymm2,%ymm15,%ymm15 - # x1 += x6, x12 = rotl32(x12 ^ x1, 8) - vpaddd 0x20(%rsp),%ymm6,%ymm0 - vmovdqa %ymm0,0x20(%rsp) - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm2,%ymm12,%ymm12 - # x2 += x7, x13 = rotl32(x13 ^ x2, 8) - vpaddd 0x40(%rsp),%ymm7,%ymm0 - vmovdqa %ymm0,0x40(%rsp) - vpxor %ymm0,%ymm13,%ymm13 - vpshufb %ymm2,%ymm13,%ymm13 - # x3 += x4, x14 = rotl32(x14 ^ x3, 8) - vpaddd 0x60(%rsp),%ymm4,%ymm0 - vmovdqa %ymm0,0x60(%rsp) - vpxor %ymm0,%ymm14,%ymm14 - vpshufb %ymm2,%ymm14,%ymm14 - - # x10 += x15, x5 = rotl32(x5 ^ x10, 7) - vpaddd %ymm15,%ymm10,%ymm10 - vpxor %ymm10,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm0 - vpsrld $25,%ymm5,%ymm5 - vpor %ymm0,%ymm5,%ymm5 - # x11 += x12, x6 = rotl32(x6 ^ x11, 7) - vpaddd %ymm12,%ymm11,%ymm11 - vpxor %ymm11,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm0 - vpsrld $25,%ymm6,%ymm6 - vpor %ymm0,%ymm6,%ymm6 - # x8 += x13, x7 = rotl32(x7 ^ x8, 7) - vpaddd %ymm13,%ymm8,%ymm8 - vpxor %ymm8,%ymm7,%ymm7 - vpslld $7,%ymm7,%ymm0 - vpsrld $25,%ymm7,%ymm7 - vpor %ymm0,%ymm7,%ymm7 - # x9 += x14, x4 = rotl32(x4 ^ x9, 7) - vpaddd %ymm14,%ymm9,%ymm9 - vpxor %ymm9,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm0 - vpsrld $25,%ymm4,%ymm4 - vpor %ymm0,%ymm4,%ymm4 - - dec %ecx - jnz .Ldoubleround8 - - # x0..15[0-3] += s[0..15] - vpbroadcastd 0x00(%rdi),%ymm0 - vpaddd 0x00(%rsp),%ymm0,%ymm0 - vmovdqa %ymm0,0x00(%rsp) - vpbroadcastd 0x04(%rdi),%ymm0 - vpaddd 0x20(%rsp),%ymm0,%ymm0 - vmovdqa %ymm0,0x20(%rsp) - vpbroadcastd 0x08(%rdi),%ymm0 - vpaddd 0x40(%rsp),%ymm0,%ymm0 - vmovdqa %ymm0,0x40(%rsp) - vpbroadcastd 0x0c(%rdi),%ymm0 - vpaddd 0x60(%rsp),%ymm0,%ymm0 - vmovdqa %ymm0,0x60(%rsp) - vpbroadcastd 0x10(%rdi),%ymm0 - vpaddd %ymm0,%ymm4,%ymm4 - vpbroadcastd 0x14(%rdi),%ymm0 - vpaddd %ymm0,%ymm5,%ymm5 - vpbroadcastd 0x18(%rdi),%ymm0 - vpaddd %ymm0,%ymm6,%ymm6 - vpbroadcastd 0x1c(%rdi),%ymm0 - vpaddd %ymm0,%ymm7,%ymm7 - vpbroadcastd 0x20(%rdi),%ymm0 - vpaddd %ymm0,%ymm8,%ymm8 - vpbroadcastd 0x24(%rdi),%ymm0 - vpaddd %ymm0,%ymm9,%ymm9 - vpbroadcastd 0x28(%rdi),%ymm0 - vpaddd %ymm0,%ymm10,%ymm10 - vpbroadcastd 0x2c(%rdi),%ymm0 -
vpaddd %ymm0,%ymm11,%ymm11 - vpbroadcastd 0x30(%rdi),%ymm0 - vpaddd %ymm0,%ymm12,%ymm12 - vpbroadcastd 0x34(%rdi),%ymm0 - vpaddd %ymm0,%ymm13,%ymm13 - vpbroadcastd 0x38(%rdi),%ymm0 - vpaddd %ymm0,%ymm14,%ymm14 - vpbroadcastd 0x3c(%rdi),%ymm0 - vpaddd %ymm0,%ymm15,%ymm15 - - # x12 += counter values 0-3 - vpaddd %ymm1,%ymm12,%ymm12 - - # interleave 32-bit words in state n, n+1 - vmovdqa 0x00(%rsp),%ymm0 - vmovdqa 0x20(%rsp),%ymm1 - vpunpckldq %ymm1,%ymm0,%ymm2 - vpunpckhdq %ymm1,%ymm0,%ymm1 - vmovdqa %ymm2,0x00(%rsp) - vmovdqa %ymm1,0x20(%rsp) - vmovdqa 0x40(%rsp),%ymm0 - vmovdqa 0x60(%rsp),%ymm1 - vpunpckldq %ymm1,%ymm0,%ymm2 - vpunpckhdq %ymm1,%ymm0,%ymm1 - vmovdqa %ymm2,0x40(%rsp) - vmovdqa %ymm1,0x60(%rsp) - vmovdqa %ymm4,%ymm0 - vpunpckldq %ymm5,%ymm0,%ymm4 - vpunpckhdq %ymm5,%ymm0,%ymm5 - vmovdqa %ymm6,%ymm0 - vpunpckldq %ymm7,%ymm0,%ymm6 - vpunpckhdq %ymm7,%ymm0,%ymm7 - vmovdqa %ymm8,%ymm0 - vpunpckldq %ymm9,%ymm0,%ymm8 - vpunpckhdq %ymm9,%ymm0,%ymm9 - vmovdqa %ymm10,%ymm0 - vpunpckldq %ymm11,%ymm0,%ymm10 - vpunpckhdq %ymm11,%ymm0,%ymm11 - vmovdqa %ymm12,%ymm0 - vpunpckldq %ymm13,%ymm0,%ymm12 - vpunpckhdq %ymm13,%ymm0,%ymm13 - vmovdqa %ymm14,%ymm0 - vpunpckldq %ymm15,%ymm0,%ymm14 - vpunpckhdq %ymm15,%ymm0,%ymm15 - - # interleave 64-bit words in state n, n+2 - vmovdqa 0x00(%rsp),%ymm0 - vmovdqa 0x40(%rsp),%ymm2 - vpunpcklqdq %ymm2,%ymm0,%ymm1 - vpunpckhqdq %ymm2,%ymm0,%ymm2 - vmovdqa %ymm1,0x00(%rsp) - vmovdqa %ymm2,0x40(%rsp) - vmovdqa 0x20(%rsp),%ymm0 - vmovdqa 0x60(%rsp),%ymm2 - vpunpcklqdq %ymm2,%ymm0,%ymm1 - vpunpckhqdq %ymm2,%ymm0,%ymm2 - vmovdqa %ymm1,0x20(%rsp) - vmovdqa %ymm2,0x60(%rsp) - vmovdqa %ymm4,%ymm0 - vpunpcklqdq %ymm6,%ymm0,%ymm4 - vpunpckhqdq %ymm6,%ymm0,%ymm6 - vmovdqa %ymm5,%ymm0 - vpunpcklqdq %ymm7,%ymm0,%ymm5 - vpunpckhqdq %ymm7,%ymm0,%ymm7 - vmovdqa %ymm8,%ymm0 - vpunpcklqdq %ymm10,%ymm0,%ymm8 - vpunpckhqdq %ymm10,%ymm0,%ymm10 - vmovdqa %ymm9,%ymm0 - vpunpcklqdq %ymm11,%ymm0,%ymm9 - vpunpckhqdq %ymm11,%ymm0,%ymm11 - vmovdqa %ymm12,%ymm0 - vpunpcklqdq %ymm14,%ymm0,%ymm12 - vpunpckhqdq %ymm14,%ymm0,%ymm14 - vmovdqa %ymm13,%ymm0 - vpunpcklqdq %ymm15,%ymm0,%ymm13 - vpunpckhqdq %ymm15,%ymm0,%ymm15 - - # interleave 128-bit words in state n, n+4 - vmovdqa 0x00(%rsp),%ymm0 - vperm2i128 $0x20,%ymm4,%ymm0,%ymm1 - vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 - vmovdqa %ymm1,0x00(%rsp) - vmovdqa 0x20(%rsp),%ymm0 - vperm2i128 $0x20,%ymm5,%ymm0,%ymm1 - vperm2i128 $0x31,%ymm5,%ymm0,%ymm5 - vmovdqa %ymm1,0x20(%rsp) - vmovdqa 0x40(%rsp),%ymm0 - vperm2i128 $0x20,%ymm6,%ymm0,%ymm1 - vperm2i128 $0x31,%ymm6,%ymm0,%ymm6 - vmovdqa %ymm1,0x40(%rsp) - vmovdqa 0x60(%rsp),%ymm0 - vperm2i128 $0x20,%ymm7,%ymm0,%ymm1 - vperm2i128 $0x31,%ymm7,%ymm0,%ymm7 - vmovdqa %ymm1,0x60(%rsp) - vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 - vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 - vmovdqa %ymm0,%ymm8 - vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 - vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 - vmovdqa %ymm0,%ymm9 - vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 - vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 - vmovdqa %ymm0,%ymm10 - vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 - vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 - vmovdqa %ymm0,%ymm11 - - # xor with corresponding input, write to output - vmovdqa 0x00(%rsp),%ymm0 - vpxor 0x0000(%rdx),%ymm0,%ymm0 - vmovdqu %ymm0,0x0000(%rsi) - vmovdqa 0x20(%rsp),%ymm0 - vpxor 0x0080(%rdx),%ymm0,%ymm0 - vmovdqu %ymm0,0x0080(%rsi) - vmovdqa 0x40(%rsp),%ymm0 - vpxor 0x0040(%rdx),%ymm0,%ymm0 - vmovdqu %ymm0,0x0040(%rsi) - vmovdqa 0x60(%rsp),%ymm0 - vpxor 0x00c0(%rdx),%ymm0,%ymm0 - vmovdqu %ymm0,0x00c0(%rsi) - vpxor 
0x0100(%rdx),%ymm4,%ymm4 - vmovdqu %ymm4,0x0100(%rsi) - vpxor 0x0180(%rdx),%ymm5,%ymm5 - vmovdqu %ymm5,0x00180(%rsi) - vpxor 0x0140(%rdx),%ymm6,%ymm6 - vmovdqu %ymm6,0x0140(%rsi) - vpxor 0x01c0(%rdx),%ymm7,%ymm7 - vmovdqu %ymm7,0x01c0(%rsi) - vpxor 0x0020(%rdx),%ymm8,%ymm8 - vmovdqu %ymm8,0x0020(%rsi) - vpxor 0x00a0(%rdx),%ymm9,%ymm9 - vmovdqu %ymm9,0x00a0(%rsi) - vpxor 0x0060(%rdx),%ymm10,%ymm10 - vmovdqu %ymm10,0x0060(%rsi) - vpxor 0x00e0(%rdx),%ymm11,%ymm11 - vmovdqu %ymm11,0x00e0(%rsi) - vpxor 0x0120(%rdx),%ymm12,%ymm12 - vmovdqu %ymm12,0x0120(%rsi) - vpxor 0x01a0(%rdx),%ymm13,%ymm13 - vmovdqu %ymm13,0x01a0(%rsi) - vpxor 0x0160(%rdx),%ymm14,%ymm14 - vmovdqu %ymm14,0x0160(%rsi) - vpxor 0x01e0(%rdx),%ymm15,%ymm15 - vmovdqu %ymm15,0x01e0(%rsi) - - vzeroupper - lea -8(%r10),%rsp - ret -ENDPROC(chacha20_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c deleted file mode 100644 index dce7c5d39c2f..000000000000 --- a/arch/x86/crypto/chacha20_glue.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code - * - * Copyright (C) 2015 Martin Willi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <crypto/algapi.h> -#include <crypto/chacha20.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <asm/fpu/api.h> -#include <asm/simd.h> - -#define CHACHA20_STATE_ALIGN 16 - -asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); -asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); -#ifdef CONFIG_AS_AVX2 -asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src); -static bool chacha20_use_avx2; -#endif - -static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes) -{ - u8 buf[CHACHA20_BLOCK_SIZE]; - -#ifdef CONFIG_AS_AVX2 - if (chacha20_use_avx2) { - while (bytes >= CHACHA20_BLOCK_SIZE * 8) { - chacha20_8block_xor_avx2(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE * 8; - src += CHACHA20_BLOCK_SIZE * 8; - dst += CHACHA20_BLOCK_SIZE * 8; - state[12] += 8; - } - } -#endif - while (bytes >= CHACHA20_BLOCK_SIZE * 4) { - chacha20_4block_xor_ssse3(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE * 4; - src += CHACHA20_BLOCK_SIZE * 4; - dst += CHACHA20_BLOCK_SIZE * 4; - state[12] += 4; - } - while (bytes >= CHACHA20_BLOCK_SIZE) { - chacha20_block_xor_ssse3(state, dst, src); - bytes -= CHACHA20_BLOCK_SIZE; - src += CHACHA20_BLOCK_SIZE; - dst += CHACHA20_BLOCK_SIZE; - state[12]++; - } - if (bytes) { - memcpy(buf, src, bytes); - chacha20_block_xor_ssse3(state, buf, buf); - memcpy(dst, buf, bytes); - } -} - -static int chacha20_simd(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *state, state_buf[16 + 2] __aligned(8); - struct skcipher_walk walk; - int err; - - BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN); - - if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd()) - return crypto_chacha20_crypt(req); - - err = skcipher_walk_virt(&walk, req, true); - - crypto_chacha20_init(state, ctx, walk.iv); - - kernel_fpu_begin(); - - while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { - chacha20_dosimd(state, 
walk.dst.virt.addr, walk.src.virt.addr, - rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); - err = skcipher_walk_done(&walk, - walk.nbytes % CHACHA20_BLOCK_SIZE); - } - - if (walk.nbytes) { - chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - kernel_fpu_end(); - - return err; -} - -static struct skcipher_alg alg = { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-simd", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha20_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA20_KEY_SIZE, - .max_keysize = CHACHA20_KEY_SIZE, - .ivsize = CHACHA20_IV_SIZE, - .chunksize = CHACHA20_BLOCK_SIZE, - .setkey = crypto_chacha20_setkey, - .encrypt = chacha20_simd, - .decrypt = chacha20_simd, -}; - -static int __init chacha20_simd_mod_init(void) -{ - if (!boot_cpu_has(X86_FEATURE_SSSE3)) - return -ENODEV; - -#ifdef CONFIG_AS_AVX2 - chacha20_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); -#endif - return crypto_register_skcipher(&alg); -} - -static void __exit chacha20_simd_mod_fini(void) -{ - crypto_unregister_skcipher(&alg); -} - -module_init(chacha20_simd_mod_init); -module_exit(chacha20_simd_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-simd"); diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c new file mode 100644 index 000000000000..45c1c4143176 --- /dev/null +++ b/arch/x86/crypto/chacha_glue.c @@ -0,0 +1,304 @@ +/* + * x64 SIMD accelerated ChaCha and XChaCha stream ciphers, + * including ChaCha20 (RFC7539) + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
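A reading aid for the deleted AVX2 code above: every vpaddd/vpxor/vpshufb group and every vpslld/vpsrld/vpor triple is one line of the standard ChaCha quarter-round; the 16- and 8-bit rotations go through the ROT16/ROT8 byte-shuffle masks, while the 12- and 7-bit rotations use shift/or pairs. A minimal scalar sketch of the quarter-round, per RFC 7539 rather than lifted from this patch, with plain C types standing in for the kernel's:

	#include <stdint.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	/* one ChaCha quarter-round on state words a, b, c, d */
	static void chacha_quarterround(uint32_t x[16], int a, int b, int c, int d)
	{
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 16);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 12);
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 8);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 7);
	}

The 8-block function keeps the same state word of eight independent blocks in one ymm register, which is why the rotation constants 16/12/8/7 recur in the assembly comments above.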
+ */ + +#include <crypto/algapi.h> +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/simd.h> + +#define CHACHA_STATE_ALIGN 16 + +asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds); +#ifdef CONFIG_AS_AVX2 +asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +static bool chacha_use_avx2; +#ifdef CONFIG_AS_AVX512 +asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); +static bool chacha_use_avx512vl; +#endif +#endif + +static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks) +{ + len = min(len, maxblocks * CHACHA_BLOCK_SIZE); + return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE; +} + +static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ +#ifdef CONFIG_AS_AVX2 +#ifdef CONFIG_AS_AVX512 + if (chacha_use_avx512vl) { + while (bytes >= CHACHA_BLOCK_SIZE * 8) { + chacha_8block_xor_avx512vl(state, dst, src, bytes, + nrounds); + bytes -= CHACHA_BLOCK_SIZE * 8; + src += CHACHA_BLOCK_SIZE * 8; + dst += CHACHA_BLOCK_SIZE * 8; + state[12] += 8; + } + if (bytes > CHACHA_BLOCK_SIZE * 4) { + chacha_8block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state[12] += chacha_advance(bytes, 8); + return; + } + if (bytes > CHACHA_BLOCK_SIZE * 2) { + chacha_4block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state[12] += chacha_advance(bytes, 4); + return; + } + if (bytes) { + chacha_2block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state[12] += chacha_advance(bytes, 2); + return; + } + } +#endif + if (chacha_use_avx2) { + while (bytes >= CHACHA_BLOCK_SIZE * 8) { + chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); + bytes -= CHACHA_BLOCK_SIZE * 8; + src += CHACHA_BLOCK_SIZE * 8; + dst += CHACHA_BLOCK_SIZE * 8; + state[12] += 8; + } + if (bytes > CHACHA_BLOCK_SIZE * 4) { + chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); + state[12] += chacha_advance(bytes, 8); + return; + } + if (bytes > CHACHA_BLOCK_SIZE * 2) { + chacha_4block_xor_avx2(state, dst, src, bytes, nrounds); + state[12] += chacha_advance(bytes, 4); + return; + } + if (bytes > CHACHA_BLOCK_SIZE) { + chacha_2block_xor_avx2(state, dst, src, bytes, nrounds); + state[12] += chacha_advance(bytes, 2); + return; + } + } +#endif + while (bytes >= CHACHA_BLOCK_SIZE * 4) { + chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); + bytes -= CHACHA_BLOCK_SIZE * 4; + src += CHACHA_BLOCK_SIZE * 4; + dst += CHACHA_BLOCK_SIZE * 4; + state[12] += 4; + } + if (bytes > CHACHA_BLOCK_SIZE) { + chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); + state[12] += chacha_advance(bytes, 4); + return; + } + if (bytes) { + chacha_block_xor_ssse3(state, dst, src, 
bytes, nrounds); + state[12]++; + } +} + +static int chacha_simd_stream_xor(struct skcipher_walk *walk, + struct chacha_ctx *ctx, u8 *iv) +{ + u32 *state, state_buf[16 + 2] __aligned(8); + int next_yield = 4096; /* bytes until next FPU yield */ + int err = 0; + + BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); + state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); + + crypto_chacha_init(state, ctx, iv); + + while (walk->nbytes > 0) { + unsigned int nbytes = walk->nbytes; + + if (nbytes < walk->total) { + nbytes = round_down(nbytes, walk->stride); + next_yield -= nbytes; + } + + chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr, + nbytes, ctx->nrounds); + + if (next_yield <= 0) { + /* temporarily allow preemption */ + kernel_fpu_end(); + kernel_fpu_begin(); + next_yield = 4096; + } + + err = skcipher_walk_done(walk, walk->nbytes - nbytes); + } + + return err; +} + +static int chacha_simd(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + int err; + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable()) + return crypto_chacha_crypt(req); + + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; + + kernel_fpu_begin(); + err = chacha_simd_stream_xor(&walk, ctx, req->iv); + kernel_fpu_end(); + return err; +} + +static int xchacha_simd(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + struct chacha_ctx subctx; + u32 *state, state_buf[16 + 2] __aligned(8); + u8 real_iv[16]; + int err; + + if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable()) + return crypto_xchacha_crypt(req); + + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; + + BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); + state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); + crypto_chacha_init(state, ctx, req->iv); + + kernel_fpu_begin(); + + hchacha_block_ssse3(state, subctx.key, ctx->nrounds); + subctx.nrounds = ctx->nrounds; + + memcpy(&real_iv[0], req->iv + 24, 8); + memcpy(&real_iv[8], req->iv + 16, 8); + err = chacha_simd_stream_xor(&walk, &subctx, real_iv); + + kernel_fpu_end(); + + return err; +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-simd", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = chacha_simd, + .decrypt = chacha_simd, + }, { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-simd", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = xchacha_simd, + .decrypt = xchacha_simd, + }, { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-simd", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = 
CHACHA_BLOCK_SIZE, + .setkey = crypto_chacha12_setkey, + .encrypt = xchacha_simd, + .decrypt = xchacha_simd, + }, +}; + +static int __init chacha_simd_mod_init(void) +{ + if (!boot_cpu_has(X86_FEATURE_SSSE3)) + return -ENODEV; + +#ifdef CONFIG_AS_AVX2 + chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); +#ifdef CONFIG_AS_AVX512 + chacha_use_avx512vl = chacha_use_avx2 && + boot_cpu_has(X86_FEATURE_AVX512VL) && + boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */ +#endif +#endif + return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); +} + +static void __exit chacha_simd_mod_fini(void) +{ + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); +} + +module_init(chacha_simd_mod_init); +module_exit(chacha_simd_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-simd"); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-simd"); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-simd"); diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S new file mode 100644 index 000000000000..f7946ea1b704 --- /dev/null +++ b/arch/x86/crypto/nh-avx2-x86_64.S @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NH - ε-almost-universal hash function, x86_64 AVX2 accelerated + * + * Copyright 2018 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + +#define PASS0_SUMS %ymm0 +#define PASS1_SUMS %ymm1 +#define PASS2_SUMS %ymm2 +#define PASS3_SUMS %ymm3 +#define K0 %ymm4 +#define K0_XMM %xmm4 +#define K1 %ymm5 +#define K1_XMM %xmm5 +#define K2 %ymm6 +#define K2_XMM %xmm6 +#define K3 %ymm7 +#define K3_XMM %xmm7 +#define T0 %ymm8 +#define T1 %ymm9 +#define T2 %ymm10 +#define T2_XMM %xmm10 +#define T3 %ymm11 +#define T3_XMM %xmm11 +#define T4 %ymm12 +#define T5 %ymm13 +#define T6 %ymm14 +#define T7 %ymm15 +#define KEY %rdi +#define MESSAGE %rsi +#define MESSAGE_LEN %rdx +#define HASH %rcx + +.macro _nh_2xstride k0, k1, k2, k3 + + // Add message words to key words + vpaddd \k0, T3, T0 + vpaddd \k1, T3, T1 + vpaddd \k2, T3, T2 + vpaddd \k3, T3, T3 + + // Multiply 32x32 => 64 and accumulate + vpshufd $0x10, T0, T4 + vpshufd $0x32, T0, T0 + vpshufd $0x10, T1, T5 + vpshufd $0x32, T1, T1 + vpshufd $0x10, T2, T6 + vpshufd $0x32, T2, T2 + vpshufd $0x10, T3, T7 + vpshufd $0x32, T3, T3 + vpmuludq T4, T0, T0 + vpmuludq T5, T1, T1 + vpmuludq T6, T2, T2 + vpmuludq T7, T3, T3 + vpaddq T0, PASS0_SUMS, PASS0_SUMS + vpaddq T1, PASS1_SUMS, PASS1_SUMS + vpaddq T2, PASS2_SUMS, PASS2_SUMS + vpaddq T3, PASS3_SUMS, PASS3_SUMS +.endm + +/* + * void nh_avx2(const u32 *key, const u8 *message, size_t message_len, + * u8 hash[NH_HASH_BYTES]) + * + * It's guaranteed that message_len % 16 == 0. 
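Before the NH code below, a note on chacha_advance() in the new glue above: when a trailing partial group is fed to a multi-block kernel, the block counter in state[12] must still advance by the number of keystream blocks consumed, that is ceil(len / 64) capped at the kernel's width. A self-contained check of the arithmetic (user-space sketch; the kernel expresses the same thing with min() and round_up()):

	#include <stdio.h>

	#define CHACHA_BLOCK_SIZE 64

	/* equivalent of chacha_advance(): blocks the counter moves by */
	static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
	{
		if (len > maxblocks * CHACHA_BLOCK_SIZE)
			len = maxblocks * CHACHA_BLOCK_SIZE;
		return (len + CHACHA_BLOCK_SIZE - 1) / CHACHA_BLOCK_SIZE;
	}

	int main(void)
	{
		/* 202 bytes through the 4-block kernel: 3 full blocks + 1 partial */
		printf("%u\n", chacha_advance(202, 4));	/* prints 4 */
		return 0;
	}

Note also that chacha_simd_stream_xor() above briefly exits and re-enters kernel_fpu_begin() every 4096 bytes, so long requests do not keep preemption disabled for their whole length.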
+ */ +ENTRY(nh_avx2) + + vmovdqu 0x00(KEY), K0 + vmovdqu 0x10(KEY), K1 + add $0x20, KEY + vpxor PASS0_SUMS, PASS0_SUMS, PASS0_SUMS + vpxor PASS1_SUMS, PASS1_SUMS, PASS1_SUMS + vpxor PASS2_SUMS, PASS2_SUMS, PASS2_SUMS + vpxor PASS3_SUMS, PASS3_SUMS, PASS3_SUMS + + sub $0x40, MESSAGE_LEN + jl .Lloop4_done +.Lloop4: + vmovdqu (MESSAGE), T3 + vmovdqu 0x00(KEY), K2 + vmovdqu 0x10(KEY), K3 + _nh_2xstride K0, K1, K2, K3 + + vmovdqu 0x20(MESSAGE), T3 + vmovdqu 0x20(KEY), K0 + vmovdqu 0x30(KEY), K1 + _nh_2xstride K2, K3, K0, K1 + + add $0x40, MESSAGE + add $0x40, KEY + sub $0x40, MESSAGE_LEN + jge .Lloop4 + +.Lloop4_done: + and $0x3f, MESSAGE_LEN + jz .Ldone + + cmp $0x20, MESSAGE_LEN + jl .Llast + + // 2 or 3 strides remain; do 2 more. + vmovdqu (MESSAGE), T3 + vmovdqu 0x00(KEY), K2 + vmovdqu 0x10(KEY), K3 + _nh_2xstride K0, K1, K2, K3 + add $0x20, MESSAGE + add $0x20, KEY + sub $0x20, MESSAGE_LEN + jz .Ldone + vmovdqa K2, K0 + vmovdqa K3, K1 +.Llast: + // Last stride. Zero the high 128 bits of the message and keys so they + // don't affect the result when processing them like 2 strides. + vmovdqu (MESSAGE), T3_XMM + vmovdqa K0_XMM, K0_XMM + vmovdqa K1_XMM, K1_XMM + vmovdqu 0x00(KEY), K2_XMM + vmovdqu 0x10(KEY), K3_XMM + _nh_2xstride K0, K1, K2, K3 + +.Ldone: + // Sum the accumulators for each pass, then store the sums to 'hash' + + // PASS0_SUMS is (0A 0B 0C 0D) + // PASS1_SUMS is (1A 1B 1C 1D) + // PASS2_SUMS is (2A 2B 2C 2D) + // PASS3_SUMS is (3A 3B 3C 3D) + // We need the horizontal sums: + // (0A + 0B + 0C + 0D, + // 1A + 1B + 1C + 1D, + // 2A + 2B + 2C + 2D, + // 3A + 3B + 3C + 3D) + // + + vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0 // T0 = (0A 1A 0C 1C) + vpunpckhqdq PASS1_SUMS, PASS0_SUMS, T1 // T1 = (0B 1B 0D 1D) + vpunpcklqdq PASS3_SUMS, PASS2_SUMS, T2 // T2 = (2A 3A 2C 3C) + vpunpckhqdq PASS3_SUMS, PASS2_SUMS, T3 // T3 = (2B 3B 2D 3D) + + vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A) + vinserti128 $0x1, T3_XMM, T1, T5 // T5 = (0B 1B 2B 3B) + vperm2i128 $0x31, T2, T0, T0 // T0 = (0C 1C 2C 3C) + vperm2i128 $0x31, T3, T1, T1 // T1 = (0D 1D 2D 3D) + + vpaddq T5, T4, T4 + vpaddq T1, T0, T0 + vpaddq T4, T0, T0 + vmovdqu T0, (HASH) + ret +ENDPROC(nh_avx2) diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S new file mode 100644 index 000000000000..51f52d4ab4bb --- /dev/null +++ b/arch/x86/crypto/nh-sse2-x86_64.S @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NH - ε-almost-universal hash function, x86_64 SSE2 accelerated + * + * Copyright 2018 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + +#define PASS0_SUMS %xmm0 +#define PASS1_SUMS %xmm1 +#define PASS2_SUMS %xmm2 +#define PASS3_SUMS %xmm3 +#define K0 %xmm4 +#define K1 %xmm5 +#define K2 %xmm6 +#define K3 %xmm7 +#define T0 %xmm8 +#define T1 %xmm9 +#define T2 %xmm10 +#define T3 %xmm11 +#define T4 %xmm12 +#define T5 %xmm13 +#define T6 %xmm14 +#define T7 %xmm15 +#define KEY %rdi +#define MESSAGE %rsi +#define MESSAGE_LEN %rdx +#define HASH %rcx + +.macro _nh_stride k0, k1, k2, k3, offset + + // Load next message stride + movdqu \offset(MESSAGE), T1 + + // Load next key stride + movdqu \offset(KEY), \k3 + + // Add message words to key words + movdqa T1, T2 + movdqa T1, T3 + paddd T1, \k0 // reuse k0 to avoid a move + paddd \k1, T1 + paddd \k2, T2 + paddd \k3, T3 + + // Multiply 32x32 => 64 and accumulate + pshufd $0x10, \k0, T4 + pshufd $0x32, \k0, \k0 + pshufd $0x10, T1, T5 + pshufd $0x32, T1, T1 + pshufd $0x10, T2, T6 + pshufd $0x32, T2, T2 + 
pshufd $0x10, T3, T7 + pshufd $0x32, T3, T3 + pmuludq T4, \k0 + pmuludq T5, T1 + pmuludq T6, T2 + pmuludq T7, T3 + paddq \k0, PASS0_SUMS + paddq T1, PASS1_SUMS + paddq T2, PASS2_SUMS + paddq T3, PASS3_SUMS +.endm + +/* + * void nh_sse2(const u32 *key, const u8 *message, size_t message_len, + * u8 hash[NH_HASH_BYTES]) + * + * It's guaranteed that message_len % 16 == 0. + */ +ENTRY(nh_sse2) + + movdqu 0x00(KEY), K0 + movdqu 0x10(KEY), K1 + movdqu 0x20(KEY), K2 + add $0x30, KEY + pxor PASS0_SUMS, PASS0_SUMS + pxor PASS1_SUMS, PASS1_SUMS + pxor PASS2_SUMS, PASS2_SUMS + pxor PASS3_SUMS, PASS3_SUMS + + sub $0x40, MESSAGE_LEN + jl .Lloop4_done +.Lloop4: + _nh_stride K0, K1, K2, K3, 0x00 + _nh_stride K1, K2, K3, K0, 0x10 + _nh_stride K2, K3, K0, K1, 0x20 + _nh_stride K3, K0, K1, K2, 0x30 + add $0x40, KEY + add $0x40, MESSAGE + sub $0x40, MESSAGE_LEN + jge .Lloop4 + +.Lloop4_done: + and $0x3f, MESSAGE_LEN + jz .Ldone + _nh_stride K0, K1, K2, K3, 0x00 + + sub $0x10, MESSAGE_LEN + jz .Ldone + _nh_stride K1, K2, K3, K0, 0x10 + + sub $0x10, MESSAGE_LEN + jz .Ldone + _nh_stride K2, K3, K0, K1, 0x20 + +.Ldone: + // Sum the accumulators for each pass, then store the sums to 'hash' + movdqa PASS0_SUMS, T0 + movdqa PASS2_SUMS, T1 + punpcklqdq PASS1_SUMS, T0 // => (PASS0_SUM_A PASS1_SUM_A) + punpcklqdq PASS3_SUMS, T1 // => (PASS2_SUM_A PASS3_SUM_A) + punpckhqdq PASS1_SUMS, PASS0_SUMS // => (PASS0_SUM_B PASS1_SUM_B) + punpckhqdq PASS3_SUMS, PASS2_SUMS // => (PASS2_SUM_B PASS3_SUM_B) + paddq PASS0_SUMS, T0 + paddq PASS2_SUMS, T1 + movdqu T0, 0x00(HASH) + movdqu T1, 0x10(HASH) + ret +ENDPROC(nh_sse2) diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c new file mode 100644 index 000000000000..20d815ea4b6a --- /dev/null +++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum + * (AVX2 accelerated version) + * + * Copyright 2018 Google LLC + */ + +#include <crypto/internal/hash.h> +#include <crypto/nhpoly1305.h> +#include <linux/module.h> +#include <asm/fpu/api.h> + +asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len, + u8 hash[NH_HASH_BYTES]); + +/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ +static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]) +{ + nh_avx2(key, message, message_len, (u8 *)hash); +} + +static int nhpoly1305_avx2_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + if (srclen < 64 || !irq_fpu_usable()) + return crypto_nhpoly1305_update(desc, src, srclen); + + do { + unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); + kernel_fpu_end(); + src += n; + srclen -= n; + } while (srclen); + return 0; +} + +static struct shash_alg nhpoly1305_alg = { + .base.cra_name = "nhpoly1305", + .base.cra_driver_name = "nhpoly1305-avx2", + .base.cra_priority = 300, + .base.cra_ctxsize = sizeof(struct nhpoly1305_key), + .base.cra_module = THIS_MODULE, + .digestsize = POLY1305_DIGEST_SIZE, + .init = crypto_nhpoly1305_init, + .update = nhpoly1305_avx2_update, + .final = crypto_nhpoly1305_final, + .setkey = crypto_nhpoly1305_setkey, + .descsize = sizeof(struct nhpoly1305_state), +}; + +static int __init nhpoly1305_mod_init(void) +{ + if (!boot_cpu_has(X86_FEATURE_AVX2) || + !boot_cpu_has(X86_FEATURE_OSXSAVE)) + return -ENODEV; + + return 
crypto_register_shash(&nhpoly1305_alg); +} + +static void __exit nhpoly1305_mod_exit(void) +{ + crypto_unregister_shash(&nhpoly1305_alg); +} + +module_init(nhpoly1305_mod_init); +module_exit(nhpoly1305_mod_exit); + +MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (AVX2-accelerated)"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("nhpoly1305"); +MODULE_ALIAS_CRYPTO("nhpoly1305-avx2"); diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c new file mode 100644 index 000000000000..ed68d164ce14 --- /dev/null +++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum + * (SSE2 accelerated version) + * + * Copyright 2018 Google LLC + */ + +#include <crypto/internal/hash.h> +#include <crypto/nhpoly1305.h> +#include <linux/module.h> +#include <asm/fpu/api.h> + +asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len, + u8 hash[NH_HASH_BYTES]); + +/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ +static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]) +{ + nh_sse2(key, message, message_len, (u8 *)hash); +} + +static int nhpoly1305_sse2_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + if (srclen < 64 || !irq_fpu_usable()) + return crypto_nhpoly1305_update(desc, src, srclen); + + do { + unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); + kernel_fpu_end(); + src += n; + srclen -= n; + } while (srclen); + return 0; +} + +static struct shash_alg nhpoly1305_alg = { + .base.cra_name = "nhpoly1305", + .base.cra_driver_name = "nhpoly1305-sse2", + .base.cra_priority = 200, + .base.cra_ctxsize = sizeof(struct nhpoly1305_key), + .base.cra_module = THIS_MODULE, + .digestsize = POLY1305_DIGEST_SIZE, + .init = crypto_nhpoly1305_init, + .update = nhpoly1305_sse2_update, + .final = crypto_nhpoly1305_final, + .setkey = crypto_nhpoly1305_setkey, + .descsize = sizeof(struct nhpoly1305_state), +}; + +static int __init nhpoly1305_mod_init(void) +{ + if (!boot_cpu_has(X86_FEATURE_XMM2)) + return -ENODEV; + + return crypto_register_shash(&nhpoly1305_alg); +} + +static void __exit nhpoly1305_mod_exit(void) +{ + crypto_unregister_shash(&nhpoly1305_alg); +} + +module_init(nhpoly1305_mod_init); +module_exit(nhpoly1305_mod_exit); + +MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (SSE2-accelerated)"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("nhpoly1305"); +MODULE_ALIAS_CRYPTO("nhpoly1305-sse2"); diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index f012b7e28ad1..88cc01506c84 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -83,35 +83,37 @@ static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { if (unlikely(!sctx->wset)) { if (!sctx->uset) { - memcpy(sctx->u, dctx->r, sizeof(sctx->u)); - poly1305_simd_mult(sctx->u, dctx->r); + memcpy(sctx->u, dctx->r.r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r.r); sctx->uset = true; } memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); - poly1305_simd_mult(sctx->u + 5, dctx->r); + poly1305_simd_mult(sctx->u + 5, dctx->r.r); 
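An aside on the NH kernels added above (nh_sse2, and nh_avx2 before it): NH consumes the message in 16-byte strides and keeps four independent passes, each adding key words to message words and accumulating 32x32 -> 64-bit products. A scalar reference, modeled on the generic implementation in crypto/nhpoly1305.c; the helper and names here are illustrative, and a little-endian host is assumed for the load:

	#include <stdint.h>
	#include <string.h>

	static uint32_t load_le32(const uint8_t *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v));	/* little-endian host assumed */
		return v;
	}

	/* four-pass NH; message_len must be a multiple of 16 */
	static void nh_scalar(const uint32_t *key, const uint8_t *message,
			      size_t message_len, uint64_t hash[4])
	{
		uint64_t sums[4] = { 0, 0, 0, 0 };
		int i;

		while (message_len) {
			uint32_t m0 = load_le32(message + 0);
			uint32_t m1 = load_le32(message + 4);
			uint32_t m2 = load_le32(message + 8);
			uint32_t m3 = load_le32(message + 12);

			for (i = 0; i < 4; i++) {
				/* pass i uses key words offset by 4*i */
				sums[i] += (uint64_t)(uint32_t)(m0 + key[4 * i + 0]) *
					   (uint32_t)(m2 + key[4 * i + 2]);
				sums[i] += (uint64_t)(uint32_t)(m1 + key[4 * i + 1]) *
					   (uint32_t)(m3 + key[4 * i + 3]);
			}
			key += 4;	/* key slides one 16-byte unit per stride */
			message += 16;
			message_len -= 16;
		}
		memcpy(hash, sums, sizeof(sums));
	}

Each SSE2/AVX2 stride computes all four passes at once: the four PASSn_SUMS accumulators and the rotating K0..K3 key registers correspond to sums[0..3] and the sliding key window here.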
memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); - poly1305_simd_mult(sctx->u + 10, dctx->r); + poly1305_simd_mult(sctx->u + 10, dctx->r.r); sctx->wset = true; } blocks = srclen / (POLY1305_BLOCK_SIZE * 4); - poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u); + poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks, + sctx->u); src += POLY1305_BLOCK_SIZE * 4 * blocks; srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; } #endif if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { if (unlikely(!sctx->uset)) { - memcpy(sctx->u, dctx->r, sizeof(sctx->u)); - poly1305_simd_mult(sctx->u, dctx->r); + memcpy(sctx->u, dctx->r.r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r.r); sctx->uset = true; } blocks = srclen / (POLY1305_BLOCK_SIZE * 2); - poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u); + poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks, + sctx->u); src += POLY1305_BLOCK_SIZE * 2 * blocks; srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; } if (srclen >= POLY1305_BLOCK_SIZE) { - poly1305_block_sse2(dctx->h, src, dctx->r, 1); + poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1); srclen -= POLY1305_BLOCK_SIZE; } return srclen; diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 25e5a6bda8c3..20d0885b00fb 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with .macro CALL_enter_from_user_mode #ifdef CONFIG_CONTEXT_TRACKING #ifdef HAVE_JUMP_LABEL - STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1 + STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 #endif call enter_from_user_mode .Lafter_call_\@: diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 3b2490b81918..7bc105f47d21 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) /* * In order to return to user mode, we need to have IRQs off with * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags - * can be set at any time on preemptable kernels if we have IRQs on, + * can be set at any time on preemptible kernels if we have IRQs on, * so we need to loop. Disabling preemption wouldn't help: doing the * work to clear some of the flags can sleep. */ diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0624bf2266fd..5bfe2243a08f 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \ - $(call ld-option, --build-id) -Bsymbolic + $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \ + -Bsymbolic GCOV_PROFILE := n # diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S index acfd5ba7d943..93c6dc7812d0 100644 --- a/arch/x86/entry/vdso/vdso-layout.lds.S +++ b/arch/x86/entry/vdso/vdso-layout.lds.S @@ -7,16 +7,6 @@ * This script controls its layout. */ -#if defined(BUILD_VDSO64) -# define SHDR_SIZE 64 -#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32) -# define SHDR_SIZE 40 -#else -# error unknown VDSO target -#endif - -#define NUM_FAKE_SHDRS 13 - SECTIONS { /* @@ -60,20 +50,8 @@ SECTIONS *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) - - /* - * Ideally this would live in a C file, but that won't - * work cleanly for x32 until we start building the x32 - * C code using an x32 toolchain. 
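On the poly1305_glue.c hunk above (the mechanical dctx->r to dctx->r.r rename): the sctx->u array it touches caches powers of r, r^2 for the two-block SSE2 path and, once wset is set, higher powers for the four-block AVX2 path. Precomputed powers work because evaluating the Poly1305 polynomial several blocks per step with r^k..r is the same Horner scheme, just regrouped. A toy demonstration with a small prime standing in for 2^130 - 5 (all values here are arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	#define P 1000003ULL	/* toy prime standing in for 2^130 - 5 */

	int main(void)
	{
		uint64_t r = 123457, m[4] = { 11, 22, 33, 44 };
		uint64_t r2 = (r * r) % P;
		uint64_t h1 = 0, h2 = 0;
		int i;

		for (i = 0; i < 4; i++)		/* one block per step */
			h1 = ((h1 + m[i]) * r) % P;

		for (i = 0; i < 4; i += 2)	/* two blocks per step, using r^2 */
			h2 = ((h2 + m[i]) * r2 + m[i + 1] * r) % P;

		printf("%llu %llu\n", (unsigned long long)h1,
		       (unsigned long long)h2);	/* the two match */
		return 0;
	}

Serially h ends up as m1*r^4 + m2*r^3 + m3*r^2 + m4*r; the paired loop reaches the same sum in half the steps, which is what the SIMD code exploits across lanes.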
- */ - VDSO_FAKE_SECTION_TABLE_START = .; - . = . + NUM_FAKE_SHDRS * SHDR_SIZE; - VDSO_FAKE_SECTION_TABLE_END = .; } :text - .fake_shstrtab : { *(.fake_shstrtab) } :text - - .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr @@ -87,11 +65,6 @@ SECTIONS .text : { *(.text*) } :text =0x90909090, - /* - * At the end so that eu-elflint stays happy when vdso2c strips - * these. A better implementation would avoid allocating space - * for these. - */ .altinstructions : { *(.altinstructions) } :text .altinstr_replacement : { *(.altinstr_replacement) } :text diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c index 4674f58581a1..8e470b018512 100644 --- a/arch/x86/entry/vdso/vdso2c.c +++ b/arch/x86/entry/vdso/vdso2c.c @@ -76,8 +76,6 @@ enum { sym_hpet_page, sym_pvclock_page, sym_hvclock_page, - sym_VDSO_FAKE_SECTION_TABLE_START, - sym_VDSO_FAKE_SECTION_TABLE_END, }; const int special_pages[] = { @@ -98,12 +96,6 @@ struct vdso_sym required_syms[] = { [sym_hpet_page] = {"hpet_page", true}, [sym_pvclock_page] = {"pvclock_page", true}, [sym_hvclock_page] = {"hvclock_page", true}, - [sym_VDSO_FAKE_SECTION_TABLE_START] = { - "VDSO_FAKE_SECTION_TABLE_START", false - }, - [sym_VDSO_FAKE_SECTION_TABLE_END] = { - "VDSO_FAKE_SECTION_TABLE_END", false - }, {"VDSO32_NOTE_MASK", true}, {"__kernel_vsyscall", true}, {"__kernel_sigreturn", true}, diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 7eb878561910..babc4e7a519c 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) * abusing from userspace install_speciall_mapping, which may * not do accounting and rlimit right. * We could search vma near context.vdso, but it's a slowpath, - * so let's explicitely check all VMAs to be completely sure. + * so let's explicitly check all VMAs to be completely sure. */ for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma_is_special_mapping(vma, &vdso_mapping) || diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 85fd85d52ffd..d78bcc03e60e 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -102,7 +102,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size) if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) { struct thread_struct *thread = ¤t->thread; - thread->error_code = 6; /* user fault, no page, write */ + thread->error_code = X86_PF_USER | X86_PF_WRITE; thread->cr2 = ptr; thread->trap_nr = X86_TRAP_PF; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 24ffa1e88cf9..a01ef1b0f883 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -589,7 +589,7 @@ static __init int bts_init(void) * the AUX buffer. * * However, since this driver supports per-CPU and per-task inherit - * we cannot use the user mapping since it will not be availble + * we cannot use the user mapping since it will not be available * if we're not running the owning process. * * With PTI we can't use the kernal map either, because its not diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index ecc3e34ca955..40e12cfc87f6 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added) * in sequence on the same PMC or on different PMCs. 
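One hunk above deserves a gloss: vsyscall_64.c now spells the page-fault error code with named bits instead of the magic 6. The low bits of the x86 page-fault error code are architecturally defined, so the equivalence is easy to check:

	#include <stdio.h>

	/* x86 page-fault error-code bits (architecturally defined) */
	#define X86_PF_PROT	0x1	/* set: protection fault on a present page */
	#define X86_PF_WRITE	0x2	/* set: write access */
	#define X86_PF_USER	0x4	/* set: fault taken in user mode */

	int main(void)
	{
		/* the old literal 6 == user-mode write to a not-present page */
		printf("%d\n", X86_PF_USER | X86_PF_WRITE);	/* prints 6 */
		return 0;
	}

X86_PF_PROT stays clear because the emulated vsyscall write faults on a not-present page, matching the old comment "user fault, no page, write".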
* * In practise it appears some of these events do in fact count, and - * we need to programm all 4 events. + * we need to program all 4 events. */ static void intel_pmu_nhm_workaround(void) { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b7b01d762d32..e9acf1d2e7b2 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event, /* * We must however always use iregs for the unwinder to stay sane; the * record BP,SP,IP can point into thin air when the record is from a - * previous PMI context or an (I)RET happend between the record and + * previous PMI context or an (I)RET happened between the record and * PMI. */ if (sample_type & PERF_SAMPLE_CALLCHAIN) diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index d32c0eed38ca..dee579efb2b2 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -1259,7 +1259,7 @@ again: } /* * Perf does test runs to see if a whole group can be assigned - * together succesfully. There can be multiple rounds of this. + * together successfully. There can be multiple rounds of this. * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config * bits, such that the next round of group assignments will * cause the above p4_should_swap_ts to pass instead of fail. diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 3a0aa83cbd07..9494ca68fd9d 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -68,6 +68,7 @@ static struct pt_cap_desc { PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)), PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)), PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)), + PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)), PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)), PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3), PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000), @@ -75,14 +76,21 @@ static struct pt_cap_desc { PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000), }; -static u32 pt_cap_get(enum pt_capabilities cap) +u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) { - struct pt_cap_desc *cd = &pt_caps[cap]; - u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; + struct pt_cap_desc *cd = &pt_caps[capability]; + u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; unsigned int shift = __ffs(cd->mask); return (c & cd->mask) >> shift; } +EXPORT_SYMBOL_GPL(intel_pt_validate_cap); + +u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) +{ + return intel_pt_validate_cap(pt_pmu.caps, cap); +} +EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap); static ssize_t pt_cap_show(struct device *cdev, struct device_attribute *attr, @@ -92,7 +100,7 @@ static ssize_t pt_cap_show(struct device *cdev, container_of(attr, struct dev_ext_attribute, attr); enum pt_capabilities cap = (long)ea->var; - return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap)); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap)); } static struct attribute_group pt_cap_group __ro_after_init = { @@ -310,16 +318,16 @@ static bool pt_event_valid(struct perf_event *event) return false; if (config & RTIT_CTL_CYC_PSB) { - if (!pt_cap_get(PT_CAP_psb_cyc)) + if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc)) return false; - allowed = pt_cap_get(PT_CAP_psb_periods); + allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods); requested = (config & RTIT_CTL_PSB_FREQ) >> RTIT_CTL_PSB_FREQ_OFFSET; if (requested && (!(allowed & BIT(requested)))) return false; - allowed = 
pt_cap_get(PT_CAP_cycle_thresholds); + allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds); requested = (config & RTIT_CTL_CYC_THRESH) >> RTIT_CTL_CYC_THRESH_OFFSET; if (requested && (!(allowed & BIT(requested)))) @@ -334,10 +342,10 @@ static bool pt_event_valid(struct perf_event *event) * Spec says that setting mtc period bits while mtc bit in * CPUID is 0 will #GP, so better safe than sorry. */ - if (!pt_cap_get(PT_CAP_mtc)) + if (!intel_pt_validate_hw_cap(PT_CAP_mtc)) return false; - allowed = pt_cap_get(PT_CAP_mtc_periods); + allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods); if (!allowed) return false; @@ -349,11 +357,11 @@ static bool pt_event_valid(struct perf_event *event) } if (config & RTIT_CTL_PWR_EVT_EN && - !pt_cap_get(PT_CAP_power_event_trace)) + !intel_pt_validate_hw_cap(PT_CAP_power_event_trace)) return false; if (config & RTIT_CTL_PTW) { - if (!pt_cap_get(PT_CAP_ptwrite)) + if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite)) return false; /* FUPonPTW without PTW doesn't make sense */ @@ -598,7 +606,7 @@ static struct topa *topa_alloc(int cpu, gfp_t gfp) * In case of singe-entry ToPA, always put the self-referencing END * link as the 2nd entry in the table */ - if (!pt_cap_get(PT_CAP_topa_multiple_entries)) { + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT; TOPA_ENTRY(topa, 1)->end = 1; } @@ -638,7 +646,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa) topa->offset = last->offset + last->size; buf->last = topa; - if (!pt_cap_get(PT_CAP_topa_multiple_entries)) + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) return; BUG_ON(last->last != TENTS_PER_PAGE - 1); @@ -654,7 +662,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa) static bool topa_table_full(struct topa *topa) { /* single-entry ToPA is a special case */ - if (!pt_cap_get(PT_CAP_topa_multiple_entries)) + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) return !!topa->last; return topa->last == TENTS_PER_PAGE - 1; @@ -690,7 +698,8 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp) TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT; TOPA_ENTRY(topa, -1)->size = order; - if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) { + if (!buf->snapshot && + !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { TOPA_ENTRY(topa, -1)->intr = 1; TOPA_ENTRY(topa, -1)->stop = 1; } @@ -725,7 +734,7 @@ static void pt_topa_dump(struct pt_buffer *buf) topa->table[i].intr ? 'I' : ' ', topa->table[i].stop ? 'S' : ' ', *(u64 *)&topa->table[i]); - if ((pt_cap_get(PT_CAP_topa_multiple_entries) && + if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) && topa->table[i].stop) || topa->table[i].end) break; @@ -828,7 +837,7 @@ static void pt_handle_status(struct pt *pt) * means we are already losing data; need to let the decoder * know. */ - if (!pt_cap_get(PT_CAP_topa_multiple_entries) || + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) { perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_TRUNCATED); @@ -840,7 +849,8 @@ static void pt_handle_status(struct pt *pt) * Also on single-entry ToPA implementations, interrupt will come * before the output reaches its output region's boundary. 
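The pt_cap_get() to intel_pt_validate_cap() conversion above keeps the same extraction: mask the saved CPUID register, then shift right by the mask's lowest set bit (__ffs). A user-space sketch using the PT_CAP_mtc_periods mask from the capability table (the CPUID value here is made up):

	#include <stdio.h>
	#include <stdint.h>

	/* mask/shift extraction as in intel_pt_validate_cap() above */
	static unsigned int extract_cap(uint32_t reg, uint32_t mask)
	{
		return (reg & mask) >> __builtin_ctz(mask);	/* kernel: __ffs() */
	}

	int main(void)
	{
		/* PT_CAP_mtc_periods lives in bits 31:16 of its CPUID register */
		printf("0x%x\n", extract_cap(0x12340003, 0xffff0000));	/* 0x1234 */
		return 0;
	}

Exporting the mask/shift step separately lets KVM validate a guest's reported PT capabilities against the same table, which is the point of the new intel_pt_validate_cap()/intel_pt_validate_hw_cap() split.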
*/ - if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot && + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) && + !buf->snapshot && pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) { void *head = pt_buffer_region(buf); @@ -931,7 +941,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */ - if (!pt_cap_get(PT_CAP_topa_multiple_entries)) + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) return 0; /* clear STOP and INT from current entry */ @@ -1082,7 +1092,7 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, pt_buffer_setup_topa_index(buf); /* link last table to the first one, unless we're double buffering */ - if (pt_cap_get(PT_CAP_topa_multiple_entries)) { + if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT; TOPA_ENTRY(buf->last, -1)->end = 1; } @@ -1153,7 +1163,7 @@ static int pt_addr_filters_init(struct perf_event *event) struct pt_filters *filters; int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu); - if (!pt_cap_get(PT_CAP_num_address_ranges)) + if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) return 0; filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node); @@ -1202,7 +1212,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters) return -EINVAL; } - if (++range > pt_cap_get(PT_CAP_num_address_ranges)) + if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) return -EOPNOTSUPP; } @@ -1507,12 +1517,12 @@ static __init int pt_init(void) if (ret) return ret; - if (!pt_cap_get(PT_CAP_topa_output)) { + if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) { pr_warn("ToPA output is not supported on this CPU\n"); return -ENODEV; } - if (!pt_cap_get(PT_CAP_topa_multiple_entries)) + if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF; @@ -1530,7 +1540,7 @@ static __init int pt_init(void) pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync; pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate; pt_pmu.pmu.nr_addr_filters = - pt_cap_get(PT_CAP_num_address_ranges); + intel_pt_validate_hw_cap(PT_CAP_num_address_ranges); ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1); diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 0eb41d07b79a..269e15a9086c 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -20,43 +20,6 @@ #define __INTEL_PT_H__ /* - * PT MSR bit definitions - */ -#define RTIT_CTL_TRACEEN BIT(0) -#define RTIT_CTL_CYCLEACC BIT(1) -#define RTIT_CTL_OS BIT(2) -#define RTIT_CTL_USR BIT(3) -#define RTIT_CTL_PWR_EVT_EN BIT(4) -#define RTIT_CTL_FUP_ON_PTW BIT(5) -#define RTIT_CTL_CR3EN BIT(7) -#define RTIT_CTL_TOPA BIT(8) -#define RTIT_CTL_MTC_EN BIT(9) -#define RTIT_CTL_TSC_EN BIT(10) -#define RTIT_CTL_DISRETC BIT(11) -#define RTIT_CTL_PTW_EN BIT(12) -#define RTIT_CTL_BRANCH_EN BIT(13) -#define RTIT_CTL_MTC_RANGE_OFFSET 14 -#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) -#define RTIT_CTL_CYC_THRESH_OFFSET 19 -#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) -#define RTIT_CTL_PSB_FREQ_OFFSET 24 -#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) -#define RTIT_CTL_ADDR0_OFFSET 32 -#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET) -#define RTIT_CTL_ADDR1_OFFSET 36 -#define RTIT_CTL_ADDR1 (0x0full 
<< RTIT_CTL_ADDR1_OFFSET) -#define RTIT_CTL_ADDR2_OFFSET 40 -#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET) -#define RTIT_CTL_ADDR3_OFFSET 44 -#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET) -#define RTIT_STATUS_FILTEREN BIT(0) -#define RTIT_STATUS_CONTEXTEN BIT(1) -#define RTIT_STATUS_TRIGGEREN BIT(2) -#define RTIT_STATUS_BUFFOVF BIT(3) -#define RTIT_STATUS_ERROR BIT(4) -#define RTIT_STATUS_STOPPED BIT(5) - -/* * Single-entry ToPA: when this close to region boundary, switch * buffers to avoid losing data. */ @@ -82,30 +45,9 @@ struct topa_entry { u64 rsvd4 : 16; }; -#define PT_CPUID_LEAVES 2 -#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */ - /* TSC to Core Crystal Clock Ratio */ #define CPUID_TSC_LEAF 0x15 -enum pt_capabilities { - PT_CAP_max_subleaf = 0, - PT_CAP_cr3_filtering, - PT_CAP_psb_cyc, - PT_CAP_ip_filtering, - PT_CAP_mtc, - PT_CAP_ptwrite, - PT_CAP_power_event_trace, - PT_CAP_topa_output, - PT_CAP_topa_multiple_entries, - PT_CAP_single_range_output, - PT_CAP_payloads_lip, - PT_CAP_num_address_ranges, - PT_CAP_mtc_periods, - PT_CAP_cycle_thresholds, - PT_CAP_psb_periods, -}; - struct pt_pmu { struct pmu pmu; u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c index b8e60cc50461..dd0a843f766d 100644 --- a/arch/x86/hyperv/nested.c +++ b/arch/x86/hyperv/nested.c @@ -7,6 +7,7 @@ * * Author : Lan Tianyu <Tianyu.Lan@microsoft.com> */ +#define pr_fmt(fmt) "Hyper-V: " fmt #include <linux/types.h> @@ -54,3 +55,82 @@ fault: return ret; } EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping); + +int hyperv_fill_flush_guest_mapping_list( + struct hv_guest_mapping_flush_list *flush, + u64 start_gfn, u64 pages) +{ + u64 cur = start_gfn; + u64 additional_pages; + int gpa_n = 0; + + do { + /* + * If flush requests exceed max flush count, go back to + * flush tlbs without range. 
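The fill loop that follows packs an arbitrary GFN range into gpa_list entries, each covering at most HV_MAX_FLUSH_PAGES pages and encoded as a base PFN plus a count of additional pages. A user-space sketch of just the packing arithmetic (the constant's value is assumed from hyperv-tlfs.h and is illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	#define HV_MAX_FLUSH_PAGES 2048	/* assumed; see hyperv-tlfs.h */

	int main(void)
	{
		uint64_t cur = 0x1000, pages = 5000;

		while (pages > 0) {
			uint64_t n = pages < HV_MAX_FLUSH_PAGES ?
				     pages : HV_MAX_FLUSH_PAGES;

			/* one gpa_list entry: base pfn plus (n - 1) extra pages */
			printf("basepfn=%#llx additional_pages=%llu\n",
			       (unsigned long long)cur,
			       (unsigned long long)(n - 1));
			cur += n;
			pages -= n;
		}
		return 0;
	}

In the kernel version, overflowing HV_MAX_FLUSH_REP_COUNT entries returns -ENOSPC so the caller can fall back to a flush without a range, as the comment above says.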
+ */ + if (gpa_n >= HV_MAX_FLUSH_REP_COUNT) + return -ENOSPC; + + additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1; + + flush->gpa_list[gpa_n].page.additional_pages = additional_pages; + flush->gpa_list[gpa_n].page.largepage = false; + flush->gpa_list[gpa_n].page.basepfn = cur; + + pages -= additional_pages + 1; + cur += additional_pages + 1; + gpa_n++; + } while (pages > 0); + + return gpa_n; +} +EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list); + +int hyperv_flush_guest_mapping_range(u64 as, + hyperv_fill_flush_list_func fill_flush_list_func, void *data) +{ + struct hv_guest_mapping_flush_list **flush_pcpu; + struct hv_guest_mapping_flush_list *flush; + u64 status = 0; + unsigned long flags; + int ret = -ENOTSUPP; + int gpa_n = 0; + + if (!hv_hypercall_pg || !fill_flush_list_func) + goto fault; + + local_irq_save(flags); + + flush_pcpu = (struct hv_guest_mapping_flush_list **) + this_cpu_ptr(hyperv_pcpu_input_arg); + + flush = *flush_pcpu; + if (unlikely(!flush)) { + local_irq_restore(flags); + goto fault; + } + + flush->address_space = as; + flush->flags = 0; + + gpa_n = fill_flush_list_func(flush, data); + if (gpa_n < 0) { + local_irq_restore(flags); + goto fault; + } + + status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST, + gpa_n, 0, flush, NULL); + + local_irq_restore(flags); + + if (!(status & HV_HYPERCALL_RESULT_MASK)) + ret = 0; + else + ret = status; +fault: + trace_hyperv_nested_flush_guest_mapping_range(as, ret); + return ret; +} +EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range); diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 8e4ea39e55d0..31b627b43a8e 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -7,24 +7,16 @@ #include <asm/asm.h> #ifdef CONFIG_SMP -.macro LOCK_PREFIX_HERE + .macro LOCK_PREFIX +672: lock .pushsection .smp_locks,"a" .balign 4 - .long 671f - . # offset + .long 672b - . .popsection -671: -.endm - -.macro LOCK_PREFIX insn:vararg - LOCK_PREFIX_HERE - lock \insn -.endm + .endm #else -.macro LOCK_PREFIX_HERE -.endm - -.macro LOCK_PREFIX insn:vararg -.endm + .macro LOCK_PREFIX + .endm #endif /* diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index d7faa16622d8..0660e14690c8 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -31,8 +31,15 @@ */ #ifdef CONFIG_SMP -#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t" -#define LOCK_PREFIX "LOCK_PREFIX " +#define LOCK_PREFIX_HERE \ + ".pushsection .smp_locks,\"a\"\n" \ + ".balign 4\n" \ + ".long 671f - .\n" /* offset */ \ + ".popsection\n" \ + "671:" + +#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " + #else /* ! CONFIG_SMP */ #define LOCK_PREFIX_HERE "" #define LOCK_PREFIX "" @@ -167,7 +174,7 @@ static inline int alternatives_text_reserved(void *start, void *end) /* * Alternative inline assembly with input. * - * Pecularities: + * Peculiarities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... 
"r") diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index 34a10b2d5b73..fc0693569f7a 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -5,15 +5,9 @@ #include <asm/cpufeatures.h> #ifdef CONFIG_64BIT -/* popcnt %edi, %eax */ -#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7" -/* popcnt %rdi, %rax */ -#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" #define REG_IN "D" #define REG_OUT "a" #else -/* popcnt %eax, %eax */ -#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" #define REG_IN "a" #define REG_OUT "a" #endif @@ -24,7 +18,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res; - asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight32", "popcntl %1, %0", X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); @@ -52,7 +46,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w) { unsigned long res; - asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight64", "popcntq %1, %0", X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 21b086786404..6467757bb39f 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -120,25 +120,12 @@ /* Exception table entry */ #ifdef __ASSEMBLY__ # define _ASM_EXTABLE_HANDLE(from, to, handler) \ - ASM_EXTABLE_HANDLE from to handler - -.macro ASM_EXTABLE_HANDLE from:req to:req handler:req - .pushsection "__ex_table","a" - .balign 4 - .long (\from) - . - .long (\to) - . - .long (\handler) - . + .pushsection "__ex_table","a" ; \ + .balign 4 ; \ + .long (from) - . ; \ + .long (to) - . ; \ + .long (handler) - . 
; \ .popsection -.endm -#else /* __ASSEMBLY__ */ - -# define _ASM_EXTABLE_HANDLE(from, to, handler) \ - "ASM_EXTABLE_HANDLE from=" #from " to=" #to \ - " handler=\"" #handler "\"\n\t" - -/* For C file, we already have NOKPROBE_SYMBOL macro */ - -#endif /* __ASSEMBLY__ */ # define _ASM_EXTABLE(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) @@ -161,7 +148,6 @@ _ASM_PTR (entry); \ .popsection -#ifdef __ASSEMBLY__ .macro ALIGN_DESTINATION /* check for bad alignment of destination */ movl %edi,%ecx @@ -185,7 +171,34 @@ _ASM_EXTABLE_UA(100b, 103b) _ASM_EXTABLE_UA(101b, 103b) .endm -#endif /* __ASSEMBLY__ */ + +#else +# define _EXPAND_EXTABLE_HANDLE(x) #x +# define _ASM_EXTABLE_HANDLE(from, to, handler) \ + " .pushsection \"__ex_table\",\"a\"\n" \ + " .balign 4\n" \ + " .long (" #from ") - .\n" \ + " .long (" #to ") - .\n" \ + " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \ + " .popsection\n" + +# define _ASM_EXTABLE(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) + +# define _ASM_EXTABLE_UA(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) + +# define _ASM_EXTABLE_FAULT(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) + +# define _ASM_EXTABLE_EX(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext) + +# define _ASM_EXTABLE_REFCOUNT(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount) + +/* For C file, we already have NOKPROBE_SYMBOL macro */ +#endif #ifndef __ASSEMBLY__ /* diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 5090035e6d16..6804d6642767 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -4,8 +4,6 @@ #include <linux/stringify.h> -#ifndef __ASSEMBLY__ - /* * Despite that some emulators terminate on UD2, we use it for WARN(). 
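With _ASM_EXTABLE_HANDLE() back in C-string form, exception-table entries are again emitted directly from inline asm. A minimal sketch of the pattern the _ASM_EXTABLE_UA() flavour serves, loosely simplified from the kernel's __get_user_asm() (the function name here is invented, and STAC/CLAC plus access_ok() handling are omitted for brevity):

static inline int example_get_user_u32(u32 *dst, const u32 __user *src)
{
	int err = 0;
	u32 val = 0;

	asm volatile("1:	movl %2, %1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3, %0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     /* a fault at 1: resumes at 3: via ex_handler_uaccess() */
		     _ASM_EXTABLE_UA(1b, 3b)
		     : "+r" (err), "=r" (val)
		     : "m" (*src), "i" (-EFAULT));

	*dst = val;
	return err;
}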
* @@ -22,15 +20,53 @@ #define LEN_UD2 2 +#ifdef CONFIG_GENERIC_BUG + +#ifdef CONFIG_X86_32 +# define __BUG_REL(val) ".long " __stringify(val) +#else +# define __BUG_REL(val) ".long " __stringify(val) " - 2b" +#endif + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define _BUG_FLAGS(ins, flags) \ +do { \ + asm volatile("1:\t" ins "\n" \ + ".pushsection __bug_table,\"aw\"\n" \ + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ + "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \ + "\t.word %c1" "\t# bug_entry::line\n" \ + "\t.word %c2" "\t# bug_entry::flags\n" \ + "\t.org 2b+%c3\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (flags), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#else /* !CONFIG_DEBUG_BUGVERBOSE */ + #define _BUG_FLAGS(ins, flags) \ do { \ - asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \ - "flags=%c2 size=%c3" \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (flags), \ + asm volatile("1:\t" ins "\n" \ + ".pushsection __bug_table,\"aw\"\n" \ + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ + "\t.word %c0" "\t# bug_entry::flags\n" \ + "\t.org 2b+%c1\n" \ + ".popsection" \ + : : "i" (flags), \ "i" (sizeof(struct bug_entry))); \ } while (0) +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#else + +#define _BUG_FLAGS(ins, flags) asm volatile(ins) + +#endif /* CONFIG_GENERIC_BUG */ + #define HAVE_ARCH_BUG #define BUG() \ do { \ @@ -46,54 +82,4 @@ do { \ #include <asm-generic/bug.h> -#else /* __ASSEMBLY__ */ - -#ifdef CONFIG_GENERIC_BUG - -#ifdef CONFIG_X86_32 -.macro __BUG_REL val:req - .long \val -.endm -#else -.macro __BUG_REL val:req - .long \val - 2b -.endm -#endif - -#ifdef CONFIG_DEBUG_BUGVERBOSE - -.macro ASM_BUG ins:req file:req line:req flags:req size:req -1: \ins - .pushsection __bug_table,"aw" -2: __BUG_REL val=1b # bug_entry::bug_addr - __BUG_REL val=\file # bug_entry::file - .word \line # bug_entry::line - .word \flags # bug_entry::flags - .org 2b+\size - .popsection -.endm - -#else /* !CONFIG_DEBUG_BUGVERBOSE */ - -.macro ASM_BUG ins:req file:req line:req flags:req size:req -1: \ins - .pushsection __bug_table,"aw" -2: __BUG_REL val=1b # bug_entry::bug_addr - .word \flags # bug_entry::flags - .org 2b+\size - .popsection -.endm - -#endif /* CONFIG_DEBUG_BUGVERBOSE */ - -#else /* CONFIG_GENERIC_BUG */ - -.macro ASM_BUG ins:req file:req line:req flags:req size:req - \ins -.endm - -#endif /* CONFIG_GENERIC_BUG */ - -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_X86_BUG_H */ diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index bfb85e5844ab..a8bfac131256 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -7,7 +7,7 @@ #include <asm/alternative.h> /* Provides LOCK_PREFIX */ /* - * Non-existant functions to indicate usage errors at link time + * Non-existent functions to indicate usage errors at link time * (or compile-time if the compiler implements __compiletime_error(). 
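The trick behind that cmpxchg.h comment: declare a function that is never defined anywhere, so any call surviving optimization fails to link, and let __compiletime_error() turn the failure into a compile-time diagnostic where the compiler supports it. A hedged sketch with hypothetical names:

extern void __example_wrong_size(void)
	__compiletime_error("example_op: unsupported operand size");

#define example_op(ptr)						\
({								\
	switch (sizeof(*(ptr))) {				\
	case 4:	/* 32-bit variant would go here */		\
		break;						\
	case 8:	/* 64-bit variant would go here */		\
		break;						\
	default:						\
		/* never defined: link-/compile-time error */	\
		__example_wrong_size();				\
	}							\
})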
*/ extern void __xchg_wrong_size(void) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 7d442722ef24..aced6c9290d6 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -2,10 +2,10 @@ #ifndef _ASM_X86_CPUFEATURE_H #define _ASM_X86_CPUFEATURE_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - #include <asm/processor.h> + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) + #include <asm/asm.h> #include <linux/bitops.h> @@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline __pure bool _static_cpu_has(u16 bit) { - asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] " - "cap_byte=\"%[cap_byte]\" " - "feature=%P[feature] t_yes=%l[t_yes] " - "t_no=%l[t_no] always=%P[always]" + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P[always]\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P[feature]\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" : : [feature] "i" (bit), [always] "i" (X86_FEATURE_ALWAYS), [bitnum] "i" (1 << (bit & 7)), @@ -199,44 +226,5 @@ t_no: #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ boot_cpu_data.x86_model -#else /* __ASSEMBLY__ */ - -.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req -1: - jmp 6f -2: - .skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90 -3: - .section .altinstructions,"a" - .long 1b - . /* src offset */ - .long 4f - . /* repl offset */ - .word \always /* always replace */ - .byte 3b - 1b /* src len */ - .byte 5f - 4f /* repl len */ - .byte 3b - 2b /* pad len */ - .previous - .section .altinstr_replacement,"ax" -4: - jmp \t_no -5: - .previous - .section .altinstructions,"a" - .long 1b - . 
/* src offset */ - .long 0 /* no replacement */ - .word \feature /* feature bit */ - .byte 3b - 1b /* src len */ - .byte 0 /* repl len */ - .byte 0 /* pad len */ - .previous - .section .altinstr_aux,"ax" -6: - testb \bitnum,\cap_byte - jnz \t_yes - jmp \t_no - .previous -.endm - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ #endif /* _ASM_X86_CPUFEATURE_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 28c4a502b419..6d6122524711 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -281,9 +281,11 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index a7adb2bfbf0b..0acf5ee45a21 100644 --- a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -6,5 +6,6 @@ int crash_load_segments(struct kimage *image); int crash_copy_backup_region(struct kimage *image); int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params); +void crash_smp_send_stop(void); #endif /* _ASM_X86_CRASH_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 33833d1909af..a5ea841cc6d2 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -16,6 +16,12 @@ # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) #endif +#ifdef CONFIG_X86_SMAP +# define DISABLE_SMAP 0 +#else +# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31)) +#endif + #ifdef CONFIG_X86_INTEL_UMIP # define DISABLE_UMIP 0 #else @@ -68,7 +74,7 @@ #define DISABLED_MASK6 0 #define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 -#define DISABLED_MASK9 (DISABLE_MPX) +#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP) #define DISABLED_MASK10 0 #define DISABLED_MASK11 0 #define DISABLED_MASK12 0 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index eea40d52ca78..107283b1eb1e 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -19,7 +19,7 @@ * This is the main reason why we're doing stable VA mappings for RT * services. 
* - * This flag is used in conjuction with a chicken bit called + * This flag is used in conjunction with a chicken bit called * "efi=old_map" which can be used as a fallback to the old runtime * services mapping method in case there's some b0rkage with a * particular EFI implementation (haha, it is hard to hold up the @@ -82,8 +82,7 @@ struct efi_scratch { #define arch_efi_call_virt_setup() \ ({ \ efi_sync_low_kernel_mappings(); \ - preempt_disable(); \ - __kernel_fpu_begin(); \ + kernel_fpu_begin(); \ firmware_restrict_branch_speculation_start(); \ \ if (!efi_enabled(EFI_OLD_MEMMAP)) \ @@ -99,8 +98,7 @@ struct efi_scratch { efi_switch_mm(efi_scratch.prev_mm); \ \ firmware_restrict_branch_speculation_end(); \ - __kernel_fpu_end(); \ - preempt_enable(); \ + kernel_fpu_end(); \ }) extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, @@ -141,6 +139,8 @@ extern int __init efi_reuse_config(u64 tables, int nr_tables); extern void efi_delete_dummy_variable(void); extern void efi_switch_mm(struct mm_struct *mm); extern void efi_recover_from_page_fault(unsigned long phys_addr); +extern void efi_free_boot_services(void); +extern void efi_reserve_boot_services(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index a9caac9d4a72..b56d504af654 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -12,17 +12,12 @@ #define _ASM_X86_FPU_API_H /* - * Careful: __kernel_fpu_begin/end() must be called with preempt disabled - * and they don't touch the preempt state on their own. - * If you enable preemption after __kernel_fpu_begin(), preempt notifier - * should call the __kernel_fpu_end() to prevent the kernel/user FPU - * state from getting corrupted. KVM for example uses this model. - * - * All other cases use kernel_fpu_begin/end() which disable preemption - * during kernel FPU usage. + * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It + * disables preemption so be careful if you intend to use it for long periods + * of time. + * If you intend to use the FPU in softirq you need to check first with + * irq_fpu_usable() if it is possible. */ -extern void __kernel_fpu_begin(void); -extern void __kernel_fpu_end(void); extern void kernel_fpu_begin(void); extern void kernel_fpu_end(void); extern bool irq_fpu_usable(void); diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 69dcdf195b61..fa2c93cb42a2 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); #define user_insn(insn, output, input...) 
\ ({ \ int err; \ + \ + might_fault(); \ + \ asm volatile(ASM_STAC "\n" \ "1:" #insn "\n\t" \ "2: " ASM_CLAC "\n" \ diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index eb377b6e9eed..bca4c743de77 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -16,8 +16,8 @@ */ extern unsigned long x86_fsbase_read_task(struct task_struct *task); extern unsigned long x86_gsbase_read_task(struct task_struct *task); -extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); -extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); +extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); +extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); /* Helper functions for reading/writing FS/GS base */ @@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void) return gsbase; } -extern void x86_fsbase_write_cpu(unsigned long fsbase); -extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase); +static inline void x86_fsbase_write_cpu(unsigned long fsbase) +{ + wrmsrl(MSR_FS_BASE, fsbase); +} + +static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase) +{ + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); +} #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 4139f7650fe5..705dafc2d11a 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -10,6 +10,7 @@ #define _ASM_X86_HYPERV_TLFS_H #include <linux/types.h> +#include <asm/page.h> /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent @@ -30,158 +31,150 @@ /* * Feature identification. EAX indicates which features are available * to the partition based upon the current partition privileges. + * These are HYPERV_CPUID_FEATURES.EAX bits. */ /* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) +#define HV_X64_MSR_VP_RUNTIME_AVAILABLE BIT(0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ -#define HV_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) -/* Partition reference TSC MSR is available */ -#define HV_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) -/* Partition Guest IDLE MSR is available */ -#define HV_X64_MSR_GUEST_IDLE_AVAILABLE (1 << 10) - -/* A partition's reference time stamp counter (TSC) page */ -#define HV_X64_MSR_REFERENCE_TSC 0x40000021 - -/* - * There is a single feature flag that signifies if the partition has access - * to MSRs with local APIC and TSC frequencies. 
- */ -#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11) - -/* AccessReenlightenmentControls privilege */ -#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13) - +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) /* * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available */ -#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) +#define HV_X64_MSR_SYNIC_AVAILABLE BIT(2) /* * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through * HV_X64_MSR_STIMER3_COUNT) available */ -#define HV_MSR_SYNTIMER_AVAILABLE (1 << 3) +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) /* * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) * are available */ -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) +#define HV_X64_MSR_APIC_ACCESS_AVAILABLE BIT(4) /* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ -#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) +#define HV_X64_MSR_HYPERCALL_AVAILABLE BIT(5) /* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ -#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) +#define HV_X64_MSR_VP_INDEX_AVAILABLE BIT(6) /* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ -#define HV_X64_MSR_RESET_AVAILABLE (1 << 7) - /* - * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, - * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, - * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available - */ -#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) - -/* Frequency MSRs available */ -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8) - -/* Crash MSR available */ -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10) - -/* stimer Direct Mode is available */ -#define HV_STIMER_DIRECT_MODE_AVAILABLE (1 << 19) +#define HV_X64_MSR_RESET_AVAILABLE BIT(7) +/* + * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, + * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, + * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available + */ +#define HV_X64_MSR_STAT_PAGES_AVAILABLE BIT(8) +/* Partition reference TSC MSR is available */ +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) +/* Partition Guest IDLE MSR is available */ +#define HV_X64_MSR_GUEST_IDLE_AVAILABLE BIT(10) +/* + * There is a single feature flag that signifies if the partition has access + * to MSRs with local APIC and TSC frequencies. + */ +#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11) +/* AccessReenlightenmentControls privilege */ +#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13) /* - * Feature identification: EBX indicates which flags were specified at - * partition creation. The format is the same as the partition creation - * flag structure defined in section Partition Creation Flags. + * Feature identification: indicates which flags were specified at partition + * creation. The format is the same as the partition creation flag structure + * defined in section Partition Creation Flags. + * These are HYPERV_CPUID_FEATURES.EBX bits. 
*/ -#define HV_X64_CREATE_PARTITIONS (1 << 0) -#define HV_X64_ACCESS_PARTITION_ID (1 << 1) -#define HV_X64_ACCESS_MEMORY_POOL (1 << 2) -#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) -#define HV_X64_POST_MESSAGES (1 << 4) -#define HV_X64_SIGNAL_EVENTS (1 << 5) -#define HV_X64_CREATE_PORT (1 << 6) -#define HV_X64_CONNECT_PORT (1 << 7) -#define HV_X64_ACCESS_STATS (1 << 8) -#define HV_X64_DEBUGGING (1 << 11) -#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) -#define HV_X64_CONFIGURE_PROFILER (1 << 13) +#define HV_X64_CREATE_PARTITIONS BIT(0) +#define HV_X64_ACCESS_PARTITION_ID BIT(1) +#define HV_X64_ACCESS_MEMORY_POOL BIT(2) +#define HV_X64_ADJUST_MESSAGE_BUFFERS BIT(3) +#define HV_X64_POST_MESSAGES BIT(4) +#define HV_X64_SIGNAL_EVENTS BIT(5) +#define HV_X64_CREATE_PORT BIT(6) +#define HV_X64_CONNECT_PORT BIT(7) +#define HV_X64_ACCESS_STATS BIT(8) +#define HV_X64_DEBUGGING BIT(11) +#define HV_X64_CPU_POWER_MANAGEMENT BIT(12) /* * Feature identification. EDX indicates which miscellaneous features * are available to the partition. + * These are HYPERV_CPUID_FEATURES.EDX bits. */ /* The MWAIT instruction is available (per section MONITOR / MWAIT) */ -#define HV_X64_MWAIT_AVAILABLE (1 << 0) +#define HV_X64_MWAIT_AVAILABLE BIT(0) /* Guest debugging support is available */ -#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) +#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) /* Performance Monitor support is available*/ -#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) +#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) /* Support for physical CPU dynamic partitioning events is available*/ -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) /* * Support for passing hypercall input parameter block via XMM * registers is available */ -#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) +#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4) /* Support for a virtual guest idle state is available */ -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) -/* Guest crash data handler available */ -#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10) +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) +/* Frequency MSRs available */ +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) +/* Crash MSR available */ +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) +/* stimer Direct Mode is available */ +#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19) /* * Implementation recommendations. Indicates which behaviors the hypervisor * recommends the OS implement for optimal performance. + * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits. 
+ */ +/* + * Recommend using hypercall for address space switches rather + * than MOV to CR3 instruction */ - /* - * Recommend using hypercall for address space switches rather - * than MOV to CR3 instruction - */ -#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0) +#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0) /* Recommend using hypercall for local TLB flushes rather * than INVLPG or MOV to CR3 instructions */ -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1) /* * Recommend using hypercall for remote TLB flushes rather * than inter-processor interrupts */ -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2) /* * Recommend using MSRs for accessing APIC registers * EOI, ICR and TPR rather than their memory-mapped counterparts */ -#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) +#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3) /* Recommend using the hypervisor-provided MSR to initiate a system RESET */ -#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) +#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4) /* * Recommend using relaxed timing for this partition. If used, * the VM should disable any watchdog timeouts that rely on the * timely delivery of external interrupts */ -#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) +#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5) /* * Recommend not using Auto End-Of-Interrupt feature */ -#define HV_DEPRECATING_AEOI_RECOMMENDED (1 << 9) +#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9) /* * Recommend using cluster IPI hypercalls. */ -#define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10) +#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10) /* Recommend using the newer ExProcessorMasks interface */ -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) /* Recommend using enlightened VMCS */ -#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED (1 << 14) +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) -/* - * Crash notification flags. - */ -#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) -#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) +/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ +#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) +#define HV_X64_NESTED_MSR_BITMAP BIT(19) + +/* Hyper-V specific model specific registers (MSRs) */ /* MSR used to identify the guest OS. */ #define HV_X64_MSR_GUEST_OS_ID 0x40000000 @@ -201,6 +194,9 @@ /* MSR used to read the per-partition time reference counter */ #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +/* A partition's reference time stamp counter (TSC) page */ +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 + /* MSR used to retrieve the TSC frequency */ #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 @@ -258,9 +254,11 @@ #define HV_X64_MSR_CRASH_P3 0x40000103 #define HV_X64_MSR_CRASH_P4 0x40000104 #define HV_X64_MSR_CRASH_CTL 0x40000105 -#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63) -#define HV_X64_MSR_CRASH_PARAMS \ - (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + +/* TSC emulation after migration */ +#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 +#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 +#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 /* * Declare the MSR used to setup pages used to communicate with the hypervisor. 
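Consumers normally test the feature and recommendation bits regrouped above against cached CPUID state rather than re-reading CPUID; ms_hyperv.features holds HYPERV_CPUID_FEATURES.EAX after early platform setup. A small illustrative helper (the function name is made up):

static bool example_hv_has_reference_tsc(void)
{
	/* ms_hyperv is filled from CPUID by early Hyper-V init */
	return !!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE);
}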
@@ -271,7 +269,7 @@ union hv_x64_msr_hypercall_contents { u64 enable:1; u64 reserved:11; u64 guest_physical_address:52; - }; + } __packed; }; /* @@ -283,7 +281,7 @@ struct ms_hyperv_tsc_page { volatile u64 tsc_scale; volatile s64 tsc_offset; u64 reserved2[509]; -}; +} __packed; /* * The guest OS needs to register the guest ID with the hypervisor. @@ -311,39 +309,37 @@ struct ms_hyperv_tsc_page { #define HV_LINUX_VENDOR_ID 0x8100 -/* TSC emulation after migration */ -#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 - -/* Nested features (CPUID 0x4000000A) EAX */ -#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) -#define HV_X64_NESTED_MSR_BITMAP BIT(19) - struct hv_reenlightenment_control { __u64 vector:8; __u64 reserved1:8; __u64 enabled:1; __u64 reserved2:15; __u64 target_vp:32; -}; - -#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 -#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 +} __packed; struct hv_tsc_emulation_control { __u64 enabled:1; __u64 reserved:63; -}; +} __packed; struct hv_tsc_emulation_status { __u64 inprogress:1; __u64 reserved:63; -}; +} __packed; #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) +/* + * Crash notification (HV_X64_MSR_CRASH_CTL) flags. + */ +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) +#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) +#define HV_X64_MSR_CRASH_PARAMS \ + (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + #define HV_IPI_LOW_VECTOR 0x10 #define HV_IPI_HIGH_VECTOR 0xff @@ -358,6 +354,7 @@ struct hv_tsc_emulation_status { #define HVCALL_POST_MESSAGE 0x005c #define HVCALL_SIGNAL_EVENT 0x005d #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 #define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12 @@ -409,7 +406,7 @@ typedef struct _HV_REFERENCE_TSC_PAGE { __u32 res1; __u64 tsc_scale; __s64 tsc_offset; -} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; +} __packed HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; /* Define the number of synthetic interrupt sources. */ #define HV_SYNIC_SINT_COUNT (16) @@ -466,7 +463,7 @@ union hv_message_flags { struct { __u8 msg_pending:1; __u8 reserved:7; - }; + } __packed; }; /* Define port identifier type. */ @@ -475,7 +472,7 @@ union hv_port_id { struct { __u32 id:24; __u32 reserved:8; - } u; + } __packed u; }; /* Define synthetic interrupt controller message header. */ @@ -488,7 +485,7 @@ struct hv_message_header { __u64 sender; union hv_port_id port; }; -}; +} __packed; /* Define synthetic interrupt controller message format. */ struct hv_message { @@ -496,12 +493,12 @@ struct hv_message { union { __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; } u; -}; +} __packed; /* Define the synthetic interrupt message page layout. */ struct hv_message_page { struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -}; +} __packed; /* Define timer message payload structure. */ struct hv_timer_message_payload { @@ -509,7 +506,7 @@ struct hv_timer_message_payload { __u32 reserved; __u64 expiration_time; /* When the timer expired */ __u64 delivery_time; /* When the message was delivered */ -}; +} __packed; /* Define virtual processor assist page structure. 
*/ struct hv_vp_assist_page { @@ -518,8 +515,9 @@ struct hv_vp_assist_page { __u64 vtl_control[2]; __u64 nested_enlightenments_control[2]; __u32 enlighten_vmentry; + __u32 padding; __u64 current_nested_vmcs; -}; +} __packed; struct hv_enlightened_vmcs { u32 revision_id; @@ -533,6 +531,8 @@ struct hv_enlightened_vmcs { u16 host_gs_selector; u16 host_tr_selector; + u16 padding16_1; + u64 host_ia32_pat; u64 host_ia32_efer; @@ -651,7 +651,7 @@ struct hv_enlightened_vmcs { u64 ept_pointer; u16 virtual_processor_id; - u16 padding16[3]; + u16 padding16_2[3]; u64 padding64_2[5]; u64 guest_physical_address; @@ -693,7 +693,7 @@ struct hv_enlightened_vmcs { u32 nested_flush_hypercall:1; u32 msr_bitmap:1; u32 reserved:30; - } hv_enlightenments_control; + } __packed hv_enlightenments_control; u32 hv_vp_id; u64 hv_vm_id; @@ -703,7 +703,7 @@ struct hv_enlightened_vmcs { u64 padding64_5[7]; u64 xss_exit_bitmap; u64 padding64_6[7]; -}; +} __packed; #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0) @@ -725,36 +725,129 @@ struct hv_enlightened_vmcs { #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF -#define HV_STIMER_ENABLE (1ULL << 0) -#define HV_STIMER_PERIODIC (1ULL << 1) -#define HV_STIMER_LAZY (1ULL << 2) -#define HV_STIMER_AUTOENABLE (1ULL << 3) -#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) + +/* + * Synthetic timer configuration. + */ +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable:1; + u64 periodic:1; + u64 lazy:1; + u64 auto_enable:1; + u64 apic_vector:8; + u64 direct_mode:1; + u64 reserved_z0:3; + u64 sintx:4; + u64 reserved_z1:44; + } __packed; +}; + + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:63; + } __packed; +}; + +/* Define synthetic interrupt source. */ +union hv_synic_sint { + u64 as_uint64; + struct { + u64 vector:8; + u64 reserved1:8; + u64 masked:1; + u64 auto_eoi:1; + u64 reserved2:46; + } __packed; +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled:1; + u64 preserved:11; + u64 base_simp_gpa:52; + } __packed; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled:1; + u64 preserved:11; + u64 base_siefp_gpa:52; + } __packed; +}; struct hv_vpset { u64 format; u64 valid_bank_mask; u64 bank_contents[]; -}; +} __packed; /* HvCallSendSyntheticClusterIpi hypercall */ struct hv_send_ipi { u32 vector; u32 reserved; u64 cpu_mask; -}; +} __packed; /* HvCallSendSyntheticClusterIpiEx hypercall */ struct hv_send_ipi_ex { u32 vector; u32 reserved; struct hv_vpset vp_set; -}; +} __packed; /* HvFlushGuestPhysicalAddressSpace hypercalls */ struct hv_guest_mapping_flush { u64 address_space; u64 flags; +} __packed; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. 
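For orientation, a rough sketch of how the list-flush structures defined just below pair with the fill callback that hyperv_flush_guest_mapping_range() (shown earlier in this series) invokes; the range type and both example names are illustrative only:

struct example_gfn_range {
	u64 start_gfn;
	u64 end_gfn;
};

static int example_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
				   void *data)
{
	struct example_gfn_range *r = data;

	/* populates flush->gpa_list[] and returns the entry count */
	return hyperv_fill_flush_guest_mapping_list(flush, r->start_gfn,
						    r->end_gfn);
}

/* a caller would then issue:
 *	hyperv_flush_guest_mapping_range(address_space,
 *					 example_fill_flush_list, &range);
 */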
+ */ +#define HV_MAX_FLUSH_PAGES (2048) + +/* HvFlushGuestPhysicalAddressList hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages:11; + u64 largepage:1; + u64 basepfn:52; + } page; +}; + +/* + * All input flush parameters should be in a single page. The max flush + * count is equal to how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. + */ +#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; }; /* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ @@ -763,7 +856,7 @@ struct hv_tlb_flush { u64 flags; u64 processor_mask; u64 gva_list[]; -}; +} __packed; /* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ struct hv_tlb_flush_ex { @@ -771,6 +864,6 @@ u64 flags; struct hv_vpset hv_vp_set; u64 gva_list[]; -}; +} __packed; #endif diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h index b523f51c5400..634f99b1dc22 100644 --- a/arch/x86/include/asm/intel_pt.h +++ b/arch/x86/include/asm/intel_pt.h @@ -2,10 +2,36 @@ #ifndef _ASM_X86_INTEL_PT_H #define _ASM_X86_INTEL_PT_H +#define PT_CPUID_LEAVES 2 +#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */ + +enum pt_capabilities { + PT_CAP_max_subleaf = 0, + PT_CAP_cr3_filtering, + PT_CAP_psb_cyc, + PT_CAP_ip_filtering, + PT_CAP_mtc, + PT_CAP_ptwrite, + PT_CAP_power_event_trace, + PT_CAP_topa_output, + PT_CAP_topa_multiple_entries, + PT_CAP_single_range_output, + PT_CAP_output_subsys, + PT_CAP_payloads_lip, + PT_CAP_num_address_ranges, + PT_CAP_mtc_periods, + PT_CAP_cycle_thresholds, + PT_CAP_psb_periods, +}; + #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) void cpu_emergency_stop_pt(void); +extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap); +extern u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities cap); #else static inline void cpu_emergency_stop_pt(void) {} +static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; } +static inline u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) { return 0; } #endif #endif /* _ASM_X86_INTEL_PT_H */ diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 2395bb794c7b..fbb16e6b6c18 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -30,6 +30,9 @@ extern void fixup_irqs(void); #ifdef CONFIG_HAVE_KVM extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); +extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs); +extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs); +extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs); #endif extern void (*x86_platform_ipi_callback)(void); @@ -41,9 +44,13 @@ extern __visible unsigned int do_IRQ(struct pt_regs *regs); extern void init_ISA_irqs(void); +extern void __init init_IRQ(void); + #ifdef CONFIG_X86_LOCAL_APIC void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self); + +extern __visible void smp_x86_platform_ipi(struct pt_regs *regs); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace #endif diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h index 800ffce0db29..80b35e3adf03 100644 --- a/arch/x86/include/asm/irq_work.h +++ 
b/arch/x86/include/asm/irq_work.h @@ -10,6 +10,7 @@ static inline bool arch_irq_work_has_interrupt(void) return boot_cpu_has(X86_FEATURE_APIC); } extern void arch_irq_work_raise(void); +extern __visible void smp_irq_work_interrupt(struct pt_regs *regs); #else static inline bool arch_irq_work_has_interrupt(void) { diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index a5fb34fe56a4..21efc9d07ed9 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -2,6 +2,19 @@ #ifndef _ASM_X86_JUMP_LABEL_H #define _ASM_X86_JUMP_LABEL_H +#ifndef HAVE_JUMP_LABEL +/* + * For better or for worse, if jump labels (the gcc extension) are missing, + * then the entire static branch patching infrastructure is compiled out. + * If that happens, the code in here will malfunction. Raise a compiler + * error instead. + * + * In theory, jump labels and the static branch patching infrastructure + * could be decoupled to fix this. + */ +#error asm/jump_label.h included on a non-jump-label kernel +#endif + #define JUMP_LABEL_NOP_SIZE 5 #ifdef CONFIG_X86_64 @@ -20,9 +33,15 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" " - "branch=\"%c1\"" - : : "i" (key), "i" (branch) : : l_yes); + asm_volatile_goto("1:" + ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + ".long 1b - ., %l[l_yes] - . \n\t" + _ASM_PTR "%c0 + %c1 - .\n\t" + ".popsection \n\t" + : : "i" (key), "i" (branch) : : l_yes); + return false; l_yes: return true; @@ -30,8 +49,14 @@ l_yes: static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { - asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" " - "branch=\"%c1\"" + asm_volatile_goto("1:" + ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" + "2:\n\t" + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + ".long 1b - ., %l[l_yes] - . \n\t" + _ASM_PTR "%c0 + %c1 - .\n\t" + ".popsection \n\t" : : "i" (key), "i" (branch) : : l_yes); return false; @@ -41,26 +66,37 @@ l_yes: #else /* __ASSEMBLY__ */ -.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req -.Lstatic_branch_nop_\@: - .byte STATIC_KEY_INIT_NOP -.Lstatic_branch_no_after_\@: +.macro STATIC_JUMP_IF_TRUE target, key, def +.Lstatic_jump_\@: + .if \def + /* Equivalent to "jmp.d32 \target" */ + .byte 0xe9 + .long \target - .Lstatic_jump_after_\@ +.Lstatic_jump_after_\@: + .else + .byte STATIC_KEY_INIT_NOP + .endif .pushsection __jump_table, "aw" _ASM_ALIGN - .long .Lstatic_branch_nop_\@ - ., \l_yes - . - _ASM_PTR \key + \branch - . + .long .Lstatic_jump_\@ - ., \target - . + _ASM_PTR \key - . .popsection .endm -.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req -.Lstatic_branch_jmp_\@: - .byte 0xe9 - .long \l_yes - .Lstatic_branch_jmp_after_\@ -.Lstatic_branch_jmp_after_\@: +.macro STATIC_JUMP_IF_FALSE target, key, def +.Lstatic_jump_\@: + .if \def + .byte STATIC_KEY_INIT_NOP + .else + /* Equivalent to "jmp.d32 \target" */ + .byte 0xe9 + .long \target - .Lstatic_jump_after_\@ +.Lstatic_jump_after_\@: + .endif .pushsection __jump_table, "aw" _ASM_ALIGN - .long .Lstatic_branch_jmp_\@ - ., \l_yes - . - _ASM_PTR \key + \branch - . + .long .Lstatic_jump_\@ - ., \target - . + _ASM_PTR \key + 1 - . 
.popsection .endm diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fbda5a917c5b..4660ce90de7f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -439,6 +439,11 @@ struct kvm_mmu { u64 pdptrs[4]; /* pae */ }; +struct kvm_tlb_range { + u64 start_gfn; + u64 pages; +}; + enum pmc_type { KVM_PMC_GP = 0, KVM_PMC_FIXED, @@ -497,7 +502,7 @@ struct kvm_mtrr { struct kvm_vcpu_hv_stimer { struct hrtimer timer; int index; - u64 config; + union hv_stimer_config config; u64 count; u64 exp_time; struct hv_message msg; @@ -601,17 +606,16 @@ struct kvm_vcpu_arch { /* * QEMU userspace and the guest each have their own FPU state. - * In vcpu_run, we switch between the user and guest FPU contexts. - * While running a VCPU, the VCPU thread will have the guest FPU - * context. + * In vcpu_run, we switch between the user, maintained in the + * task_struct struct, and guest FPU contexts. While running a VCPU, + * the VCPU thread will have the guest FPU context. * * Note that while the PKRU state lives inside the fpu registers, * it is switched out separately at VMENTER and VMEXIT time. The * "guest_fpu" state here contains the guest FPU context, with the * host PKRU bits. */ - struct fpu user_fpu; - struct fpu guest_fpu; + struct fpu *guest_fpu; u64 xcr0; u64 guest_supported_xcr0; @@ -1042,6 +1046,8 @@ struct kvm_x86_ops { void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa); int (*tlb_remote_flush)(struct kvm *kvm); + int (*tlb_remote_flush_with_range)(struct kvm *kvm, + struct kvm_tlb_range *range); /* * Flush any TLB entries associated with the given GVA. @@ -1106,6 +1112,7 @@ struct kvm_x86_ops { bool (*mpx_supported)(void); bool (*xsaves_supported)(void); bool (*umip_emulated)(void); + bool (*pt_supported)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); @@ -1186,6 +1193,7 @@ struct kvm_x86_ops { int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); + uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); }; struct kvm_arch_async_pf { @@ -1196,6 +1204,7 @@ struct kvm_arch_async_pf { }; extern struct kvm_x86_ops *kvm_x86_ops; +extern struct kmem_cache *x86_fpu_cache; #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) @@ -1492,7 +1501,7 @@ asmlinkage void kvm_spurious_fault(void); "cmpb $0, kvm_rebooting \n\t" \ "jne 668b \n\t" \ __ASM_SIZE(push) " $666b \n\t" \ - "call kvm_spurious_fault \n\t" \ + "jmp kvm_spurious_fault \n\t" \ ".popsection \n\t" \ _ASM_EXTABLE(666b, 667b) @@ -1503,7 +1512,7 @@ asmlinkage void kvm_spurious_fault(void); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 4c723632c036..5ed3cf1c3934 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -92,6 +92,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); void kvm_async_pf_task_wake(u32 token); u32 kvm_read_and_reset_pf_reason(void); 
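Since guest_fpu becomes a pointer and x86_fpu_cache is exported in the kvm_host.h hunk above, vCPU creation must now allocate the guest FPU state from that slab. A hedged sketch of the allocation step (function name invented, error unwinding trimmed):

static int example_alloc_guest_fpu(struct kvm_vcpu *vcpu)
{
	/* zeroed allocation from the shared slab declared above */
	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
	return vcpu->arch.guest_fpu ? 0 : -ENOMEM;
}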
extern void kvm_disable_steal_time(void); +void do_async_page_fault(struct pt_regs *regs, unsigned long error_code); #ifdef CONFIG_PARAVIRT_SPINLOCKS void __init kvm_spinlock_init(void); diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 1d0a7778e163..cc60e617931c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -22,6 +22,11 @@ struct ms_hyperv_info { extern struct ms_hyperv_info ms_hyperv; + +typedef int (*hyperv_fill_flush_list_func)( + struct hv_guest_mapping_flush_list *flush, + void *data); + /* * Generate the guest ID. */ @@ -348,6 +353,11 @@ void set_hv_tscchange_cb(void (*cb)(void)); void clear_hv_tscchange_cb(void); void hyperv_stop_tsc_emulation(void); int hyperv_flush_guest_mapping(u64 as); +int hyperv_flush_guest_mapping_range(u64 as, + hyperv_fill_flush_list_func fill_func, void *data); +int hyperv_fill_flush_guest_mapping_list( + struct hv_guest_mapping_flush_list *flush, + u64 start_gfn, u64 end_gfn); #ifdef CONFIG_X86_64 void hv_apic_init(void); @@ -370,6 +380,11 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu) return NULL; } static inline int hyperv_flush_guest_mapping(u64 as) { return -1; } +static inline int hyperv_flush_guest_mapping_range(u64 as, + hyperv_fill_flush_list_func fill_func, void *data) +{ + return -1; +} #endif /* CONFIG_HYPERV */ #ifdef CONFIG_HYPERV_TSCPAGE diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index c8f73efb4ece..8e40c2446fd1 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -121,7 +121,43 @@ #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 #define MSR_IA32_RTIT_CTL 0x00000570 +#define RTIT_CTL_TRACEEN BIT(0) +#define RTIT_CTL_CYCLEACC BIT(1) +#define RTIT_CTL_OS BIT(2) +#define RTIT_CTL_USR BIT(3) +#define RTIT_CTL_PWR_EVT_EN BIT(4) +#define RTIT_CTL_FUP_ON_PTW BIT(5) +#define RTIT_CTL_FABRIC_EN BIT(6) +#define RTIT_CTL_CR3EN BIT(7) +#define RTIT_CTL_TOPA BIT(8) +#define RTIT_CTL_MTC_EN BIT(9) +#define RTIT_CTL_TSC_EN BIT(10) +#define RTIT_CTL_DISRETC BIT(11) +#define RTIT_CTL_PTW_EN BIT(12) +#define RTIT_CTL_BRANCH_EN BIT(13) +#define RTIT_CTL_MTC_RANGE_OFFSET 14 +#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) +#define RTIT_CTL_CYC_THRESH_OFFSET 19 +#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) +#define RTIT_CTL_PSB_FREQ_OFFSET 24 +#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) +#define RTIT_CTL_ADDR0_OFFSET 32 +#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET) +#define RTIT_CTL_ADDR1_OFFSET 36 +#define RTIT_CTL_ADDR1 (0x0full << RTIT_CTL_ADDR1_OFFSET) +#define RTIT_CTL_ADDR2_OFFSET 40 +#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET) +#define RTIT_CTL_ADDR3_OFFSET 44 +#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET) #define MSR_IA32_RTIT_STATUS 0x00000571 +#define RTIT_STATUS_FILTEREN BIT(0) +#define RTIT_STATUS_CONTEXTEN BIT(1) +#define RTIT_STATUS_TRIGGEREN BIT(2) +#define RTIT_STATUS_BUFFOVF BIT(3) +#define RTIT_STATUS_ERROR BIT(4) +#define RTIT_STATUS_STOPPED BIT(5) +#define RTIT_STATUS_BYTECNT_OFFSET 32 +#define RTIT_STATUS_BYTECNT (0x1ffffull << RTIT_STATUS_BYTECNT_OFFSET) #define MSR_IA32_RTIT_ADDR0_A 0x00000580 #define MSR_IA32_RTIT_ADDR0_B 0x00000581 #define MSR_IA32_RTIT_ADDR1_A 0x00000582 @@ -390,6 +426,7 @@ #define MSR_F15H_NB_PERF_CTR 0xc0010241 #define MSR_F15H_PTSC 0xc0010280 #define MSR_F15H_IC_CFG 0xc0011021 +#define MSR_F15H_EX_CFG 0xc001102c /* Fam 10h MSRs */ #define 
MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 @@ -771,6 +808,7 @@ #define VMX_BASIC_INOUT 0x0040000000000000LLU /* MSR_IA32_VMX_MISC bits */ +#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14) #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F /* AMD-V MSRs */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 032b6009baab..dad12b767ba0 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -232,6 +232,7 @@ enum spectre_v2_mitigation { enum spectre_v2_user_mitigation { SPECTRE_V2_USER_NONE, SPECTRE_V2_USER_STRICT, + SPECTRE_V2_USER_STRICT_PREFERRED, SPECTRE_V2_USER_PRCTL, SPECTRE_V2_USER_SECCOMP, }; diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4bf42f9e4eea..a97f28d914d5 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -26,6 +26,11 @@ struct static_key; extern struct static_key paravirt_steal_enabled; extern struct static_key paravirt_steal_rq_enabled; +__visible void __native_queued_spin_unlock(struct qspinlock *lock); +bool pv_is_native_spin_unlock(void); +__visible bool __native_vcpu_is_preempted(long cpu); +bool pv_is_native_vcpu_is_preempted(void); + static inline u64 paravirt_steal_clock(int cpu) { return PVOP_CALL1(u64, time.steal_clock, cpu); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 26942ad63830..488c59686a73 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops; #define paravirt_clobber(clobber) \ [paravirt_clobber] "i" (clobber) +/* + * Generate some code, and mark it as patchable by the + * apply_paravirt() alternate instruction patcher. + */ +#define _paravirt_alt(insn_string, type, clobber) \ + "771:\n\t" insn_string "\n" "772:\n" \ + ".pushsection .parainstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR " 771b\n" \ + " .byte " type "\n" \ + " .byte 772b-771b\n" \ + " .short " clobber "\n" \ + ".popsection\n" + /* Generate patchable code, with the default asm parameters. */ -#define paravirt_call \ - "PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \ - " clobber=\"%c[paravirt_clobber]\"" \ - " pv_opptr=\"%c[paravirt_opptr]\";" +#define paravirt_alt(insn_string) \ + _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") /* Simple instruction patching code. */ #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" @@ -373,6 +385,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len); int paravirt_disable_iospace(void); /* + * This generates an indirect call based on the operation type number. + * The type number, computed in PARAVIRT_PATCH, is derived from the + * offset into the paravirt_patch_template structure, and can therefore be + * freely converted back into a structure offset. + */ +#define PARAVIRT_CALL \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%c[paravirt_opptr];" + +/* * These macros are intended to wrap calls through one of the paravirt * ops structs, so that they can be later identified and patched at * runtime. 
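Each _paravirt_alt() expansion drops a record into .parainstructions; at boot those records are walked roughly like the sketch below. This is simplified: the field names follow struct paravirt_patch_site as declared further down, and the insn buffering and NOP padding of the real apply_paravirt() are elided.

static void example_apply_parainstructions(void)
{
	struct paravirt_patch_site *p;

	for (p = __parainstructions; p < __parainstructions_end; p++) {
		/* p->instr marks the 771: label; p->len is 772b-771b */
		unsigned int used = native_patch(p->instrtype, p->instr,
						 (unsigned long)p->instr,
						 p->len);

		/* the real patcher NOP-pads the p->len - used trailing bytes */
		(void)used;
	}
}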
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void); /* since this condition will never hold */ \ if (sizeof(rettype) > sizeof(unsigned long)) { \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -519,7 +541,7 @@ int paravirt_disable_iospace(void); __ret = (rettype)((((u64)__edx) << 32) | __eax); \ } else { \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -546,7 +568,7 @@ int paravirt_disable_iospace(void); PVOP_VCALL_ARGS; \ PVOP_TEST_NULL(op); \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -664,26 +686,6 @@ struct paravirt_patch_site { extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[]; -#else /* __ASSEMBLY__ */ - -/* - * This generates an indirect call based on the operation type number. - * The type number, computed in PARAVIRT_PATCH, is derived from the - * offset into the paravirt_patch_template structure, and can therefore be - * freely converted back into a structure offset. - */ -.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req -771: ANNOTATE_RETPOLINE_SAFE - call *\pv_opptr -772: .pushsection .parainstructions,"a" - _ASM_ALIGN - _ASM_PTR 771b - .byte \type - .byte 772b-771b - .short \clobber - .popsection -.endm - #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PARAVIRT_TYPES_H */ diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 959d618dbb17..73bb404f4d2a 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -121,7 +121,14 @@ extern void __init dmi_check_pciprobe(void); extern void __init dmi_check_skip_isa_align(void); /* some common used subsys_initcalls */ +#ifdef CONFIG_PCI extern int __init pci_acpi_init(void); +#else +static inline int __init pci_acpi_init(void) +{ + return -EINVAL; +} +#endif extern void __init pcibios_irq_init(void); extern int __init pcibios_init(void); extern int pci_legacy_init(void); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index ec7f43327033..1ea41aaef68b 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -80,6 +80,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); } +static inline void pmd_populate_kernel_safe(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); +} + static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { @@ -132,6 +139,12 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); } + +static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); + set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd))); +} #endif /* CONFIG_X86_PAE */ #if CONFIG_PGTABLE_LEVELS > 3 @@ -141,6 +154,12 @@ static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud))); } +static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); + set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud))); +} + static inline pud_t 
*pud_alloc_one(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_KERNEL_ACCOUNT; @@ -173,6 +192,14 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); } +static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) +{ + if (!pgtable_l5_enabled()) + return; + paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); + set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); +} + static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_KERNEL_ACCOUNT; diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 84bd9bdc1987..88bca456da99 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d; */ #define MAXMEM (1UL << MAX_PHYSMEM_BITS) +#define GUARD_HOLE_PGD_ENTRY -256UL +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT) +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT) +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE) + #define LDT_PGD_ENTRY -240UL #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 106b7d0e2dae..d6ff0bbdb394 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -564,8 +564,12 @@ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, unsigned int *level); extern pmd_t *lookup_pmd_address(unsigned long address); extern phys_addr_t slow_virt_to_phys(void *__address); -extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, - unsigned numpages, unsigned long page_flags); +extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, + unsigned long address, + unsigned numpages, + unsigned long page_flags); +extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, + unsigned long numpages); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 90cb2f36c042..99a7fa9ab0a3 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -8,6 +8,9 @@ DECLARE_PER_CPU(int, __preempt_count); +/* We use the MSB mostly because it's available */ +#define PREEMPT_NEED_RESCHED 0x80000000 + /* * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such * that a decrement hitting 0 means we can and should reschedule. diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index a671a1145906..04c17be9b5fd 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -26,6 +26,7 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_APM 1 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); +void nmi_panic_self_stop(struct pt_regs *regs); void nmi_shootdown_cpus(nmi_shootdown_cb callback); void run_crash_ipi_callback(struct pt_regs *regs); diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index a8b5e1e13319..dbaed55c1c24 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -4,41 +4,6 @@ * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from * PaX/grsecurity. 
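Callers never see the exception plumbing this header sets up; they use the generic refcount_t API that the inline helpers below back. A brief illustrative consumer, with the object type invented:

struct example_obj {
	refcount_t refs;
};

static void example_obj_get(struct example_obj *o)
{
	/* LOCK incl + js into .text..refcount if the count turns negative */
	refcount_inc(&o->refs);
}

static void example_obj_put(struct example_obj *o)
{
	if (refcount_dec_and_test(&o->refs))
		kfree(o);
}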
*/ - -#ifdef __ASSEMBLY__ - -#include <asm/asm.h> -#include <asm/bug.h> - -.macro REFCOUNT_EXCEPTION counter:req - .pushsection .text..refcount -111: lea \counter, %_ASM_CX -112: ud2 - ASM_UNREACHABLE - .popsection -113: _ASM_EXTABLE_REFCOUNT(112b, 113b) -.endm - -/* Trigger refcount exception if refcount result is negative. */ -.macro REFCOUNT_CHECK_LT_ZERO counter:req - js 111f - REFCOUNT_EXCEPTION counter="\counter" -.endm - -/* Trigger refcount exception if refcount result is zero or negative. */ -.macro REFCOUNT_CHECK_LE_ZERO counter:req - jz 111f - REFCOUNT_CHECK_LT_ZERO counter="\counter" -.endm - -/* Trigger refcount exception unconditionally. */ -.macro REFCOUNT_ERROR counter:req - jmp 111f - REFCOUNT_EXCEPTION counter="\counter" -.endm - -#else /* __ASSEMBLY__ */ - #include <linux/refcount.h> #include <asm/bug.h> @@ -50,12 +15,35 @@ * central refcount exception. The fixup address for the exception points * back to the regular execution flow in .text. */ +#define _REFCOUNT_EXCEPTION \ + ".pushsection .text..refcount\n" \ + "111:\tlea %[var], %%" _ASM_CX "\n" \ + "112:\t" ASM_UD2 "\n" \ + ASM_UNREACHABLE \ + ".popsection\n" \ + "113:\n" \ + _ASM_EXTABLE_REFCOUNT(112b, 113b) + +/* Trigger refcount exception if refcount result is negative. */ +#define REFCOUNT_CHECK_LT_ZERO \ + "js 111f\n\t" \ + _REFCOUNT_EXCEPTION + +/* Trigger refcount exception if refcount result is zero or negative. */ +#define REFCOUNT_CHECK_LE_ZERO \ + "jz 111f\n\t" \ + REFCOUNT_CHECK_LT_ZERO + +/* Trigger refcount exception unconditionally. */ +#define REFCOUNT_ERROR \ + "jmp 111f\n\t" \ + _REFCOUNT_EXCEPTION static __always_inline void refcount_add(unsigned int i, refcount_t *r) { asm volatile(LOCK_PREFIX "addl %1,%0\n\t" - "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LT_ZERO + : [var] "+m" (r->refs.counter) : "ir" (i) : "cc", "cx"); } @@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r) static __always_inline void refcount_inc(refcount_t *r) { asm volatile(LOCK_PREFIX "incl %0\n\t" - "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LT_ZERO + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } static __always_inline void refcount_dec(refcount_t *r) { asm volatile(LOCK_PREFIX "decl %0\n\t" - "REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LE_ZERO + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { - return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", - "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", + REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "er", i, "cx"); } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", - "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", + REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "cx"); } @@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) /* Did we try to increment from/to an undesirable state? 
*/ if (unlikely(c < 0 || c == INT_MAX || result < c)) { - asm volatile("REFCOUNT_ERROR counter=\"%[counter]\"" - : : [counter] "m" (r->refs.counter) + asm volatile(REFCOUNT_ERROR + : : [var] "m" (r->refs.counter) : "cc", "cx"); break; } @@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r) return refcount_add_not_zero(1, r); } -#endif /* __ASSEMBLY__ */ - #endif diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/resctrl_sched.h index 9acb06b6f81e..54990fe2a3ae 100644 --- a/arch/x86/include/asm/intel_rdt_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_RDT_SCHED_H -#define _ASM_X86_INTEL_RDT_SCHED_H +#ifndef _ASM_X86_RESCTRL_SCHED_H +#define _ASM_X86_RESCTRL_SCHED_H -#ifdef CONFIG_INTEL_RDT +#ifdef CONFIG_RESCTRL #include <linux/sched.h> #include <linux/jump_label.h> @@ -10,7 +10,7 @@ #define IA32_PQR_ASSOC 0x0c8f /** - * struct intel_pqr_state - State cache for the PQR MSR + * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID * @cur_closid: The cached Class Of Service ID * @default_rmid: The user assigned Resource Monitoring ID @@ -24,21 +24,21 @@ * The cache also helps to avoid pointless updates if the value does * not change. */ -struct intel_pqr_state { +struct resctrl_pqr_state { u32 cur_rmid; u32 cur_closid; u32 default_rmid; u32 default_closid; }; -DECLARE_PER_CPU(struct intel_pqr_state, pqr_state); +DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); /* - * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR + * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * * Following considerations are made so that this has minimal impact * on scheduler hot path: @@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); * simple as possible. * Must be called with preemption disabled. 
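The conversion above drops the GNU-as .macro indirection: the exception sequence is once again a C string macro pasted directly into each asm() template, so the fast path stays one locked instruction plus a conditional jump, and only the error case lands in the out-of-line .text..refcount section (ud2 plus an exception-table fixup). The control flow can be illustrated in user space; in this sketch a flag write stands in for the ud2/extable machinery, which cannot be reproduced outside the kernel, and the branch sense is inverted (x86-64 with GCC/Clang inline asm assumed):

#include <stdio.h>

static int saturated;	/* stands in for the ud2 + exception-table path */

static inline void refcount_inc_sketch(int *counter)
{
	asm volatile("lock incl %0\n\t"
		     "jns 1f\n\t"	/* result still >= 0: fast path */
		     "movl $1, %1\n"	/* went negative: "exception" path */
		     "1:"
		     : "+m" (*counter), "+m" (saturated)
		     :
		     : "cc");
}

int main(void)
{
	int ok = 1;
	int bad = 0x7fffffff;	/* INT_MAX: one more increment wraps negative */

	refcount_inc_sketch(&ok);
	refcount_inc_sketch(&bad);
	printf("ok=%d bad=%#x saturated=%d\n", ok, bad, saturated);
	return 0;
}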
*/ -static void __intel_rdt_sched_in(void) +static void __resctrl_sched_in(void) { - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); u32 closid = state->default_closid; u32 rmid = state->default_rmid; @@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void) } } -static inline void intel_rdt_sched_in(void) +static inline void resctrl_sched_in(void) { if (static_branch_likely(&rdt_enable_key)) - __intel_rdt_sched_in(); + __resctrl_sched_in(); } #else -static inline void intel_rdt_sched_in(void) {} +static inline void resctrl_sched_in(void) {} -#endif /* CONFIG_INTEL_RDT */ +#endif /* CONFIG_RESCTRL */ -#endif /* _ASM_X86_INTEL_RDT_SCHED_H */ +#endif /* _ASM_X86_RESCTRL_SCHED_H */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ae13bc974416..ed8ec011a9fd 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -46,6 +46,9 @@ extern unsigned long saved_video_mode; extern void reserve_standard_io_resources(void); extern void i386_reserve_resources(void); +extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp); +extern unsigned long __startup_secondary_64(void); +extern int early_make_pgtable(unsigned long address); #ifdef CONFIG_X86_INTEL_MID extern void x86_intel_mid_early_setup(void); diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h index bd26834724e5..2fcbd6f33ef7 100644 --- a/arch/x86/include/asm/sighandling.h +++ b/arch/x86/include/asm/sighandling.h @@ -17,4 +17,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where); int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask); + +#ifdef CONFIG_X86_X32_ABI +asmlinkage long sys32_x32_rt_sigreturn(void); +#endif + #endif /* _ASM_X86_SIGHANDLING_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 547c4fe50711..2e95b6c1bca3 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -148,6 +148,12 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); void smp_store_boot_cpu_info(void); void smp_store_cpu_info(int id); + +asmlinkage __visible void smp_reboot_interrupt(void); +__visible void smp_reschedule_interrupt(struct pt_regs *regs); +__visible void smp_call_function_interrupt(struct pt_regs *regs); +__visible void smp_call_function_single_interrupt(struct pt_regs *r); + #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 93b462e48067..dec9c1e84c78 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -290,11 +290,4 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) -#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" -#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" -#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" -#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" -#define SVM_STGI ".byte 0x0f, 0x01, 0xdc" -#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" - #endif diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 82b73b75d67c..e0eccbcb8447 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -140,14 +140,6 @@ struct thread_info { _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) -/* work to do on any return to user space */ -#define _TIF_ALLWORK_MASK 
\ - (_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ - _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \ - _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \ - _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_FSCHECK) - /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW_BASE \ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \ diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h index 69615e387973..e0e6d7f21399 100644 --- a/arch/x86/include/asm/trace/exceptions.h +++ b/arch/x86/include/asm/trace/exceptions.h @@ -45,6 +45,7 @@ DEFINE_PAGE_FAULT_EVENT(page_fault_user); DEFINE_PAGE_FAULT_EVENT(page_fault_kernel); #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE exceptions #endif /* _TRACE_PAGE_FAULT_H */ diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h index 2e6245a023ef..ace464f09681 100644 --- a/arch/x86/include/asm/trace/hyperv.h +++ b/arch/x86/include/asm/trace/hyperv.h @@ -42,6 +42,20 @@ TRACE_EVENT(hyperv_nested_flush_guest_mapping, TP_printk("address space %llx ret %d", __entry->as, __entry->ret) ); +TRACE_EVENT(hyperv_nested_flush_guest_mapping_range, + TP_PROTO(u64 as, int ret), + TP_ARGS(as, ret), + + TP_STRUCT__entry( + __field(u64, as) + __field(int, ret) + ), + TP_fast_assign(__entry->as = as; + __entry->ret = ret; + ), + TP_printk("address space %llx ret %d", __entry->as, __entry->ret) + ); + TRACE_EVENT(hyperv_send_ipi_mask, TP_PROTO(const struct cpumask *cpus, int vector), diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 0af81b590a0c..33b9d0f0aafe 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -389,6 +389,7 @@ TRACE_EVENT(vector_free_moved, #endif /* CONFIG_X86_LOCAL_APIC */ #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . 
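Both trace headers above gain an #undef TRACE_INCLUDE_FILE because trace/define_trace.h re-reads that macro on every inclusion, and a stale value left behind by an earlier trace header in the same translation unit would point it at the wrong file. A hypothetical, self-contained illustration of the underlying stringify-a-parameter-macro pattern (the names mimic the tracing machinery, not its internals):

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* First "trace header": sets the parameter and uses it... */
#define TRACE_INCLUDE_FILE exceptions
static const char *first = __stringify(TRACE_INCLUDE_FILE) ".h";
/* ...and must clean up after itself, or the #define below would be a
 * macro redefinition and the stale name could already have been baked
 * into whatever the template header generated. */
#undef TRACE_INCLUDE_FILE

/* Second "trace header" in the same translation unit. */
#define TRACE_INCLUDE_FILE irq_vectors
static const char *second = __stringify(TRACE_INCLUDE_FILE) ".h";
#undef TRACE_INCLUDE_FILE

int main(void)
{
	printf("%s, then %s\n", first, second);	/* exceptions.h, then irq_vectors.h */
	return 0;
}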
#define TRACE_INCLUDE_FILE irq_vectors #endif /* _TRACE_IRQ_VECTORS_H */ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 3de69330e6c5..7d6f3f3fad78 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -61,34 +61,38 @@ asmlinkage void xen_machine_check(void); asmlinkage void xen_simd_coprocessor_error(void); #endif -dotraplinkage void do_divide_error(struct pt_regs *, long); -dotraplinkage void do_debug(struct pt_regs *, long); -dotraplinkage void do_nmi(struct pt_regs *, long); -dotraplinkage void do_int3(struct pt_regs *, long); -dotraplinkage void do_overflow(struct pt_regs *, long); -dotraplinkage void do_bounds(struct pt_regs *, long); -dotraplinkage void do_invalid_op(struct pt_regs *, long); -dotraplinkage void do_device_not_available(struct pt_regs *, long); -dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); -dotraplinkage void do_invalid_TSS(struct pt_regs *, long); -dotraplinkage void do_segment_not_present(struct pt_regs *, long); -dotraplinkage void do_stack_segment(struct pt_regs *, long); +dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code); +dotraplinkage void do_debug(struct pt_regs *regs, long error_code); +dotraplinkage void do_nmi(struct pt_regs *regs, long error_code); +dotraplinkage void do_int3(struct pt_regs *regs, long error_code); +dotraplinkage void do_overflow(struct pt_regs *regs, long error_code); +dotraplinkage void do_bounds(struct pt_regs *regs, long error_code); +dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code); +dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code); +dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code); +dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code); +dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code); +dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_64 -dotraplinkage void do_double_fault(struct pt_regs *, long); +dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code); +asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); +asmlinkage __visible notrace +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s); +void __init trap_init(void); #endif -dotraplinkage void do_general_protection(struct pt_regs *, long); -dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); -dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long); -dotraplinkage void do_coprocessor_error(struct pt_regs *, long); -dotraplinkage void do_alignment_check(struct pt_regs *, long); +dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code); +dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code); +dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code); +dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code); +dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_MCE -dotraplinkage void do_machine_check(struct pt_regs *, long); +dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code); #endif -dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); +dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_32 -dotraplinkage void do_iret_error(struct pt_regs *, long); +dotraplinkage void 
do_iret_error(struct pt_regs *regs, long error_code); #endif -dotraplinkage void do_mce(struct pt_regs *, long); +dotraplinkage void do_mce(struct pt_regs *regs, long error_code); static inline int get_si_code(unsigned long condition) { @@ -104,11 +108,16 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 -asmlinkage void smp_thermal_interrupt(void); -asmlinkage void smp_threshold_interrupt(void); -asmlinkage void smp_deferred_error_interrupt(void); +asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); +asmlinkage void smp_threshold_interrupt(struct pt_regs *regs); +asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs); #endif +void smp_apic_timer_interrupt(struct pt_regs *regs); +void smp_spurious_interrupt(struct pt_regs *regs); +void smp_error_interrupt(struct pt_regs *regs); +asmlinkage void smp_irq_move_cleanup_interrupt(void); + extern void ist_enter(struct pt_regs *regs); extern void ist_exit(struct pt_regs *regs); extern void ist_begin_non_atomic(struct pt_regs *regs); diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index eb5bbfeccb66..8a0c25c6bf09 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -35,6 +35,7 @@ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); extern void tsc_early_init(void); extern void tsc_init(void); +extern unsigned long calibrate_delay_is_known(void); extern void mark_tsc_unstable(char *reason); extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index ade0f153947d..4e4133e86484 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -77,7 +77,10 @@ #define SECONDARY_EXEC_ENCLS_EXITING 0x00008000 #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 #define SECONDARY_EXEC_ENABLE_PML 0x00020000 +#define SECONDARY_EXEC_PT_CONCEAL_VMX 0x00080000 #define SECONDARY_EXEC_XSAVES 0x00100000 +#define SECONDARY_EXEC_PT_USE_GPA 0x01000000 +#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC 0x00400000 #define SECONDARY_EXEC_TSC_SCALING 0x02000000 #define PIN_BASED_EXT_INTR_MASK 0x00000001 @@ -98,6 +101,8 @@ #define VM_EXIT_LOAD_IA32_EFER 0x00200000 #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 #define VM_EXIT_CLEAR_BNDCFGS 0x00800000 +#define VM_EXIT_PT_CONCEAL_PIP 0x01000000 +#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff @@ -109,6 +114,8 @@ #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 #define VM_ENTRY_LOAD_BNDCFGS 0x00010000 +#define VM_ENTRY_PT_CONCEAL_PIP 0x00020000 +#define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff @@ -240,6 +247,8 @@ enum vmcs_field { GUEST_PDPTR3_HIGH = 0x00002811, GUEST_BNDCFGS = 0x00002812, GUEST_BNDCFGS_HIGH = 0x00002813, + GUEST_IA32_RTIT_CTL = 0x00002814, + GUEST_IA32_RTIT_CTL_HIGH = 0x00002815, HOST_IA32_PAT = 0x00002c00, HOST_IA32_PAT_HIGH = 0x00002c01, HOST_IA32_EFER = 0x00002c02, diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 06635fbca81c..2624de16cd7a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic); /** * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base * has been registered - * @handle: ACPI handle of the IOAPIC deivce + * @handle: ACPI handle of the IOAPIC device * @gsi_base: GSI base associated with the IOAPIC * * 
Assume caller holds some type of lock to serialize acpi_ioapic_registered() diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 3f9d1b4019bb..e0ff3ac8c127 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -50,8 +50,6 @@ static unsigned long iommu_pages; /* .. and in pages */ static u32 *iommu_gatt_base; /* Remapping table */ -static dma_addr_t bad_dma_addr; - /* * If this is disabled the IOMMU will use an optimized flushing strategy * of only flushing when an mapping is reused. With it true the GART is @@ -74,8 +72,6 @@ static u32 gart_unmapped_entry; (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) -#define EMERGENCY_PAGES 32 /* = 128KB */ - #ifdef CONFIG_AGP #define AGPEXTERN extern #else @@ -155,9 +151,6 @@ static void flush_gart(void) #ifdef CONFIG_IOMMU_LEAK /* Debugging aid for drivers that don't free their IOMMU tables */ -static int leak_trace; -static int iommu_leak_pages = 20; - static void dump_leak(void) { static int dump; @@ -184,14 +177,6 @@ static void iommu_full(struct device *dev, size_t size, int dir) */ dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); - - if (size > PAGE_SIZE*EMERGENCY_PAGES) { - if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) - panic("PCI-DMA: Memory would be corrupted\n"); - if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) - panic(KERN_ERR - "PCI-DMA: Random memory would be DMAed\n"); - } #ifdef CONFIG_IOMMU_LEAK dump_leak(); #endif @@ -220,7 +205,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, int i; if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) - return bad_dma_addr; + return DMA_MAPPING_ERROR; iommu_page = alloc_iommu(dev, npages, align_mask); if (iommu_page == -1) { @@ -229,7 +214,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, if (panic_on_overflow) panic("dma_map_area overflow %lu bytes\n", size); iommu_full(dev, size, dir); - return bad_dma_addr; + return DMA_MAPPING_ERROR; } for (i = 0; i < npages; i++) { @@ -271,7 +256,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, int npages; int i; - if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || + if (dma_addr == DMA_MAPPING_ERROR || dma_addr >= iommu_bus_base + iommu_size) return; @@ -315,7 +300,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, if (nonforced_iommu(dev, addr, s->length)) { addr = dma_map_area(dev, addr, s->length, dir, 0); - if (addr == bad_dma_addr) { + if (addr == DMA_MAPPING_ERROR) { if (i > 0) gart_unmap_sg(dev, sg, i, dir, 0); nents = 0; @@ -471,7 +456,7 @@ error: iommu_full(dev, pages << PAGE_SHIFT, dir); for_each_sg(sg, s, nents, i) - s->dma_address = bad_dma_addr; + s->dma_address = DMA_MAPPING_ERROR; return 0; } @@ -490,7 +475,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size, DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1); flush_gart(); - if (unlikely(*dma_addr == bad_dma_addr)) + if (unlikely(*dma_addr == DMA_MAPPING_ERROR)) goto out_free; return vaddr; out_free: @@ -507,11 +492,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs); } -static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return (dma_addr == bad_dma_addr); -} - static int no_agp; static __init unsigned 
long check_iommu_size(unsigned long aper, u64 aper_size) @@ -695,7 +675,6 @@ static const struct dma_map_ops gart_dma_ops = { .unmap_page = gart_unmap_page, .alloc = gart_alloc_coherent, .free = gart_free_coherent, - .mapping_error = gart_mapping_error, .dma_supported = dma_direct_supported, }; @@ -730,7 +709,6 @@ int __init gart_iommu_init(void) unsigned long aper_base, aper_size; unsigned long start_pfn, end_pfn; unsigned long scratch; - long i; if (!amd_nb_has_feature(AMD_NB_GART)) return 0; @@ -774,29 +752,12 @@ int __init gart_iommu_init(void) if (!iommu_gart_bitmap) panic("Cannot allocate iommu bitmap\n"); -#ifdef CONFIG_IOMMU_LEAK - if (leak_trace) { - int ret; - - ret = dma_debug_resize_entries(iommu_pages); - if (ret) - pr_debug("PCI-DMA: Cannot trace all the entries\n"); - } -#endif - - /* - * Out of IOMMU space handling. - * Reserve some invalid pages at the beginning of the GART. - */ - bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES); - pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", iommu_size >> 20); agp_memory_reserved = iommu_size; iommu_start = aper_size - iommu_size; iommu_bus_base = info.aper_base + iommu_start; - bad_dma_addr = iommu_bus_base; iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); /* @@ -838,8 +799,6 @@ int __init gart_iommu_init(void) if (!scratch) panic("Cannot allocate iommu scratch page"); gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); - for (i = EMERGENCY_PAGES; i < iommu_pages; i++) - iommu_gatt_base[i] = gart_unmapped_entry; flush_gart(); dma_ops = &gart_dma_ops; @@ -853,16 +812,6 @@ void __init gart_parse_options(char *p) { int arg; -#ifdef CONFIG_IOMMU_LEAK - if (!strncmp(p, "leak", 4)) { - leak_trace = 1; - p += 4; - if (*p == '=') - ++p; - if (isdigit(*p) && get_option(&p, &arg)) - iommu_leak_pages = arg; - } -#endif if (isdigit(*p) && get_option(&p, &arg)) iommu_size = arg; if (!strncmp(p, "fullflush", 9)) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index a6eca647bc76..cc51275c8759 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -11,14 +11,15 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/spinlock.h> +#include <linux/pci_ids.h> #include <asm/amd_nb.h> #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 -#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 -#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 /* Protect the PCI config register pairs used for SMN and DF indirect access. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -28,9 +29,11 @@ static u32 *flush_words; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, {} }; + #define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704 const struct pci_device_id amd_nb_misc_ids[] = { @@ -44,6 +47,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, {} }; @@ -57,6 +61,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; @@ -214,7 +219,10 @@ int amd_cache_northbridges(void) const struct pci_device_id *root_ids = amd_root_ids; struct pci_dev *root, *misc, *link; struct amd_northbridge *nb; - u16 i = 0; + u16 roots_per_misc = 0; + u16 misc_count = 0; + u16 root_count = 0; + u16 i, j; if (amd_northbridges.num) return 0; @@ -227,26 +235,55 @@ int amd_cache_northbridges(void) misc = NULL; while ((misc = next_northbridge(misc, misc_ids)) != NULL) - i++; + misc_count++; - if (!i) + if (!misc_count) return -ENODEV; - nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL); + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (root_count) { + roots_per_misc = root_count / misc_count; + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface. + */ + if (!roots_per_misc || (root_count % roots_per_misc)) { + pr_info("Unsupported AMD DF/PCI configuration found\n"); + return -ENODEV; + } + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); if (!nb) return -ENOMEM; amd_northbridges.nb = nb; - amd_northbridges.num = i; + amd_northbridges.num = misc_count; link = misc = root = NULL; - for (i = 0; i != amd_northbridges.num; i++) { + for (i = 0; i < amd_northbridges.num; i++) { node_to_amd_nb(i)->root = root = next_northbridge(root, root_ids); node_to_amd_nb(i)->misc = misc = next_northbridge(misc, misc_ids); node_to_amd_nb(i)->link = link = next_northbridge(link, link_ids); + + /* + * If there are more PCI root devices than data fabric/ + * system management network interfaces, then the (N) + * PCI roots per DF/SMN interface are functionally the + * same (for DF/SMN access) and N-1 are redundant. N-1 + * PCI roots should be skipped per DF/SMN interface so + * the following DF/SMN interfaces get mapped to + * correct PCI roots. 
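The reworked amd_cache_northbridges() above counts root and DF/SMN ("misc") devices separately because multi-die parts expose several PCI roots per DF/SMN interface, and all but one of them are redundant for SMN access. A toy version of that pairing arithmetic, with made-up device counts:

#include <stdio.h>

int main(void)
{
	/* Hypothetical enumeration result: 8 PCI roots front 4 DF/SMN
	 * interfaces, so each interface has 1 useful + 1 redundant root. */
	int root_count = 8, misc_count = 4;
	int roots_per_misc, i, root = 0;

	roots_per_misc = root_count / misc_count;
	if (!roots_per_misc || (root_count % roots_per_misc)) {
		puts("Unsupported AMD DF/PCI configuration");
		return 1;
	}

	for (i = 0; i < misc_count; i++) {
		printf("DF/SMN interface %d uses PCI root %d\n", i, root);
		root += roots_per_misc;	/* skip the N-1 redundant roots */
	}
	return 0;
}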
+ */ + for (j = 1; j < roots_per_misc; j++) + root = next_northbridge(root, root_ids); } if (amd_gart_present()) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 2c4d5ece7456..58176b56354e 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -264,18 +264,23 @@ static int __init parse_gart_mem(char *p) } early_param("gart_fix_e820", parse_gart_mem); +/* + * With kexec/kdump, if the first kernel doesn't shut down the GART and the + * second kernel allocates a different GART region, there might be two + * overlapping GART regions present: + * + * - the first still used by the GART initialized in the first kernel. + * - (sub-)set of it used as normal RAM by the second kernel. + * + * which leads to memory corruptions and a kernel panic eventually. + * + * This can also happen if the BIOS has forgotten to mark the GART region + * as reserved. + * + * Try to update the e820 map to mark that new region as reserved. + */ void __init early_gart_iommu_check(void) { - /* - * in case it is enabled before, esp for kexec/kdump, - * previous kernel already enable that. memset called - * by allocate_aperture/__alloc_bootmem_nopanic cause restart. - * or second kernel have different position for GART hole. and new - * kernel could use hole as RAM that is still used by GART set by - * first kernel - * or BIOS forget to put that in reserved. - * try to update e820 to make that region as reserved. - */ u32 agp_aper_order = 0; int i, fix, slot, valid_agp = 0; u32 ctl; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 32b2b7a41ef5..b7bcdd781651 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -44,6 +44,7 @@ #include <asm/mpspec.h> #include <asm/i8259.h> #include <asm/proto.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index e84c9eb4e5b4..0005c284a5c5 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -8,6 +8,7 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. 
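The amd_gart_64.c hunks above follow the same series-wide pattern as the other IOMMU conversions: rather than reserving "emergency" GART pages purely to have an address that can mean "failed", plus a ->mapping_error callback to decode it, the driver now returns the shared DMA_MAPPING_ERROR constant (all ones) directly. A condensed sketch of the resulting contract, using a toy mapper rather than the real DMA API:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

/* Toy mapper: refuses anything above a pretend aperture limit. */
static dma_addr_t map_single(uint64_t phys, uint64_t size)
{
	const uint64_t aperture_end = 1ULL << 32;

	if (phys + size > aperture_end)
		return DMA_MAPPING_ERROR;	/* no sentinel pages needed */
	return phys;
}

int main(void)
{
	dma_addr_t ok  = map_single(0x1000, 0x1000);
	dma_addr_t bad = map_single(1ULL << 33, 0x1000);

	/* Callers compare against the one shared constant... */
	printf("ok:  %s\n", ok == DMA_MAPPING_ERROR ? "error" : "mapped");
	/* ...so no per-driver ->mapping_error hook is required. */
	printf("bad: %s\n", bad == DMA_MAPPING_ERROR ? "error" : "mapped");
	return 0;
}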
*/ +#include <linux/acpi.h> #include <linux/errno.h> #include <linux/threads.h> #include <linux/cpumask.h> @@ -16,13 +17,13 @@ #include <linux/ctype.h> #include <linux/hardirq.h> #include <linux/export.h> + #include <asm/smp.h> -#include <asm/apic.h> #include <asm/ipi.h> +#include <asm/apic.h> +#include <asm/apic_flat_64.h> #include <asm/jailhouse_para.h> -#include <linux/acpi.h> - static struct apic apic_physflat; static struct apic apic_flat; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 652e7ffa9b9d..3173e07d3791 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <asm/irqdomain.h> #include <asm/hw_irq.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/i8259.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 391f358ebb4c..a555da094157 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1079,7 +1079,7 @@ late_initcall(uv_init_heartbeat); #endif /* !CONFIG_HOTPLUG_CPU */ /* Direct Legacy VGA I/O traffic to designated IOH */ -int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) +static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) { int domain, bus, rc; @@ -1148,7 +1148,7 @@ static void get_mn(struct mn *mnp) mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0; } -void __init uv_init_hub_info(struct uv_hub_info_s *hi) +static void __init uv_init_hub_info(struct uv_hub_info_s *hi) { union uvh_node_id_u node_id; struct mn mn; diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 72adf6c335dc..168543d077d7 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -29,7 +29,8 @@ # include "asm-offsets_64.c" #endif -void common(void) { +static void __used common(void) +{ BLANK(); OFFSET(TASK_threadsp, task_struct, thread.sp); #ifdef CONFIG_STACKPROTECTOR diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 1979a76bfadd..5136e6818da8 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -9,6 +9,7 @@ #include <linux/memblock.h> #include <asm/proto.h> +#include <asm/setup.h> /* * Some BIOSes seem to corrupt the low 64k of memory during events @@ -136,7 +137,7 @@ void __init setup_bios_corruption_check(void) } -void check_for_bios_corruption(void) +static void check_for_bios_corruption(void) { int i; int corruption = 0; diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 1f5d2291c31e..ac78f90aea56 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o -obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o -obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o -CFLAGS_intel_rdt_pseudo_lock.o = -I$(src) - -obj-$(CONFIG_X86_MCE) += mcheck/ +obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ +obj-$(CONFIG_RESCTRL) += resctrl/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index eeea634bee0a..69f6bbb41be0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -15,6 +15,7 @@ #include <asm/smp.h> #include <asm/pci-direct.h> #include 
<asm/delay.h> +#include <asm/debugreg.h> #ifdef CONFIG_X86_64 # include <asm/mmconfig.h> diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 7eba34df54c3..804c49493938 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -12,6 +12,7 @@ #include <linux/ktime.h> #include <linux/math64.h> #include <linux/percpu.h> +#include <linux/cpufreq.h> #include <linux/smp.h> #include "cpu.h" diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 500278f5308e..8654b8b0c848 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -32,6 +32,8 @@ #include <asm/e820/api.h> #include <asm/hypervisor.h> +#include "cpu.h" + static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); @@ -54,7 +56,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; -/* Control conditional STIPB in switch_to() */ +/* Control conditional STIBP in switch_to() */ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); /* Control conditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); @@ -262,10 +264,11 @@ enum spectre_v2_user_cmd { }; static const char * const spectre_v2_user_strings[] = { - [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", - [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", - [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", - [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", + [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", + [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", + [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", + [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", + [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", }; static const struct { @@ -355,6 +358,15 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) break; } + /* + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. + */ + if (mode != SPECTRE_V2_USER_STRICT && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + /* Initialize Indirect Branch Prediction Barrier */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); @@ -379,12 +391,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) "always-on" : "conditional"); } - /* If enhanced IBRS is enabled no STIPB required */ + /* If enhanced IBRS is enabled no STIBP required */ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return; /* - * If SMT is not possible or STIBP is not available clear the STIPB + * If SMT is not possible or STIBP is not available clear the STIBP * mode. */ if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) @@ -610,6 +622,7 @@ void arch_smt_update(void) case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: + case SPECTRE_V2_USER_STRICT_PREFERRED: update_stibp_strict(); break; case SPECTRE_V2_USER_PRCTL: @@ -812,7 +825,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) * Indirect branch speculation is always disabled in strict * mode. 
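The new SPECTRE_V2_USER_STRICT_PREFERRED mode above deliberately behaves exactly like STRICT everywhere except in the reporting strings, which is why each switch statement simply grows a second case label. A condensed model of the prctl side of that policy (the enum values and return codes mirror the hunk; the helper name and the surrounding task-flag plumbing are invented for illustration):

#include <stdio.h>
#include <errno.h>

enum spectre_v2_user {
	USER_NONE,
	USER_STRICT,
	USER_STRICT_PREFERRED,	/* CPU advertises STIBP always-on as preferred */
	USER_PRCTL,
	USER_SECCOMP,
};

/* May a task re-enable indirect branch speculation for itself? */
static int ib_prctl_enable(enum spectre_v2_user mode)
{
	switch (mode) {
	case USER_NONE:
		return 0;		/* nothing was disabled to begin with */
	case USER_STRICT:
	case USER_STRICT_PREFERRED:
		return -EPERM;		/* always disabled, not negotiable */
	default:
		return 0;		/* per-task: clear the disable bit */
	}
}

int main(void)
{
	printf("strict-preferred: %d\n", ib_prctl_enable(USER_STRICT_PREFERRED));
	printf("prctl:            %d\n", ib_prctl_enable(USER_PRCTL));
	return 0;
}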
*/ - if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + if (spectre_v2_user == SPECTRE_V2_USER_STRICT || + spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); @@ -825,7 +839,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) */ if (spectre_v2_user == SPECTRE_V2_USER_NONE) return -EPERM; - if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + if (spectre_v2_user == SPECTRE_V2_USER_STRICT || + spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) return 0; task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) @@ -896,6 +911,7 @@ static int ib_prctl_get(struct task_struct *task) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; case SPECTRE_V2_USER_STRICT: + case SPECTRE_V2_USER_STRICT_PREFERRED: return PR_SPEC_DISABLE; default: return PR_SPEC_NOT_AFFECTED; @@ -1002,7 +1018,8 @@ static void __init l1tf_select_mitigation(void) #endif half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; - if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + if (l1tf_mitigation != L1TF_MITIGATION_OFF && + e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); @@ -1088,6 +1105,8 @@ static char *stibp_state(void) return ", STIBP: disabled"; case SPECTRE_V2_USER_STRICT: return ", STIBP: forced"; + case SPECTRE_V2_USER_STRICT_PREFERRED: + return ", STIBP: always-on"; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: if (static_key_enabled(&switch_to_cond_stibp)) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index dc1b9342e9c4..c4d1023fb0ab 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -17,6 +17,7 @@ #include <linux/pci.h> #include <asm/cpufeature.h> +#include <asm/cacheinfo.h> #include <asm/amd_nb.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ffb181f959d2..cb28e98a0659 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -353,7 +353,7 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c) cr4_set_bits(X86_CR4_UMIP); - pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n"); + pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n"); return; diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index da5446acc241..5eb946b9a9f3 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -49,9 +49,6 @@ extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); -extern u32 get_scattered_cpuid_leaf(unsigned int level, - unsigned int sub_leaf, - enum cpuid_regs_idx reg); extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c); diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mce/Makefile index bcc7c54c7041..9f020c994154 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mce/Makefile @@ -1,14 +1,16 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y = mce.o mce-severity.o mce-genpool.o +obj-y = core.o severity.o 
genpool.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o -obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o -obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o +obj-$(CONFIG_X86_MCE_INTEL) += intel.o +obj-$(CONFIG_X86_MCE_AMD) += amd.o obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o + +mce-inject-y := inject.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o -obj-$(CONFIG_ACPI_APEI) += mce-apei.o +obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mce/amd.c index e12454e21b8a..89298c83de53 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -23,12 +23,13 @@ #include <linux/string.h> #include <asm/amd_nb.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/trace/irq_vectors.h> -#include "mce-internal.h" +#include "internal.h" #define NR_BLOCKS 5 #define THRESHOLD_MAX 0xFFF @@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } }; -const char *smca_get_name(enum smca_bank_types t) +static const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; @@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) mce_log(&m); } -asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) { entering_irq(); trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mce/apei.c index 2eee85379689..1d9b3ce662a0 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -36,7 +36,7 @@ #include <acpi/ghes.h> #include <asm/mce.h> -#include "mce-internal.h" +#include "internal.h" void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mce/core.c index 36d2696c9563..672c7225cb1b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -8,8 +8,6 @@ * Author: Andi Kleen */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/thread_info.h> #include <linux/capability.h> #include <linux/miscdevice.h> @@ -52,7 +50,7 @@ #include <asm/msr.h> #include <asm/reboot.h> -#include "mce-internal.h" +#include "internal.h" static DEFINE_MUTEX(mce_log_mutex); @@ -686,7 +684,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count); * errors here. However this would be quite problematic -- * we would need to reimplement the Monarch handling and * it would mess up the exclusion between exception handler - * and poll hander -- * so we skip this for now. + * and poll handler -- * so we skip this for now. * These cases should not happen anyways, or only when the CPU * is already totally * confused. In this case it's likely it will * not fully execute the machine check handler either. 
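Part of the mcheck/ to mce/ move is dropping the per-file '#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt' lines in favour of one "mce: " prefix defined centrally in internal.h. The convention is easy to model in user space (a sketch; fprintf stands in for printk):

#include <stdio.h>

/* What mce/internal.h now does once for every file that includes it. */
#define pr_fmt(fmt) "mce: " fmt

/* Simplified stand-ins for the kernel's printk wrappers, which expand
 * pr_fmt(fmt) at each call site. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("polling bank %d\n", 4);	/* -> "mce: polling bank 4" */
	pr_err("unexpected thermal interrupt\n");
	return 0;
}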
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index 27f394ac983f..9690ec5c8051 100644 --- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -8,14 +8,12 @@ * Author: Andi Kleen */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/poll.h> -#include "mce-internal.h" +#include "internal.h" static BLOCKING_NOTIFIER_HEAD(mce_injector_chain); diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mce/genpool.c index 217cd4449bc9..3395549c51d3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c +++ b/arch/x86/kernel/cpu/mce/genpool.c @@ -10,7 +10,7 @@ #include <linux/mm.h> #include <linux/genalloc.h> #include <linux/llist.h> -#include "mce-internal.h" +#include "internal.h" /* * printk() is not safe in MCE context. This is a lock-less memory allocator diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mce/inject.c index 1fc424c40a31..8492ef7d9015 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -38,7 +38,7 @@ #include <asm/nmi.h> #include <asm/smp.h> -#include "mce-internal.h" +#include "internal.h" /* * Collect all the MCi_XXX settings diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mce/intel.c index d05be307d081..e43eb6732630 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -18,7 +18,7 @@ #include <asm/msr.h> #include <asm/mce.h> -#include "mce-internal.h" +#include "internal.h" /* * Support for Intel Correct Machine Check Interrupts. This allows diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mce/internal.h index ceb67cd5918f..af5eab1e65e2 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -2,6 +2,9 @@ #ifndef __X86_MCE_INTERNAL_H__ #define __X86_MCE_INTERNAL_H__ +#undef pr_fmt +#define pr_fmt(fmt) "mce: " fmt + #include <linux/device.h> #include <asm/mce.h> diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mce/p5.c index 5cddf831720f..4ae6df556526 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mce/p5.c @@ -14,6 +14,8 @@ #include <asm/mce.h> #include <asm/msr.h> +#include "internal.h" + /* By default disabled */ int mce_p5_enabled __read_mostly; diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mce/severity.c index 44396d521987..dc3e26e905a3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -16,7 +16,7 @@ #include <asm/mce.h> #include <linux/uaccess.h> -#include "mce-internal.h" +#include "internal.h" /* * Grade an mce by severity. 
In general the most severe ones are processed diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index 2da67b70ba98..10a3b0599300 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -25,11 +25,14 @@ #include <linux/cpu.h> #include <asm/processor.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/trace/irq_vectors.h> +#include "internal.h" + /* How long to wait between reporting thermal events */ #define CHECK_INTERVAL (300 * HZ) @@ -390,7 +393,7 @@ static void unexpected_thermal_interrupt(void) static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; -asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r) +asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs) { entering_irq(); trace_thermal_apic_entry(THERMAL_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c index 2b584b319eff..28812cc15300 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mce/threshold.c @@ -6,10 +6,13 @@ #include <linux/kernel.h> #include <asm/irq_vectors.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/trace/irq_vectors.h> +#include "internal.h" + static void default_threshold_interrupt(void) { pr_err("Unexpected threshold interrupt at vector %x\n", @@ -18,7 +21,7 @@ static void default_threshold_interrupt(void) void (*mce_threshold_vector)(void) = default_threshold_interrupt; -asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs) { entering_irq(); trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c index 3b45b270a865..a30ea13cccc2 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mce/winchip.c @@ -13,6 +13,8 @@ #include <asm/mce.h> #include <asm/msr.h> +#include "internal.h" + /* Machine check handler for WinChip C6: */ static void winchip_machine_check(struct pt_regs *regs, long error_code) { diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 07b5fc00b188..51adde0a0f1a 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -5,7 +5,7 @@ * CPUs and later. * * Copyright (C) 2008-2011 Advanced Micro Devices Inc. 
- * 2013-2016 Borislav Petkov <bp@alien8.de> + * 2013-2018 Borislav Petkov <bp@alien8.de> * * Author: Peter Oruba <peter.oruba@amd.com> * @@ -38,7 +38,10 @@ #include <asm/cpu.h> #include <asm/msr.h> -static struct equiv_cpu_entry *equiv_cpu_table; +static struct equiv_cpu_table { + unsigned int num_entries; + struct equiv_cpu_entry *entry; +} equiv_table; /* * This points to the current valid container of microcode patches which we will @@ -63,13 +66,225 @@ static u8 amd_ucode_patch[PATCH_MAX_SIZE]; static const char ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; -static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig) +static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) { - for (; equiv_table && equiv_table->installed_cpu; equiv_table++) { - if (sig == equiv_table->installed_cpu) - return equiv_table->equiv_cpu; + unsigned int i; + + if (!et || !et->num_entries) + return 0; + + for (i = 0; i < et->num_entries; i++) { + struct equiv_cpu_entry *e = &et->entry[i]; + + if (sig == e->installed_cpu) + return e->equiv_cpu; + + e++; + } + return 0; +} + +/* + * Check whether there is a valid microcode container file at the beginning + * of @buf of size @buf_size. Set @early to use this function in the early path. + */ +static bool verify_container(const u8 *buf, size_t buf_size, bool early) +{ + u32 cont_magic; + + if (buf_size <= CONTAINER_HDR_SZ) { + if (!early) + pr_debug("Truncated microcode container header.\n"); + + return false; + } + + cont_magic = *(const u32 *)buf; + if (cont_magic != UCODE_MAGIC) { + if (!early) + pr_debug("Invalid magic value (0x%08x).\n", cont_magic); + + return false; + } + + return true; +} + +/* + * Check whether there is a valid, non-truncated CPU equivalence table at the + * beginning of @buf of size @buf_size. Set @early to use this function in the + * early path. + */ +static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +{ + const u32 *hdr = (const u32 *)buf; + u32 cont_type, equiv_tbl_len; + + if (!verify_container(buf, buf_size, early)) + return false; + + cont_type = hdr[1]; + if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { + if (!early) + pr_debug("Wrong microcode container equivalence table type: %u.\n", + cont_type); + + return false; + } + + buf_size -= CONTAINER_HDR_SZ; + + equiv_tbl_len = hdr[2]; + if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || + buf_size < equiv_tbl_len) { + if (!early) + pr_debug("Truncated equivalence table.\n"); + + return false; + } + + return true; +} + +/* + * Check whether there is a valid, non-truncated microcode patch section at the + * beginning of @buf of size @buf_size. Set @early to use this function in the + * early path. + * + * On success, @sh_psize returns the patch size according to the section header, + * to the caller. 
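The new verify_*() helpers above all share one shape: bounds-check before dereferencing, and stay silent when invoked from the early loader (early == true), where printk is not usable yet. A compact user-space model of the first check; the UCODE_MAGIC value used here is my assumption about the AMD container format rather than something stated in the hunk, and a little-endian host is assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UCODE_MAGIC		0x00414d44	/* assumed: "DMA\0" */
#define CONTAINER_HDR_SZ	12

static bool verify_container(const uint8_t *buf, size_t buf_size, bool early)
{
	uint32_t magic;

	if (buf_size <= CONTAINER_HDR_SZ) {	/* never read past the end */
		if (!early)
			fprintf(stderr, "Truncated container header.\n");
		return false;
	}

	memcpy(&magic, buf, sizeof(magic));	/* no unaligned dereference */
	if (magic != UCODE_MAGIC) {
		if (!early)
			fprintf(stderr, "Invalid magic value (0x%08x).\n", magic);
		return false;
	}
	return true;
}

int main(void)
{
	uint8_t good[32] = { 0x44, 0x4d, 0x41, 0x00 };	/* little-endian magic */

	printf("good:  %d\n", verify_container(good, sizeof(good), false));
	printf("short: %d\n", verify_container(good, 4, false));
	return 0;
}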
+ */ +static bool +__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +{ + u32 p_type, p_size; + const u32 *hdr; + + if (buf_size < SECTION_HDR_SIZE) { + if (!early) + pr_debug("Truncated patch section.\n"); + + return false; + } + + hdr = (const u32 *)buf; + p_type = hdr[0]; + p_size = hdr[1]; + + if (p_type != UCODE_UCODE_TYPE) { + if (!early) + pr_debug("Invalid type field (0x%x) in container file section header.\n", + p_type); + + return false; + } + + if (p_size < sizeof(struct microcode_header_amd)) { + if (!early) + pr_debug("Patch of size %u too short.\n", p_size); + + return false; + } + + *sh_psize = p_size; + + return true; +} + +/* + * Check whether the passed remaining file @buf_size is large enough to contain + * a patch of the indicated @sh_psize (and also whether this size does not + * exceed the per-family maximum). @sh_psize is the size read from the section + * header. + */ +static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size) +{ + u32 max_size; + + if (family >= 0x15) + return min_t(u32, sh_psize, buf_size); + +#define F1XH_MPB_MAX_SIZE 2048 +#define F14H_MPB_MAX_SIZE 1824 + + switch (family) { + case 0x10 ... 0x12: + max_size = F1XH_MPB_MAX_SIZE; + break; + case 0x14: + max_size = F14H_MPB_MAX_SIZE; + break; + default: + WARN(1, "%s: WTF family: 0x%x\n", __func__, family); + return 0; + break; + } + + if (sh_psize > min_t(u32, buf_size, max_size)) + return 0; + + return sh_psize; +} + +/* + * Verify the patch in @buf. + * + * Returns: + * negative: on error + * positive: patch is not for this family, skip it + * 0: success + */ +static int +verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +{ + struct microcode_header_amd *mc_hdr; + unsigned int ret; + u32 sh_psize; + u16 proc_id; + u8 patch_fam; + + if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + return -1; + + /* + * The section header length is not included in this indicated size + * but is present in the leftover file length so we need to subtract + * it before passing this value to the function below. + */ + buf_size -= SECTION_HDR_SIZE; + + /* + * Check if the remaining buffer is big enough to contain a patch of + * size sh_psize, as the section claims. + */ + if (buf_size < sh_psize) { + if (!early) + pr_debug("Patch of size %u truncated.\n", sh_psize); + + return -1; + } + + ret = __verify_patch_size(family, sh_psize, buf_size); + if (!ret) { + if (!early) + pr_debug("Per-family patch size mismatch.\n"); + return -1; + } + + *patch_size = sh_psize; + + mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); + if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { + if (!early) + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + return -1; } + proc_id = mc_hdr->processor_rev_id; + patch_fam = 0xf + (proc_id >> 12); + if (patch_fam != family) + return 1; + return 0; } @@ -80,26 +295,28 @@ static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig) * Returns the amount of bytes consumed while scanning. @desc contains all the * data we're going to use in later stages of the application. */ -static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc) +static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) { - struct equiv_cpu_entry *eq; - ssize_t orig_size = size; + struct equiv_cpu_table table; + size_t orig_size = size; u32 *hdr = (u32 *)ucode; u16 eq_id; u8 *buf; - /* Am I looking at an equivalence table header? 
*/ - if (hdr[0] != UCODE_MAGIC || - hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE || - hdr[2] == 0) - return CONTAINER_HDR_SZ; + if (!verify_equivalence_table(ucode, size, true)) + return 0; buf = ucode; - eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ); + table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ); + table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry); - /* Find the equivalence ID of our CPU in this table: */ - eq_id = find_equiv_id(eq, desc->cpuid_1_eax); + /* + * Find the equivalence ID of our CPU in this table. Even if this table + * doesn't contain a patch for the CPU, scan through the whole container + * so that it can be skipped in case there are other containers appended. + */ + eq_id = find_equiv_id(&table, desc->cpuid_1_eax); buf += hdr[2] + CONTAINER_HDR_SZ; size -= hdr[2] + CONTAINER_HDR_SZ; @@ -111,29 +328,29 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc) while (size > 0) { struct microcode_amd *mc; u32 patch_size; + int ret; + + ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + if (ret < 0) { + /* + * Patch verification failed, skip to the next + * container, if there's one: + */ + goto out; + } else if (ret > 0) { + goto skip; + } - hdr = (u32 *)buf; - - if (hdr[0] != UCODE_UCODE_TYPE) - break; - - /* Sanity-check patch size. */ - patch_size = hdr[1]; - if (patch_size > PATCH_MAX_SIZE) - break; - - /* Skip patch section header: */ - buf += SECTION_HDR_SIZE; - size -= SECTION_HDR_SIZE; - - mc = (struct microcode_amd *)buf; + mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE); if (eq_id == mc->hdr.processor_rev_id) { desc->psize = patch_size; desc->mc = mc; } - buf += patch_size; - size -= patch_size; +skip: + /* Skip patch section header too: */ + buf += patch_size + SECTION_HDR_SIZE; + size -= patch_size + SECTION_HDR_SIZE; } /* @@ -150,6 +367,7 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc) return 0; } +out: return orig_size - size; } @@ -159,15 +377,18 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc) */ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc) { - ssize_t rem = size; - - while (rem >= 0) { - ssize_t s = parse_container(ucode, rem, desc); + while (size) { + size_t s = parse_container(ucode, size, desc); if (!s) return; - ucode += s; - rem -= s; + /* catch wraparound */ + if (size >= s) { + ucode += s; + size -= s; + } else { + return; + } } } @@ -364,21 +585,7 @@ void reload_ucode_amd(void) static u16 __find_equiv_id(unsigned int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig); -} - -static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) -{ - int i = 0; - - BUG_ON(!equiv_cpu_table); - - while (equiv_cpu_table[i].equiv_cpu != 0) { - if (equiv_cpu == equiv_cpu_table[i].equiv_cpu) - return equiv_cpu_table[i].installed_cpu; - i++; - } - return 0; + return find_equiv_id(&equiv_table, uci->cpu_sig.sig); } /* @@ -461,43 +668,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) return 0; } -static unsigned int verify_patch_size(u8 family, u32 patch_size, - unsigned int size) -{ - u32 max_size; - -#define F1XH_MPB_MAX_SIZE 2048 -#define F14H_MPB_MAX_SIZE 1824 -#define F15H_MPB_MAX_SIZE 4096 -#define F16H_MPB_MAX_SIZE 3458 -#define F17H_MPB_MAX_SIZE 3200 - - switch (family) { - case 0x14: - max_size = F14H_MPB_MAX_SIZE; - break; - case 0x15: - max_size = F15H_MPB_MAX_SIZE; - break; - case 0x16: 
- max_size = F16H_MPB_MAX_SIZE; - break; - case 0x17: - max_size = F17H_MPB_MAX_SIZE; - break; - default: - max_size = F1XH_MPB_MAX_SIZE; - break; - } - - if (patch_size > min_t(u32, size, max_size)) { - pr_err("patch size mismatch\n"); - return 0; - } - - return patch_size; -} - static enum ucode_state apply_microcode_amd(int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -548,34 +718,34 @@ out: return ret; } -static int install_equiv_cpu_table(const u8 *buf) +static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { - unsigned int *ibuf = (unsigned int *)buf; - unsigned int type = ibuf[1]; - unsigned int size = ibuf[2]; + u32 equiv_tbl_len; + const u32 *hdr; - if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { - pr_err("empty section/" - "invalid type field in container file section header\n"); - return -EINVAL; - } + if (!verify_equivalence_table(buf, buf_size, false)) + return 0; + + hdr = (const u32 *)buf; + equiv_tbl_len = hdr[2]; - equiv_cpu_table = vmalloc(size); - if (!equiv_cpu_table) { + equiv_table.entry = vmalloc(equiv_tbl_len); + if (!equiv_table.entry) { pr_err("failed to allocate equivalent CPU table\n"); - return -ENOMEM; + return 0; } - memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); + memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len); + equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry); /* add header length */ - return size + CONTAINER_HDR_SZ; + return equiv_tbl_len + CONTAINER_HDR_SZ; } static void free_equiv_cpu_table(void) { - vfree(equiv_cpu_table); - equiv_cpu_table = NULL; + vfree(equiv_table.entry); + memset(&equiv_table, 0, sizeof(equiv_table)); } static void cleanup(void) @@ -585,47 +755,23 @@ static void cleanup(void) } /* - * We return the current size even if some of the checks failed so that + * Return a non-negative value even if some of the checks failed so that * we can skip over the next patch. If we return a negative value, we * signal a grave error like a memory allocation has failed and the * driver cannot continue functioning normally. In such cases, we tear * down everything we've used up so far and exit. 
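With install_equiv_cpu_table() above recording an explicit entry count, lookups no longer crawl until a zero terminator. A sketch of the counted-table lookup; struct equiv_cpu_entry is abbreviated to the two fields the hunk actually uses, and the signature values are invented:

#include <stdint.h>
#include <stdio.h>

struct equiv_cpu_entry {
	uint32_t installed_cpu;		/* CPUID signature */
	uint32_t equiv_cpu;		/* matching patch processor_rev_id */
};

struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
};

static uint16_t find_equiv_id(struct equiv_cpu_table *et, uint32_t sig)
{
	unsigned int i;

	if (!et || !et->num_entries)	/* no table loaded: no match */
		return 0;

	for (i = 0; i < et->num_entries; i++)
		if (sig == et->entry[i].installed_cpu)
			return et->entry[i].equiv_cpu;

	return 0;
}

int main(void)
{
	struct equiv_cpu_entry entries[] = {
		{ 0x00800f12, 0x8012 },	/* made-up signature/ID pairs */
		{ 0x00810f10, 0x8110 },
	};
	struct equiv_cpu_table t = { 2, entries };

	printf("equiv: %#x\n", (unsigned)find_equiv_id(&t, 0x00810f10));
	return 0;
}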
*/ -static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) +static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, + unsigned int *patch_size) { struct microcode_header_amd *mc_hdr; struct ucode_patch *patch; - unsigned int patch_size, crnt_size, ret; - u32 proc_fam; u16 proc_id; + int ret; - patch_size = *(u32 *)(fw + 4); - crnt_size = patch_size + SECTION_HDR_SIZE; - mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); - proc_id = mc_hdr->processor_rev_id; - - proc_fam = find_cpu_family_by_equiv_cpu(proc_id); - if (!proc_fam) { - pr_err("No patch family for equiv ID: 0x%04x\n", proc_id); - return crnt_size; - } - - /* check if patch is for the current family */ - proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); - if (proc_fam != family) - return crnt_size; - - if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", - mc_hdr->patch_id); - return crnt_size; - } - - ret = verify_patch_size(family, patch_size, leftover); - if (!ret) { - pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); - return crnt_size; - } + ret = verify_patch(family, fw, leftover, patch_size, false); + if (ret) + return ret; patch = kzalloc(sizeof(*patch), GFP_KERNEL); if (!patch) { @@ -633,13 +779,16 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) return -EINVAL; } - patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL); + patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL); if (!patch->data) { pr_err("Patch data allocation failure.\n"); kfree(patch); return -EINVAL; } + mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); + proc_id = mc_hdr->processor_rev_id; + INIT_LIST_HEAD(&patch->plist); patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; @@ -650,39 +799,38 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) /* ... and add to cache. 
*/ update_cache(patch); - return crnt_size; + return 0; } static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size) { - enum ucode_state ret = UCODE_ERROR; - unsigned int leftover; u8 *fw = (u8 *)data; - int crnt_size = 0; - int offset; + size_t offset; - offset = install_equiv_cpu_table(data); - if (offset < 0) { - pr_err("failed to create equivalent cpu table\n"); - return ret; - } - fw += offset; - leftover = size - offset; + offset = install_equiv_cpu_table(data, size); + if (!offset) + return UCODE_ERROR; + + fw += offset; + size -= offset; if (*(u32 *)fw != UCODE_UCODE_TYPE) { pr_err("invalid type field in container file section header\n"); free_equiv_cpu_table(); - return ret; + return UCODE_ERROR; } - while (leftover) { - crnt_size = verify_and_add_patch(family, fw, leftover); - if (crnt_size < 0) - return ret; + while (size > 0) { + unsigned int crnt_size = 0; + int ret; - fw += crnt_size; - leftover -= crnt_size; + ret = verify_and_add_patch(family, fw, size, &crnt_size); + if (ret < 0) + return UCODE_ERROR; + + fw += crnt_size + SECTION_HDR_SIZE; + size -= (crnt_size + SECTION_HDR_SIZE); } return UCODE_OK; @@ -761,10 +909,8 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, } ret = UCODE_ERROR; - if (*(u32 *)fw->data != UCODE_MAGIC) { - pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); + if (!verify_container(fw->data, fw->size, false)) goto fw_release; - } ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 2e173d47b450..4d36dcc1cf87 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) struct mtrr_gentry gentry; void __user *arg = (void __user *) __arg; + memset(&gentry, 0, sizeof(gentry)); + switch (cmd) { case MTRRIOC_ADD_ENTRY: case MTRRIOC_SET_ENTRY: diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile new file mode 100644 index 000000000000..6895049ceef7 --- /dev/null +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o +CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/resctrl/core.c index 44272b7107ad..c3a9dc63edf2 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -22,7 +22,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define pr_fmt(fmt) "resctrl: " fmt #include <linux/slab.h> #include <linux/err.h> @@ -30,22 +30,19 @@ #include <linux/cpuhotplug.h> #include <asm/intel-family.h> -#include <asm/intel_rdt_sched.h> -#include "intel_rdt.h" - -#define MBA_IS_LINEAR 0x4 -#define MBA_MAX_MBPS U32_MAX +#include <asm/resctrl_sched.h> +#include "internal.h" /* Mutex to protect rdtgroup access. */ DEFINE_MUTEX(rdtgroup_mutex); /* - * The cached intel_pqr_state is strictly per CPU and can never be + * The cached resctrl_pqr_state is strictly per CPU and can never be * updated from a remote CPU. Functions which modify the state * are called with interrupts disabled and no preemption, which * is sufficient for the protection. 
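The point of keeping that cached state per CPU is that the context-switch path can skip the expensive IA32_PQR_ASSOC write when the incoming task wants the IDs already live on the CPU. A stand-alone model of the pattern, with _Thread_local standing in for a per-CPU variable and a printf standing in for the MSR write; the cur_* fields mirror the real structure, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

struct pqr_state {
	uint32_t cur_rmid;          /* what the (modelled) MSR currently holds */
	uint32_t cur_closid;
};

/* Stand-in for a per-CPU variable; strictly one per CPU in the kernel. */
static _Thread_local struct pqr_state pqr_state;

static void wrmsr_pqr_assoc(uint32_t rmid, uint32_t closid)
{
	/* Model of the costly hardware write. */
	printf("PQR_ASSOC <- rmid=%u closid=%u\n", rmid, closid);
}

/* In the kernel this runs with interrupts off on the context-switch path. */
static void sched_in(uint32_t rmid, uint32_t closid)
{
	struct pqr_state *state = &pqr_state;

	/* Elide the MSR write when nothing changed - the common case. */
	if (state->cur_rmid == rmid && state->cur_closid == closid)
		return;

	state->cur_rmid = rmid;
	state->cur_closid = closid;
	wrmsr_pqr_assoc(rmid, closid);
}

int main(void)
{
	sched_in(1, 2);
	sched_in(1, 2);     /* no output: state unchanged, write skipped */
	sched_in(0, 0);
	return 0;
}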
*/ -DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); +DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); /* * Used to store the max resource name width and max resource data width @@ -60,9 +57,13 @@ int max_name_width, max_data_width; bool rdt_alloc_capable; static void -mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); +mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r); static void cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); +static void +mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r); #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains) @@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3, .name = "L3", .domains = domain_init(RDT_RESOURCE_L3), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3DATA, .name = "L3DATA", .domains = domain_init(RDT_RESOURCE_L3DATA), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3CODE, .name = "L3CODE", .domains = domain_init(RDT_RESOURCE_L3CODE), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2, .name = "L2", .domains = domain_init(RDT_RESOURCE_L2), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2DATA, .name = "L2DATA", .domains = domain_init(RDT_RESOURCE_L2DATA), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2CODE, .name = "L2CODE", .domains = domain_init(RDT_RESOURCE_L2CODE), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_MBA, .name = "MB", .domains = domain_init(RDT_RESOURCE_MBA), - .msr_base = IA32_MBA_THRTL_BASE, - .msr_update = mba_wrmsr, .cache_level = 3, - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void) struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; u32 l, h, max_cbm = BIT_MASK(20) - 1; - if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) return; - rdmsr(IA32_L3_CBM_BASE, l, h); + + rdmsr(MSR_IA32_L3_CBM_BASE, l, h); /* If all the bits were set in MSR, return success */ if (l != max_cbm) @@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r) return false; } -static bool rdt_get_mem_config(struct rdt_resource *r) +static bool __get_mem_config_intel(struct rdt_resource *r) { union cpuid_0x10_3_eax eax; union cpuid_0x10_x_edx edx; @@ -285,6 +284,30 @@ static bool rdt_get_mem_config(struct rdt_resource *r) return true; } +static bool __rdt_get_mem_config_amd(struct rdt_resource *r) +{ + union cpuid_0x10_3_eax eax; + union cpuid_0x10_x_edx edx; + u32 
ebx, ecx; + + cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full); + r->num_closid = edx.split.cos_max + 1; + r->default_ctrl = MAX_MBA_BW_AMD; + + /* AMD does not use delay */ + r->membw.delay_linear = false; + + r->membw.min_bw = 0; + r->membw.bw_gran = 1; + /* Max value is 2048, Data width should be 4 in decimal */ + r->data_width = 4; + + r->alloc_capable = true; + r->alloc_enabled = true; + + return true; +} + static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { union cpuid_0x10_1_eax eax; @@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level) return -1; } +static void +mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) +{ + unsigned int i; + + for (i = m->low; i < m->high; i++) + wrmsrl(r->msr_base + i, d->ctrl_val[i]); +} + /* * Map the memory b/w percentage value to delay values * that can be written to QOS_MSRs. @@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) } static void -mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) +mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r) { unsigned int i; @@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, struct list_head *l; if (id < 0) - return ERR_PTR(id); + return ERR_PTR(-ENODEV); list_for_each(l, &r->domains) { d = list_entry(l, struct rdt_domain, list); @@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) static void clear_closid_rmid(int cpu) { - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); state->default_closid = 0; state->default_rmid = 0; @@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu) wrmsr(IA32_PQR_ASSOC, 0, 0); } -static int intel_rdt_online_cpu(unsigned int cpu) +static int resctrl_online_cpu(unsigned int cpu) { struct rdt_resource *r; @@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) } } -static int intel_rdt_offline_cpu(unsigned int cpu) +static int resctrl_offline_cpu(unsigned int cpu) { struct rdtgroup *rdtgrp; struct rdt_resource *r; @@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag) return ret; } +static __init bool get_mem_config(void) +{ + if (!rdt_cpu_has(X86_FEATURE_MBA)) + return false; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]); + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]); + + return false; +} + static __init bool get_rdt_alloc_resources(void) { bool ret = false; @@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void) ret = true; } - if (rdt_cpu_has(X86_FEATURE_MBA)) { - if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA])) - ret = true; - } + if (get_mem_config()) + ret = true; + return ret; } @@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void) return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]); } -static __init void rdt_quirks(void) +static __init void __check_quirks_intel(void) { switch (boot_cpu_data.x86_model) { case INTEL_FAM6_HASWELL_X: @@ -855,30 +900,91 @@ static __init void rdt_quirks(void) } } +static __init void check_quirks(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + __check_quirks_intel(); +} + static __init bool get_rdt_resources(void) { - rdt_quirks(); rdt_alloc_capable = get_rdt_alloc_resources(); rdt_mon_capable = 
get_rdt_mon_resources(); return (rdt_mon_capable || rdt_alloc_capable); } +static __init void rdt_init_res_defs_intel(void) +{ + struct rdt_resource *r; + + for_each_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_L3 || + r->rid == RDT_RESOURCE_L3DATA || + r->rid == RDT_RESOURCE_L3CODE || + r->rid == RDT_RESOURCE_L2 || + r->rid == RDT_RESOURCE_L2DATA || + r->rid == RDT_RESOURCE_L2CODE) + r->cbm_validate = cbm_validate_intel; + else if (r->rid == RDT_RESOURCE_MBA) { + r->msr_base = MSR_IA32_MBA_THRTL_BASE; + r->msr_update = mba_wrmsr_intel; + r->parse_ctrlval = parse_bw_intel; + } + } +} + +static __init void rdt_init_res_defs_amd(void) +{ + struct rdt_resource *r; + + for_each_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_L3 || + r->rid == RDT_RESOURCE_L3DATA || + r->rid == RDT_RESOURCE_L3CODE || + r->rid == RDT_RESOURCE_L2 || + r->rid == RDT_RESOURCE_L2DATA || + r->rid == RDT_RESOURCE_L2CODE) + r->cbm_validate = cbm_validate_amd; + else if (r->rid == RDT_RESOURCE_MBA) { + r->msr_base = MSR_IA32_MBA_BW_BASE; + r->msr_update = mba_wrmsr_amd; + r->parse_ctrlval = parse_bw_amd; + } + } +} + +static __init void rdt_init_res_defs(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + rdt_init_res_defs_intel(); + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + rdt_init_res_defs_amd(); +} + static enum cpuhp_state rdt_online; -static int __init intel_rdt_late_init(void) +static int __init resctrl_late_init(void) { struct rdt_resource *r; int state, ret; + /* + * Initialize functions(or definitions) that are different + * between vendors here. + */ + rdt_init_res_defs(); + + check_quirks(); + if (!get_rdt_resources()) return -ENODEV; rdt_init_padding(); state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, - "x86/rdt/cat:online:", - intel_rdt_online_cpu, intel_rdt_offline_cpu); + "x86/resctrl/cat:online:", + resctrl_online_cpu, resctrl_offline_cpu); if (state < 0) return state; @@ -890,20 +996,20 @@ static int __init intel_rdt_late_init(void) rdt_online = state; for_each_alloc_capable_rdt_resource(r) - pr_info("Intel RDT %s allocation detected\n", r->name); + pr_info("%s allocation detected\n", r->name); for_each_mon_capable_rdt_resource(r) - pr_info("Intel RDT %s monitoring detected\n", r->name); + pr_info("%s monitoring detected\n", r->name); return 0; } -late_initcall(intel_rdt_late_init); +late_initcall(resctrl_late_init); -static void __exit intel_rdt_exit(void) +static void __exit resctrl_exit(void) { cpuhp_remove_state(rdt_online); rdtgroup_exit(); } -__exitcall(intel_rdt_exit); +__exitcall(resctrl_exit); diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 27937458c231..2dbd990a2eb7 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,10 +23,58 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/cpu.h> #include <linux/kernfs.h> #include <linux/seq_file.h> #include <linux/slab.h> -#include "intel_rdt.h" +#include "internal.h" + +/* + * Check whether MBA bandwidth percentage value is correct. The value is + * checked against the minimum and maximum bandwidth values specified by + * the hardware. The allocated bandwidth percentage is rounded to the next + * control step available on the hardware. 
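Before the AMD-specific parsers below, it is worth pinning down how they get wired in: the vendor split is a one-time dispatch at init that walks the resource table and points the MBA MSR base and callbacks at the Intel or AMD implementation. A reduced sketch of that shape, using the two MSR bases from the patch (0xd50 for Intel's THRTL MSRs, 0xc0000200 for AMD's BW MSRs) but otherwise illustrative types:

#include <stdio.h>

enum vendor { VENDOR_INTEL, VENDOR_AMD };

struct resource {
	const char *name;
	unsigned int msr_base;
	void (*msr_update)(struct resource *r);
	int (*parse_ctrlval)(const char *buf, struct resource *r);
};

/* Intel maps a percentage to delay values; AMD writes bandwidth directly. */
static void mba_wrmsr_intel(struct resource *r) { (void)r; }
static void mba_wrmsr_amd(struct resource *r)   { (void)r; }
static int parse_bw_intel(const char *b, struct resource *r)
{ (void)b; (void)r; return 0; }
static int parse_bw_amd(const char *b, struct resource *r)
{ (void)b; (void)r; return 0; }

#define MBA_THRTL_BASE 0xd50u        /* MSR_IA32_MBA_THRTL_BASE */
#define MBA_BW_BASE    0xc0000200u   /* MSR_IA32_MBA_BW_BASE */

static void init_res_defs(struct resource *mba, enum vendor v)
{
	if (v == VENDOR_INTEL) {
		mba->msr_base      = MBA_THRTL_BASE;
		mba->msr_update    = mba_wrmsr_intel;
		mba->parse_ctrlval = parse_bw_intel;
	} else {
		mba->msr_base      = MBA_BW_BASE;
		mba->msr_update    = mba_wrmsr_amd;
		mba->parse_ctrlval = parse_bw_amd;
	}
}

int main(void)
{
	struct resource mba = { .name = "MB" };
	init_res_defs(&mba, VENDOR_AMD);
	printf("%s: msr_base=0x%x\n", mba.name, mba.msr_base);
	return 0;
}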
+ */ +static bool bw_validate_amd(char *buf, unsigned long *data, + struct rdt_resource *r) +{ + unsigned long bw; + int ret; + + ret = kstrtoul(buf, 10, &bw); + if (ret) { + rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); + return false; + } + + if (bw < r->membw.min_bw || bw > r->default_ctrl) { + rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, + r->membw.min_bw, r->default_ctrl); + return false; + } + + *data = roundup(bw, (unsigned long)r->membw.bw_gran); + return true; +} + +int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) +{ + unsigned long bw_val; + + if (d->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + if (!bw_validate_amd(data->buf, &bw_val, r)) + return -EINVAL; + + d->new_ctrl = bw_val; + d->have_new_ctrl = true; + + return 0; +} /* * Check whether MBA bandwidth percentage value is correct. The value is @@ -64,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d) +int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { unsigned long bw_val; if (d->have_new_ctrl) { - rdt_last_cmd_printf("duplicate domain %d\n", d->id); + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } @@ -88,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). * Additionally Haswell requires at least two bits set. */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) { unsigned long first_bit, zero_bit, val; unsigned int cbm_len = r->cache.cbm_len; @@ -96,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) ret = kstrtoul(buf, 16, &val); if (ret) { - rdt_last_cmd_printf("non-hex character in mask %s\n", buf); + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); return false; } if (val == 0 || val > r->default_ctrl) { - rdt_last_cmd_puts("mask out of range\n"); + rdt_last_cmd_puts("Mask out of range\n"); return false; } @@ -109,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) { - rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val); + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); return false; } if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in mask\n", + rdt_last_cmd_printf("Need at least %d bits in the mask\n", r->cache.min_cbm_bits); return false; } @@ -124,6 +172,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) } /* + * Check whether a cache bit mask is valid. AMD allows non-contiguous + * bitmasks + */ +bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long val; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if (val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + *data = val; + return true; +} + +/* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. 
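The Intel and AMD mask validators in this file differ in exactly one respect: Intel requires the set bits in a CBM to be consecutive, AMD accepts any mask up to default_ctrl. The contiguity test reduces to the find_first_bit/find_next_zero_bit idiom used above; a self-contained plain-C version, assuming cbm_len fits in an unsigned long:

#include <stdbool.h>
#include <stdio.h>

/* True if the low cbm_len bits of val form one contiguous run of 1s. */
static bool cbm_is_contiguous(unsigned long val, unsigned int cbm_len)
{
	unsigned int first_bit = 0, zero_bit;

	if (val == 0)
		return false;

	while (first_bit < cbm_len && !((val >> first_bit) & 1))
		first_bit++;                 /* find_first_bit() */
	zero_bit = first_bit;
	while (zero_bit < cbm_len && ((val >> zero_bit) & 1))
		zero_bit++;                  /* find_next_zero_bit() */
	while (zero_bit < cbm_len) {
		if ((val >> zero_bit) & 1)
			return false;        /* a second run of 1s: reject */
		zero_bit++;
	}
	return true;
}

int main(void)
{
	printf("%d\n", cbm_is_contiguous(0x0ff0, 16)); /* 1: valid everywhere */
	printf("%d\n", cbm_is_contiguous(0x0f0f, 16)); /* 0: AMD-only shape */
	return 0;
}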
*/ @@ -134,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, u32 cbm_val; if (d->have_new_ctrl) { - rdt_last_cmd_printf("duplicate domain %d\n", d->id); + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } @@ -144,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, */ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_printf("pseudo-locked region in hierarchy\n"); + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); return -EINVAL; } - if (!cbm_validate(data->buf, &cbm_val, r)) + if (!r->cbm_validate(data->buf, &cbm_val, r)) return -EINVAL; if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_SHAREABLE) && rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n"); + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); return -EINVAL; } @@ -163,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, * either is exclusive. */ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_printf("overlaps with exclusive group\n"); + rdt_last_cmd_puts("Overlaps with exclusive group\n"); return -EINVAL; } if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) { if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_printf("overlaps with other group\n"); + rdt_last_cmd_puts("Overlaps with other group\n"); return -EINVAL; } } @@ -292,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid) return parse_line(tok, r, rdtgrp); } - rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname); + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); return -EINVAL; } @@ -310,9 +382,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; + cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -323,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, */ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = -EINVAL; - rdt_last_cmd_puts("resource group is pseudo-locked\n"); + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); goto out; } @@ -367,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return ret ?: nbytes; } @@ -463,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) r = &rdt_resources_all[resid]; d = rdt_find_domain(r, domid, NULL); - if (!d) { + if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out; } diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3736f6dc9545..822b7db634ee 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -1,20 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_RDT_H -#define _ASM_X86_INTEL_RDT_H +#ifndef _ASM_X86_RESCTRL_INTERNAL_H +#define _ASM_X86_RESCTRL_INTERNAL_H #include <linux/sched.h> #include <linux/kernfs.h> #include <linux/jump_label.h> -#define IA32_L3_QOS_CFG 0xc81 -#define IA32_L2_QOS_CFG 0xc82 -#define IA32_L3_CBM_BASE 0xc90 -#define IA32_L2_CBM_BASE 0xd10 -#define IA32_MBA_THRTL_BASE 0xd50 +#define MSR_IA32_L3_QOS_CFG 0xc81 +#define 
MSR_IA32_L2_QOS_CFG 0xc82 +#define MSR_IA32_L3_CBM_BASE 0xc90 +#define MSR_IA32_L2_CBM_BASE 0xd10 +#define MSR_IA32_MBA_THRTL_BASE 0xd50 +#define MSR_IA32_MBA_BW_BASE 0xc0000200 -#define L3_QOS_CDP_ENABLE 0x01ULL +#define MSR_IA32_QM_CTR 0x0c8e +#define MSR_IA32_QM_EVTSEL 0x0c8d -#define L2_QOS_CDP_ENABLE 0x01ULL +#define L3_QOS_CDP_ENABLE 0x01ULL + +#define L2_QOS_CDP_ENABLE 0x01ULL /* * Event IDs are used to program IA32_QM_EVTSEL before reading event @@ -29,6 +33,9 @@ #define MBM_CNTR_WIDTH 24 #define MBM_OVERFLOW_INTERVAL 1000 #define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 +#define MBA_MAX_MBPS U32_MAX +#define MAX_MBA_BW_AMD 0x800 #define RMID_VAL_ERROR BIT_ULL(63) #define RMID_VAL_UNAVAIL BIT_ULL(62) @@ -69,7 +76,7 @@ struct rmid_read { u64 val; }; -extern unsigned int intel_cqm_threshold; +extern unsigned int resctrl_cqm_threshold; extern bool rdt_alloc_capable; extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; @@ -391,9 +398,9 @@ struct rdt_parse_data { * struct rdt_resource - attributes of an RDT resource * @rid: The index of the resource * @alloc_enabled: Is allocation enabled on this machine - * @mon_enabled: Is monitoring enabled for this feature + * @mon_enabled: Is monitoring enabled for this feature * @alloc_capable: Is allocation available on this machine - * @mon_capable: Is monitor feature available on this machine + * @mon_capable: Is monitor feature available on this machine * @name: Name to use in "schemata" file * @num_closid: Number of CLOSIDs available * @cache_level: Which cache level defines scope of this resource @@ -405,10 +412,11 @@ struct rdt_parse_data { * @cache: Cache allocation related data * @format_str: Per resource format string to show domain value * @parse_ctrlval: Per resource function pointer to parse control values - * @evt_list: List of monitoring events - * @num_rmid: Number of RMIDs available - * @mon_scale: cqm counter * mon_scale = occupancy in bytes - * @fflags: flags to choose base and info files + * @cbm_validate Cache bitmask validate function + * @evt_list: List of monitoring events + * @num_rmid: Number of RMIDs available + * @mon_scale: cqm counter * mon_scale = occupancy in bytes + * @fflags: flags to choose base and info files */ struct rdt_resource { int rid; @@ -431,6 +439,7 @@ struct rdt_resource { int (*parse_ctrlval)(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); + bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r); struct list_head evt_list; int num_rmid; unsigned int mon_scale; @@ -439,8 +448,10 @@ struct rdt_resource { int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d); +int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); +int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); extern struct mutex rdtgroup_mutex; @@ -463,6 +474,10 @@ enum { RDT_NUM_RESOURCES, }; +#define for_each_rdt_resource(r) \ + for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\ + r++) + #define for_each_capable_rdt_resource(r) \ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\ r++) \ @@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool 
force_free); +bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r); +bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r); -#endif /* _ASM_X86_INTEL_RDT_H */ +#endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index b0f3aed76b75..f33f11f69078 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -26,10 +26,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <asm/cpu_device_id.h> -#include "intel_rdt.h" - -#define MSR_IA32_QM_CTR 0x0c8e -#define MSR_IA32_QM_EVTSEL 0x0c8d +#include "internal.h" struct rmid_entry { u32 rmid; @@ -73,7 +70,7 @@ unsigned int rdt_mon_features; * This is the threshold cache occupancy at which we will consider an * RMID available for re-allocation. */ -unsigned int intel_cqm_threshold; +unsigned int resctrl_cqm_threshold; static inline struct rmid_entry *__rmid_entry(u32 rmid) { @@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry) { u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID); - return val >= intel_cqm_threshold; + return val >= resctrl_cqm_threshold; } /* @@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) list_for_each_entry(d, &r->domains, list) { if (cpumask_test_cpu(cpu, &d->cpu_mask)) { val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID); - if (val <= intel_cqm_threshold) + if (val <= resctrl_cqm_threshold) continue; } @@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r) int rdt_get_mon_l3_config(struct rdt_resource *r) { + unsigned int cl_size = boot_cpu_data.x86_cache_size; int ret; r->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r) * * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC. 
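As a worked check of that arithmetic: x86_cache_size is in KiB, so the byte threshold is cl_size * 1024 / num_rmid, and the final division by mon_scale converts bytes into the hardware's occupancy-counter units. With the 35MB/56-RMID example from the comment and an assumed, purely illustrative mon_scale of 65536:

#include <stdio.h>

int main(void)
{
	unsigned int cache_size_kb = 35 * 1024;  /* 35 MB LLC, in KiB */
	unsigned int num_rmid = 56;
	unsigned int mon_scale = 65536;          /* assumed occupancy scale */

	unsigned int threshold_bytes = cache_size_kb * 1024 / num_rmid;
	unsigned int threshold_units = threshold_bytes / mon_scale;

	/* 35 MB / 56 = 640 KiB, i.e. 1/56 ~= 1.8% of the LLC. */
	printf("threshold: %u bytes (%u counter units)\n",
	       threshold_bytes, threshold_units);
	return 0;
}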
*/ - intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid; + resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid; /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */ - intel_cqm_threshold /= r->mon_scale; + resctrl_cqm_threshold /= r->mon_scale; ret = dom_data_init(r); if (ret) diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 815b4e92522c..14bed6af8377 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -24,14 +24,14 @@ #include <asm/cacheflush.h> #include <asm/intel-family.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/perf_event.h> #include "../../events/perf_event.h" /* For X86_CONFIG() */ -#include "intel_rdt.h" +#include "internal.h" #define CREATE_TRACE_POINTS -#include "intel_rdt_pseudo_lock_event.h" +#include "pseudo_lock_event.h" /* * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware @@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) for_each_cpu(cpu, &plr->d->cpu_mask) { pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); if (!pm_req) { - rdt_last_cmd_puts("fail allocating mem for PM QoS\n"); + rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); ret = -ENOMEM; goto out_err; } @@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) DEV_PM_QOS_RESUME_LATENCY, 30); if (ret < 0) { - rdt_last_cmd_printf("fail to add latency req cpu%d\n", + rdt_last_cmd_printf("Failed to add latency req CPU%d\n", cpu); kfree(pm_req); ret = -1; @@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) plr->cpu = cpumask_first(&plr->d->cpu_mask); if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("cpu %u associated with cache not online\n", + rdt_last_cmd_printf("CPU %u associated with cache not online\n", plr->cpu); ret = -ENODEV; goto out_region; @@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) } ret = -1; - rdt_last_cmd_puts("unable to determine cache line size\n"); + rdt_last_cmd_puts("Unable to determine cache line size\n"); out_region: pseudo_lock_region_clear(plr); return ret; @@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) * KMALLOC_MAX_SIZE. */ if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("requested region exceeds maximum size\n"); + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); ret = -E2BIG; goto out_region; } plr->kmem = kzalloc(plr->size, GFP_KERNEL); if (!plr->kmem) { - rdt_last_cmd_puts("unable to allocate memory\n"); + rdt_last_cmd_puts("Unable to allocate memory\n"); ret = -ENOMEM; goto out_region; } @@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * default closid associated with it. 
*/ if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("cannot pseudo-lock default group\n"); + rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); return -EINVAL; } @@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) */ prefetch_disable_bits = get_prefetch_disable_bits(); if (prefetch_disable_bits == 0) { - rdt_last_cmd_puts("pseudo-locking not supported\n"); + rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; } if (rdtgroup_monitor_in_progress(rdtgrp)) { - rdt_last_cmd_puts("monitoring in progress\n"); + rdt_last_cmd_puts("Monitoring in progress\n"); return -EINVAL; } if (rdtgroup_tasks_assigned(rdtgrp)) { - rdt_last_cmd_puts("tasks assigned to resource group\n"); + rdt_last_cmd_puts("Tasks assigned to resource group\n"); return -EINVAL; } @@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) } if (rdtgroup_locksetup_user_restrict(rdtgrp)) { - rdt_last_cmd_puts("unable to modify resctrl permissions\n"); + rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); return -EIO; } ret = pseudo_lock_init(rdtgrp); if (ret) { - rdt_last_cmd_puts("unable to init pseudo-lock region\n"); + rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); goto out_release; } @@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { - rdt_last_cmd_puts("out of RMIDs\n"); + rdt_last_cmd_puts("Out of RMIDs\n"); return ret; } rdtgrp->mon.rmid = ret; @@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { ret = PTR_ERR(thread); - rdt_last_cmd_printf("locking thread returned error %d\n", ret); + rdt_last_cmd_printf("Locking thread returned error %d\n", ret); goto out_cstates; } @@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) * the cleared, but not freed, plr struct resulting in an * empty pseudo-locking loop. */ - rdt_last_cmd_puts("locking thread interrupted\n"); + rdt_last_cmd_puts("Locking thread interrupted\n"); goto out_cstates; } ret = pseudo_lock_minor_get(&new_minor); if (ret < 0) { - rdt_last_cmd_puts("unable to obtain a new minor number\n"); + rdt_last_cmd_puts("Unable to obtain a new minor number\n"); goto out_cstates; } @@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) if (IS_ERR(dev)) { ret = PTR_ERR(dev); - rdt_last_cmd_printf("failed to create character device: %d\n", + rdt_last_cmd_printf("Failed to create character device: %d\n", ret); goto out_debugfs; } diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h index 2c041e6d9f05..428ebbd4270b 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h @@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3, #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
-#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event +#define TRACE_INCLUDE_FILE pseudo_lock_event #include <trace/define_trace.h> diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f27b8115ffa2..8388adf241b2 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -35,8 +35,8 @@ #include <uapi/linux/magic.h> -#include <asm/intel_rdt_sched.h> -#include "intel_rdt.h" +#include <asm/resctrl_sched.h> +#include "internal.h" DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); @@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, } /* - * This is safe against intel_rdt_sched_in() called from __switch_to() + * This is safe against resctrl_sched_in() called from __switch_to() * because __switch_to() is executed with interrupts disabled. A local call * from update_closid_rmid() is proteced against __switch_to() because * preemption is disabled. @@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - intel_rdt_sched_in(); + resctrl_sched_in(); } /* @@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, /* Check whether cpus belong to parent ctrl group */ cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); if (cpumask_weight(tmpmask)) { - rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n"); + rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); return -EINVAL; } @@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, rdt_last_cmd_clear(); if (!rdtgrp) { ret = -ENOENT; - rdt_last_cmd_puts("directory was removed\n"); + rdt_last_cmd_puts("Directory was removed\n"); goto unlock; } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = -EINVAL; - rdt_last_cmd_puts("pseudo-locking in progress\n"); + rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto unlock; } @@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ret = cpumask_parse(buf, newmask); if (ret) { - rdt_last_cmd_puts("bad cpu list/mask\n"); + rdt_last_cmd_puts("Bad CPU list/mask\n"); goto unlock; } @@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, cpumask_andnot(tmpmask, newmask, cpu_online_mask); if (cpumask_weight(tmpmask)) { ret = -EINVAL; - rdt_last_cmd_puts("can only assign online cpus\n"); + rdt_last_cmd_puts("Can only assign online CPUs\n"); goto unlock; } @@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head) preempt_disable(); /* update PQR_ASSOC MSR to make resource group go into effect */ - intel_rdt_sched_in(); + resctrl_sched_in(); preempt_enable(); kfree(callback); @@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk, */ atomic_dec(&rdtgrp->waitcount); kfree(callback); - rdt_last_cmd_puts("task exited\n"); + rdt_last_cmd_puts("Task exited\n"); } else { /* * For ctrl_mon groups move both closid and rmid. 
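That comment summarizes the whole move operation: for a ctrl_mon group both IDs are rewritten on the task, while for a plain monitor group only the RMID moves and the CLOSID stays with the parent control group (the latter case is assumed from the resctrl model; only the ctrl_mon case is quoted here). A schematic version with illustrative structs:

#include <stdbool.h>
#include <stdint.h>

struct task  { uint32_t closid; uint32_t rmid; };
struct group { bool is_ctrl_mon; uint32_t closid; uint32_t rmid; };

static void move_task(struct task *t, const struct group *g)
{
	if (g->is_ctrl_mon) {
		/* For ctrl_mon groups move both closid and rmid. */
		t->closid = g->closid;
		t->rmid   = g->rmid;
	} else {
		/* Monitor group: only the rmid moves (assumed case). */
		t->rmid = g->rmid;
	}
	/*
	 * The kernel then queues a callback on the task (move_myself())
	 * so it re-runs resctrl_sched_in() and refreshes IA32_PQR_ASSOC
	 * for itself.
	 */
}

int main(void)
{
	struct task t = { 0, 0 };
	struct group g = { true, 3, 7 };
	move_task(&t, &g);
	return !(t.closid == 3 && t.rmid == 7);
}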
@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = -EINVAL; - rdt_last_cmd_puts("pseudo-locking in progress\n"); + rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto unlock; } @@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of, { struct rdt_resource *r = of->kn->parent->priv; - seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale); + seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); return 0; } @@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, if (bytes > (boot_cpu_data.x86_cache_size * 1024)) return -EINVAL; - intel_cqm_threshold = bytes / r->mon_scale; + resctrl_cqm_threshold = bytes / r->mon_scale; return nbytes; } @@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, * peer RDT CDP resource. Hence the WARN. */ _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); - if (WARN_ON(!_d_cdp)) { + if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { _r_cdp = NULL; ret = -EINVAL; } @@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) list_for_each_entry(d, &r->domains, list) { if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], rdtgrp->closid, false)) { - rdt_last_cmd_puts("schemata overlaps\n"); + rdt_last_cmd_puts("Schemata overlaps\n"); return false; } } } if (!has_cache) { - rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); return false; } @@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_printf("cannot change pseudo-locked group\n"); + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); ret = -EINVAL; goto out; } @@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; } else { - rdt_last_cmd_printf("unknown/unsupported mode\n"); + rdt_last_cmd_puts("Unknown or unsupported mode\n"); ret = -EINVAL; } @@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } static void l2_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL); } static inline bool is_mba_linear(void) @@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data) if (ret) goto out; } else if (!strcmp(token, "mba_MBps")) { - ret = set_mba_sc(true); + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + ret = set_mba_sc(true); + else + ret = -EINVAL; if (ret) goto out; } else { @@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) tmp_cbm = d->new_ctrl; if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("no space on %s:%d\n", + rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); return -ENOSPC; } @@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) continue; ret = update_domains(r, rdtgrp->closid); if (ret < 0) { - rdt_last_cmd_puts("failed to initialize allocations\n"); + rdt_last_cmd_puts("Failed to initialize allocations\n"); return ret; } rdtgrp->mode = RDT_MODE_SHAREABLE; @@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, rdt_last_cmd_clear(); if (!prdtgrp) { ret = -ENODEV; - rdt_last_cmd_puts("directory was removed\n"); + rdt_last_cmd_puts("Directory was removed\n"); goto out_unlock; } @@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { ret = -EINVAL; - rdt_last_cmd_puts("pseudo-locking in progress\n"); + rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto out_unlock; } @@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); if (!rdtgrp) { ret = -ENOSPC; - rdt_last_cmd_puts("kernel out of memory\n"); + rdt_last_cmd_puts("Kernel out of memory\n"); goto out_unlock; } *r = rdtgrp; @@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { - rdt_last_cmd_puts("out of RMIDs\n"); + rdt_last_cmd_puts("Out of RMIDs\n"); goto out_destroy; } rdtgrp->mon.rmid = ret; @@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, kn = rdtgrp->kn; ret = closid_alloc(); if (ret < 0) { - rdt_last_cmd_puts("out of CLOSIDs\n"); + rdt_last_cmd_puts("Out of CLOSIDs\n"); goto out_common_fail; } closid = ret; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 772c219b6889..94aa1c72ca98 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -5,9 +5,10 @@ #include <linux/cpu.h> #include <asm/pat.h> +#include <asm/apic.h> #include <asm/processor.h> -#include <asm/apic.h> +#include "cpu.h" struct cpuid_bit { u16 feature; @@ -17,7 +18,11 @@ struct cpuid_bit { u32 sub_leaf; }; -/* Please keep the leaf sorted by cpuid_bit.level for faster search. */ +/* + * Please keep the leaf sorted by cpuid_bit.level for faster search. + * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID + * levels are different and there is a separate entry for each. 
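Each cpuid_bits[] entry names a leaf, sub-leaf, register and bit, and detection is then a linear scan of the table. A user-space analogue using the compiler's <cpuid.h> intrinsic, probing just the AMD MBA bit added below (leaf 0x80000008, EBX bit 6); the table layout mirrors the kernel's, the rest is illustrative:

#include <cpuid.h>
#include <stdio.h>

struct cpuid_bit {
	const char *name;
	unsigned int reg;       /* 0=eax 1=ebx 2=ecx 3=edx */
	unsigned int bit;
	unsigned int level;     /* cpuid leaf */
	unsigned int sub_leaf;
};

static const struct cpuid_bit cpuid_bits[] = {
	/* Mirrors the new scattered.c entry for AMD memory bandwidth alloc. */
	{ "mba(amd)", 1, 6, 0x80000008, 0 },
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx, regs[4];
	unsigned int i;

	for (i = 0; i < sizeof(cpuid_bits) / sizeof(cpuid_bits[0]); i++) {
		const struct cpuid_bit *cb = &cpuid_bits[i];

		/* Returns 0 if the leaf is not supported on this CPU. */
		if (!__get_cpuid_count(cb->level, cb->sub_leaf,
				       &eax, &ebx, &ecx, &edx))
			continue;
		regs[0] = eax; regs[1] = ebx; regs[2] = ecx; regs[3] = edx;
		printf("%s: %s\n", cb->name,
		       (regs[cb->reg] >> cb->bit) & 1 ? "yes" : "no");
	}
	return 0;
}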
+ */ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, @@ -29,6 +34,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, + { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, { 0, 0, 0, 0, 0 } @@ -56,27 +62,3 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) set_cpu_cap(c, cb->feature); } } - -u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf, - enum cpuid_regs_idx reg) -{ - const struct cpuid_bit *cb; - u32 cpuid_val = 0; - - for (cb = cpuid_bits; cb->feature; cb++) { - - if (level > cb->level) - continue; - - if (level < cb->level) - break; - - if (reg == cb->reg && sub_leaf == cb->sub_leaf) { - if (cpu_has(&boot_cpu_data, cb->feature)) - cpuid_val |= BIT(cb->bit); - } - } - - return cpuid_val; -} -EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf); diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 71ca064e3794..8f6c784141d1 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -10,6 +10,8 @@ #include <asm/pat.h> #include <asm/processor.h> +#include "cpu.h" + /* leaf 0xb SMT level */ #define SMT_LEVEL 0 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index f631a3f15587..c8b07d8ea5a2 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -37,6 +37,7 @@ #include <asm/reboot.h> #include <asm/virtext.h> #include <asm/intel_pt.h> +#include <asm/crash.h> /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index eb8ab3915268..22369dd5de3b 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, /** * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the - * memory with the encryption mask set to accomodate kdump on SME-enabled + * memory with the encryption mask set to accommodate kdump on SME-enabled * machines. 
*/ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 7299dcbf8e85..8d85e00bb40a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -23,6 +23,7 @@ #include <asm/pci_x86.h> #include <asm/setup.h> #include <asm/i8259.h> +#include <asm/prom.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 2ea85b32421a..2e5003fef51a 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -93,7 +93,7 @@ bool irq_fpu_usable(void) } EXPORT_SYMBOL(irq_fpu_usable); -void __kernel_fpu_begin(void) +static void __kernel_fpu_begin(void) { struct fpu *fpu = ¤t->thread.fpu; @@ -111,9 +111,8 @@ void __kernel_fpu_begin(void) __cpu_invalidate_fpregs_state(); } } -EXPORT_SYMBOL(__kernel_fpu_begin); -void __kernel_fpu_end(void) +static void __kernel_fpu_end(void) { struct fpu *fpu = ¤t->thread.fpu; @@ -122,7 +121,6 @@ void __kernel_fpu_end(void) kernel_fpu_enable(); } -EXPORT_SYMBOL(__kernel_fpu_end); void kernel_fpu_begin(void) { diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 87a57b7642d3..9cc108456d0b 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -444,7 +444,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr) * format. Checking a supervisor state's uncompacted offset is * an error. */ - if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) { + if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) { WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr); return -1; } @@ -808,10 +808,8 @@ void fpu__resume_cpu(void) * Given an xstate feature mask, calculate where in the xsave * buffer the state is. Callers should ensure that the buffer * is valid. - * - * Note: does not work for compacted buffers. 
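The switch from (1 << xfeature_nr) to BIT_ULL(xfeature_nr) in the xstate hunk above matters because the former is an int expression: shifting by 31 or more bits overflows or is undefined, while xfeature masks are 64-bit. A short stand-alone demonstration of the well-defined form:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	uint64_t mask = 0;
	int nr = 40;                 /* a hypothetical high feature bit */

	/* mask |= 1 << nr;   -- undefined: shifts an int by 40 bits */
	mask |= BIT_ULL(nr);         /* well-defined 64-bit shift */

	printf("bit %d -> mask 0x%016llx\n", nr, (unsigned long long)mask);
	return 0;
}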
*/ -void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) +static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) { int feature_nr = fls64(xstate_feature_mask) - 1; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 747c758f67b7..d1dbe8e4eb82 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -386,7 +386,7 @@ NEXT_PAGE(early_dynamic_pgts) .data -#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH) NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_PAGE_OFFSET*8, 0 diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 108c48d0d40e..1b2ee55a2dfb 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -19,6 +19,7 @@ #include <asm/pci_x86.h> #include <asm/reboot.h> #include <asm/setup.h> +#include <asm/jailhouse_para.h> static __initdata struct jailhouse_setup_data setup_data; static unsigned int precalibrated_tsc_khz; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index c33b06f5faa4..4ba75afba527 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -751,7 +751,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used void *trampoline_handler(struct pt_regs *regs) +static __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; @@ -1026,12 +1026,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) } NOKPROBE_SYMBOL(kprobe_fault_handler); -bool arch_within_kprobe_blacklist(unsigned long addr) +int __init arch_populate_kprobe_blacklist(void) { - return (addr >= (unsigned long)__kprobes_text_start && - addr < (unsigned long)__kprobes_text_end) || - (addr >= (unsigned long)__entry_text_start && - addr < (unsigned long)__entry_text_end); + return kprobe_add_area_blacklist((unsigned long)__entry_text_start, + (unsigned long)__entry_text_end); } int __init arch_init_kprobes(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 30084ecaa20f..e811d4d1c824 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -1,19 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* KVM paravirtual clock driver. A clocksource implementation Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clocksource.h> diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S deleted file mode 100644 index 161c95059044..000000000000 --- a/arch/x86/kernel/macros.S +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* - * This file includes headers whose assembly part includes macros which are - * commonly used. The macros are precompiled into assmebly file which is later - * assembled together with each compiled file. - */ - -#include <linux/compiler.h> -#include <asm/refcount.h> -#include <asm/alternative-asm.h> -#include <asm/bug.h> -#include <asm/paravirt.h> -#include <asm/asm.h> -#include <asm/cpufeature.h> -#include <asm/jump_label.h> diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index bbfc8b1e9104..c70720f61a34 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -51,8 +51,6 @@ #include <asm/x86_init.h> #include <asm/iommu_table.h> -#define CALGARY_MAPPING_ERROR 0 - #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT int use_calgary __read_mostly = 1; #else @@ -157,8 +155,6 @@ static const unsigned long phb_debug_offsets[] = { #define PHB_DEBUG_STUFF_OFFSET 0x0020 -#define EMERGENCY_PAGES 32 /* = 128KB */ - unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; static int translate_empty_slots __read_mostly = 0; static int calgary_detected __read_mostly = 0; @@ -255,7 +251,7 @@ static unsigned long iommu_range_alloc(struct device *dev, if (panic_on_overflow) panic("Calgary: fix the allocator.\n"); else - return CALGARY_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } } @@ -274,11 +270,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, dma_addr_t ret; entry = iommu_range_alloc(dev, tbl, npages); - - if (unlikely(entry == CALGARY_MAPPING_ERROR)) { + if (unlikely(entry == DMA_MAPPING_ERROR)) { pr_warn("failed to allocate %u pages in iommu %p\n", npages, tbl); - return CALGARY_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /* set the return dma address */ @@ -294,12 +289,10 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) { unsigned long entry; - unsigned long badend; unsigned long flags; /* were we called with bad_dma_address? 
*/ - badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE); - if (unlikely(dma_addr < badend)) { + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) { WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " "address 0x%Lx\n", dma_addr); return; @@ -383,7 +376,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); entry = iommu_range_alloc(dev, tbl, npages); - if (entry == CALGARY_MAPPING_ERROR) { + if (entry == DMA_MAPPING_ERROR) { /* makes sure unmap knows to stop */ s->dma_length = 0; goto error; @@ -401,7 +394,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, error: calgary_unmap_sg(dev, sg, nelems, dir, 0); for_each_sg(sg, s, nelems, i) { - sg->dma_address = CALGARY_MAPPING_ERROR; + sg->dma_address = DMA_MAPPING_ERROR; sg->dma_length = 0; } return 0; @@ -454,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, /* set up tces to cover the allocated range */ mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); - if (mapping == CALGARY_MAPPING_ERROR) + if (mapping == DMA_MAPPING_ERROR) goto free; *dma_handle = mapping; return ret; @@ -479,11 +472,6 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == CALGARY_MAPPING_ERROR; -} - static const struct dma_map_ops calgary_dma_ops = { .alloc = calgary_alloc_coherent, .free = calgary_free_coherent, @@ -491,7 +479,6 @@ static const struct dma_map_ops calgary_dma_ops = { .unmap_sg = calgary_unmap_sg, .map_page = calgary_map_page, .unmap_page = calgary_unmap_page, - .mapping_error = calgary_mapping_error, .dma_supported = dma_direct_supported, }; @@ -739,9 +726,6 @@ static void __init calgary_reserve_regions(struct pci_dev *dev) u64 start; struct iommu_table *tbl = pci_iommu(dev->bus); - /* reserve EMERGENCY_PAGES from bad_dma_address and up */ - iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES); - /* avoid the BIOS/VGA first 640KB-1MB region */ /* for CalIOC2 - avoid the entire first MB */ if (is_calgary(dev->device)) { diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index f4562fcec681..d460998ae828 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -17,7 +17,7 @@ static bool disable_dac_quirk __read_mostly; -const struct dma_map_ops *dma_ops = &dma_direct_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #ifdef CONFIG_IOMMU_DEBUG diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index bd08b9e1c9e2..5f5302028a9a 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -62,10 +62,8 @@ IOMMU_INIT(pci_swiotlb_detect_4gb, void __init pci_swiotlb_init(void) { - if (swiotlb) { + if (swiotlb) swiotlb_init(0); - dma_ops = &swiotlb_dma_ops; - } } void __init pci_swiotlb_late_init(void) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7d31192296a8..90ae0ca51083 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -22,6 +22,8 @@ #include <linux/utsname.h> #include <linux/stackprotector.h> #include <linux/cpuidle.h> +#include <linux/acpi.h> +#include <linux/elf-randomize.h> #include <trace/events/power.h> #include <linux/hw_breakpoint.h> #include <asm/cpu.h> @@ -39,6 +41,7 @@ #include <asm/desc.h> #include <asm/prctl.h> #include <asm/spec-ctrl.h> +#include <asm/proto.h> #include 
"process.h" @@ -793,7 +796,7 @@ unsigned long get_wchan(struct task_struct *p) unsigned long start, bottom, top, sp, fp, ip, ret = 0; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (p == current || p->state == TASK_RUNNING) return 0; if (!try_get_task_stack(p)) diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h index 898e97cf6629..320ab978fb1f 100644 --- a/arch/x86/kernel/process.h +++ b/arch/x86/kernel/process.h @@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev, if (IS_ENABLED(CONFIG_SMP)) { /* * Avoid __switch_to_xtra() invocation when conditional - * STIPB is disabled and the only different bit is + * STIBP is disabled and the only different bit is * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not * in the TIF_WORK_CTXSW masks. */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index d3e593eb189f..e471d8e6f0b2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -44,9 +44,6 @@ #include <asm/processor.h> #include <asm/fpu/internal.h> #include <asm/desc.h> -#ifdef CONFIG_MATH_EMULATION -#include <asm/math_emu.h> -#endif #include <linux/err.h> @@ -56,7 +53,7 @@ #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/vm86.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/proto.h> #include "process.h" @@ -298,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); /* Load the Intel cache allocation PQR MSR. */ - intel_rdt_sched_in(); + resctrl_sched_in(); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index bbfbf017065c..6a62f4af9fcf 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -52,7 +52,7 @@ #include <asm/switch_to.h> #include <asm/xen/hypervisor.h> #include <asm/vdso.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/unistd.h> #include <asm/fsgsbase.h> #ifdef CONFIG_IA32_EMULATION @@ -68,7 +68,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; - unsigned int ds, cs, es; + unsigned int ds, es; show_iret_regs(regs); @@ -100,7 +100,6 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) } asm("movl %%ds,%0" : "=r" (ds)); - asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); @@ -116,7 +115,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", fs, fsindex, gs, gsindex, shadowgs); - printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, + printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds, es, cr0); printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); @@ -339,24 +338,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, return base; } -void x86_fsbase_write_cpu(unsigned long fsbase) -{ - /* - * Set the selector to 0 as a notion, that the segment base is - * overwritten, which will be checked for skipping the segment load - * during context switch. - */ - loadseg(FS, 0); - wrmsrl(MSR_FS_BASE, fsbase); -} - -void x86_gsbase_write_cpu_inactive(unsigned long gsbase) -{ - /* Set the selector to 0 for the same reason as %fs above. 
*/ - loadseg(GS, 0); - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); -} - unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; @@ -385,38 +366,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task) return gsbase; } -int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) +void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { - /* - * Not strictly needed for %fs, but do it for symmetry - * with %gs - */ - if (unlikely(fsbase >= TASK_SIZE_MAX)) - return -EPERM; + WARN_ON_ONCE(task == current); - preempt_disable(); task->thread.fsbase = fsbase; - if (task == current) - x86_fsbase_write_cpu(fsbase); - task->thread.fsindex = 0; - preempt_enable(); - - return 0; } -int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) +void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { - if (unlikely(gsbase >= TASK_SIZE_MAX)) - return -EPERM; + WARN_ON_ONCE(task == current); - preempt_disable(); task->thread.gsbase = gsbase; - if (task == current) - x86_gsbase_write_cpu_inactive(gsbase); - task->thread.gsindex = 0; - preempt_enable(); - - return 0; } int copy_thread_tls(unsigned long clone_flags, unsigned long sp, @@ -660,7 +621,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - intel_rdt_sched_in(); + resctrl_sched_in(); return prev_p; } @@ -684,7 +645,7 @@ void set_personality_64bit(void) /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that - 32bit childs are affected again. */ + 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } @@ -754,11 +715,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) switch (option) { case ARCH_SET_GS: { - ret = x86_gsbase_write_task(task, arg2); + if (unlikely(arg2 >= TASK_SIZE_MAX)) + return -EPERM; + + preempt_disable(); + /* + * ARCH_SET_GS has always overwritten the index + * and the base. Zero is the most sensible value + * to put in the index, and is the only value that + * makes any sense if FSGSBASE is unavailable. + */ + if (task == current) { + loadseg(GS, 0); + x86_gsbase_write_cpu_inactive(arg2); + + /* + * On non-FSGSBASE systems, save_base_legacy() expects + * that we also fill in thread.gsbase. + */ + task->thread.gsbase = arg2; + + } else { + task->thread.gsindex = 0; + x86_gsbase_write_task(task, arg2); + } + preempt_enable(); break; } case ARCH_SET_FS: { - ret = x86_fsbase_write_task(task, arg2); + /* + * Not strictly needed for %fs, but do it for symmetry + * with %gs + */ + if (unlikely(arg2 >= TASK_SIZE_MAX)) + return -EPERM; + + preempt_disable(); + /* + * Set the selector to 0 for the same reason + * as %gs above. + */ + if (task == current) { + loadseg(FS, 0); + x86_fsbase_write_cpu(arg2); + + /* + * On non-FSGSBASE systems, save_base_legacy() expects + * that we also fill in thread.fsbase. 
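The user-visible contract that this ARCH_SET_FS/ARCH_SET_GS rework preserves can be exercised directly. A minimal sketch, assuming an x86-64 Linux toolchain where <asm/prctl.h> provides the ARCH_* constants:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */

int main(void)
{
	unsigned long base = 0;

	/* Setting the base also zeroes the %gs selector, per the
	 * comments in do_arch_prctl_64() above. */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0x700000UL))
		perror("ARCH_SET_GS");
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base))
		perror("ARCH_GET_GS");
	printf("gs base = %#lx\n", base);
	return 0;
}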
+ */ + task->thread.fsbase = arg2; + } else { + task->thread.fsindex = 0; + x86_fsbase_write_task(task, arg2); + } + preempt_enable(); break; } case ARCH_GET_FS: { diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index ffae9b9740fd..4b8ee05dd6ad 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -397,11 +397,12 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; /* - * When changing the FS base, use the same - * mechanism as for do_arch_prctl_64(). + * When changing the FS base, use do_arch_prctl_64() + * to set the index to zero and to set the base + * as requested. */ if (child->thread.fsbase != value) - return x86_fsbase_write_task(child, value); + return do_arch_prctl_64(child, ARCH_SET_FS, value); return 0; case offsetof(struct user_regs_struct,gs_base): /* @@ -410,7 +411,7 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; if (child->thread.gsbase != value) - return x86_gsbase_write_task(child, value); + return do_arch_prctl_64(child, ARCH_SET_GS, value); return 0; #endif } diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 736348ead421..8451f38ad399 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -7,6 +7,7 @@ #include <linux/irq.h> #include <asm/hpet.h> +#include <asm/setup.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a9134d1910b9..ccd1f2a8e557 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1347,7 +1347,7 @@ void __init calculate_max_logical_packages(void) * extrapolate the boot cpu's data to all packages. */ ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); - __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); + __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); pr_info("Max logical packages: %u\n", __max_logical_packages); } diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 623965e86b65..fa51723571c8 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -19,12 +19,15 @@ #include <linux/dmi.h> #include <linux/err.h> +#include <linux/efi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/screen_info.h> #include <video/vga.h> + +#include <asm/efi.h> #include <asm/sysfb.h> enum { diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 5bd30c442794..496748ed266a 100644 --- a/arch/x86/kernel/tracepoint.c +++ b/arch/x86/kernel/tracepoint.c @@ -10,6 +10,8 @@ #include <asm/hw_irq.h> #include <asm/desc.h> +#include <asm/trace/exceptions.h> +#include <asm/trace/irq_vectors.h> DEFINE_STATIC_KEY_FALSE(trace_pagefault_key); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index dc4f2fdf5e57..69b3a7c30013 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -16,7 +16,7 @@ kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o page_track.o debugfs.o -kvm-intel-y += vmx.o pmu_intel.o +kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o kvm-amd-y += svm.o pmu_amd.o obj-$(CONFIG_KVM) += kvm.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7bcfa61375c0..bbffa6c54697 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -67,9 +67,6 @@ u64 kvm_supported_xcr0(void) #define F(x) bit(X86_FEATURE_##x) -/* For 
scattered features from cpufeatures.h; we currently expose none */ -#define KF(x) bit(KVM_CPUID_BIT_##x) - int kvm_update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -337,6 +334,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; + unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -380,8 +378,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = - F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | - F(AMD_SSB_NO); + F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | + F(AMD_SSB_NO) | F(AMD_STIBP); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = @@ -395,7 +393,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | - F(SHA_NI) | F(AVX512BW) | F(AVX512VL); + F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt; /* cpuid 0xD.1.eax */ const u32 kvm_cpuid_D_1_eax_x86_features = @@ -411,7 +409,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | - F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); + F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -426,7 +424,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, switch (function) { case 0: - entry->eax = min(entry->eax, (u32)0xd); + entry->eax = min(entry->eax, (u32)(f_intel_pt ? 0x14 : 0xd)); break; case 1: entry->edx &= kvm_cpuid_1_edx_x86_features; @@ -603,6 +601,23 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } break; } + /* Intel PT */ + case 0x14: { + int t, times = entry->eax; + + if (!f_intel_pt) + break; + + entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + for (t = 1; t <= times; ++t) { + if (*nent >= maxnent) + goto out; + do_cpuid_1_ent(&entry[t], function, t); + entry[t].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + ++*nent; + } + break; + } case KVM_CPUID_SIGNATURE: { static const char signature[12] = "KVMKVMKVM\0\0"; const u32 *sigptr = (const u32 *)signature; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 4e80080f277a..c90a5352d158 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -38,6 +38,9 @@ #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) +static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, + bool vcpu_kick); + static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint) { return atomic64_read(&synic->sint[sint]); @@ -158,59 +161,24 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) return (synic->active) ? 
synic : NULL; } -static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic, - u32 sint) -{ - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); - struct page *page; - gpa_t gpa; - struct hv_message *msg; - struct hv_message_page *msg_page; - - gpa = synic->msg_page & PAGE_MASK; - page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT); - if (is_error_page(page)) { - vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n", - gpa); - return; - } - msg_page = kmap_atomic(page); - - msg = &msg_page->sint_message[sint]; - msg->header.message_flags.msg_pending = 0; - - kunmap_atomic(msg_page); - kvm_release_page_dirty(page); - kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); -} - static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) { struct kvm *kvm = vcpu->kvm; struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; - int gsi, idx, stimers_pending; + int gsi, idx; trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); - if (synic->msg_page & HV_SYNIC_SIMP_ENABLE) - synic_clear_sint_msg_pending(synic, sint); - /* Try to deliver pending Hyper-V SynIC timers messages */ - stimers_pending = 0; for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { stimer = &hv_vcpu->stimer[idx]; - if (stimer->msg_pending && - (stimer->config & HV_STIMER_ENABLE) && - HV_STIMER_SINT(stimer->config) == sint) { - set_bit(stimer->index, - hv_vcpu->stimer_pending_bitmap); - stimers_pending++; - } + if (stimer->msg_pending && stimer->config.enable && + !stimer->config.direct_mode && + stimer->config.sintx == sint) + stimer_mark_pending(stimer, false); } - if (stimers_pending) - kvm_make_request(KVM_REQ_HV_STIMER, vcpu); idx = srcu_read_lock(&kvm->irq_srcu); gsi = atomic_read(&synic->sint_to_gsi[sint]); @@ -497,7 +465,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); ktime_now = ktime_get(); - if (stimer->config & HV_STIMER_PERIODIC) { + if (stimer->config.periodic) { if (stimer->exp_time) { if (time_now >= stimer->exp_time) { u64 remainder; @@ -546,13 +514,18 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, bool host) { + union hv_stimer_config new_config = {.as_uint64 = config}, + old_config = {.as_uint64 = stimer->config.as_uint64}; + trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, stimer->index, config, host); stimer_cleanup(stimer); - if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0) - config &= ~HV_STIMER_ENABLE; - stimer->config = config; + if (old_config.enable && + !new_config.direct_mode && new_config.sintx == 0) + new_config.enable = 0; + stimer->config.as_uint64 = new_config.as_uint64; + stimer_mark_pending(stimer, false); return 0; } @@ -566,16 +539,16 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, stimer_cleanup(stimer); stimer->count = count; if (stimer->count == 0) - stimer->config &= ~HV_STIMER_ENABLE; - else if (stimer->config & HV_STIMER_AUTOENABLE) - stimer->config |= HV_STIMER_ENABLE; + stimer->config.enable = 0; + else if (stimer->config.auto_enable) + stimer->config.enable = 1; stimer_mark_pending(stimer, false); return 0; } static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig) { - *pconfig = stimer->config; + *pconfig = stimer->config.as_uint64; return 0; } @@ -586,44 +559,60 @@ static int stimer_get_count(struct 
kvm_vcpu_hv_stimer *stimer, u64 *pcount) } static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, - struct hv_message *src_msg) + struct hv_message *src_msg, bool no_retry) { struct kvm_vcpu *vcpu = synic_to_vcpu(synic); - struct page *page; - gpa_t gpa; - struct hv_message *dst_msg; + int msg_off = offsetof(struct hv_message_page, sint_message[sint]); + gfn_t msg_page_gfn; + struct hv_message_header hv_hdr; int r; - struct hv_message_page *msg_page; if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) return -ENOENT; - gpa = synic->msg_page & PAGE_MASK; - page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; + msg_page_gfn = synic->msg_page >> PAGE_SHIFT; - msg_page = kmap_atomic(page); - dst_msg = &msg_page->sint_message[sint]; - if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE, - src_msg->header.message_type) != HVMSG_NONE) { - dst_msg->header.message_flags.msg_pending = 1; - r = -EAGAIN; - } else { - memcpy(&dst_msg->u.payload, &src_msg->u.payload, - src_msg->header.payload_size); - dst_msg->header.message_type = src_msg->header.message_type; - dst_msg->header.payload_size = src_msg->header.payload_size; - r = synic_set_irq(synic, sint); - if (r >= 1) - r = 0; - else if (r == 0) - r = -EFAULT; + /* + * Strictly following the spec-mandated ordering would assume setting + * .msg_pending before checking .message_type. However, this function + * is only called in vcpu context so the entire update is atomic from + * guest POV and thus the exact order here doesn't matter. + */ + r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type, + msg_off + offsetof(struct hv_message, + header.message_type), + sizeof(hv_hdr.message_type)); + if (r < 0) + return r; + + if (hv_hdr.message_type != HVMSG_NONE) { + if (no_retry) + return 0; + + hv_hdr.message_flags.msg_pending = 1; + r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, + &hv_hdr.message_flags, + msg_off + + offsetof(struct hv_message, + header.message_flags), + sizeof(hv_hdr.message_flags)); + if (r < 0) + return r; + return -EAGAIN; } - kunmap_atomic(msg_page); - kvm_release_page_dirty(page); - kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); - return r; + + r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off, + sizeof(src_msg->header) + + src_msg->header.payload_size); + if (r < 0) + return r; + + r = synic_set_irq(synic, sint); + if (r < 0) + return r; + if (r == 0) + return -EFAULT; + return 0; } static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) @@ -633,24 +622,45 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) struct hv_timer_message_payload *payload = (struct hv_timer_message_payload *)&msg->u.payload; + /* + * To avoid piling up periodic ticks, don't retry message + * delivery for them (within "lazy" lost ticks policy). 
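The stimer->config.periodic / .sintx accesses in this hunk depend on the series replacing the old HV_STIMER_* mask macros with a bitfield union over the MSR value. A self-contained sketch of that representation; the exact field layout is an assumption based on the Hyper-V TLFS, so treat it as illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: enable=bit 0, periodic=bit 1, lazy=bit 2,
 * auto_enable=bit 3, apic_vector=bits 4-11, direct_mode=bit 12,
 * sintx=bits 16-19. */
union hv_stimer_config {
	uint64_t as_uint64;
	struct {
		uint64_t enable:1;
		uint64_t periodic:1;
		uint64_t lazy:1;
		uint64_t auto_enable:1;
		uint64_t apic_vector:8;
		uint64_t direct_mode:1;
		uint64_t reserved_z0:3;
		uint64_t sintx:4;
		uint64_t reserved_z1:44;
	};
};

int main(void)
{
	union hv_stimer_config cfg = { .as_uint64 = 0 };

	cfg.enable = 1;	/* was: config |= HV_STIMER_ENABLE */
	cfg.sintx = 2;	/* was: shift/mask via HV_STIMER_SINT() */
	assert(cfg.as_uint64 == ((1ULL << 0) | (2ULL << 16)));
	printf("config = %#llx\n", (unsigned long long)cfg.as_uint64);
	return 0;
}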
+ */ + bool no_retry = stimer->config.periodic; + payload->expiration_time = stimer->exp_time; payload->delivery_time = get_time_ref_counter(vcpu->kvm); return synic_deliver_msg(vcpu_to_synic(vcpu), - HV_STIMER_SINT(stimer->config), msg); + stimer->config.sintx, msg, + no_retry); +} + +static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer) +{ + struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_lapic_irq irq = { + .delivery_mode = APIC_DM_FIXED, + .vector = stimer->config.apic_vector + }; + + return !kvm_apic_set_irq(vcpu, &irq, NULL); } static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) { - int r; + int r, direct = stimer->config.direct_mode; stimer->msg_pending = true; - r = stimer_send_msg(stimer); + if (!direct) + r = stimer_send_msg(stimer); + else + r = stimer_notify_direct(stimer); trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, - stimer->index, r); + stimer->index, direct, r); if (!r) { stimer->msg_pending = false; - if (!(stimer->config & HV_STIMER_PERIODIC)) - stimer->config &= ~HV_STIMER_ENABLE; + if (!(stimer->config.periodic)) + stimer->config.enable = 0; } } @@ -664,7 +674,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { stimer = &hv_vcpu->stimer[i]; - if (stimer->config & HV_STIMER_ENABLE) { + if (stimer->config.enable) { exp_time = stimer->exp_time; if (exp_time) { @@ -674,7 +684,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) stimer_expiration(stimer); } - if ((stimer->config & HV_STIMER_ENABLE) && + if ((stimer->config.enable) && stimer->count) { if (!stimer->msg_pending) stimer_start(stimer); @@ -815,9 +825,9 @@ static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; if (host) - hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY; + hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; - if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) { + if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) { vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", hv->hv_crash_param[0], @@ -1758,3 +1768,124 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args) return kvm_hv_eventfd_deassign(kvm, args->conn_id); return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); } + +int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) +{ + uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu); + struct kvm_cpuid_entry2 cpuid_entries[] = { + { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, + { .function = HYPERV_CPUID_INTERFACE }, + { .function = HYPERV_CPUID_VERSION }, + { .function = HYPERV_CPUID_FEATURES }, + { .function = HYPERV_CPUID_ENLIGHTMENT_INFO }, + { .function = HYPERV_CPUID_IMPLEMENT_LIMITS }, + { .function = HYPERV_CPUID_NESTED_FEATURES }, + }; + int i, nent = ARRAY_SIZE(cpuid_entries); + + /* Skip NESTED_FEATURES if eVMCS is not supported */ + if (!evmcs_ver) + --nent; + + if (cpuid->nent < nent) + return -E2BIG; + + if (cpuid->nent > nent) + cpuid->nent = nent; + + for (i = 0; i < nent; i++) { + struct kvm_cpuid_entry2 *ent = &cpuid_entries[i]; + u32 signature[3]; + + switch (ent->function) { + case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS: + memcpy(signature, "Linux KVM Hv", 12); + + ent->eax = HYPERV_CPUID_NESTED_FEATURES; + ent->ebx = signature[0]; + ent->ecx = signature[1]; + ent->edx = signature[2]; + break; + + 
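The memcpy-into-three-registers idiom in the vendor-signature case above can be sanity-checked in isolation (hypothetical user-space demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t sig[3];

	/* Pack the 12-byte vendor string into ebx/ecx/edx-sized words,
	 * as the VENDOR_AND_MAX_FUNCTIONS leaf does. */
	memcpy(sig, "Linux KVM Hv", 12);
	printf("ebx=%#010x ecx=%#010x edx=%#010x\n",
	       sig[0], sig[1], sig[2]);
	return 0;
}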
case HYPERV_CPUID_INTERFACE: + memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); + ent->eax = signature[0]; + break; + + case HYPERV_CPUID_VERSION: + /* + * We implement some Hyper-V 2016 functions so let's use + * this version. + */ + ent->eax = 0x00003839; + ent->ebx = 0x000A0000; + break; + + case HYPERV_CPUID_FEATURES: + ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE; + ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; + ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE; + ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; + ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE; + ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE; + ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; + ent->eax |= HV_X64_MSR_RESET_AVAILABLE; + ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; + ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE; + ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; + ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; + + ent->ebx |= HV_X64_POST_MESSAGES; + ent->ebx |= HV_X64_SIGNAL_EVENTS; + + ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; + ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; + ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; + + break; + + case HYPERV_CPUID_ENLIGHTMENT_INFO: + ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; + ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; + ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED; + ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; + ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; + ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; + ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; + + /* + * Default number of spinlock retry attempts, matches + * HyperV 2016. + */ + ent->ebx = 0x00000FFF; + + break; + + case HYPERV_CPUID_IMPLEMENT_LIMITS: + /* Maximum number of virtual processors */ + ent->eax = KVM_MAX_VCPUS; + /* + * Maximum number of logical processors, matches + * HyperV 2016. 
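Userspace fetches the leaves this function fills in through the new vCPU ioctl; a sketch assuming the interface lands as KVM_GET_SUPPORTED_HV_CPUID with the usual struct kvm_cpuid2 flexible-array convention (vcpu_fd is an already-open KVM vCPU descriptor):

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void dump_hv_cpuid(int vcpu_fd)
{
	unsigned int i, nent = 8;	/* room for the seven leaves above */
	struct kvm_cpuid2 *c;

	c = calloc(1, sizeof(*c) + nent * sizeof(struct kvm_cpuid_entry2));
	if (!c)
		return;
	c->nent = nent;
	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, c) < 0) {
		perror("KVM_GET_SUPPORTED_HV_CPUID");
	} else {
		for (i = 0; i < c->nent; i++)
			printf("0x%08x: %08x %08x %08x %08x\n",
			       c->entries[i].function,
			       c->entries[i].eax, c->entries[i].ebx,
			       c->entries[i].ecx, c->entries[i].edx);
	}
	free(c);
}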
+ */ + ent->ebx = 64; + + break; + + case HYPERV_CPUID_NESTED_FEATURES: + ent->eax = evmcs_ver; + + break; + + default: + break; + } + } + + if (copy_to_user(entries, cpuid_entries, + nent * sizeof(struct kvm_cpuid_entry2))) + return -EFAULT; + + return 0; +} diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 0e66c12ed2c3..fd7cf13a2144 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -24,6 +24,8 @@ #ifndef __ARCH_X86_KVM_HYPERV_H__ #define __ARCH_X86_KVM_HYPERV_H__ +#include <linux/kvm_host.h> + static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu) { return &vcpu->arch.hyperv; @@ -95,5 +97,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, void kvm_hv_init_vm(struct kvm *kvm); void kvm_hv_destroy_vm(struct kvm *kvm); int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); +int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries); #endif diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 9619dcc2b325..f8f56a93358b 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -2,6 +2,8 @@ #ifndef ASM_KVM_CACHE_REGS_H #define ASM_KVM_CACHE_REGS_H +#include <linux/kvm_host.h> + #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index c4533d05c214..9f089e2e09d0 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -251,10 +251,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) if (enabled != apic->sw_enabled) { apic->sw_enabled = enabled; - if (enabled) { + if (enabled) static_key_slow_dec_deferred(&apic_sw_disabled); - recalculate_apic_map(apic->vcpu->kvm); - } else + else static_key_slow_inc(&apic_sw_disabled.key); } } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7c03c0f35444..ce770b446238 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -264,6 +264,35 @@ static void mmu_spte_set(u64 *sptep, u64 spte); static union kvm_mmu_page_role kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu); + +static inline bool kvm_available_flush_tlb_with_range(void) +{ + return kvm_x86_ops->tlb_remote_flush_with_range; +} + +static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, + struct kvm_tlb_range *range) +{ + int ret = -ENOTSUPP; + + if (range && kvm_x86_ops->tlb_remote_flush_with_range) + ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range); + + if (ret) + kvm_flush_remote_tlbs(kvm); +} + +static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, + u64 start_gfn, u64 pages) +{ + struct kvm_tlb_range range; + + range.start_gfn = start_gfn; + range.pages = pages; + + kvm_flush_remote_tlbs_with_range(kvm, &range); +} + void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value) { BUG_ON((mmio_mask & mmio_value) != mmio_value); @@ -1456,8 +1485,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) { - if (__drop_large_spte(vcpu->kvm, sptep)) - kvm_flush_remote_tlbs(vcpu->kvm); + if (__drop_large_spte(vcpu->kvm, sptep)) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + + kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); + } } /* @@ -1743,10 +1776,12 @@ restart: } } - if (need_flush) - kvm_flush_remote_tlbs(kvm); + if (need_flush && kvm_available_flush_tlb_with_range()) { + 
kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); + return 0; + } - return 0; + return need_flush; } struct slot_rmap_walk_iterator { @@ -1880,9 +1915,9 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { - kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); + return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); } static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, @@ -1925,7 +1960,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); } int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) @@ -2441,7 +2477,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, account_shadowed(vcpu->kvm, sp); if (level == PT_PAGE_TABLE_LEVEL && rmap_write_protect(vcpu, gfn)) - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); if (level > PT_PAGE_TABLE_LEVEL && need_sync) flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); @@ -2561,7 +2597,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, return; drop_parent_pte(child, sptep); - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); } } @@ -2985,8 +3021,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, ret = RET_PF_EMULATE; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } + if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, + KVM_PAGES_PER_HPAGE(level)); if (unlikely(is_mmio_spte(*sptep))) ret = RET_PF_EMULATE; @@ -5586,8 +5624,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; + bool flush_tlb = true; + bool flush = false; int i; + if (kvm_available_flush_tlb_with_range()) + flush_tlb = false; + spin_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); @@ -5599,12 +5642,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) if (start >= end) continue; - slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, - PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL, - start, end - 1, true); + flush |= slot_handle_level_range(kvm, memslot, + kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL, + PT_MAX_HUGEPAGE_LEVEL, start, + end - 1, flush_tlb); } } + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, gfn_start, + gfn_end - gfn_start + 1); + spin_unlock(&kvm->mmu_lock); } @@ -5638,12 +5686,13 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, * spte from present to present (changing the spte from present * to nonpresent will flush all the TLBs immediately), in other * words, the only case we care is mmu_spte_update() where we - * haved checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE + * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE * instead of PT_WRITABLE_MASK, that means it does not depend * on PT_WRITABLE_MASK anymore. 
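The kvm_flush_remote_tlbs_with_range()/kvm_flush_remote_tlbs() pairing used throughout this hunk is an "optional fast path, mandatory fallback" shape. A standalone restatement with hypothetical types, not kvm's actual interface:

#include <stddef.h>

struct tlb_backend {
	/* Optional ranged flush; returns 0 on success, may be NULL. */
	int (*flush_range)(void *ctx, unsigned long start_gfn,
			   unsigned long pages);
	/* Always-available full flush. */
	void (*flush_all)(void *ctx);
};

static void flush(const struct tlb_backend *b, void *ctx,
		  unsigned long start_gfn, unsigned long pages)
{
	/* Any failure of the precise primitive degrades to a full
	 * flush, mirroring kvm_flush_remote_tlbs_with_range(). */
	if (!b->flush_range || b->flush_range(ctx, start_gfn, pages))
		b->flush_all(ctx);
}

static void full_flush(void *ctx)
{
	(void)ctx;	/* would IPI every vCPU here */
}

int main(void)
{
	struct tlb_backend plain = { .flush_range = NULL,
				     .flush_all = full_flush };

	flush(&plain, NULL, 0, 16);	/* no ranged support -> full flush */
	return 0;
}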
*/ if (flush) - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); } static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, @@ -5671,7 +5720,13 @@ restart: !kvm_is_reserved_pfn(pfn) && PageTransCompoundMap(pfn_to_page(pfn))) { pte_list_remove(rmap_head, sptep); - need_tlb_flush = 1; + + if (kvm_available_flush_tlb_with_range()) + kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); + else + need_tlb_flush = 1; + goto restart; } } @@ -5707,7 +5762,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, * dirty_bitmap. */ if (flush) - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); } EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); @@ -5725,7 +5781,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, lockdep_assert_held(&kvm->slots_lock); if (flush) - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); } EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); @@ -5742,7 +5799,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm, /* see kvm_mmu_slot_leaf_clear_dirty */ if (flush) - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); } EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 7cf2185b7eb5..6bdca39829bc 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -894,7 +894,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, + sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); if (!rmap_can_add(vcpu)) break; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cc6467b35a85..307e5bddb6d9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -675,11 +675,6 @@ struct svm_cpu_data { static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); -struct svm_init_data { - int cpu; - int r; -}; - static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) @@ -711,17 +706,17 @@ static u32 svm_msrpm_offset(u32 msr) static inline void clgi(void) { - asm volatile (__ex(SVM_CLGI)); + asm volatile (__ex("clgi")); } static inline void stgi(void) { - asm volatile (__ex(SVM_STGI)); + asm volatile (__ex("stgi")); } static inline void invlpga(unsigned long addr, u32 asid) { - asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid)); + asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr)); } static int get_npt_level(struct kvm_vcpu *vcpu) @@ -1456,10 +1451,11 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; - } else - trace_kvm_write_tsc_offset(vcpu->vcpu_id, - svm->vmcb->control.tsc_offset, - offset); + } + + trace_kvm_write_tsc_offset(vcpu->vcpu_id, + svm->vmcb->control.tsc_offset - g_tsc_offset, + offset); svm->vmcb->control.tsc_offset = offset + g_tsc_offset; @@ -2129,6 +2125,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) goto out; } + svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL); + if (!svm->vcpu.arch.guest_fpu) { + printk(KERN_ERR "kvm: failed to 
allocate vcpu's fpu\n"); + err = -ENOMEM; + goto free_partial_svm; + } + err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; @@ -2188,6 +2191,8 @@ free_page1: uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: + kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); +free_partial_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); @@ -2217,6 +2222,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); + kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); kmem_cache_free(kvm_vcpu_cache, svm); } @@ -2937,6 +2943,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); + + vcpu->arch.mmu = &vcpu->arch.guest_mmu; kvm_init_shadow_mmu(vcpu); vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3; vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3; @@ -2949,6 +2957,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) { + vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } @@ -3275,6 +3284,8 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr dst->event_inj_err = from->event_inj_err; dst->nested_cr3 = from->nested_cr3; dst->virt_ext = from->virt_ext; + dst->pause_filter_count = from->pause_filter_count; + dst->pause_filter_thresh = from->pause_filter_thresh; } static int nested_svm_vmexit(struct vcpu_svm *svm) @@ -3353,6 +3364,11 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) nested_vmcb->control.event_inj = 0; nested_vmcb->control.event_inj_err = 0; + nested_vmcb->control.pause_filter_count = + svm->vmcb->control.pause_filter_count; + nested_vmcb->control.pause_filter_thresh = + svm->vmcb->control.pause_filter_thresh; + /* We always set V_INTR_MASKING and remember the old value in hflags */ if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; @@ -3458,7 +3474,6 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, svm->vcpu.arch.hflags &= ~HF_HIF_MASK; if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) { - kvm_mmu_unload(&svm->vcpu); svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; nested_svm_init_mmu_context(&svm->vcpu); } @@ -3530,6 +3545,11 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; + svm->vmcb->control.pause_filter_count = + nested_vmcb->control.pause_filter_count; + svm->vmcb->control.pause_filter_thresh = + nested_vmcb->control.pause_filter_thresh; + nested_svm_unmap(page); /* Enter Guest-Mode */ @@ -5634,9 +5654,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) /* Enter guest mode */ "push %%" _ASM_AX " \n\t" "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t" - __ex(SVM_VMLOAD) "\n\t" - __ex(SVM_VMRUN) "\n\t" - __ex(SVM_VMSAVE) "\n\t" + __ex("vmload %%" _ASM_AX) "\n\t" + __ex("vmrun %%" _ASM_AX) "\n\t" + __ex("vmsave %%" _ASM_AX) "\n\t" "pop %%" _ASM_AX " \n\t" /* Save guest registers, load host registers */ @@ -5834,6 +5854,13 @@ static bool svm_cpu_has_accelerated_tpr(void) static bool svm_has_emulated_msr(int index) { + switch (index) { + case MSR_IA32_MCG_EXT_CTL: + return false; + default: + break; + } + return true; } @@ -5922,6 
+5949,11 @@ static bool svm_umip_emulated(void) return false; } +static bool svm_pt_supported(void) +{ + return false; +} + static bool svm_has_wbinvd_exit(void) { return true; @@ -7051,6 +7083,12 @@ failed: return ret; } +static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu) +{ + /* Not supported */ + return 0; +} + static int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version) { @@ -7157,6 +7195,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mpx_supported = svm_mpx_supported, .xsaves_supported = svm_xsaves_supported, .umip_emulated = svm_umip_emulated, + .pt_supported = svm_pt_supported, .set_supported_cpuid = svm_set_supported_cpuid, @@ -7189,6 +7228,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mem_enc_unreg_region = svm_unregister_enc_region, .nested_enable_evmcs = nested_enable_evmcs, + .nested_get_evmcs_version = nested_get_evmcs_version, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0659465a745c..705f40ae2532 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1254,24 +1254,26 @@ TRACE_EVENT(kvm_hv_stimer_callback, * Tracepoint for stimer_expiration. */ TRACE_EVENT(kvm_hv_stimer_expiration, - TP_PROTO(int vcpu_id, int timer_index, int msg_send_result), - TP_ARGS(vcpu_id, timer_index, msg_send_result), + TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), + TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), TP_STRUCT__entry( __field(int, vcpu_id) __field(int, timer_index) + __field(int, direct) __field(int, msg_send_result) ), TP_fast_assign( __entry->vcpu_id = vcpu_id; __entry->timer_index = timer_index; + __entry->direct = direct; __entry->msg_send_result = msg_send_result; ), - TP_printk("vcpu_id %d timer %d msg send result %d", + TP_printk("vcpu_id %d timer %d direct %d send result %d", __entry->vcpu_id, __entry->timer_index, - __entry->msg_send_result) + __entry->direct, __entry->msg_send_result) ); /* diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c deleted file mode 100644 index 02edd9960e9d..000000000000 --- a/arch/x86/kvm/vmx.c +++ /dev/null @@ -1,15250 +0,0 @@ -/* - * Kernel-based Virtual Machine driver for Linux - * - * This module enables machines with Intel VT-x extensions to run virtual - * machines without emulation or binary translation. - * - * Copyright (C) 2006 Qumranet, Inc. - * Copyright 2010 Red Hat, Inc. and/or its affiliates. - * - * Authors: - * Avi Kivity <avi@qumranet.com> - * Yaniv Kamay <yaniv@qumranet.com> - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. 
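The svm_pt_supported() and nested_get_evmcs_version() stubs added above follow the kvm_x86_ops pattern: each vendor module publishes capability hooks that generic code gates feature exposure on. A miniature of the shape, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vendor_ops {
	bool (*pt_supported)(void);
	uint16_t (*nested_get_evmcs_version)(void);
};

static bool amd_pt_supported(void) { return false; }	/* no Intel PT */
static uint16_t amd_evmcs_version(void) { return 0; }	/* not supported */

static const struct vendor_ops amd_ops = {
	.pt_supported = amd_pt_supported,
	.nested_get_evmcs_version = amd_evmcs_version,
};

int main(void)
{
	printf("expose INTEL_PT: %d, eVMCS version: %u\n",
	       amd_ops.pt_supported(), amd_ops.nested_get_evmcs_version());
	return 0;
}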
- * - */ - -#include "irq.h" -#include "mmu.h" -#include "cpuid.h" -#include "lapic.h" -#include "hyperv.h" - -#include <linux/kvm_host.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/highmem.h> -#include <linux/sched.h> -#include <linux/moduleparam.h> -#include <linux/mod_devicetable.h> -#include <linux/trace_events.h> -#include <linux/slab.h> -#include <linux/tboot.h> -#include <linux/hrtimer.h> -#include <linux/frame.h> -#include <linux/nospec.h> -#include "kvm_cache_regs.h" -#include "x86.h" - -#include <asm/asm.h> -#include <asm/cpu.h> -#include <asm/io.h> -#include <asm/desc.h> -#include <asm/vmx.h> -#include <asm/virtext.h> -#include <asm/mce.h> -#include <asm/fpu/internal.h> -#include <asm/perf_event.h> -#include <asm/debugreg.h> -#include <asm/kexec.h> -#include <asm/apic.h> -#include <asm/irq_remapping.h> -#include <asm/mmu_context.h> -#include <asm/spec-ctrl.h> -#include <asm/mshyperv.h> - -#include "trace.h" -#include "pmu.h" -#include "vmx_evmcs.h" - -#define __ex(x) __kvm_handle_fault_on_reboot(x) -#define __ex_clear(x, reg) \ - ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg) - -MODULE_AUTHOR("Qumranet"); -MODULE_LICENSE("GPL"); - -static const struct x86_cpu_id vmx_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_VMX), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); - -static bool __read_mostly enable_vpid = 1; -module_param_named(vpid, enable_vpid, bool, 0444); - -static bool __read_mostly enable_vnmi = 1; -module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); - -static bool __read_mostly flexpriority_enabled = 1; -module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); - -static bool __read_mostly enable_ept = 1; -module_param_named(ept, enable_ept, bool, S_IRUGO); - -static bool __read_mostly enable_unrestricted_guest = 1; -module_param_named(unrestricted_guest, - enable_unrestricted_guest, bool, S_IRUGO); - -static bool __read_mostly enable_ept_ad_bits = 1; -module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); - -static bool __read_mostly emulate_invalid_guest_state = true; -module_param(emulate_invalid_guest_state, bool, S_IRUGO); - -static bool __read_mostly fasteoi = 1; -module_param(fasteoi, bool, S_IRUGO); - -static bool __read_mostly enable_apicv = 1; -module_param(enable_apicv, bool, S_IRUGO); - -static bool __read_mostly enable_shadow_vmcs = 1; -module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); -/* - * If nested=1, nested virtualization is supported, i.e., guests may use - * VMX and be a hypervisor for its own guests. If nested=0, guests may not - * use VMX instructions. - */ -static bool __read_mostly nested = 1; -module_param(nested, bool, S_IRUGO); - -static bool __read_mostly nested_early_check = 0; -module_param(nested_early_check, bool, S_IRUGO); - -static u64 __read_mostly host_xss; - -static bool __read_mostly enable_pml = 1; -module_param_named(pml, enable_pml, bool, S_IRUGO); - -#define MSR_TYPE_R 1 -#define MSR_TYPE_W 2 -#define MSR_TYPE_RW 3 - -#define MSR_BITMAP_MODE_X2APIC 1 -#define MSR_BITMAP_MODE_X2APIC_APICV 2 - -#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL - -/* Guest_tsc -> host_tsc conversion requires 64-bit division. 
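The "64-bit division" note above concerns turning a guest TSC deadline into VMX-preemption-timer ticks: the timer counts at the TSC rate shifted right by a CPU-reported exponent, and when TSC scaling is active the guest-to-host delta conversion needs a 64-bit divide. A sketch of the shift half only; the MSR_IA32_VMX_MISC[4:0] semantics are my reading of the SDM, so treat this as an assumption:

#include <stdint.h>
#include <stdio.h>

/* The preemption timer counts down at TSC >> rate. */
static uint64_t tsc_delta_to_timer_ticks(uint64_t delta_tsc,
					 unsigned int rate)
{
	return delta_tsc >> rate;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)
	       tsc_delta_to_timer_ticks(1000000, 5));
	return 0;
}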
*/ -static int __read_mostly cpu_preemption_timer_multi; -static bool __read_mostly enable_preemption_timer = 1; -#ifdef CONFIG_X86_64 -module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); -#endif - -#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) -#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE -#define KVM_VM_CR0_ALWAYS_ON \ - (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ - X86_CR0_WP | X86_CR0_PG | X86_CR0_PE) -#define KVM_CR4_GUEST_OWNED_BITS \ - (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) - -#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE -#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) -#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) - -#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) - -#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 - -/* - * Hyper-V requires all of these, so mark them as supported even though - * they are just treated the same as all-context. - */ -#define VMX_VPID_EXTENT_SUPPORTED_MASK \ - (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ - VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ - VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ - VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) - -/* - * These 2 parameters are used to config the controls for Pause-Loop Exiting: - * ple_gap: upper bound on the amount of time between two successive - * executions of PAUSE in a loop. Also indicate if ple enabled. - * According to test, this time is usually smaller than 128 cycles. - * ple_window: upper bound on the amount of time a guest is allowed to execute - * in a PAUSE loop. Tests indicate that most spinlocks are held for - * less than 2^12 cycles - * Time is measured based on a counter that runs at the same rate as the TSC, - * refer SDM volume 3b section 21.6.13 & 22.1.3. - */ -static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; -module_param(ple_gap, uint, 0444); - -static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; -module_param(ple_window, uint, 0444); - -/* Default doubles per-vcpu window every exit. */ -static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW; -module_param(ple_window_grow, uint, 0444); - -/* Default resets per-vcpu window every exit to ple_window. */ -static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; -module_param(ple_window_shrink, uint, 0444); - -/* Default is to compute the maximum so we can never overflow. 
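The grow/shrink knobs above implement multiplicative adaptation of the per-vCPU PLE window with an overflow-safe clamp. A hedged sketch of that arithmetic, simplified from what kvm actually does:

#include <stdio.h>

/* Grow the window on a PAUSE exit; the widening to 64 bits plus the
 * clamp is what lets the default maximum "never overflow". */
static unsigned int grow_ple_window(unsigned int window,
				    unsigned int grow, unsigned int max)
{
	unsigned long long w = (unsigned long long)window * grow;

	return w > max ? max : (unsigned int)w;
}

int main(void)
{
	unsigned int w = 4096;	/* the default ple_window */

	w = grow_ple_window(w, 2, 1u << 30);
	printf("window after one grow: %u\n", w);
	return 0;
}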
*/ -static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; -module_param(ple_window_max, uint, 0444); - -extern const ulong vmx_return; -extern const ulong vmx_early_consistency_check_return; - -static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); -static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); -static DEFINE_MUTEX(vmx_l1d_flush_mutex); - -/* Storage for pre module init parameter parsing */ -static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; - -static const struct { - const char *option; - bool for_parse; -} vmentry_l1d_param[] = { - [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, - [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, - [VMENTER_L1D_FLUSH_COND] = {"cond", true}, - [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, - [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, - [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, -}; - -#define L1D_CACHE_ORDER 4 -static void *vmx_l1d_flush_pages; - -static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) -{ - struct page *page; - unsigned int i; - - if (!enable_ept) { - l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; - return 0; - } - - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { - u64 msr; - - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); - if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { - l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; - return 0; - } - } - - /* If set to auto use the default l1tf mitigation method */ - if (l1tf == VMENTER_L1D_FLUSH_AUTO) { - switch (l1tf_mitigation) { - case L1TF_MITIGATION_OFF: - l1tf = VMENTER_L1D_FLUSH_NEVER; - break; - case L1TF_MITIGATION_FLUSH_NOWARN: - case L1TF_MITIGATION_FLUSH: - case L1TF_MITIGATION_FLUSH_NOSMT: - l1tf = VMENTER_L1D_FLUSH_COND; - break; - case L1TF_MITIGATION_FULL: - case L1TF_MITIGATION_FULL_FORCE: - l1tf = VMENTER_L1D_FLUSH_ALWAYS; - break; - } - } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { - l1tf = VMENTER_L1D_FLUSH_ALWAYS; - } - - if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && - !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { - page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); - if (!page) - return -ENOMEM; - vmx_l1d_flush_pages = page_address(page); - - /* - * Initialize each page with a different pattern in - * order to protect against KSM in the nested - * virtualization case. - */ - for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { - memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, - PAGE_SIZE); - } - } - - l1tf_vmx_mitigation = l1tf; - - if (l1tf != VMENTER_L1D_FLUSH_NEVER) - static_branch_enable(&vmx_l1d_should_flush); - else - static_branch_disable(&vmx_l1d_should_flush); - - if (l1tf == VMENTER_L1D_FLUSH_COND) - static_branch_enable(&vmx_l1d_flush_cond); - else - static_branch_disable(&vmx_l1d_flush_cond); - return 0; -} - -static int vmentry_l1d_flush_parse(const char *s) -{ - unsigned int i; - - if (s) { - for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { - if (vmentry_l1d_param[i].for_parse && - sysfs_streq(s, vmentry_l1d_param[i].option)) - return i; - } - } - return -EINVAL; -} - -static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) -{ - int l1tf, ret; - - l1tf = vmentry_l1d_flush_parse(s); - if (l1tf < 0) - return l1tf; - - if (!boot_cpu_has(X86_BUG_L1TF)) - return 0; - - /* - * Has vmx_init() run already? If not then this is the pre init - * parameter parsing. In that case just store the value and let - * vmx_init() do the proper setup after enable_ept has been - * established. 
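Because the parameter is registered via module_param_cb(..., 0644), the set handler above also runs after boot, so the mitigation mode can be changed at runtime. A small sketch, assuming the module is loaded as kvm_intel (which owns this parameter):

#include <stdio.h>

int main(void)
{
	const char *p =
		"/sys/module/kvm_intel/parameters/vmentry_l1d_flush";
	FILE *f = fopen(p, "w");

	if (!f) {
		perror(p);
		return 1;
	}
	/* One of: auto, never, cond, always -- parsed by
	 * vmentry_l1d_flush_parse() above. */
	fputs("cond\n", f);
	return fclose(f) ? 1 : 0;
}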
- */ - if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { - vmentry_l1d_flush_param = l1tf; - return 0; - } - - mutex_lock(&vmx_l1d_flush_mutex); - ret = vmx_setup_l1d_flush(l1tf); - mutex_unlock(&vmx_l1d_flush_mutex); - return ret; -} - -static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) -{ - if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) - return sprintf(s, "???\n"); - - return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); -} - -static const struct kernel_param_ops vmentry_l1d_flush_ops = { - .set = vmentry_l1d_flush_set, - .get = vmentry_l1d_flush_get, -}; -module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); - -enum ept_pointers_status { - EPT_POINTERS_CHECK = 0, - EPT_POINTERS_MATCH = 1, - EPT_POINTERS_MISMATCH = 2 -}; - -struct kvm_vmx { - struct kvm kvm; - - unsigned int tss_addr; - bool ept_identity_pagetable_done; - gpa_t ept_identity_map_addr; - - enum ept_pointers_status ept_pointers_match; - spinlock_t ept_pointer_lock; -}; - -#define NR_AUTOLOAD_MSRS 8 - -struct vmcs_hdr { - u32 revision_id:31; - u32 shadow_vmcs:1; -}; - -struct vmcs { - struct vmcs_hdr hdr; - u32 abort; - char data[0]; -}; - -/* - * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT - * and whose values change infrequently, but are not constant. I.e. this is - * used as a write-through cache of the corresponding VMCS fields. - */ -struct vmcs_host_state { - unsigned long cr3; /* May not match real cr3 */ - unsigned long cr4; /* May not match real cr4 */ - unsigned long gs_base; - unsigned long fs_base; - - u16 fs_sel, gs_sel, ldt_sel; -#ifdef CONFIG_X86_64 - u16 ds_sel, es_sel; -#endif -}; - -/* - * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also - * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs - * loaded on this CPU (so we can clear them if the CPU goes down). - */ -struct loaded_vmcs { - struct vmcs *vmcs; - struct vmcs *shadow_vmcs; - int cpu; - bool launched; - bool nmi_known_unmasked; - bool hv_timer_armed; - /* Support for vnmi-less CPUs */ - int soft_vnmi_blocked; - ktime_t entry_time; - s64 vnmi_blocked_time; - unsigned long *msr_bitmap; - struct list_head loaded_vmcss_on_cpu_link; - struct vmcs_host_state host_state; -}; - -struct shared_msr_entry { - unsigned index; - u64 data; - u64 mask; -}; - -/* - * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a - * single nested guest (L2), hence the name vmcs12. Any VMX implementation has - * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is - * stored in guest memory specified by VMPTRLD, but is opaque to the guest, - * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. - * More than one of these structures may exist, if L1 runs multiple L2 guests. - * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the - * underlying hardware which will be used to run L2. - * This structure is packed to ensure that its layout is identical across - * machines (necessary for live migration). - * - * IMPORTANT: Changing the layout of existing fields in this structure - * will break save/restore compatibility with older kvm releases. When - * adding new fields, either use space in the reserved padding* arrays - * or add the new fields to the end of the structure. - */ -typedef u64 natural_width; -struct __packed vmcs12 { - /* According to the Intel spec, a VMCS region must start with the - * following two fields. 
Then follow implementation-specific data. - */ - struct vmcs_hdr hdr; - u32 abort; - - u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ - u32 padding[7]; /* room for future expansion */ - - u64 io_bitmap_a; - u64 io_bitmap_b; - u64 msr_bitmap; - u64 vm_exit_msr_store_addr; - u64 vm_exit_msr_load_addr; - u64 vm_entry_msr_load_addr; - u64 tsc_offset; - u64 virtual_apic_page_addr; - u64 apic_access_addr; - u64 posted_intr_desc_addr; - u64 ept_pointer; - u64 eoi_exit_bitmap0; - u64 eoi_exit_bitmap1; - u64 eoi_exit_bitmap2; - u64 eoi_exit_bitmap3; - u64 xss_exit_bitmap; - u64 guest_physical_address; - u64 vmcs_link_pointer; - u64 guest_ia32_debugctl; - u64 guest_ia32_pat; - u64 guest_ia32_efer; - u64 guest_ia32_perf_global_ctrl; - u64 guest_pdptr0; - u64 guest_pdptr1; - u64 guest_pdptr2; - u64 guest_pdptr3; - u64 guest_bndcfgs; - u64 host_ia32_pat; - u64 host_ia32_efer; - u64 host_ia32_perf_global_ctrl; - u64 vmread_bitmap; - u64 vmwrite_bitmap; - u64 vm_function_control; - u64 eptp_list_address; - u64 pml_address; - u64 padding64[3]; /* room for future expansion */ - /* - * To allow migration of L1 (complete with its L2 guests) between - * machines of different natural widths (32 or 64 bit), we cannot have - * unsigned long fields with no explict size. We use u64 (aliased - * natural_width) instead. Luckily, x86 is little-endian. - */ - natural_width cr0_guest_host_mask; - natural_width cr4_guest_host_mask; - natural_width cr0_read_shadow; - natural_width cr4_read_shadow; - natural_width cr3_target_value0; - natural_width cr3_target_value1; - natural_width cr3_target_value2; - natural_width cr3_target_value3; - natural_width exit_qualification; - natural_width guest_linear_address; - natural_width guest_cr0; - natural_width guest_cr3; - natural_width guest_cr4; - natural_width guest_es_base; - natural_width guest_cs_base; - natural_width guest_ss_base; - natural_width guest_ds_base; - natural_width guest_fs_base; - natural_width guest_gs_base; - natural_width guest_ldtr_base; - natural_width guest_tr_base; - natural_width guest_gdtr_base; - natural_width guest_idtr_base; - natural_width guest_dr7; - natural_width guest_rsp; - natural_width guest_rip; - natural_width guest_rflags; - natural_width guest_pending_dbg_exceptions; - natural_width guest_sysenter_esp; - natural_width guest_sysenter_eip; - natural_width host_cr0; - natural_width host_cr3; - natural_width host_cr4; - natural_width host_fs_base; - natural_width host_gs_base; - natural_width host_tr_base; - natural_width host_gdtr_base; - natural_width host_idtr_base; - natural_width host_ia32_sysenter_esp; - natural_width host_ia32_sysenter_eip; - natural_width host_rsp; - natural_width host_rip; - natural_width paddingl[8]; /* room for future expansion */ - u32 pin_based_vm_exec_control; - u32 cpu_based_vm_exec_control; - u32 exception_bitmap; - u32 page_fault_error_code_mask; - u32 page_fault_error_code_match; - u32 cr3_target_count; - u32 vm_exit_controls; - u32 vm_exit_msr_store_count; - u32 vm_exit_msr_load_count; - u32 vm_entry_controls; - u32 vm_entry_msr_load_count; - u32 vm_entry_intr_info_field; - u32 vm_entry_exception_error_code; - u32 vm_entry_instruction_len; - u32 tpr_threshold; - u32 secondary_vm_exec_control; - u32 vm_instruction_error; - u32 vm_exit_reason; - u32 vm_exit_intr_info; - u32 vm_exit_intr_error_code; - u32 idt_vectoring_info_field; - u32 idt_vectoring_error_code; - u32 vm_exit_instruction_len; - u32 vmx_instruction_info; - u32 guest_es_limit; - u32 guest_cs_limit; - u32 guest_ss_limit; - u32 
guest_ds_limit; - u32 guest_fs_limit; - u32 guest_gs_limit; - u32 guest_ldtr_limit; - u32 guest_tr_limit; - u32 guest_gdtr_limit; - u32 guest_idtr_limit; - u32 guest_es_ar_bytes; - u32 guest_cs_ar_bytes; - u32 guest_ss_ar_bytes; - u32 guest_ds_ar_bytes; - u32 guest_fs_ar_bytes; - u32 guest_gs_ar_bytes; - u32 guest_ldtr_ar_bytes; - u32 guest_tr_ar_bytes; - u32 guest_interruptibility_info; - u32 guest_activity_state; - u32 guest_sysenter_cs; - u32 host_ia32_sysenter_cs; - u32 vmx_preemption_timer_value; - u32 padding32[7]; /* room for future expansion */ - u16 virtual_processor_id; - u16 posted_intr_nv; - u16 guest_es_selector; - u16 guest_cs_selector; - u16 guest_ss_selector; - u16 guest_ds_selector; - u16 guest_fs_selector; - u16 guest_gs_selector; - u16 guest_ldtr_selector; - u16 guest_tr_selector; - u16 guest_intr_status; - u16 host_es_selector; - u16 host_cs_selector; - u16 host_ss_selector; - u16 host_ds_selector; - u16 host_fs_selector; - u16 host_gs_selector; - u16 host_tr_selector; - u16 guest_pml_index; -}; - -/* - * For save/restore compatibility, the vmcs12 field offsets must not change. - */ -#define CHECK_OFFSET(field, loc) \ - BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \ - "Offset of " #field " in struct vmcs12 has changed.") - -static inline void vmx_check_vmcs12_offsets(void) { - CHECK_OFFSET(hdr, 0); - CHECK_OFFSET(abort, 4); - CHECK_OFFSET(launch_state, 8); - CHECK_OFFSET(io_bitmap_a, 40); - CHECK_OFFSET(io_bitmap_b, 48); - CHECK_OFFSET(msr_bitmap, 56); - CHECK_OFFSET(vm_exit_msr_store_addr, 64); - CHECK_OFFSET(vm_exit_msr_load_addr, 72); - CHECK_OFFSET(vm_entry_msr_load_addr, 80); - CHECK_OFFSET(tsc_offset, 88); - CHECK_OFFSET(virtual_apic_page_addr, 96); - CHECK_OFFSET(apic_access_addr, 104); - CHECK_OFFSET(posted_intr_desc_addr, 112); - CHECK_OFFSET(ept_pointer, 120); - CHECK_OFFSET(eoi_exit_bitmap0, 128); - CHECK_OFFSET(eoi_exit_bitmap1, 136); - CHECK_OFFSET(eoi_exit_bitmap2, 144); - CHECK_OFFSET(eoi_exit_bitmap3, 152); - CHECK_OFFSET(xss_exit_bitmap, 160); - CHECK_OFFSET(guest_physical_address, 168); - CHECK_OFFSET(vmcs_link_pointer, 176); - CHECK_OFFSET(guest_ia32_debugctl, 184); - CHECK_OFFSET(guest_ia32_pat, 192); - CHECK_OFFSET(guest_ia32_efer, 200); - CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208); - CHECK_OFFSET(guest_pdptr0, 216); - CHECK_OFFSET(guest_pdptr1, 224); - CHECK_OFFSET(guest_pdptr2, 232); - CHECK_OFFSET(guest_pdptr3, 240); - CHECK_OFFSET(guest_bndcfgs, 248); - CHECK_OFFSET(host_ia32_pat, 256); - CHECK_OFFSET(host_ia32_efer, 264); - CHECK_OFFSET(host_ia32_perf_global_ctrl, 272); - CHECK_OFFSET(vmread_bitmap, 280); - CHECK_OFFSET(vmwrite_bitmap, 288); - CHECK_OFFSET(vm_function_control, 296); - CHECK_OFFSET(eptp_list_address, 304); - CHECK_OFFSET(pml_address, 312); - CHECK_OFFSET(cr0_guest_host_mask, 344); - CHECK_OFFSET(cr4_guest_host_mask, 352); - CHECK_OFFSET(cr0_read_shadow, 360); - CHECK_OFFSET(cr4_read_shadow, 368); - CHECK_OFFSET(cr3_target_value0, 376); - CHECK_OFFSET(cr3_target_value1, 384); - CHECK_OFFSET(cr3_target_value2, 392); - CHECK_OFFSET(cr3_target_value3, 400); - CHECK_OFFSET(exit_qualification, 408); - CHECK_OFFSET(guest_linear_address, 416); - CHECK_OFFSET(guest_cr0, 424); - CHECK_OFFSET(guest_cr3, 432); - CHECK_OFFSET(guest_cr4, 440); - CHECK_OFFSET(guest_es_base, 448); - CHECK_OFFSET(guest_cs_base, 456); - CHECK_OFFSET(guest_ss_base, 464); - CHECK_OFFSET(guest_ds_base, 472); - CHECK_OFFSET(guest_fs_base, 480); - CHECK_OFFSET(guest_gs_base, 488); - CHECK_OFFSET(guest_ldtr_base, 496); - CHECK_OFFSET(guest_tr_base, 
504); - CHECK_OFFSET(guest_gdtr_base, 512); - CHECK_OFFSET(guest_idtr_base, 520); - CHECK_OFFSET(guest_dr7, 528); - CHECK_OFFSET(guest_rsp, 536); - CHECK_OFFSET(guest_rip, 544); - CHECK_OFFSET(guest_rflags, 552); - CHECK_OFFSET(guest_pending_dbg_exceptions, 560); - CHECK_OFFSET(guest_sysenter_esp, 568); - CHECK_OFFSET(guest_sysenter_eip, 576); - CHECK_OFFSET(host_cr0, 584); - CHECK_OFFSET(host_cr3, 592); - CHECK_OFFSET(host_cr4, 600); - CHECK_OFFSET(host_fs_base, 608); - CHECK_OFFSET(host_gs_base, 616); - CHECK_OFFSET(host_tr_base, 624); - CHECK_OFFSET(host_gdtr_base, 632); - CHECK_OFFSET(host_idtr_base, 640); - CHECK_OFFSET(host_ia32_sysenter_esp, 648); - CHECK_OFFSET(host_ia32_sysenter_eip, 656); - CHECK_OFFSET(host_rsp, 664); - CHECK_OFFSET(host_rip, 672); - CHECK_OFFSET(pin_based_vm_exec_control, 744); - CHECK_OFFSET(cpu_based_vm_exec_control, 748); - CHECK_OFFSET(exception_bitmap, 752); - CHECK_OFFSET(page_fault_error_code_mask, 756); - CHECK_OFFSET(page_fault_error_code_match, 760); - CHECK_OFFSET(cr3_target_count, 764); - CHECK_OFFSET(vm_exit_controls, 768); - CHECK_OFFSET(vm_exit_msr_store_count, 772); - CHECK_OFFSET(vm_exit_msr_load_count, 776); - CHECK_OFFSET(vm_entry_controls, 780); - CHECK_OFFSET(vm_entry_msr_load_count, 784); - CHECK_OFFSET(vm_entry_intr_info_field, 788); - CHECK_OFFSET(vm_entry_exception_error_code, 792); - CHECK_OFFSET(vm_entry_instruction_len, 796); - CHECK_OFFSET(tpr_threshold, 800); - CHECK_OFFSET(secondary_vm_exec_control, 804); - CHECK_OFFSET(vm_instruction_error, 808); - CHECK_OFFSET(vm_exit_reason, 812); - CHECK_OFFSET(vm_exit_intr_info, 816); - CHECK_OFFSET(vm_exit_intr_error_code, 820); - CHECK_OFFSET(idt_vectoring_info_field, 824); - CHECK_OFFSET(idt_vectoring_error_code, 828); - CHECK_OFFSET(vm_exit_instruction_len, 832); - CHECK_OFFSET(vmx_instruction_info, 836); - CHECK_OFFSET(guest_es_limit, 840); - CHECK_OFFSET(guest_cs_limit, 844); - CHECK_OFFSET(guest_ss_limit, 848); - CHECK_OFFSET(guest_ds_limit, 852); - CHECK_OFFSET(guest_fs_limit, 856); - CHECK_OFFSET(guest_gs_limit, 860); - CHECK_OFFSET(guest_ldtr_limit, 864); - CHECK_OFFSET(guest_tr_limit, 868); - CHECK_OFFSET(guest_gdtr_limit, 872); - CHECK_OFFSET(guest_idtr_limit, 876); - CHECK_OFFSET(guest_es_ar_bytes, 880); - CHECK_OFFSET(guest_cs_ar_bytes, 884); - CHECK_OFFSET(guest_ss_ar_bytes, 888); - CHECK_OFFSET(guest_ds_ar_bytes, 892); - CHECK_OFFSET(guest_fs_ar_bytes, 896); - CHECK_OFFSET(guest_gs_ar_bytes, 900); - CHECK_OFFSET(guest_ldtr_ar_bytes, 904); - CHECK_OFFSET(guest_tr_ar_bytes, 908); - CHECK_OFFSET(guest_interruptibility_info, 912); - CHECK_OFFSET(guest_activity_state, 916); - CHECK_OFFSET(guest_sysenter_cs, 920); - CHECK_OFFSET(host_ia32_sysenter_cs, 924); - CHECK_OFFSET(vmx_preemption_timer_value, 928); - CHECK_OFFSET(virtual_processor_id, 960); - CHECK_OFFSET(posted_intr_nv, 962); - CHECK_OFFSET(guest_es_selector, 964); - CHECK_OFFSET(guest_cs_selector, 966); - CHECK_OFFSET(guest_ss_selector, 968); - CHECK_OFFSET(guest_ds_selector, 970); - CHECK_OFFSET(guest_fs_selector, 972); - CHECK_OFFSET(guest_gs_selector, 974); - CHECK_OFFSET(guest_ldtr_selector, 976); - CHECK_OFFSET(guest_tr_selector, 978); - CHECK_OFFSET(guest_intr_status, 980); - CHECK_OFFSET(host_es_selector, 982); - CHECK_OFFSET(host_cs_selector, 984); - CHECK_OFFSET(host_ss_selector, 986); - CHECK_OFFSET(host_ds_selector, 988); - CHECK_OFFSET(host_fs_selector, 990); - CHECK_OFFSET(host_gs_selector, 992); - CHECK_OFFSET(host_tr_selector, 994); - CHECK_OFFSET(guest_pml_index, 996); -} - -/* - * VMCS12_REVISION is an 
arbitrary id that should be changed if the content or - * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and - * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. - * - * IMPORTANT: Changing this value will break save/restore compatibility with - * older kvm releases. - */ -#define VMCS12_REVISION 0x11e57ed0 - -/* - * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region - * and any VMCS region. Although only sizeof(struct vmcs12) are used by the - * current implementation, 4K are reserved to avoid future complications. - */ -#define VMCS12_SIZE 0x1000 - -/* - * VMCS12_MAX_FIELD_INDEX is the highest index value used in any - * supported VMCS12 field encoding. - */ -#define VMCS12_MAX_FIELD_INDEX 0x17 - -struct nested_vmx_msrs { - /* - * We only store the "true" versions of the VMX capability MSRs. We - * generate the "non-true" versions by setting the must-be-1 bits - * according to the SDM. - */ - u32 procbased_ctls_low; - u32 procbased_ctls_high; - u32 secondary_ctls_low; - u32 secondary_ctls_high; - u32 pinbased_ctls_low; - u32 pinbased_ctls_high; - u32 exit_ctls_low; - u32 exit_ctls_high; - u32 entry_ctls_low; - u32 entry_ctls_high; - u32 misc_low; - u32 misc_high; - u32 ept_caps; - u32 vpid_caps; - u64 basic; - u64 cr0_fixed0; - u64 cr0_fixed1; - u64 cr4_fixed0; - u64 cr4_fixed1; - u64 vmcs_enum; - u64 vmfunc_controls; -}; - -/* - * The nested_vmx structure is part of vcpu_vmx, and holds information we need - * for correct emulation of VMX (i.e., nested VMX) on this vcpu. - */ -struct nested_vmx { - /* Has the level1 guest done vmxon? */ - bool vmxon; - gpa_t vmxon_ptr; - bool pml_full; - - /* The guest-physical address of the current VMCS L1 keeps for L2 */ - gpa_t current_vmptr; - /* - * Cache of the guest's VMCS, existing outside of guest memory. - * Loaded from guest memory during VMPTRLD. Flushed to guest - * memory during VMCLEAR and VMPTRLD. - */ - struct vmcs12 *cached_vmcs12; - /* - * Cache of the guest's shadow VMCS, existing outside of guest - * memory. Loaded from guest memory during VM entry. Flushed - * to guest memory during VM exit. - */ - struct vmcs12 *cached_shadow_vmcs12; - /* - * Indicates if the shadow vmcs or enlightened vmcs must be updated - * with the data held by struct vmcs12. - */ - bool need_vmcs12_sync; - bool dirty_vmcs12; - - /* - * vmcs02 has been initialized, i.e. state that is constant for - * vmcs02 has been written to the backing VMCS. Initialization - * is delayed until L1 actually attempts to run a nested VM. - */ - bool vmcs02_initialized; - - bool change_vmcs01_virtual_apic_mode; - - /* - * Enlightened VMCS has been enabled. It does not mean that L1 has to - * use it. However, VMX features available to L1 will be limited based - * on what the enlightened VMCS supports. - */ - bool enlightened_vmcs_enabled; - - /* L2 must run next, and mustn't decide to exit to L1. */ - bool nested_run_pending; - - struct loaded_vmcs vmcs02; - - /* - * Guest pages referred to in the vmcs02 with host-physical - * pointers, so we must keep them pinned while L2 runs. 
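[Editor's note: the VMPTRLD path that fills cached_vmcs12 above first validates the revision id. A minimal sketch of that check, assuming the vcpu/vmptr context of the surrounding VMPTRLD handler; the real handler also validates pointer alignment and reads through the guest page cache rather than a raw kvm_read_guest():]

    struct vmcs_hdr hdr;

    /* Reject a vmcs12 image whose header carries the wrong revision. */
    if (kvm_read_guest(vcpu->kvm, vmptr, &hdr, sizeof(hdr)) ||
        hdr.revision_id != VMCS12_REVISION)
            return nested_vmx_failValid(vcpu,
                    VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
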
- */ - struct page *apic_access_page; - struct page *virtual_apic_page; - struct page *pi_desc_page; - struct pi_desc *pi_desc; - bool pi_pending; - u16 posted_intr_nv; - - struct hrtimer preemption_timer; - bool preemption_timer_expired; - - /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ - u64 vmcs01_debugctl; - u64 vmcs01_guest_bndcfgs; - - u16 vpid02; - u16 last_vpid; - - struct nested_vmx_msrs msrs; - - /* SMM related state */ - struct { - /* in VMX operation on SMM entry? */ - bool vmxon; - /* in guest mode on SMM entry? */ - bool guest_mode; - } smm; - - gpa_t hv_evmcs_vmptr; - struct page *hv_evmcs_page; - struct hv_enlightened_vmcs *hv_evmcs; -}; - -#define POSTED_INTR_ON 0 -#define POSTED_INTR_SN 1 - -/* Posted-Interrupt Descriptor */ -struct pi_desc { - u32 pir[8]; /* Posted interrupt requested */ - union { - struct { - /* bit 256 - Outstanding Notification */ - u16 on : 1, - /* bit 257 - Suppress Notification */ - sn : 1, - /* bit 271:258 - Reserved */ - rsvd_1 : 14; - /* bit 279:272 - Notification Vector */ - u8 nv; - /* bit 287:280 - Reserved */ - u8 rsvd_2; - /* bit 319:288 - Notification Destination */ - u32 ndst; - }; - u64 control; - }; - u32 rsvd[6]; -} __aligned(64); - -static bool pi_test_and_set_on(struct pi_desc *pi_desc) -{ - return test_and_set_bit(POSTED_INTR_ON, - (unsigned long *)&pi_desc->control); -} - -static bool pi_test_and_clear_on(struct pi_desc *pi_desc) -{ - return test_and_clear_bit(POSTED_INTR_ON, - (unsigned long *)&pi_desc->control); -} - -static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) -{ - return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); -} - -static inline void pi_clear_sn(struct pi_desc *pi_desc) -{ - return clear_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); -} - -static inline void pi_set_sn(struct pi_desc *pi_desc) -{ - return set_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); -} - -static inline void pi_clear_on(struct pi_desc *pi_desc) -{ - clear_bit(POSTED_INTR_ON, - (unsigned long *)&pi_desc->control); -} - -static inline int pi_test_on(struct pi_desc *pi_desc) -{ - return test_bit(POSTED_INTR_ON, - (unsigned long *)&pi_desc->control); -} - -static inline int pi_test_sn(struct pi_desc *pi_desc) -{ - return test_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); -} - -struct vmx_msrs { - unsigned int nr; - struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; -}; - -struct vcpu_vmx { - struct kvm_vcpu vcpu; - unsigned long host_rsp; - u8 fail; - u8 msr_bitmap_mode; - u32 exit_intr_info; - u32 idt_vectoring_info; - ulong rflags; - struct shared_msr_entry *guest_msrs; - int nmsrs; - int save_nmsrs; - bool guest_msrs_dirty; - unsigned long host_idt_base; -#ifdef CONFIG_X86_64 - u64 msr_host_kernel_gs_base; - u64 msr_guest_kernel_gs_base; -#endif - - u64 arch_capabilities; - u64 spec_ctrl; - - u32 vm_entry_controls_shadow; - u32 vm_exit_controls_shadow; - u32 secondary_exec_control; - - /* - * loaded_vmcs points to the VMCS currently used in this vcpu. For a - * non-nested (L1) guest, it always points to vmcs01. For a nested - * guest (L2), it points to a different VMCS. loaded_cpu_state points - * to the VMCS whose state is loaded into the CPU registers that only - * need to be switched when transitioning to/from the kernel; a NULL - * value indicates that host state is loaded. 
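[Editor's note: the ON/PIR helpers just above implement the posted-interrupt handshake. A simplified sketch of the delivery side, mirroring what vmx_deliver_posted_interrupt() does; the direct IPI is illustrative, the real path goes through kvm_vcpu_trigger_posted_interrupt() with a kick fallback for a non-running vcpu:]

    static void post_interrupt(struct vcpu_vmx *vmx, int vector)
    {
            struct pi_desc *pi = &vmx->pi_desc;

            /* Record the vector; if already pending, nothing more to do. */
            if (pi_test_and_set_pir(vector, pi))
                    return;

            /*
             * Notify only when ON was previously clear; if ON is already
             * set, the target CPU will pick up the new PIR bit while
             * handling the earlier notification.
             */
            if (!pi_test_and_set_on(pi))
                    apic->send_IPI_mask(cpumask_of(vmx->vcpu.cpu),
                                        POSTED_INTR_VECTOR);
    }
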
- */ - struct loaded_vmcs vmcs01; - struct loaded_vmcs *loaded_vmcs; - struct loaded_vmcs *loaded_cpu_state; - bool __launched; /* temporary, used in vmx_vcpu_run */ - struct msr_autoload { - struct vmx_msrs guest; - struct vmx_msrs host; - } msr_autoload; - - struct { - int vm86_active; - ulong save_rflags; - struct kvm_segment segs[8]; - } rmode; - struct { - u32 bitmask; /* 4 bits per segment (1 bit per field) */ - struct kvm_save_segment { - u16 selector; - unsigned long base; - u32 limit; - u32 ar; - } seg[8]; - } segment_cache; - int vpid; - bool emulation_required; - - u32 exit_reason; - - /* Posted interrupt descriptor */ - struct pi_desc pi_desc; - - /* Support for a guest hypervisor (nested VMX) */ - struct nested_vmx nested; - - /* Dynamic PLE window. */ - int ple_window; - bool ple_window_dirty; - - bool req_immediate_exit; - - /* Support for PML */ -#define PML_ENTITY_NUM 512 - struct page *pml_pg; - - /* apic deadline value in host tsc */ - u64 hv_deadline_tsc; - - u64 current_tsc_ratio; - - u32 host_pkru; - - unsigned long host_debugctlmsr; - - /* - * Only bits masked by msr_ia32_feature_control_valid_bits can be set in - * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included - * in msr_ia32_feature_control_valid_bits. - */ - u64 msr_ia32_feature_control; - u64 msr_ia32_feature_control_valid_bits; - u64 ept_pointer; -}; - -enum segment_cache_field { - SEG_FIELD_SEL = 0, - SEG_FIELD_BASE = 1, - SEG_FIELD_LIMIT = 2, - SEG_FIELD_AR = 3, - - SEG_FIELD_NR = 4 -}; - -static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm) -{ - return container_of(kvm, struct kvm_vmx, kvm); -} - -static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) -{ - return container_of(vcpu, struct vcpu_vmx, vcpu); -} - -static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) -{ - return &(to_vmx(vcpu)->pi_desc); -} - -#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) -#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) -#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name) -#define FIELD64(number, name) \ - FIELD(number, name), \ - [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32) - - -static u16 shadow_read_only_fields[] = { -#define SHADOW_FIELD_RO(x) x, -#include "vmx_shadow_fields.h" -}; -static int max_shadow_read_only_fields = - ARRAY_SIZE(shadow_read_only_fields); - -static u16 shadow_read_write_fields[] = { -#define SHADOW_FIELD_RW(x) x, -#include "vmx_shadow_fields.h" -}; -static int max_shadow_read_write_fields = - ARRAY_SIZE(shadow_read_write_fields); - -static const unsigned short vmcs_field_to_offset_table[] = { - FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), - FIELD(POSTED_INTR_NV, posted_intr_nv), - FIELD(GUEST_ES_SELECTOR, guest_es_selector), - FIELD(GUEST_CS_SELECTOR, guest_cs_selector), - FIELD(GUEST_SS_SELECTOR, guest_ss_selector), - FIELD(GUEST_DS_SELECTOR, guest_ds_selector), - FIELD(GUEST_FS_SELECTOR, guest_fs_selector), - FIELD(GUEST_GS_SELECTOR, guest_gs_selector), - FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), - FIELD(GUEST_TR_SELECTOR, guest_tr_selector), - FIELD(GUEST_INTR_STATUS, guest_intr_status), - FIELD(GUEST_PML_INDEX, guest_pml_index), - FIELD(HOST_ES_SELECTOR, host_es_selector), - FIELD(HOST_CS_SELECTOR, host_cs_selector), - FIELD(HOST_SS_SELECTOR, host_ss_selector), - FIELD(HOST_DS_SELECTOR, host_ds_selector), - FIELD(HOST_FS_SELECTOR, host_fs_selector), - FIELD(HOST_GS_SELECTOR, host_gs_selector), - FIELD(HOST_TR_SELECTOR, host_tr_selector), - FIELD64(IO_BITMAP_A, io_bitmap_a), 
- FIELD64(IO_BITMAP_B, io_bitmap_b), - FIELD64(MSR_BITMAP, msr_bitmap), - FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), - FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), - FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), - FIELD64(PML_ADDRESS, pml_address), - FIELD64(TSC_OFFSET, tsc_offset), - FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), - FIELD64(APIC_ACCESS_ADDR, apic_access_addr), - FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr), - FIELD64(VM_FUNCTION_CONTROL, vm_function_control), - FIELD64(EPT_POINTER, ept_pointer), - FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0), - FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1), - FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), - FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), - FIELD64(EPTP_LIST_ADDRESS, eptp_list_address), - FIELD64(VMREAD_BITMAP, vmread_bitmap), - FIELD64(VMWRITE_BITMAP, vmwrite_bitmap), - FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), - FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), - FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), - FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), - FIELD64(GUEST_IA32_PAT, guest_ia32_pat), - FIELD64(GUEST_IA32_EFER, guest_ia32_efer), - FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), - FIELD64(GUEST_PDPTR0, guest_pdptr0), - FIELD64(GUEST_PDPTR1, guest_pdptr1), - FIELD64(GUEST_PDPTR2, guest_pdptr2), - FIELD64(GUEST_PDPTR3, guest_pdptr3), - FIELD64(GUEST_BNDCFGS, guest_bndcfgs), - FIELD64(HOST_IA32_PAT, host_ia32_pat), - FIELD64(HOST_IA32_EFER, host_ia32_efer), - FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), - FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), - FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), - FIELD(EXCEPTION_BITMAP, exception_bitmap), - FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), - FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), - FIELD(CR3_TARGET_COUNT, cr3_target_count), - FIELD(VM_EXIT_CONTROLS, vm_exit_controls), - FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), - FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), - FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), - FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), - FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), - FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), - FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), - FIELD(TPR_THRESHOLD, tpr_threshold), - FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), - FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), - FIELD(VM_EXIT_REASON, vm_exit_reason), - FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), - FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), - FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), - FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), - FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), - FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), - FIELD(GUEST_ES_LIMIT, guest_es_limit), - FIELD(GUEST_CS_LIMIT, guest_cs_limit), - FIELD(GUEST_SS_LIMIT, guest_ss_limit), - FIELD(GUEST_DS_LIMIT, guest_ds_limit), - FIELD(GUEST_FS_LIMIT, guest_fs_limit), - FIELD(GUEST_GS_LIMIT, guest_gs_limit), - FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), - FIELD(GUEST_TR_LIMIT, guest_tr_limit), - FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), - FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), - FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), - FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), - FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), - FIELD(GUEST_DS_AR_BYTES, 
guest_ds_ar_bytes), - FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), - FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), - FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), - FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), - FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), - FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), - FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), - FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), - FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), - FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), - FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), - FIELD(CR0_READ_SHADOW, cr0_read_shadow), - FIELD(CR4_READ_SHADOW, cr4_read_shadow), - FIELD(CR3_TARGET_VALUE0, cr3_target_value0), - FIELD(CR3_TARGET_VALUE1, cr3_target_value1), - FIELD(CR3_TARGET_VALUE2, cr3_target_value2), - FIELD(CR3_TARGET_VALUE3, cr3_target_value3), - FIELD(EXIT_QUALIFICATION, exit_qualification), - FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), - FIELD(GUEST_CR0, guest_cr0), - FIELD(GUEST_CR3, guest_cr3), - FIELD(GUEST_CR4, guest_cr4), - FIELD(GUEST_ES_BASE, guest_es_base), - FIELD(GUEST_CS_BASE, guest_cs_base), - FIELD(GUEST_SS_BASE, guest_ss_base), - FIELD(GUEST_DS_BASE, guest_ds_base), - FIELD(GUEST_FS_BASE, guest_fs_base), - FIELD(GUEST_GS_BASE, guest_gs_base), - FIELD(GUEST_LDTR_BASE, guest_ldtr_base), - FIELD(GUEST_TR_BASE, guest_tr_base), - FIELD(GUEST_GDTR_BASE, guest_gdtr_base), - FIELD(GUEST_IDTR_BASE, guest_idtr_base), - FIELD(GUEST_DR7, guest_dr7), - FIELD(GUEST_RSP, guest_rsp), - FIELD(GUEST_RIP, guest_rip), - FIELD(GUEST_RFLAGS, guest_rflags), - FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), - FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), - FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), - FIELD(HOST_CR0, host_cr0), - FIELD(HOST_CR3, host_cr3), - FIELD(HOST_CR4, host_cr4), - FIELD(HOST_FS_BASE, host_fs_base), - FIELD(HOST_GS_BASE, host_gs_base), - FIELD(HOST_TR_BASE, host_tr_base), - FIELD(HOST_GDTR_BASE, host_gdtr_base), - FIELD(HOST_IDTR_BASE, host_idtr_base), - FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), - FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), - FIELD(HOST_RSP, host_rsp), - FIELD(HOST_RIP, host_rip), -}; - -static inline short vmcs_field_to_offset(unsigned long field) -{ - const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table); - unsigned short offset; - unsigned index; - - if (field >> 15) - return -ENOENT; - - index = ROL16(field, 6); - if (index >= size) - return -ENOENT; - - index = array_index_nospec(index, size); - offset = vmcs_field_to_offset_table[index]; - if (offset == 0) - return -ENOENT; - return offset; -} - -static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.cached_vmcs12; -} - -static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.cached_shadow_vmcs12; -} - -static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu); -static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); -static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa); -static bool vmx_xsaves_supported(void); -static void vmx_set_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg); -static void vmx_get_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg); -static bool guest_state_valid(struct kvm_vcpu *vcpu); -static u32 vmx_segment_access_rights(struct kvm_segment *var); -static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); -static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); 
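[Editor's note: the ROL16 compression in vmcs_field_to_offset() above deserves a worked example. VMCS field encodings keep the field index in bits 9:1 with width/type bits higher up; rotating the 16-bit encoding left by 6 folds those high bits into a small, dense table index, which array_index_nospec() then clamps to keep the lookup speculation-safe. A standalone demonstration, with encodings taken from the table above:]

    #include <stdio.h>

    #define ROL16(val, n) ((unsigned short)(((unsigned short)(val) << (n)) | \
                           ((unsigned short)(val) >> (16 - (n)))))

    int main(void)
    {
            /* GUEST_ES_SELECTOR is encoded as 0x0800, GUEST_CR0 as 0x6800. */
            printf("GUEST_ES_SELECTOR -> index %u\n",
                   (unsigned)ROL16(0x0800, 6));     /* prints 2  */
            printf("GUEST_CR0         -> index %u\n",
                   (unsigned)ROL16(0x6800, 6));     /* prints 26 */
            return 0;
    }
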
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); -static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, - u16 error_code); -static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); -static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type); - -static DEFINE_PER_CPU(struct vmcs *, vmxarea); -static DEFINE_PER_CPU(struct vmcs *, current_vmcs); -/* - * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed - * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. - */ -static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); - -/* - * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we - * can find which vCPU should be waken up. - */ -static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); -static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); - -enum { - VMX_VMREAD_BITMAP, - VMX_VMWRITE_BITMAP, - VMX_BITMAP_NR -}; - -static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; - -#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) -#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) - -static bool cpu_has_load_ia32_efer; -static bool cpu_has_load_perf_global_ctrl; - -static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); -static DEFINE_SPINLOCK(vmx_vpid_lock); - -static struct vmcs_config { - int size; - int order; - u32 basic_cap; - u32 revision_id; - u32 pin_based_exec_ctrl; - u32 cpu_based_exec_ctrl; - u32 cpu_based_2nd_exec_ctrl; - u32 vmexit_ctrl; - u32 vmentry_ctrl; - struct nested_vmx_msrs nested; -} vmcs_config; - -static struct vmx_capability { - u32 ept; - u32 vpid; -} vmx_capability; - -#define VMX_SEGMENT_FIELD(seg) \ - [VCPU_SREG_##seg] = { \ - .selector = GUEST_##seg##_SELECTOR, \ - .base = GUEST_##seg##_BASE, \ - .limit = GUEST_##seg##_LIMIT, \ - .ar_bytes = GUEST_##seg##_AR_BYTES, \ - } - -static const struct kvm_vmx_segment_field { - unsigned selector; - unsigned base; - unsigned limit; - unsigned ar_bytes; -} kvm_vmx_segment_fields[] = { - VMX_SEGMENT_FIELD(CS), - VMX_SEGMENT_FIELD(DS), - VMX_SEGMENT_FIELD(ES), - VMX_SEGMENT_FIELD(FS), - VMX_SEGMENT_FIELD(GS), - VMX_SEGMENT_FIELD(SS), - VMX_SEGMENT_FIELD(TR), - VMX_SEGMENT_FIELD(LDTR), -}; - -static u64 host_efer; - -static void ept_save_pdptrs(struct kvm_vcpu *vcpu); - -/* - * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it - * away by decrementing the array size. 
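[Editor's note: for reference, each VMX_SEGMENT_FIELD() entry above is a designated initializer tying one guest segment to its four VMCS field encodings via token pasting; VMX_SEGMENT_FIELD(CS) expands to:]

    [VCPU_SREG_CS] = {
            .selector = GUEST_CS_SELECTOR,
            .base     = GUEST_CS_BASE,
            .limit    = GUEST_CS_LIMIT,
            .ar_bytes = GUEST_CS_AR_BYTES,
    },
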
- */ -static const u32 vmx_msr_index[] = { -#ifdef CONFIG_X86_64 - MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, -#endif - MSR_EFER, MSR_TSC_AUX, MSR_STAR, -}; - -DEFINE_STATIC_KEY_FALSE(enable_evmcs); - -#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs)) - -#define KVM_EVMCS_VERSION 1 - -/* - * Enlightened VMCSv1 doesn't support these: - * - * POSTED_INTR_NV = 0x00000002, - * GUEST_INTR_STATUS = 0x00000810, - * APIC_ACCESS_ADDR = 0x00002014, - * POSTED_INTR_DESC_ADDR = 0x00002016, - * EOI_EXIT_BITMAP0 = 0x0000201c, - * EOI_EXIT_BITMAP1 = 0x0000201e, - * EOI_EXIT_BITMAP2 = 0x00002020, - * EOI_EXIT_BITMAP3 = 0x00002022, - * GUEST_PML_INDEX = 0x00000812, - * PML_ADDRESS = 0x0000200e, - * VM_FUNCTION_CONTROL = 0x00002018, - * EPTP_LIST_ADDRESS = 0x00002024, - * VMREAD_BITMAP = 0x00002026, - * VMWRITE_BITMAP = 0x00002028, - * - * TSC_MULTIPLIER = 0x00002032, - * PLE_GAP = 0x00004020, - * PLE_WINDOW = 0x00004022, - * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, - * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, - * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, - * - * Currently unsupported in KVM: - * GUEST_IA32_RTIT_CTL = 0x00002814, - */ -#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \ - PIN_BASED_VMX_PREEMPTION_TIMER) -#define EVMCS1_UNSUPPORTED_2NDEXEC \ - (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \ - SECONDARY_EXEC_APIC_REGISTER_VIRT | \ - SECONDARY_EXEC_ENABLE_PML | \ - SECONDARY_EXEC_ENABLE_VMFUNC | \ - SECONDARY_EXEC_SHADOW_VMCS | \ - SECONDARY_EXEC_TSC_SCALING | \ - SECONDARY_EXEC_PAUSE_LOOP_EXITING) -#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) -#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) -#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) - -#if IS_ENABLED(CONFIG_HYPERV) -static bool __read_mostly enlightened_vmcs = true; -module_param(enlightened_vmcs, bool, 0444); - -static inline void evmcs_write64(unsigned long field, u64 value) -{ - u16 clean_field; - int offset = get_evmcs_offset(field, &clean_field); - - if (offset < 0) - return; - - *(u64 *)((char *)current_evmcs + offset) = value; - - current_evmcs->hv_clean_fields &= ~clean_field; -} - -static inline void evmcs_write32(unsigned long field, u32 value) -{ - u16 clean_field; - int offset = get_evmcs_offset(field, &clean_field); - - if (offset < 0) - return; - - *(u32 *)((char *)current_evmcs + offset) = value; - current_evmcs->hv_clean_fields &= ~clean_field; -} - -static inline void evmcs_write16(unsigned long field, u16 value) -{ - u16 clean_field; - int offset = get_evmcs_offset(field, &clean_field); - - if (offset < 0) - return; - - *(u16 *)((char *)current_evmcs + offset) = value; - current_evmcs->hv_clean_fields &= ~clean_field; -} - -static inline u64 evmcs_read64(unsigned long field) -{ - int offset = get_evmcs_offset(field, NULL); - - if (offset < 0) - return 0; - - return *(u64 *)((char *)current_evmcs + offset); -} - -static inline u32 evmcs_read32(unsigned long field) -{ - int offset = get_evmcs_offset(field, NULL); - - if (offset < 0) - return 0; - - return *(u32 *)((char *)current_evmcs + offset); -} - -static inline u16 evmcs_read16(unsigned long field) -{ - int offset = get_evmcs_offset(field, NULL); - - if (offset < 0) - return 0; - - return *(u16 *)((char *)current_evmcs + offset); -} - -static inline void evmcs_touch_msr_bitmap(void) -{ - if (unlikely(!current_evmcs)) - return; - - if (current_evmcs->hv_enlightenments_control.msr_bitmap) - 
current_evmcs->hv_clean_fields &= - ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP; -} - -static void evmcs_load(u64 phys_addr) -{ - struct hv_vp_assist_page *vp_ap = - hv_get_vp_assist_page(smp_processor_id()); - - vp_ap->current_nested_vmcs = phys_addr; - vp_ap->enlighten_vmentry = 1; -} - -static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) -{ - vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL; - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - - vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; - vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; - -} - -/* check_ept_pointer() should be under protection of ept_pointer_lock. */ -static void check_ept_pointer_match(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - u64 tmp_eptp = INVALID_PAGE; - int i; - - kvm_for_each_vcpu(i, vcpu, kvm) { - if (!VALID_PAGE(tmp_eptp)) { - tmp_eptp = to_vmx(vcpu)->ept_pointer; - } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { - to_kvm_vmx(kvm)->ept_pointers_match - = EPT_POINTERS_MISMATCH; - return; - } - } - - to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; -} - -static int vmx_hv_remote_flush_tlb(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - int ret = -ENOTSUPP, i; - - spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); - - if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) - check_ept_pointer_match(kvm); - - /* - * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the - * base of EPT PML4 table, strip off EPT configuration information. - */ - if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { - kvm_for_each_vcpu(i, vcpu, kvm) - ret |= hyperv_flush_guest_mapping( - to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer & PAGE_MASK); - } else { - ret = hyperv_flush_guest_mapping( - to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK); - } - - spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); - return ret; -} -#else /* !IS_ENABLED(CONFIG_HYPERV) */ -static inline void evmcs_write64(unsigned long field, u64 value) {} -static inline void evmcs_write32(unsigned long field, u32 value) {} -static inline void evmcs_write16(unsigned long field, u16 value) {} -static inline u64 evmcs_read64(unsigned long field) { return 0; } -static inline u32 evmcs_read32(unsigned long field) { return 0; } -static inline u16 evmcs_read16(unsigned long field) { return 0; } -static inline void evmcs_load(u64 phys_addr) {} -static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} -static inline void evmcs_touch_msr_bitmap(void) {} -#endif /* IS_ENABLED(CONFIG_HYPERV) */ - -static int nested_enable_evmcs(struct kvm_vcpu *vcpu, - uint16_t *vmcs_version) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * vmcs_version represents the range of supported Enlightened VMCS - * versions: lower 8 bits is the minimal version, higher 8 bits is the - * maximum supported version. KVM supports versions from 1 to - * KVM_EVMCS_VERSION. - */ - if (vmcs_version) - *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; - - /* We don't support disabling the feature for simplicity. 
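[Editor's note: the version-range packing in nested_enable_evmcs() here is easy to misread, so a short worked decode; KVM_EVMCS_VERSION is 1, as defined above:]

    u16 vmcs_version  = (KVM_EVMCS_VERSION << 8) | 1;  /* == 0x0101 */
    u8  min_supported = vmcs_version & 0xff;           /* 1 */
    u8  max_supported = vmcs_version >> 8;             /* KVM_EVMCS_VERSION */
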
*/ - if (vmx->nested.enlightened_vmcs_enabled) - return 0; - - vmx->nested.enlightened_vmcs_enabled = true; - - vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; - vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; - vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; - vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC; - - return 0; -} - -static inline bool is_exception_n(u32 intr_info, u8 vector) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); -} - -static inline bool is_debug(u32 intr_info) -{ - return is_exception_n(intr_info, DB_VECTOR); -} - -static inline bool is_breakpoint(u32 intr_info) -{ - return is_exception_n(intr_info, BP_VECTOR); -} - -static inline bool is_page_fault(u32 intr_info) -{ - return is_exception_n(intr_info, PF_VECTOR); -} - -static inline bool is_invalid_opcode(u32 intr_info) -{ - return is_exception_n(intr_info, UD_VECTOR); -} - -static inline bool is_gp_fault(u32 intr_info) -{ - return is_exception_n(intr_info, GP_VECTOR); -} - -static inline bool is_machine_check(u32 intr_info) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); -} - -/* Undocumented: icebp/int1 */ -static inline bool is_icebp(u32 intr_info) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); -} - -static inline bool cpu_has_vmx_msr_bitmap(void) -{ - return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; -} - -static inline bool cpu_has_vmx_tpr_shadow(void) -{ - return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; -} - -static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) -{ - return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); -} - -static inline bool cpu_has_secondary_exec_ctrls(void) -{ - return vmcs_config.cpu_based_exec_ctrl & - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; -} - -static inline bool cpu_has_vmx_virtualize_apic_accesses(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; -} - -static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; -} - -static inline bool cpu_has_vmx_apic_register_virt(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_APIC_REGISTER_VIRT; -} - -static inline bool cpu_has_vmx_virtual_intr_delivery(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; -} - -static inline bool cpu_has_vmx_encls_vmexit(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENCLS_EXITING; -} - -/* - * Comment's format: document - errata name - stepping - processor name. 
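[Editor's note: as a concrete check of the is_exception_n() masking above: a #PF VM-exit that delivers an error code produces intr_info = 0x80000b0e (valid bit 31, error-code-valid bit 11, type 3 = hardware exception in bits 10:8, vector 14 in bits 7:0). The comparison deliberately masks out bit 11, so:]

    u32 intr_info = 0x80000b0e;

    /* Evaluates to true: the error-code-valid bit lies outside the mask. */
    bool pf = (intr_info & (INTR_INFO_INTR_TYPE_MASK |
                            INTR_INFO_VECTOR_MASK |
                            INTR_INFO_VALID_MASK)) ==
              (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
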
- * Refer from - * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp - */ -static u32 vmx_preemption_cpu_tfms[] = { -/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ -0x000206E6, -/* 323056.pdf - AAX65 - C2 - Xeon L3406 */ -/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ -/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ -0x00020652, -/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ -0x00020655, -/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ -/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ -/* - * 320767.pdf - AAP86 - B1 - - * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile - */ -0x000106E5, -/* 321333.pdf - AAM126 - C0 - Xeon 3500 */ -0x000106A0, -/* 321333.pdf - AAM126 - C1 - Xeon 3500 */ -0x000106A1, -/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ -0x000106A4, - /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ - /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ - /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ -0x000106A5, -}; - -static inline bool cpu_has_broken_vmx_preemption_timer(void) -{ - u32 eax = cpuid_eax(0x00000001), i; - - /* Clear the reserved bits */ - eax &= ~(0x3U << 14 | 0xfU << 28); - for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) - if (eax == vmx_preemption_cpu_tfms[i]) - return true; - - return false; -} - -static inline bool cpu_has_vmx_preemption_timer(void) -{ - return vmcs_config.pin_based_exec_ctrl & - PIN_BASED_VMX_PREEMPTION_TIMER; -} - -static inline bool cpu_has_vmx_posted_intr(void) -{ - return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && - vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; -} - -static inline bool cpu_has_vmx_apicv(void) -{ - return cpu_has_vmx_apic_register_virt() && - cpu_has_vmx_virtual_intr_delivery() && - cpu_has_vmx_posted_intr(); -} - -static inline bool cpu_has_vmx_flexpriority(void) -{ - return cpu_has_vmx_tpr_shadow() && - cpu_has_vmx_virtualize_apic_accesses(); -} - -static inline bool cpu_has_vmx_ept_execute_only(void) -{ - return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; -} - -static inline bool cpu_has_vmx_ept_2m_page(void) -{ - return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; -} - -static inline bool cpu_has_vmx_ept_1g_page(void) -{ - return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; -} - -static inline bool cpu_has_vmx_ept_4levels(void) -{ - return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; -} - -static inline bool cpu_has_vmx_ept_mt_wb(void) -{ - return vmx_capability.ept & VMX_EPTP_WB_BIT; -} - -static inline bool cpu_has_vmx_ept_5levels(void) -{ - return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT; -} - -static inline bool cpu_has_vmx_ept_ad_bits(void) -{ - return vmx_capability.ept & VMX_EPT_AD_BIT; -} - -static inline bool cpu_has_vmx_invept_context(void) -{ - return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; -} - -static inline bool cpu_has_vmx_invept_global(void) -{ - return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; -} - -static inline bool cpu_has_vmx_invvpid_individual_addr(void) -{ - return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT; -} - -static inline bool cpu_has_vmx_invvpid_single(void) -{ - return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; -} - -static inline bool cpu_has_vmx_invvpid_global(void) -{ - return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; -} - -static inline bool cpu_has_vmx_invvpid(void) -{ - return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; -} - -static inline bool 
cpu_has_vmx_ept(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENABLE_EPT; -} - -static inline bool cpu_has_vmx_unrestricted_guest(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_UNRESTRICTED_GUEST; -} - -static inline bool cpu_has_vmx_ple(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_PAUSE_LOOP_EXITING; -} - -static inline bool cpu_has_vmx_basic_inout(void) -{ - return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT); -} - -static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) -{ - return flexpriority_enabled && lapic_in_kernel(vcpu); -} - -static inline bool cpu_has_vmx_vpid(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENABLE_VPID; -} - -static inline bool cpu_has_vmx_rdtscp(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_RDTSCP; -} - -static inline bool cpu_has_vmx_invpcid(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENABLE_INVPCID; -} - -static inline bool cpu_has_virtual_nmis(void) -{ - return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; -} - -static inline bool cpu_has_vmx_wbinvd_exit(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_WBINVD_EXITING; -} - -static inline bool cpu_has_vmx_shadow_vmcs(void) -{ - u64 vmx_msr; - rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); - /* check if the cpu supports writing r/o exit information fields */ - if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) - return false; - - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_SHADOW_VMCS; -} - -static inline bool cpu_has_vmx_pml(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML; -} - -static inline bool cpu_has_vmx_tsc_scaling(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_TSC_SCALING; -} - -static inline bool cpu_has_vmx_vmfunc(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENABLE_VMFUNC; -} - -static bool vmx_umip_emulated(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_DESC; -} - -static inline bool report_flexpriority(void) -{ - return flexpriority_enabled; -} - -static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) -{ - return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low); -} - -/* - * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE - * to modify any valid field of the VMCS, or are the VM-exit - * information fields read-only? 
- */ -static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.msrs.misc_low & - MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; -} - -static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; -} - -static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & - CPU_BASED_MONITOR_TRAP_FLAG; -} - -static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->nested.msrs.secondary_ctls_high & - SECONDARY_EXEC_SHADOW_VMCS; -} - -static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) -{ - return vmcs12->cpu_based_vm_exec_control & bit; -} - -static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) -{ - return (vmcs12->cpu_based_vm_exec_control & - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && - (vmcs12->secondary_vm_exec_control & bit); -} - -static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & - PIN_BASED_VMX_PREEMPTION_TIMER; -} - -static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; -} - -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; -} - -static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); -} - -static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); -} - -static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML); -} - -static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); -} - -static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); -} - -static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT); -} - -static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); -} - -static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; -} - -static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC); -} - -static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12) -{ - return nested_cpu_has_vmfunc(vmcs12) && - (vmcs12->vm_function_control & - VMX_VMFUNC_EPTP_SWITCHING); -} - -static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12) -{ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS); -} - -static inline bool is_nmi(u32 intr_info) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); -} - -static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, - u32 exit_intr_info, - unsigned long exit_qualification); - -static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) -{ - int i; - - for (i = 0; i < vmx->nmsrs; ++i) - if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) - return i; - return -1; -} - 
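[Editor's note: the subtle point in nested_cpu_has2() above is worth spelling out: if L1 never set CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, the secondary controls field is architecturally ignored, so any bit found there is stale. A usage sketch; prepare_vpid02() is a hypothetical helper:]

    /* Correct: trusts the secondary bit only when secondary controls
     * are activated in the primary execution controls. */
    if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID))
            prepare_vpid02(vmx);    /* hypothetical helper */

    /* Buggy: would honor a secondary bit the CPU itself ignores. */
    if (vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_VPID)
            /* ... */;
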
-static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) -{ - struct { - u64 vpid : 16; - u64 rsvd : 48; - u64 gva; - } operand = { vpid, 0, gva }; - bool error; - - asm volatile (__ex("invvpid %2, %1") CC_SET(na) - : CC_OUT(na) (error) : "r"(ext), "m"(operand)); - BUG_ON(error); -} - -static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) -{ - struct { - u64 eptp, gpa; - } operand = {eptp, gpa}; - bool error; - - asm volatile (__ex("invept %2, %1") CC_SET(na) - : CC_OUT(na) (error) : "r"(ext), "m"(operand)); - BUG_ON(error); -} - -static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) -{ - int i; - - i = __find_msr_index(vmx, msr); - if (i >= 0) - return &vmx->guest_msrs[i]; - return NULL; -} - -static void vmcs_clear(struct vmcs *vmcs) -{ - u64 phys_addr = __pa(vmcs); - bool error; - - asm volatile (__ex("vmclear %1") CC_SET(na) - : CC_OUT(na) (error) : "m"(phys_addr)); - if (unlikely(error)) - printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", - vmcs, phys_addr); -} - -static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) -{ - vmcs_clear(loaded_vmcs->vmcs); - if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) - vmcs_clear(loaded_vmcs->shadow_vmcs); - loaded_vmcs->cpu = -1; - loaded_vmcs->launched = 0; -} - -static void vmcs_load(struct vmcs *vmcs) -{ - u64 phys_addr = __pa(vmcs); - bool error; - - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_load(phys_addr); - - asm volatile (__ex("vmptrld %1") CC_SET(na) - : CC_OUT(na) (error) : "m"(phys_addr)); - if (unlikely(error)) - printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", - vmcs, phys_addr); -} - -#ifdef CONFIG_KEXEC_CORE -/* - * This bitmap is used to indicate whether the vmclear - * operation is enabled on all cpus. All disabled by - * default. - */ -static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; - -static inline void crash_enable_local_vmclear(int cpu) -{ - cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline void crash_disable_local_vmclear(int cpu) -{ - cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline int crash_local_vmclear_enabled(int cpu) -{ - return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static void crash_vmclear_local_loaded_vmcss(void) -{ - int cpu = raw_smp_processor_id(); - struct loaded_vmcs *v; - - if (!crash_local_vmclear_enabled(cpu)) - return; - - list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), - loaded_vmcss_on_cpu_link) - vmcs_clear(v->vmcs); -} -#else -static inline void crash_enable_local_vmclear(int cpu) { } -static inline void crash_disable_local_vmclear(int cpu) { } -#endif /* CONFIG_KEXEC_CORE */ - -static void __loaded_vmcs_clear(void *arg) -{ - struct loaded_vmcs *loaded_vmcs = arg; - int cpu = raw_smp_processor_id(); - - if (loaded_vmcs->cpu != cpu) - return; /* vcpu migration can race with cpu offline */ - if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) - per_cpu(current_vmcs, cpu) = NULL; - crash_disable_local_vmclear(cpu); - list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); - - /* - * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link - * is before setting loaded_vmcs->vcpu to -1 which is done in - * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist - * then adds the vmcs into percpu list before it is deleted. 
- */ - smp_wmb(); - - loaded_vmcs_init(loaded_vmcs); - crash_enable_local_vmclear(cpu); -} - -static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) -{ - int cpu = loaded_vmcs->cpu; - - if (cpu != -1) - smp_call_function_single(cpu, - __loaded_vmcs_clear, loaded_vmcs, 1); -} - -static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr) -{ - if (vpid == 0) - return true; - - if (cpu_has_vmx_invvpid_individual_addr()) { - __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr); - return true; - } - - return false; -} - -static inline void vpid_sync_vcpu_single(int vpid) -{ - if (vpid == 0) - return; - - if (cpu_has_vmx_invvpid_single()) - __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0); -} - -static inline void vpid_sync_vcpu_global(void) -{ - if (cpu_has_vmx_invvpid_global()) - __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); -} - -static inline void vpid_sync_context(int vpid) -{ - if (cpu_has_vmx_invvpid_single()) - vpid_sync_vcpu_single(vpid); - else - vpid_sync_vcpu_global(); -} - -static inline void ept_sync_global(void) -{ - __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); -} - -static inline void ept_sync_context(u64 eptp) -{ - if (cpu_has_vmx_invept_context()) - __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); - else - ept_sync_global(); -} - -static __always_inline void vmcs_check16(unsigned long field) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, - "16-bit accessor invalid for 64-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, - "16-bit accessor invalid for 64-bit high field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, - "16-bit accessor invalid for 32-bit high field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, - "16-bit accessor invalid for natural width field"); -} - -static __always_inline void vmcs_check32(unsigned long field) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, - "32-bit accessor invalid for 16-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, - "32-bit accessor invalid for natural width field"); -} - -static __always_inline void vmcs_check64(unsigned long field) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, - "64-bit accessor invalid for 16-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, - "64-bit accessor invalid for 64-bit high field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, - "64-bit accessor invalid for 32-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, - "64-bit accessor invalid for natural width field"); -} - -static __always_inline void vmcs_checkl(unsigned long field) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, - "Natural width accessor invalid for 16-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, - "Natural width accessor invalid for 64-bit field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, - "Natural width accessor invalid for 64-bit high field"); - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, - "Natural width accessor invalid for 32-bit field"); -} - -static __always_inline unsigned long __vmcs_readl(unsigned long field) -{ - unsigned long value; - - asm volatile (__ex_clear("vmread %1, %0", "%k0") - : "=r"(value) : 
"r"(field)); - return value; -} - -static __always_inline u16 vmcs_read16(unsigned long field) -{ - vmcs_check16(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_read16(field); - return __vmcs_readl(field); -} - -static __always_inline u32 vmcs_read32(unsigned long field) -{ - vmcs_check32(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_read32(field); - return __vmcs_readl(field); -} - -static __always_inline u64 vmcs_read64(unsigned long field) -{ - vmcs_check64(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_read64(field); -#ifdef CONFIG_X86_64 - return __vmcs_readl(field); -#else - return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); -#endif -} - -static __always_inline unsigned long vmcs_readl(unsigned long field) -{ - vmcs_checkl(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_read64(field); - return __vmcs_readl(field); -} - -static noinline void vmwrite_error(unsigned long field, unsigned long value) -{ - printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", - field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); - dump_stack(); -} - -static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) -{ - bool error; - - asm volatile (__ex("vmwrite %2, %1") CC_SET(na) - : CC_OUT(na) (error) : "r"(field), "rm"(value)); - if (unlikely(error)) - vmwrite_error(field, value); -} - -static __always_inline void vmcs_write16(unsigned long field, u16 value) -{ - vmcs_check16(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write16(field, value); - - __vmcs_writel(field, value); -} - -static __always_inline void vmcs_write32(unsigned long field, u32 value) -{ - vmcs_check32(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write32(field, value); - - __vmcs_writel(field, value); -} - -static __always_inline void vmcs_write64(unsigned long field, u64 value) -{ - vmcs_check64(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write64(field, value); - - __vmcs_writel(field, value); -#ifndef CONFIG_X86_64 - asm volatile (""); - __vmcs_writel(field+1, value >> 32); -#endif -} - -static __always_inline void vmcs_writel(unsigned long field, unsigned long value) -{ - vmcs_checkl(field); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write64(field, value); - - __vmcs_writel(field, value); -} - -static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, - "vmcs_clear_bits does not support 64-bit fields"); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write32(field, evmcs_read32(field) & ~mask); - - __vmcs_writel(field, __vmcs_readl(field) & ~mask); -} - -static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) -{ - BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, - "vmcs_set_bits does not support 64-bit fields"); - if (static_branch_unlikely(&enable_evmcs)) - return evmcs_write32(field, evmcs_read32(field) | mask); - - __vmcs_writel(field, __vmcs_readl(field) | mask); -} - -static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx) -{ - vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS); -} - -static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) -{ - vmcs_write32(VM_ENTRY_CONTROLS, val); - vmx->vm_entry_controls_shadow = val; -} - -static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) -{ - if 
(vmx->vm_entry_controls_shadow != val) - vm_entry_controls_init(vmx, val); -} - -static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) -{ - return vmx->vm_entry_controls_shadow; -} - - -static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) -{ - vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); -} - -static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) -{ - vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); -} - -static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx) -{ - vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS); -} - -static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) -{ - vmcs_write32(VM_EXIT_CONTROLS, val); - vmx->vm_exit_controls_shadow = val; -} - -static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) -{ - if (vmx->vm_exit_controls_shadow != val) - vm_exit_controls_init(vmx, val); -} - -static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) -{ - return vmx->vm_exit_controls_shadow; -} - - -static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) -{ - vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); -} - -static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) -{ - vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); -} - -static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) -{ - vmx->segment_cache.bitmask = 0; -} - -static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, - unsigned field) -{ - bool ret; - u32 mask = 1 << (seg * SEG_FIELD_NR + field); - - if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { - vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); - vmx->segment_cache.bitmask = 0; - } - ret = vmx->segment_cache.bitmask & mask; - vmx->segment_cache.bitmask |= mask; - return ret; -} - -static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) -{ - u16 *p = &vmx->segment_cache.seg[seg].selector; - - if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) - *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); - return *p; -} - -static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) -{ - ulong *p = &vmx->segment_cache.seg[seg].base; - - if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) - *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); - return *p; -} - -static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) -{ - u32 *p = &vmx->segment_cache.seg[seg].limit; - - if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) - *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); - return *p; -} - -static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) -{ - u32 *p = &vmx->segment_cache.seg[seg].ar; - - if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) - *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); - return *p; -} - -static void update_exception_bitmap(struct kvm_vcpu *vcpu) -{ - u32 eb; - - eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | - (1u << DB_VECTOR) | (1u << AC_VECTOR); - /* - * Guest access to VMware backdoor ports could legitimately - * trigger #GP because of TSS I/O permission bitmap. - * We intercept those #GP and allow access to them anyway - * as VMware does. 
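[Editor's note: the segment cache above is a simple read-through cache keyed by (segment, field). A sketch of the access pattern, assuming a valid vmx context:]

    /* The first read after vmx_segment_cache_clear() performs the
     * VMREAD and latches the value ... */
    u16 cs_sel = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS);

    /* ... repeated reads are served from the cache until the next
     * clear (e.g. after guest segment state is modified). */
    u16 cs_sel_again = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS);
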
- */ - if (enable_vmware_backdoor) - eb |= (1u << GP_VECTOR); - if ((vcpu->guest_debug & - (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == - (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) - eb |= 1u << BP_VECTOR; - if (to_vmx(vcpu)->rmode.vm86_active) - eb = ~0; - if (enable_ept) - eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ - - /* When we are running a nested L2 guest and L1 specified for it a - * certain exception bitmap, we must trap the same exceptions and pass - * them to L1. When running L2, we will only handle the exceptions - * specified above if L1 did not want them. - */ - if (is_guest_mode(vcpu)) - eb |= get_vmcs12(vcpu)->exception_bitmap; - - vmcs_write32(EXCEPTION_BITMAP, eb); -} - -/* - * Check if MSR is intercepted for currently loaded MSR bitmap. - */ -static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) -{ - unsigned long *msr_bitmap; - int f = sizeof(unsigned long); - - if (!cpu_has_vmx_msr_bitmap()) - return true; - - msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; - - if (msr <= 0x1fff) { - return !!test_bit(msr, msr_bitmap + 0x800 / f); - } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { - msr &= 0x1fff; - return !!test_bit(msr, msr_bitmap + 0xc00 / f); - } - - return true; -} - -/* - * Check if MSR is intercepted for L01 MSR bitmap. - */ -static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) -{ - unsigned long *msr_bitmap; - int f = sizeof(unsigned long); - - if (!cpu_has_vmx_msr_bitmap()) - return true; - - msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; - - if (msr <= 0x1fff) { - return !!test_bit(msr, msr_bitmap + 0x800 / f); - } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { - msr &= 0x1fff; - return !!test_bit(msr, msr_bitmap + 0xc00 / f); - } - - return true; -} - -static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, - unsigned long entry, unsigned long exit) -{ - vm_entry_controls_clearbit(vmx, entry); - vm_exit_controls_clearbit(vmx, exit); -} - -static int find_msr(struct vmx_msrs *m, unsigned int msr) -{ - unsigned int i; - - for (i = 0; i < m->nr; ++i) { - if (m->val[i].index == msr) - return i; - } - return -ENOENT; -} - -static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) -{ - int i; - struct msr_autoload *m = &vmx->msr_autoload; - - switch (msr) { - case MSR_EFER: - if (cpu_has_load_ia32_efer) { - clear_atomic_switch_msr_special(vmx, - VM_ENTRY_LOAD_IA32_EFER, - VM_EXIT_LOAD_IA32_EFER); - return; - } - break; - case MSR_CORE_PERF_GLOBAL_CTRL: - if (cpu_has_load_perf_global_ctrl) { - clear_atomic_switch_msr_special(vmx, - VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, - VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); - return; - } - break; - } - i = find_msr(&m->guest, msr); - if (i < 0) - goto skip_guest; - --m->guest.nr; - m->guest.val[i] = m->guest.val[m->guest.nr]; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); - -skip_guest: - i = find_msr(&m->host, msr); - if (i < 0) - return; - - --m->host.nr; - m->host.val[i] = m->host.val[m->host.nr]; - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); -} - -static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, - unsigned long entry, unsigned long exit, - unsigned long guest_val_vmcs, unsigned long host_val_vmcs, - u64 guest_val, u64 host_val) -{ - vmcs_write64(guest_val_vmcs, guest_val); - if (host_val_vmcs != HOST_IA32_EFER) - vmcs_write64(host_val_vmcs, host_val); - vm_entry_controls_setbit(vmx, entry); - vm_exit_controls_setbit(vmx, exit); -} - -static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, - u64 
guest_val, u64 host_val, bool entry_only) -{ - int i, j = 0; - struct msr_autoload *m = &vmx->msr_autoload; - - switch (msr) { - case MSR_EFER: - if (cpu_has_load_ia32_efer) { - add_atomic_switch_msr_special(vmx, - VM_ENTRY_LOAD_IA32_EFER, - VM_EXIT_LOAD_IA32_EFER, - GUEST_IA32_EFER, - HOST_IA32_EFER, - guest_val, host_val); - return; - } - break; - case MSR_CORE_PERF_GLOBAL_CTRL: - if (cpu_has_load_perf_global_ctrl) { - add_atomic_switch_msr_special(vmx, - VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, - VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, - GUEST_IA32_PERF_GLOBAL_CTRL, - HOST_IA32_PERF_GLOBAL_CTRL, - guest_val, host_val); - return; - } - break; - case MSR_IA32_PEBS_ENABLE: - /* PEBS needs a quiescent period after being disabled (to write - * a record). Disabling PEBS through VMX MSR swapping doesn't - * provide that period, so a CPU could write host's record into - * guest's memory. - */ - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); - } - - i = find_msr(&m->guest, msr); - if (!entry_only) - j = find_msr(&m->host, msr); - - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { - printk_once(KERN_WARNING "Not enough msr switch entries. " - "Can't add msr %x\n", msr); - return; - } - if (i < 0) { - i = m->guest.nr++; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); - } - m->guest.val[i].index = msr; - m->guest.val[i].value = guest_val; - - if (entry_only) - return; - - if (j < 0) { - j = m->host.nr++; - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); - } - m->host.val[j].index = msr; - m->host.val[j].value = host_val; -} - -static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) -{ - u64 guest_efer = vmx->vcpu.arch.efer; - u64 ignore_bits = 0; - - if (!enable_ept) { - /* - * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing - * host CPUID is more efficient than testing guest CPUID - * or CR4. Host SMEP is anyway a requirement for guest SMEP. - */ - if (boot_cpu_has(X86_FEATURE_SMEP)) - guest_efer |= EFER_NX; - else if (!(guest_efer & EFER_NX)) - ignore_bits |= EFER_NX; - } - - /* - * LMA and LME handled by hardware; SCE meaningless outside long mode. - */ - ignore_bits |= EFER_SCE; -#ifdef CONFIG_X86_64 - ignore_bits |= EFER_LMA | EFER_LME; - /* SCE is meaningful only in long mode on Intel */ - if (guest_efer & EFER_LMA) - ignore_bits &= ~(u64)EFER_SCE; -#endif - - /* - * On EPT, we can't emulate NX, so we must switch EFER atomically. - * On CPUs that support "load IA32_EFER", always switch EFER - * atomically, since it's faster than switching it manually. - */ - if (cpu_has_load_ia32_efer || - (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { - if (!(guest_efer & EFER_LMA)) - guest_efer &= ~EFER_LME; - if (guest_efer != host_efer) - add_atomic_switch_msr(vmx, MSR_EFER, - guest_efer, host_efer, false); - else - clear_atomic_switch_msr(vmx, MSR_EFER); - return false; - } else { - clear_atomic_switch_msr(vmx, MSR_EFER); - - guest_efer &= ~ignore_bits; - guest_efer |= host_efer & ignore_bits; - - vmx->guest_msrs[efer_offset].data = guest_efer; - vmx->guest_msrs[efer_offset].mask = ~ignore_bits; - - return true; - } -} - -#ifdef CONFIG_X86_32 -/* - * On 32-bit kernels, VM exits still load the FS and GS bases from the - * VMCS rather than the segment table. KVM uses this helper to figure - * out the current bases to poke them into the VMCS before entry. 
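find_msr() and the add/clear_atomic_switch_msr() pair above maintain a small, dense, fixed-capacity array: add is find-or-append, clear swaps the last element into the hole so the array stays packed. A self-contained sketch of that bookkeeping under invented names (the toy_* identifiers and the capacity of 8 are made up; NR_AUTOLOAD_MSRS is the real limit):

#include <errno.h>
#include <stdio.h>

#define TOY_NR_AUTOLOAD 8   /* stand-in for NR_AUTOLOAD_MSRS */

struct toy_msr  { unsigned int index; unsigned long long value; };
struct toy_msrs { unsigned int nr; struct toy_msr val[TOY_NR_AUTOLOAD]; };

static int toy_find_msr(struct toy_msrs *m, unsigned int msr)
{
        for (unsigned int i = 0; i < m->nr; ++i)
                if (m->val[i].index == msr)
                        return (int)i;
        return -ENOENT;
}

/* Append if absent, overwrite if present. */
static int toy_add_msr(struct toy_msrs *m, unsigned int msr,
                       unsigned long long val)
{
        int i = toy_find_msr(m, msr);

        if (i < 0) {
                if (m->nr == TOY_NR_AUTOLOAD)
                        return -ENOSPC;  /* "Not enough msr switch entries" */
                i = (int)m->nr++;
        }
        m->val[i].index = msr;
        m->val[i].value = val;
        return 0;
}

/* Removal swaps in the last entry so the array stays dense. */
static void toy_clear_msr(struct toy_msrs *m, unsigned int msr)
{
        int i = toy_find_msr(m, msr);

        if (i < 0)
                return;
        m->val[i] = m->val[--m->nr];
}

int main(void)
{
        struct toy_msrs guest = { 0 };

        toy_add_msr(&guest, 0xc0000080u /* MSR_EFER */, 0x500);
        toy_add_msr(&guest, 0xc0000080u, 0xd01);  /* overwrite, no growth */
        toy_clear_msr(&guest, 0xc0000080u);
        printf("nr=%u\n", guest.nr);              /* prints 0 */
        return 0;
}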
- */ -static unsigned long segment_base(u16 selector) -{ - struct desc_struct *table; - unsigned long v; - - if (!(selector & ~SEGMENT_RPL_MASK)) - return 0; - - table = get_current_gdt_ro(); - - if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { - u16 ldt_selector = kvm_read_ldt(); - - if (!(ldt_selector & ~SEGMENT_RPL_MASK)) - return 0; - - table = (struct desc_struct *)segment_base(ldt_selector); - } - v = get_desc_base(&table[selector >> 3]); - return v; -} -#endif - -static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs_host_state *host_state; -#ifdef CONFIG_X86_64 - int cpu = raw_smp_processor_id(); -#endif - unsigned long fs_base, gs_base; - u16 fs_sel, gs_sel; - int i; - - vmx->req_immediate_exit = false; - - /* - * Note that guest MSRs to be saved/restored can also be changed - * when guest state is loaded. This happens when guest transitions - * to/from long-mode by setting MSR_EFER.LMA. - */ - if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { - vmx->guest_msrs_dirty = false; - for (i = 0; i < vmx->save_nmsrs; ++i) - kvm_set_shared_msr(vmx->guest_msrs[i].index, - vmx->guest_msrs[i].data, - vmx->guest_msrs[i].mask); - - } - - if (vmx->loaded_cpu_state) - return; - - vmx->loaded_cpu_state = vmx->loaded_vmcs; - host_state = &vmx->loaded_cpu_state->host_state; - - /* - * Set host fs and gs selectors. Unfortunately, 22.2.3 does not - * allow segment selectors with cpl > 0 or ti == 1. - */ - host_state->ldt_sel = kvm_read_ldt(); - -#ifdef CONFIG_X86_64 - savesegment(ds, host_state->ds_sel); - savesegment(es, host_state->es_sel); - - gs_base = cpu_kernelmode_gs_base(cpu); - if (likely(is_64bit_mm(current->mm))) { - save_fsgs_for_kvm(); - fs_sel = current->thread.fsindex; - gs_sel = current->thread.gsindex; - fs_base = current->thread.fsbase; - vmx->msr_host_kernel_gs_base = current->thread.gsbase; - } else { - savesegment(fs, fs_sel); - savesegment(gs, gs_sel); - fs_base = read_msr(MSR_FS_BASE); - vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); - } - - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); -#else - savesegment(fs, fs_sel); - savesegment(gs, gs_sel); - fs_base = segment_base(fs_sel); - gs_base = segment_base(gs_sel); -#endif - - if (unlikely(fs_sel != host_state->fs_sel)) { - if (!(fs_sel & 7)) - vmcs_write16(HOST_FS_SELECTOR, fs_sel); - else - vmcs_write16(HOST_FS_SELECTOR, 0); - host_state->fs_sel = fs_sel; - } - if (unlikely(gs_sel != host_state->gs_sel)) { - if (!(gs_sel & 7)) - vmcs_write16(HOST_GS_SELECTOR, gs_sel); - else - vmcs_write16(HOST_GS_SELECTOR, 0); - host_state->gs_sel = gs_sel; - } - if (unlikely(fs_base != host_state->fs_base)) { - vmcs_writel(HOST_FS_BASE, fs_base); - host_state->fs_base = fs_base; - } - if (unlikely(gs_base != host_state->gs_base)) { - vmcs_writel(HOST_GS_BASE, gs_base); - host_state->gs_base = gs_base; - } -} - -static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) -{ - struct vmcs_host_state *host_state; - - if (!vmx->loaded_cpu_state) - return; - - WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs); - host_state = &vmx->loaded_cpu_state->host_state; - - ++vmx->vcpu.stat.host_state_reload; - vmx->loaded_cpu_state = NULL; - -#ifdef CONFIG_X86_64 - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); -#endif - if (host_state->ldt_sel || (host_state->gs_sel & 7)) { - kvm_load_ldt(host_state->ldt_sel); -#ifdef CONFIG_X86_64 - load_gs_index(host_state->gs_sel); -#else - loadsegment(gs, host_state->gs_sel); -#endif - } - if 
(host_state->fs_sel & 7) - loadsegment(fs, host_state->fs_sel); -#ifdef CONFIG_X86_64 - if (unlikely(host_state->ds_sel | host_state->es_sel)) { - loadsegment(ds, host_state->ds_sel); - loadsegment(es, host_state->es_sel); - } -#endif - invalidate_tss_limit(); -#ifdef CONFIG_X86_64 - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); -#endif - load_fixmap_gdt(raw_smp_processor_id()); -} - -#ifdef CONFIG_X86_64 -static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) -{ - preempt_disable(); - if (vmx->loaded_cpu_state) - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - preempt_enable(); - return vmx->msr_guest_kernel_gs_base; -} - -static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) -{ - preempt_disable(); - if (vmx->loaded_cpu_state) - wrmsrl(MSR_KERNEL_GS_BASE, data); - preempt_enable(); - vmx->msr_guest_kernel_gs_base = data; -} -#endif - -static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) -{ - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - struct pi_desc old, new; - unsigned int dest; - - /* - * In case of hot-plug or hot-unplug, we may have to undo - * vmx_vcpu_pi_put even if there is no assigned device. And we - * always keep PI.NDST up to date for simplicity: it makes the - * code easier, and CPU migration is not a fast path. - */ - if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) - return; - - /* - * First handle the simple case where no cmpxchg is necessary; just - * allow posting non-urgent interrupts. - * - * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change - * PI.NDST: pi_post_block will do it for us and the wakeup_handler - * expects the VCPU to be on the blocked_vcpu_list that matches - * PI.NDST. - */ - if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || - vcpu->cpu == cpu) { - pi_clear_sn(pi_desc); - return; - } - - /* The full case. */ - do { - old.control = new.control = pi_desc->control; - - dest = cpu_physical_id(cpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - - new.sn = 0; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); -} - -static void decache_tsc_multiplier(struct vcpu_vmx *vmx) -{ - vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio; - vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); -} - -/* - * Switches to specified vcpu, until a matching vcpu_put(), but assumes - * vcpu mutex is already taken. - */ -static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - bool already_loaded = vmx->loaded_vmcs->cpu == cpu; - - if (!already_loaded) { - loaded_vmcs_clear(vmx->loaded_vmcs); - local_irq_disable(); - crash_disable_local_vmclear(cpu); - - /* - * Read loaded_vmcs->cpu should be before fetching - * loaded_vmcs->loaded_vmcss_on_cpu_link. - * See the comments in __loaded_vmcs_clear(). - */ - smp_rmb(); - - list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, - &per_cpu(loaded_vmcss_on_cpu, cpu)); - crash_enable_local_vmclear(cpu); - local_irq_enable(); - } - - if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { - per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; - vmcs_load(vmx->loaded_vmcs->vmcs); - indirect_branch_prediction_barrier(); - } - - if (!already_loaded) { - void *gdt = get_current_gdt_ro(); - unsigned long sysenter_esp; - - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - - /* - * Linux uses per-cpu TSS and GDT, so set these when switching - * processors. See 22.2.4. 
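The do/while around cmpxchg64() in vmx_vcpu_pi_load() above is the classic read-modify-CAS retry loop: snapshot the descriptor, build the new value from the snapshot, and retry if another CPU raced in between. A user-space analogue using C11 atomics, with an invented layout (the toy_* names and bit positions are not the real pi_desc):

#include <stdatomic.h>
#include <stdint.h>

struct toy_pi {
        _Atomic uint64_t control;   /* packs ndst and sn, like pi_desc */
};

#define TOY_SN      (1ull << 32)    /* invented bit positions */
#define TOY_NDST(d) ((uint64_t)(d))

/* Retarget notifications to 'dest' and clear suppress-notification. */
static void toy_pi_retarget(struct toy_pi *pi, uint32_t dest)
{
        uint64_t old, new;

        old = atomic_load(&pi->control);
        do {
                new = (old & ~(TOY_NDST(0xffffffffu) | TOY_SN)) |
                      TOY_NDST(dest);
                /* on failure, 'old' is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(&pi->control, &old, new));
}

int main(void)
{
        struct toy_pi pi = { .control = TOY_SN };

        toy_pi_retarget(&pi, 3);
        return (atomic_load(&pi.control) == 3) ? 0 : 1;
}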
- */
-	vmcs_writel(HOST_TR_BASE,
-		    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
-	vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
-
-	/*
-	 * VM exits change the host TR limit to 0x67 after a VM
-	 * exit. This is okay, since 0x67 covers everything except
-	 * the IO bitmap, and we have code to handle the IO bitmap
-	 * being lost after a VM exit.
-	 */
-	BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
-
-	rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
-	vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
-	vmx->loaded_vmcs->cpu = cpu;
-	}
-
-	/* Setup TSC multiplier */
-	if (kvm_has_tsc_control &&
-	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
-		decache_tsc_multiplier(vmx);
-
-	vmx_vcpu_pi_load(vcpu, cpu);
-	vmx->host_pkru = read_pkru();
-	vmx->host_debugctlmsr = get_debugctlmsr();
-}
-
-static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
-{
-	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-
-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
-	    !kvm_vcpu_apicv_active(vcpu))
-		return;
-
-	/* Set SN when the vCPU is preempted */
-	if (vcpu->preempted)
-		pi_set_sn(pi_desc);
-}
-
-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
-{
-	vmx_vcpu_pi_put(vcpu);
-
-	vmx_prepare_switch_to_host(to_vmx(vcpu));
-}
-
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
-	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
-
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
-
-/*
- * Return the cr0 value that a nested guest would read. This is a combination
- * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
- * its hypervisor (cr0_read_shadow).
- */
-static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
-{
-	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
-		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
-}
-static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
-{
-	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
-		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
-}
-
-static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
-{
-	unsigned long rflags, save_rflags;
-
-	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
-		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
-		rflags = vmcs_readl(GUEST_RFLAGS);
-		if (to_vmx(vcpu)->rmode.vm86_active) {
-			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
-			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
-			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
-		}
-		to_vmx(vcpu)->rflags = rflags;
-	}
-	return to_vmx(vcpu)->rflags;
-}
-
-static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
-{
-	unsigned long old_rflags = vmx_get_rflags(vcpu);
-
-	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
-	to_vmx(vcpu)->rflags = rflags;
-	if (to_vmx(vcpu)->rmode.vm86_active) {
-		to_vmx(vcpu)->rmode.save_rflags = rflags;
-		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
-	}
-	vmcs_writel(GUEST_RFLAGS, rflags);
-
-	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
-		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
-}
-
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
-{
-	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-	int ret = 0;
-
-	if (interruptibility & GUEST_INTR_STATE_STI)
-		ret |= KVM_X86_SHADOW_INT_STI;
-	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-		ret |= KVM_X86_SHADOW_INT_MOV_SS;
-
-	return ret;
-}
-
-static void vmx_set_interrupt_shadow(struct kvm_vcpu
*vcpu, int mask) -{ - u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); - u32 interruptibility = interruptibility_old; - - interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); - - if (mask & KVM_X86_SHADOW_INT_MOV_SS) - interruptibility |= GUEST_INTR_STATE_MOV_SS; - else if (mask & KVM_X86_SHADOW_INT_STI) - interruptibility |= GUEST_INTR_STATE_STI; - - if ((interruptibility != interruptibility_old)) - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); -} - -static void skip_emulated_instruction(struct kvm_vcpu *vcpu) -{ - unsigned long rip; - - rip = kvm_rip_read(vcpu); - rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - kvm_rip_write(vcpu, rip); - - /* skipping an emulated instruction also counts */ - vmx_set_interrupt_shadow(vcpu, 0); -} - -static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, - unsigned long exit_qual) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - unsigned int nr = vcpu->arch.exception.nr; - u32 intr_info = nr | INTR_INFO_VALID_MASK; - - if (vcpu->arch.exception.has_error_code) { - vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; - intr_info |= INTR_INFO_DELIVER_CODE_MASK; - } - - if (kvm_exception_is_soft(nr)) - intr_info |= INTR_TYPE_SOFT_EXCEPTION; - else - intr_info |= INTR_TYPE_HARD_EXCEPTION; - - if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && - vmx_get_nmi_mask(vcpu)) - intr_info |= INTR_INFO_UNBLOCK_NMI; - - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); -} - -/* - * KVM wants to inject page-faults which it got to the guest. This function - * checks whether in a nested guest, we need to inject them to L1 or L2. - */ -static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - unsigned int nr = vcpu->arch.exception.nr; - bool has_payload = vcpu->arch.exception.has_payload; - unsigned long payload = vcpu->arch.exception.payload; - - if (nr == PF_VECTOR) { - if (vcpu->arch.exception.nested_apf) { - *exit_qual = vcpu->arch.apf.nested_apf_token; - return 1; - } - if (nested_vmx_is_page_fault_vmexit(vmcs12, - vcpu->arch.exception.error_code)) { - *exit_qual = has_payload ? payload : vcpu->arch.cr2; - return 1; - } - } else if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) { - if (!has_payload) { - payload = vcpu->arch.dr6; - payload &= ~(DR6_FIXED_1 | DR6_BT); - payload ^= DR6_RTM; - } - *exit_qual = payload; - } else - *exit_qual = 0; - return 1; - } - - return 0; -} - -static void vmx_clear_hlt(struct kvm_vcpu *vcpu) -{ - /* - * Ensure that we clear the HLT state in the VMCS. We don't need to - * explicitly skip the instruction because if the HLT state is set, - * then the instruction is already executing and RIP has already been - * advanced. 
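For a #DB with no explicit payload, nested_vmx_check_exception() above derives the exit qualification from DR6 by dropping the always-one and BT bits and flipping RTM, whose polarity is inverted in the exit qualification. A standalone rendering of that arithmetic (the constants follow the SDM; db_exit_qual_from_dr6 is an invented name):

#include <stdint.h>
#include <stdio.h>

#define DR6_BT      (1ul << 15)
#define DR6_RTM     (1ul << 16)
#define DR6_FIXED_1 0xfffe0ff0ul    /* bits that always read as 1 in DR6 */

static unsigned long db_exit_qual_from_dr6(unsigned long dr6)
{
        unsigned long payload = dr6;

        payload &= ~(DR6_FIXED_1 | DR6_BT); /* drop fixed and BT bits */
        payload ^= DR6_RTM;                 /* exit qual uses inverted RTM */
        return payload;
}

int main(void)
{
        /* DR6 with B0 set and RTM=1 (no RTM #DB) yields exit qual 0x1. */
        printf("%#lx\n", db_exit_qual_from_dr6(0xfffe0ff1ul | DR6_RTM));
        return 0;
}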
- */ - if (kvm_hlt_in_guest(vcpu->kvm) && - vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) - vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); -} - -static void vmx_queue_exception(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned nr = vcpu->arch.exception.nr; - bool has_error_code = vcpu->arch.exception.has_error_code; - u32 error_code = vcpu->arch.exception.error_code; - u32 intr_info = nr | INTR_INFO_VALID_MASK; - - kvm_deliver_exception_payload(vcpu); - - if (has_error_code) { - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); - intr_info |= INTR_INFO_DELIVER_CODE_MASK; - } - - if (vmx->rmode.vm86_active) { - int inc_eip = 0; - if (kvm_exception_is_soft(nr)) - inc_eip = vcpu->arch.event_exit_inst_len; - if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return; - } - - WARN_ON_ONCE(vmx->emulation_required); - - if (kvm_exception_is_soft(nr)) { - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, - vmx->vcpu.arch.event_exit_inst_len); - intr_info |= INTR_TYPE_SOFT_EXCEPTION; - } else - intr_info |= INTR_TYPE_HARD_EXCEPTION; - - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); - - vmx_clear_hlt(vcpu); -} - -static bool vmx_rdtscp_supported(void) -{ - return cpu_has_vmx_rdtscp(); -} - -static bool vmx_invpcid_supported(void) -{ - return cpu_has_vmx_invpcid(); -} - -/* - * Swap MSR entry in host/guest MSR entry array. - */ -static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) -{ - struct shared_msr_entry tmp; - - tmp = vmx->guest_msrs[to]; - vmx->guest_msrs[to] = vmx->guest_msrs[from]; - vmx->guest_msrs[from] = tmp; -} - -/* - * Set up the vmcs to automatically save and restore system - * msrs. Don't touch the 64-bit msrs if the guest is in legacy - * mode, as fiddling with msrs is very expensive. - */ -static void setup_msrs(struct vcpu_vmx *vmx) -{ - int save_nmsrs, index; - - save_nmsrs = 0; -#ifdef CONFIG_X86_64 - if (is_long_mode(&vmx->vcpu)) { - index = __find_msr_index(vmx, MSR_SYSCALL_MASK); - if (index >= 0) - move_msr_up(vmx, index, save_nmsrs++); - index = __find_msr_index(vmx, MSR_LSTAR); - if (index >= 0) - move_msr_up(vmx, index, save_nmsrs++); - index = __find_msr_index(vmx, MSR_CSTAR); - if (index >= 0) - move_msr_up(vmx, index, save_nmsrs++); - index = __find_msr_index(vmx, MSR_TSC_AUX); - if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) - move_msr_up(vmx, index, save_nmsrs++); - /* - * MSR_STAR is only needed on long mode guests, and only - * if efer.sce is enabled. - */ - index = __find_msr_index(vmx, MSR_STAR); - if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) - move_msr_up(vmx, index, save_nmsrs++); - } -#endif - index = __find_msr_index(vmx, MSR_EFER); - if (index >= 0 && update_transition_efer(vmx, index)) - move_msr_up(vmx, index, save_nmsrs++); - - vmx->save_nmsrs = save_nmsrs; - vmx->guest_msrs_dirty = true; - - if (cpu_has_vmx_msr_bitmap()) - vmx_update_msr_bitmap(&vmx->vcpu); -} - -static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (is_guest_mode(vcpu) && - (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) - return vcpu->arch.tsc_offset - vmcs12->tsc_offset; - - return vcpu->arch.tsc_offset; -} - -static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) -{ - u64 active_offset = offset; - if (is_guest_mode(vcpu)) { - /* - * We're here if L1 chose not to trap WRMSR to TSC. 
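setup_msrs() above uses move_msr_up() to compact the MSRs that actually need save/restore to the front of guest_msrs[], so save_nmsrs is both a count and a partition boundary. A generic sketch of that swap-to-front idiom, with invented toy_* names:

#include <stdio.h>

struct toy_entry { unsigned int index; int active; };

static void toy_swap(struct toy_entry *a, struct toy_entry *b)
{
        struct toy_entry t = *a; *a = *b; *b = t;
}

/* Move entries satisfying the predicate to the front; return their count. */
static int toy_partition(struct toy_entry *v, int n)
{
        int save_n = 0;

        for (int i = 0; i < n; i++)
                if (v[i].active)
                        toy_swap(&v[i], &v[save_n++]);
        return save_n;
}

int main(void)
{
        struct toy_entry v[] = { {1, 0}, {2, 1}, {3, 1}, {4, 0} };
        int n = toy_partition(v, 4);

        printf("%d active: %u %u\n", n, v[0].index, v[1].index);
        return 0;
}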
According - * to the spec, this should set L1's TSC; The offset that L1 - * set for L2 remains unchanged, and still needs to be added - * to the newly set TSC to get L2's TSC. - */ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING)) - active_offset += vmcs12->tsc_offset; - } else { - trace_kvm_write_tsc_offset(vcpu->vcpu_id, - vmcs_read64(TSC_OFFSET), offset); - } - - vmcs_write64(TSC_OFFSET, active_offset); - return active_offset; -} - -/* - * nested_vmx_allowed() checks whether a guest should be allowed to use VMX - * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for - * all guests if the "nested" module option is off, and can also be disabled - * for a single guest by disabling its VMX cpuid bit. - */ -static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) -{ - return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); -} - -/* - * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be - * returned for the various VMX controls MSRs when nested VMX is enabled. - * The same values should also be used to verify that vmcs12 control fields are - * valid during nested entry from L1 to L2. - * Each of these control msrs has a low and high 32-bit half: A low bit is on - * if the corresponding bit in the (32-bit) control field *must* be on, and a - * bit in the high half is on if the corresponding bit in the control field - * may be on. See also vmx_control_verify(). - */ -static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv) -{ - if (!nested) { - memset(msrs, 0, sizeof(*msrs)); - return; - } - - /* - * Note that as a general rule, the high half of the MSRs (bits in - * the control fields which may be 1) should be initialized by the - * intersection of the underlying hardware's MSR (i.e., features which - * can be supported) and the list of features we want to expose - - * because they are known to be properly supported in our code. - * Also, usually, the low half of the MSRs (bits which must be 1) can - * be set to 0, meaning that L1 may turn off any of these bits. The - * reason is that if one of these bits is necessary, it will appear - * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control - * fields of vmcs01 and vmcs02, will turn these bits off - and - * nested_vmx_exit_reflected() will not pass related exits to L1. - * These rules have exceptions below. - */ - - /* pin-based controls */ - rdmsr(MSR_IA32_VMX_PINBASED_CTLS, - msrs->pinbased_ctls_low, - msrs->pinbased_ctls_high); - msrs->pinbased_ctls_low |= - PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; - msrs->pinbased_ctls_high &= - PIN_BASED_EXT_INTR_MASK | - PIN_BASED_NMI_EXITING | - PIN_BASED_VIRTUAL_NMIS | - (apicv ? PIN_BASED_POSTED_INTR : 0); - msrs->pinbased_ctls_high |= - PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | - PIN_BASED_VMX_PREEMPTION_TIMER; - - /* exit controls */ - rdmsr(MSR_IA32_VMX_EXIT_CTLS, - msrs->exit_ctls_low, - msrs->exit_ctls_high); - msrs->exit_ctls_low = - VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; - - msrs->exit_ctls_high &= -#ifdef CONFIG_X86_64 - VM_EXIT_HOST_ADDR_SPACE_SIZE | -#endif - VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; - msrs->exit_ctls_high |= - VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | - VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | - VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; - - /* We support free control of debug control saving. 
*/ - msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; - - /* entry controls */ - rdmsr(MSR_IA32_VMX_ENTRY_CTLS, - msrs->entry_ctls_low, - msrs->entry_ctls_high); - msrs->entry_ctls_low = - VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; - msrs->entry_ctls_high &= -#ifdef CONFIG_X86_64 - VM_ENTRY_IA32E_MODE | -#endif - VM_ENTRY_LOAD_IA32_PAT; - msrs->entry_ctls_high |= - (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); - - /* We support free control of debug control loading. */ - msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; - - /* cpu-based controls */ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, - msrs->procbased_ctls_low, - msrs->procbased_ctls_high); - msrs->procbased_ctls_low = - CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; - msrs->procbased_ctls_high &= - CPU_BASED_VIRTUAL_INTR_PENDING | - CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | - CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | - CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | -#ifdef CONFIG_X86_64 - CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | -#endif - CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | - CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | - CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | - CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; - /* - * We can allow some features even when not supported by the - * hardware. For example, L1 can specify an MSR bitmap - and we - * can use it to avoid exits to L1 - even when L0 runs L2 - * without MSR bitmaps. - */ - msrs->procbased_ctls_high |= - CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | - CPU_BASED_USE_MSR_BITMAPS; - - /* We support free control of CR3 access interception. */ - msrs->procbased_ctls_low &= - ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); - - /* - * secondary cpu-based controls. Do not include those that - * depend on CPUID bits, they are added later by vmx_cpuid_update. - */ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); - msrs->secondary_ctls_low = 0; - msrs->secondary_ctls_high &= - SECONDARY_EXEC_DESC | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_WBINVD_EXITING; - - /* - * We can emulate "VMCS shadowing," even if the hardware - * doesn't support it. - */ - msrs->secondary_ctls_high |= - SECONDARY_EXEC_SHADOW_VMCS; - - if (enable_ept) { - /* nested EPT: emulate EPT also to L1 */ - msrs->secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_EPT; - msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT | - VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; - if (cpu_has_vmx_ept_execute_only()) - msrs->ept_caps |= - VMX_EPT_EXECUTE_ONLY_BIT; - msrs->ept_caps &= vmx_capability.ept; - msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | - VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | - VMX_EPT_1GB_PAGE_BIT; - if (enable_ept_ad_bits) { - msrs->secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_PML; - msrs->ept_caps |= VMX_EPT_AD_BIT; - } - } - - if (cpu_has_vmx_vmfunc()) { - msrs->secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_VMFUNC; - /* - * Advertise EPTP switching unconditionally - * since we emulate it - */ - if (enable_ept) - msrs->vmfunc_controls = - VMX_VMFUNC_EPTP_SWITCHING; - } - - /* - * Old versions of KVM use the single-context version without - * checking for support, so declare that it is supported even - * though it is treated as global context. 
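Every control MSR assembled above packs must-be-1 bits in its low half and may-be-1 bits in its high half, and a value L1 requests is acceptable only if it keeps every must-be-1 bit set and stays inside the may-be-1 set. vmx_control_verify() folds both tests into one expression; a standalone demonstration with made-up MSR halves:

#include <stdint.h>
#include <stdio.h>

/* Valid iff every must-be-1 bit (low) is set and no bit outside the
 * may-be-1 set (high) is used; one expression covers both checks. */
static int control_verify(uint32_t control, uint32_t low, uint32_t high)
{
        return ((control & high) | low) == control;
}

int main(void)
{
        uint32_t low = 0x3, high = 0xff;    /* made-up MSR halves */

        printf("%d\n", control_verify(0x0f, low, high));  /* 1: allowed */
        printf("%d\n", control_verify(0x0c, low, high));  /* 0: must-be-1 cleared */
        printf("%d\n", control_verify(0x103, low, high)); /* 0: disallowed bit */
        return 0;
}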
The alternative is - * not failing the single-context invvpid, and it is worse. - */ - if (enable_vpid) { - msrs->secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_VPID; - msrs->vpid_caps = VMX_VPID_INVVPID_BIT | - VMX_VPID_EXTENT_SUPPORTED_MASK; - } - - if (enable_unrestricted_guest) - msrs->secondary_ctls_high |= - SECONDARY_EXEC_UNRESTRICTED_GUEST; - - if (flexpriority_enabled) - msrs->secondary_ctls_high |= - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - - /* miscellaneous data */ - rdmsr(MSR_IA32_VMX_MISC, - msrs->misc_low, - msrs->misc_high); - msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; - msrs->misc_low |= - MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | - VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | - VMX_MISC_ACTIVITY_HLT; - msrs->misc_high = 0; - - /* - * This MSR reports some information about VMX support. We - * should return information about the VMX we emulate for the - * guest, and the VMCS structure we give it - not about the - * VMX support of the underlying hardware. - */ - msrs->basic = - VMCS12_REVISION | - VMX_BASIC_TRUE_CTLS | - ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | - (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); - - if (cpu_has_vmx_basic_inout()) - msrs->basic |= VMX_BASIC_INOUT; - - /* - * These MSRs specify bits which the guest must keep fixed on - * while L1 is in VMXON mode (in L1's root mode, or running an L2). - * We picked the standard core2 setting. - */ -#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) -#define VMXON_CR4_ALWAYSON X86_CR4_VMXE - msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; - msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; - - /* These MSRs specify bits which the guest must keep fixed off. */ - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); - rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); - - /* highest index: VMX_PREEMPTION_TIMER_VALUE */ - msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; -} - -/* - * if fixed0[i] == 1: val[i] must be 1 - * if fixed1[i] == 0: val[i] must be 0 - */ -static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) -{ - return ((val & fixed1) | fixed0) == val; -} - -static inline bool vmx_control_verify(u32 control, u32 low, u32 high) -{ - return fixed_bits_valid(control, low, high); -} - -static inline u64 vmx_control_msr(u32 low, u32 high) -{ - return low | ((u64)high << 32); -} - -static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) -{ - superset &= mask; - subset &= mask; - - return (superset | subset) == superset; -} - -static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) -{ - const u64 feature_and_reserved = - /* feature (except bit 48; see below) */ - BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | - /* reserved */ - BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); - u64 vmx_basic = vmx->nested.msrs.basic; - - if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) - return -EINVAL; - - /* - * KVM does not emulate a version of VMX that constrains physical - * addresses of VMX structures (e.g. VMCS) to 32-bits. 
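vmx_restore_vmx_basic() above leans on is_bitwise_subset(): under a mask, userspace may only clear capability bits relative to what KVM reports, never invent new ones. A compact demonstration (the function body matches the one above; the example values are made up):

#include <stdint.h>
#include <stdio.h>

static int is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
        superset &= mask;
        subset &= mask;
        return (superset | subset) == superset;
}

int main(void)
{
        uint64_t reported = 0xf0;   /* bits KVM advertises */

        /* Dropping an advertised bit is fine... */
        printf("%d\n", is_bitwise_subset(reported, 0x70, ~0ull));  /* 1 */
        /* ...but inventing one is rejected. */
        printf("%d\n", is_bitwise_subset(reported, 0x1f0, ~0ull)); /* 0 */
        return 0;
}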
- */ - if (data & BIT_ULL(48)) - return -EINVAL; - - if (vmx_basic_vmcs_revision_id(vmx_basic) != - vmx_basic_vmcs_revision_id(data)) - return -EINVAL; - - if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) - return -EINVAL; - - vmx->nested.msrs.basic = data; - return 0; -} - -static int -vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -{ - u64 supported; - u32 *lowp, *highp; - - switch (msr_index) { - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - lowp = &vmx->nested.msrs.pinbased_ctls_low; - highp = &vmx->nested.msrs.pinbased_ctls_high; - break; - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - lowp = &vmx->nested.msrs.procbased_ctls_low; - highp = &vmx->nested.msrs.procbased_ctls_high; - break; - case MSR_IA32_VMX_TRUE_EXIT_CTLS: - lowp = &vmx->nested.msrs.exit_ctls_low; - highp = &vmx->nested.msrs.exit_ctls_high; - break; - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - lowp = &vmx->nested.msrs.entry_ctls_low; - highp = &vmx->nested.msrs.entry_ctls_high; - break; - case MSR_IA32_VMX_PROCBASED_CTLS2: - lowp = &vmx->nested.msrs.secondary_ctls_low; - highp = &vmx->nested.msrs.secondary_ctls_high; - break; - default: - BUG(); - } - - supported = vmx_control_msr(*lowp, *highp); - - /* Check must-be-1 bits are still 1. */ - if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) - return -EINVAL; - - /* Check must-be-0 bits are still 0. */ - if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) - return -EINVAL; - - *lowp = data; - *highp = data >> 32; - return 0; -} - -static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) -{ - const u64 feature_and_reserved_bits = - /* feature */ - BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | - BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | - /* reserved */ - GENMASK_ULL(13, 9) | BIT_ULL(31); - u64 vmx_misc; - - vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, - vmx->nested.msrs.misc_high); - - if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) - return -EINVAL; - - if ((vmx->nested.msrs.pinbased_ctls_high & - PIN_BASED_VMX_PREEMPTION_TIMER) && - vmx_misc_preemption_timer_rate(data) != - vmx_misc_preemption_timer_rate(vmx_misc)) - return -EINVAL; - - if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) - return -EINVAL; - - if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) - return -EINVAL; - - if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) - return -EINVAL; - - vmx->nested.msrs.misc_low = data; - vmx->nested.msrs.misc_high = data >> 32; - - /* - * If L1 has read-only VM-exit information fields, use the - * less permissive vmx_vmwrite_bitmap to specify write - * permissions for the shadow VMCS. - */ - if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) - vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); - - return 0; -} - -static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) -{ - u64 vmx_ept_vpid_cap; - - vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, - vmx->nested.msrs.vpid_caps); - - /* Every bit is either reserved or a feature bit. 
*/ - if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) - return -EINVAL; - - vmx->nested.msrs.ept_caps = data; - vmx->nested.msrs.vpid_caps = data >> 32; - return 0; -} - -static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -{ - u64 *msr; - - switch (msr_index) { - case MSR_IA32_VMX_CR0_FIXED0: - msr = &vmx->nested.msrs.cr0_fixed0; - break; - case MSR_IA32_VMX_CR4_FIXED0: - msr = &vmx->nested.msrs.cr4_fixed0; - break; - default: - BUG(); - } - - /* - * 1 bits (which indicates bits which "must-be-1" during VMX operation) - * must be 1 in the restored value. - */ - if (!is_bitwise_subset(data, *msr, -1ULL)) - return -EINVAL; - - *msr = data; - return 0; -} - -/* - * Called when userspace is restoring VMX MSRs. - * - * Returns 0 on success, non-0 otherwise. - */ -static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * Don't allow changes to the VMX capability MSRs while the vCPU - * is in VMX operation. - */ - if (vmx->nested.vmxon) - return -EBUSY; - - switch (msr_index) { - case MSR_IA32_VMX_BASIC: - return vmx_restore_vmx_basic(vmx, data); - case MSR_IA32_VMX_PINBASED_CTLS: - case MSR_IA32_VMX_PROCBASED_CTLS: - case MSR_IA32_VMX_EXIT_CTLS: - case MSR_IA32_VMX_ENTRY_CTLS: - /* - * The "non-true" VMX capability MSRs are generated from the - * "true" MSRs, so we do not support restoring them directly. - * - * If userspace wants to emulate VMX_BASIC[55]=0, userspace - * should restore the "true" MSRs with the must-be-1 bits - * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND - * DEFAULT SETTINGS". - */ - return -EINVAL; - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - case MSR_IA32_VMX_TRUE_EXIT_CTLS: - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - case MSR_IA32_VMX_PROCBASED_CTLS2: - return vmx_restore_control_msr(vmx, msr_index, data); - case MSR_IA32_VMX_MISC: - return vmx_restore_vmx_misc(vmx, data); - case MSR_IA32_VMX_CR0_FIXED0: - case MSR_IA32_VMX_CR4_FIXED0: - return vmx_restore_fixed0_msr(vmx, msr_index, data); - case MSR_IA32_VMX_CR0_FIXED1: - case MSR_IA32_VMX_CR4_FIXED1: - /* - * These MSRs are generated based on the vCPU's CPUID, so we - * do not support restoring them directly. - */ - return -EINVAL; - case MSR_IA32_VMX_EPT_VPID_CAP: - return vmx_restore_vmx_ept_vpid_cap(vmx, data); - case MSR_IA32_VMX_VMCS_ENUM: - vmx->nested.msrs.vmcs_enum = data; - return 0; - default: - /* - * The rest of the VMX capability MSRs do not support restore. - */ - return -EINVAL; - } -} - -/* Returns 0 on success, non-0 otherwise. 
*/ -static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) -{ - switch (msr_index) { - case MSR_IA32_VMX_BASIC: - *pdata = msrs->basic; - break; - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - case MSR_IA32_VMX_PINBASED_CTLS: - *pdata = vmx_control_msr( - msrs->pinbased_ctls_low, - msrs->pinbased_ctls_high); - if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) - *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; - break; - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - case MSR_IA32_VMX_PROCBASED_CTLS: - *pdata = vmx_control_msr( - msrs->procbased_ctls_low, - msrs->procbased_ctls_high); - if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) - *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; - break; - case MSR_IA32_VMX_TRUE_EXIT_CTLS: - case MSR_IA32_VMX_EXIT_CTLS: - *pdata = vmx_control_msr( - msrs->exit_ctls_low, - msrs->exit_ctls_high); - if (msr_index == MSR_IA32_VMX_EXIT_CTLS) - *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; - break; - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - case MSR_IA32_VMX_ENTRY_CTLS: - *pdata = vmx_control_msr( - msrs->entry_ctls_low, - msrs->entry_ctls_high); - if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) - *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; - break; - case MSR_IA32_VMX_MISC: - *pdata = vmx_control_msr( - msrs->misc_low, - msrs->misc_high); - break; - case MSR_IA32_VMX_CR0_FIXED0: - *pdata = msrs->cr0_fixed0; - break; - case MSR_IA32_VMX_CR0_FIXED1: - *pdata = msrs->cr0_fixed1; - break; - case MSR_IA32_VMX_CR4_FIXED0: - *pdata = msrs->cr4_fixed0; - break; - case MSR_IA32_VMX_CR4_FIXED1: - *pdata = msrs->cr4_fixed1; - break; - case MSR_IA32_VMX_VMCS_ENUM: - *pdata = msrs->vmcs_enum; - break; - case MSR_IA32_VMX_PROCBASED_CTLS2: - *pdata = vmx_control_msr( - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); - break; - case MSR_IA32_VMX_EPT_VPID_CAP: - *pdata = msrs->ept_caps | - ((u64)msrs->vpid_caps << 32); - break; - case MSR_IA32_VMX_VMFUNC: - *pdata = msrs->vmfunc_controls; - break; - default: - return 1; - } - - return 0; -} - -static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, - uint64_t val) -{ - uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; - - return !(val & ~valid_bits); -} - -static int vmx_get_msr_feature(struct kvm_msr_entry *msr) -{ - switch (msr->index) { - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: - if (!nested) - return 1; - return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); - default: - return 1; - } - - return 0; -} - -/* - * Reads an msr value (of 'msr_index') into 'pdata'. - * Returns 0 on success, non-0 otherwise. - * Assumes vcpu_load() was already called. 
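vmx_feature_control_msr_valid() above is the standard reserved-bit idiom: accept a write only if every set bit falls inside a whitelist. A runnable sketch (the FC_* constants follow the architectural IA32_FEATURE_CONTROL layout; msr_write_valid is an invented name):

#include <stdint.h>
#include <stdio.h>

#define FC_LOCKED            (1ull << 0)   /* FEATURE_CONTROL.LOCK */
#define FC_VMXON_OUTSIDE_SMX (1ull << 2)

/* Accept the write iff every set bit is inside the whitelist. */
static int msr_write_valid(uint64_t val, uint64_t valid_bits)
{
        return !(val & ~valid_bits);
}

int main(void)
{
        uint64_t valid = FC_LOCKED | FC_VMXON_OUTSIDE_SMX;

        printf("%d\n", msr_write_valid(FC_LOCKED, valid));  /* 1 */
        printf("%d\n", msr_write_valid(1ull << 20, valid)); /* 0: reserved */
        return 0;
}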
- */
-static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct shared_msr_entry *msr;
-
-	switch (msr_info->index) {
-#ifdef CONFIG_X86_64
-	case MSR_FS_BASE:
-		msr_info->data = vmcs_readl(GUEST_FS_BASE);
-		break;
-	case MSR_GS_BASE:
-		msr_info->data = vmcs_readl(GUEST_GS_BASE);
-		break;
-	case MSR_KERNEL_GS_BASE:
-		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
-		break;
-#endif
-	case MSR_EFER:
-		return kvm_get_msr_common(vcpu, msr_info);
-	case MSR_IA32_SPEC_CTRL:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
-			return 1;
-
-		msr_info->data = to_vmx(vcpu)->spec_ctrl;
-		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
-			return 1;
-		msr_info->data = to_vmx(vcpu)->arch_capabilities;
-		break;
-	case MSR_IA32_SYSENTER_CS:
-		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
-		break;
-	case MSR_IA32_SYSENTER_EIP:
-		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
-		break;
-	case MSR_IA32_SYSENTER_ESP:
-		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
-		break;
-	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported() ||
-		    (!msr_info->host_initiated &&
-		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
-			return 1;
-		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
-		break;
-	case MSR_IA32_MCG_EXT_CTL:
-		if (!msr_info->host_initiated &&
-		    !(vmx->msr_ia32_feature_control &
-		      FEATURE_CONTROL_LMCE))
-			return 1;
-		msr_info->data = vcpu->arch.mcg_ext_ctl;
-		break;
-	case MSR_IA32_FEATURE_CONTROL:
-		msr_info->data = vmx->msr_ia32_feature_control;
-		break;
-	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
-		if (!nested_vmx_allowed(vcpu))
-			return 1;
-		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
-				       &msr_info->data);
-	case MSR_IA32_XSS:
-		if (!vmx_xsaves_supported())
-			return 1;
-		msr_info->data = vcpu->arch.ia32_xss;
-		break;
-	case MSR_TSC_AUX:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-			return 1;
-		/* Otherwise falls through */
-	default:
-		msr = find_msr_entry(vmx, msr_info->index);
-		if (msr) {
-			msr_info->data = msr->data;
-			break;
-		}
-		return kvm_get_msr_common(vcpu, msr_info);
-	}
-
-	return 0;
-}
-
-static void vmx_leave_nested(struct kvm_vcpu *vcpu);
-
-/*
- * Writes msr value into the appropriate "register".
- * Returns 0 on success, non-0 otherwise.
- * Assumes vcpu_load() was already called.
- */ -static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct shared_msr_entry *msr; - int ret = 0; - u32 msr_index = msr_info->index; - u64 data = msr_info->data; - - switch (msr_index) { - case MSR_EFER: - ret = kvm_set_msr_common(vcpu, msr_info); - break; -#ifdef CONFIG_X86_64 - case MSR_FS_BASE: - vmx_segment_cache_clear(vmx); - vmcs_writel(GUEST_FS_BASE, data); - break; - case MSR_GS_BASE: - vmx_segment_cache_clear(vmx); - vmcs_writel(GUEST_GS_BASE, data); - break; - case MSR_KERNEL_GS_BASE: - vmx_write_guest_kernel_gs_base(vmx, data); - break; -#endif - case MSR_IA32_SYSENTER_CS: - vmcs_write32(GUEST_SYSENTER_CS, data); - break; - case MSR_IA32_SYSENTER_EIP: - vmcs_writel(GUEST_SYSENTER_EIP, data); - break; - case MSR_IA32_SYSENTER_ESP: - vmcs_writel(GUEST_SYSENTER_ESP, data); - break; - case MSR_IA32_BNDCFGS: - if (!kvm_mpx_supported() || - (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) - return 1; - if (is_noncanonical_address(data & PAGE_MASK, vcpu) || - (data & MSR_IA32_BNDCFGS_RSVD)) - return 1; - vmcs_write64(GUEST_BNDCFGS, data); - break; - case MSR_IA32_SPEC_CTRL: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) - return 1; - - /* The STIBP bit doesn't fault even if it's not advertised */ - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) - return 1; - - vmx->spec_ctrl = data; - - if (!data) - break; - - /* - * For non-nested: - * When it's written (to non-zero) for the first time, pass - * it through. - * - * For nested: - * The handling of the MSR bitmap for L2 guests is done in - * nested_vmx_merge_msr_bitmap. We should not touch the - * vmcs02.msr_bitmap here since it gets completely overwritten - * in the merging. We update the vmcs01 here for L1 as well - * since it will end up touching the MSR anyway now. - */ - vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, - MSR_IA32_SPEC_CTRL, - MSR_TYPE_RW); - break; - case MSR_IA32_PRED_CMD: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) - return 1; - - if (data & ~PRED_CMD_IBPB) - return 1; - - if (!data) - break; - - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - - /* - * For non-nested: - * When it's written (to non-zero) for the first time, pass - * it through. - * - * For nested: - * The handling of the MSR bitmap for L2 guests is done in - * nested_vmx_merge_msr_bitmap. We should not touch the - * vmcs02.msr_bitmap here since it gets completely overwritten - * in the merging. 
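The MSR_IA32_SPEC_CTRL case above combines a reserved-bit check with a one-shot optimization: once the guest writes a non-zero value, interception is dropped and the MSR is passed through from then on. A simplified model of that control flow (the toy_* types are invented; the bit values are architectural):

#include <stdbool.h>
#include <stdint.h>

#define SPEC_CTRL_IBRS  (1ull << 0)
#define SPEC_CTRL_STIBP (1ull << 1)
#define SPEC_CTRL_SSBD  (1ull << 2)

struct toy_vcpu {
        uint64_t spec_ctrl;
        bool intercepted;   /* whether writes still trap to the host */
};

static int toy_set_spec_ctrl(struct toy_vcpu *v, uint64_t data)
{
        if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
                return 1;               /* #GP: reserved bit set */

        v->spec_ctrl = data;
        if (data && v->intercepted)
                v->intercepted = false; /* pass the MSR through from now on */
        return 0;
}

int main(void)
{
        struct toy_vcpu v = { .spec_ctrl = 0, .intercepted = true };

        toy_set_spec_ctrl(&v, 1ull << 5);       /* rejected: reserved bit */
        toy_set_spec_ctrl(&v, SPEC_CTRL_IBRS);  /* accepted, uninterposed */
        return v.intercepted;                   /* 0 */
}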
- */ - vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, - MSR_TYPE_W); - break; - case MSR_IA32_ARCH_CAPABILITIES: - if (!msr_info->host_initiated) - return 1; - vmx->arch_capabilities = data; - break; - case MSR_IA32_CR_PAT: - if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; - vmcs_write64(GUEST_IA32_PAT, data); - vcpu->arch.pat = data; - break; - } - ret = kvm_set_msr_common(vcpu, msr_info); - break; - case MSR_IA32_TSC_ADJUST: - ret = kvm_set_msr_common(vcpu, msr_info); - break; - case MSR_IA32_MCG_EXT_CTL: - if ((!msr_info->host_initiated && - !(to_vmx(vcpu)->msr_ia32_feature_control & - FEATURE_CONTROL_LMCE)) || - (data & ~MCG_EXT_CTL_LMCE_EN)) - return 1; - vcpu->arch.mcg_ext_ctl = data; - break; - case MSR_IA32_FEATURE_CONTROL: - if (!vmx_feature_control_msr_valid(vcpu, data) || - (to_vmx(vcpu)->msr_ia32_feature_control & - FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) - return 1; - vmx->msr_ia32_feature_control = data; - if (msr_info->host_initiated && data == 0) - vmx_leave_nested(vcpu); - break; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: - if (!msr_info->host_initiated) - return 1; /* they are read-only */ - if (!nested_vmx_allowed(vcpu)) - return 1; - return vmx_set_vmx_msr(vcpu, msr_index, data); - case MSR_IA32_XSS: - if (!vmx_xsaves_supported()) - return 1; - /* - * The only supported bit as of Skylake is bit 8, but - * it is not supported on KVM. - */ - if (data != 0) - return 1; - vcpu->arch.ia32_xss = data; - if (vcpu->arch.ia32_xss != host_xss) - add_atomic_switch_msr(vmx, MSR_IA32_XSS, - vcpu->arch.ia32_xss, host_xss, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_XSS); - break; - case MSR_TSC_AUX: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) - return 1; - /* Check reserved bit, higher 32 bits should be zero */ - if ((data >> 32) != 0) - return 1; - /* Otherwise falls through */ - default: - msr = find_msr_entry(vmx, msr_index); - if (msr) { - u64 old_msr_data = msr->data; - msr->data = data; - if (msr - vmx->guest_msrs < vmx->save_nmsrs) { - preempt_disable(); - ret = kvm_set_shared_msr(msr->index, msr->data, - msr->mask); - preempt_enable(); - if (ret) - msr->data = old_msr_data; - } - break; - } - ret = kvm_set_msr_common(vcpu, msr_info); - } - - return ret; -} - -static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) -{ - __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); - switch (reg) { - case VCPU_REGS_RSP: - vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); - break; - case VCPU_REGS_RIP: - vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); - break; - case VCPU_EXREG_PDPTR: - if (enable_ept) - ept_save_pdptrs(vcpu); - break; - default: - break; - } -} - -static __init int cpu_has_kvm_support(void) -{ - return cpu_has_vmx(); -} - -static __init int vmx_disabled_by_bios(void) -{ - u64 msr; - - rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); - if (msr & FEATURE_CONTROL_LOCKED) { - /* launched w/ TXT and VMX disabled */ - if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) - && tboot_enabled()) - return 1; - /* launched w/o TXT and VMX only enabled w/ TXT */ - if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) - && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) - && !tboot_enabled()) { - printk(KERN_WARNING "kvm: disable TXT in the BIOS or " - "activate TXT before enabling KVM\n"); - return 1; - } - /* launched w/o TXT and VMX disabled */ - if (!(msr & 
FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
-			&& !tboot_enabled())
-			return 1;
-	}
-
-	return 0;
-}
-
-static void kvm_cpu_vmxon(u64 addr)
-{
-	cr4_set_bits(X86_CR4_VMXE);
-	intel_pt_handle_vmx(1);
-
-	asm volatile ("vmxon %0" : : "m"(addr));
-}
-
-static int hardware_enable(void)
-{
-	int cpu = raw_smp_processor_id();
-	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
-	u64 old, test_bits;
-
-	if (cr4_read_shadow() & X86_CR4_VMXE)
-		return -EBUSY;
-
-	/*
-	 * This can happen if we hot-added a CPU but failed to allocate
-	 * VP assist page for it.
-	 */
-	if (static_branch_unlikely(&enable_evmcs) &&
-	    !hv_get_vp_assist_page(cpu))
-		return -EFAULT;
-
-	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
-	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-
-	/*
-	 * Now we can enable the vmclear operation in kdump
-	 * since the loaded_vmcss_on_cpu list on this cpu
-	 * has been initialized.
-	 *
-	 * Though the cpu is not in VMX operation now, there
-	 * is no problem in enabling the vmclear operation,
-	 * as the loaded_vmcss_on_cpu list is empty.
-	 */
-	crash_enable_local_vmclear(cpu);
-
-	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-
-	test_bits = FEATURE_CONTROL_LOCKED;
-	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
-	if (tboot_enabled())
-		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
-
-	if ((old & test_bits) != test_bits) {
-		/* enable and lock */
-		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
-	}
-	kvm_cpu_vmxon(phys_addr);
-	if (enable_ept)
-		ept_sync_global();
-
-	return 0;
-}
-
-static void vmclear_local_loaded_vmcss(void)
-{
-	int cpu = raw_smp_processor_id();
-	struct loaded_vmcs *v, *n;
-
-	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
-				 loaded_vmcss_on_cpu_link)
-		__loaded_vmcs_clear(v);
-}
-
-
-/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
- * tricks.
- */
-static void kvm_cpu_vmxoff(void)
-{
-	asm volatile (__ex("vmxoff"));
-
-	intel_pt_handle_vmx(0);
-	cr4_clear_bits(X86_CR4_VMXE);
-}
-
-static void hardware_disable(void)
-{
-	vmclear_local_loaded_vmcss();
-	kvm_cpu_vmxoff();
-}
-
-static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
-				      u32 msr, u32 *result)
-{
-	u32 vmx_msr_low, vmx_msr_high;
-	u32 ctl = ctl_min | ctl_opt;
-
-	rdmsr(msr, vmx_msr_low, vmx_msr_high);
-
-	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
-	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one */
-
-	/* Ensure minimum (required) set of control bits are supported.
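adjust_vmx_controls() just above implements the SDM's adjustment rule for VMX capability MSRs: the high word has a 1 wherever a control may be 1, the low word has a 1 wherever it must be 1, and a required bit that cannot be satisfied fails the probe. A standalone version with the rdmsr() stubbed to made-up values:

#include <errno.h>
#include <stdint.h>

/* Stand-in for rdmsr() on a VMX capability MSR. */
static void read_cap_msr(uint32_t *low, uint32_t *high)
{
        *low  = 0x00000016;   /* must-be-1 bits */
        *high = 0xfff9fffe;   /* may-be-1 bits */
}

static int adjust_controls(uint32_t ctl_min, uint32_t ctl_opt, uint32_t *result)
{
        uint32_t low, high;
        uint32_t ctl = ctl_min | ctl_opt;

        read_cap_msr(&low, &high);
        ctl &= high;  /* 0 in the high word: bit must stay 0 */
        ctl |= low;   /* 1 in the low word: bit must stay 1 */

        if (ctl_min & ~ctl)
                return -EIO;  /* a required bit isn't supported */
        *result = ctl;
        return 0;
}

int main(void)
{
        uint32_t ctl;

        return adjust_controls(0x16, 0x200, &ctl); /* 0 on success */
}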
*/ - if (ctl_min & ~ctl) - return -EIO; - - *result = ctl; - return 0; -} - -static __init bool allow_1_setting(u32 msr, u32 ctl) -{ - u32 vmx_msr_low, vmx_msr_high; - - rdmsr(msr, vmx_msr_low, vmx_msr_high); - return vmx_msr_high & ctl; -} - -static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) -{ - u32 vmx_msr_low, vmx_msr_high; - u32 min, opt, min2, opt2; - u32 _pin_based_exec_control = 0; - u32 _cpu_based_exec_control = 0; - u32 _cpu_based_2nd_exec_control = 0; - u32 _vmexit_control = 0; - u32 _vmentry_control = 0; - - memset(vmcs_conf, 0, sizeof(*vmcs_conf)); - min = CPU_BASED_HLT_EXITING | -#ifdef CONFIG_X86_64 - CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING | -#endif - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_MOV_DR_EXITING | - CPU_BASED_USE_TSC_OFFSETING | - CPU_BASED_MWAIT_EXITING | - CPU_BASED_MONITOR_EXITING | - CPU_BASED_INVLPG_EXITING | - CPU_BASED_RDPMC_EXITING; - - opt = CPU_BASED_TPR_SHADOW | - CPU_BASED_USE_MSR_BITMAPS | - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, - &_cpu_based_exec_control) < 0) - return -EIO; -#ifdef CONFIG_X86_64 - if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) - _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & - ~CPU_BASED_CR8_STORE_EXITING; -#endif - if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { - min2 = 0; - opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_WBINVD_EXITING | - SECONDARY_EXEC_ENABLE_VPID | - SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_UNRESTRICTED_GUEST | - SECONDARY_EXEC_PAUSE_LOOP_EXITING | - SECONDARY_EXEC_DESC | - SECONDARY_EXEC_RDTSCP | - SECONDARY_EXEC_ENABLE_INVPCID | - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_SHADOW_VMCS | - SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_RDSEED_EXITING | - SECONDARY_EXEC_RDRAND_EXITING | - SECONDARY_EXEC_ENABLE_PML | - SECONDARY_EXEC_TSC_SCALING | - SECONDARY_EXEC_ENABLE_VMFUNC | - SECONDARY_EXEC_ENCLS_EXITING; - if (adjust_vmx_controls(min2, opt2, - MSR_IA32_VMX_PROCBASED_CTLS2, - &_cpu_based_2nd_exec_control) < 0) - return -EIO; - } -#ifndef CONFIG_X86_64 - if (!(_cpu_based_2nd_exec_control & - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) - _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; -#endif - - if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) - _cpu_based_2nd_exec_control &= ~( - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); - - rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, - &vmx_capability.ept, &vmx_capability.vpid); - - if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { - /* CR3 accesses and invlpg don't need to cause VM Exits when EPT - enabled */ - _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_INVLPG_EXITING); - } else if (vmx_capability.ept) { - vmx_capability.ept = 0; - pr_warn_once("EPT CAP should not exist if not support " - "1-setting enable EPT VM-execution control\n"); - } - if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && - vmx_capability.vpid) { - vmx_capability.vpid = 0; - pr_warn_once("VPID CAP should not exist if not support " - "1-setting enable VPID VM-execution control\n"); - } - - min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; -#ifdef CONFIG_X86_64 - min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; -#endif - opt = 
VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | - VM_EXIT_CLEAR_BNDCFGS; - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, - &_vmexit_control) < 0) - return -EIO; - - min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; - opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | - PIN_BASED_VMX_PREEMPTION_TIMER; - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, - &_pin_based_exec_control) < 0) - return -EIO; - - if (cpu_has_broken_vmx_preemption_timer()) - _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; - if (!(_cpu_based_2nd_exec_control & - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) - _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; - - min = VM_ENTRY_LOAD_DEBUG_CONTROLS; - opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, - &_vmentry_control) < 0) - return -EIO; - - rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); - - /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ - if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) - return -EIO; - -#ifdef CONFIG_X86_64 - /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ - if (vmx_msr_high & (1u<<16)) - return -EIO; -#endif - - /* Require Write-Back (WB) memory type for VMCS accesses. */ - if (((vmx_msr_high >> 18) & 15) != 6) - return -EIO; - - vmcs_conf->size = vmx_msr_high & 0x1fff; - vmcs_conf->order = get_order(vmcs_conf->size); - vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; - - vmcs_conf->revision_id = vmx_msr_low; - - vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; - vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; - vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; - vmcs_conf->vmexit_ctrl = _vmexit_control; - vmcs_conf->vmentry_ctrl = _vmentry_control; - - if (static_branch_unlikely(&enable_evmcs)) - evmcs_sanitize_exec_ctrls(vmcs_conf); - - cpu_has_load_ia32_efer = - allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, - VM_ENTRY_LOAD_IA32_EFER) - && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, - VM_EXIT_LOAD_IA32_EFER); - - cpu_has_load_perf_global_ctrl = - allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, - VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) - && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, - VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); - - /* - * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL - * but due to errata below it can't be used. Workaround is to use - * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL. - * - * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] - * - * AAK155 (model 26) - * AAP115 (model 30) - * AAT100 (model 37) - * BC86,AAY89,BD102 (model 44) - * BA97 (model 46) - * - */ - if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { - switch (boot_cpu_data.x86_model) { - case 26: - case 30: - case 37: - case 44: - case 46: - cpu_has_load_perf_global_ctrl = false; - printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " - "does not work properly. 
Using workaround\n"); - break; - default: - break; - } - } - - if (boot_cpu_has(X86_FEATURE_XSAVES)) - rdmsrl(MSR_IA32_XSS, host_xss); - - return 0; -} - -static struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu) -{ - int node = cpu_to_node(cpu); - struct page *pages; - struct vmcs *vmcs; - - pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); - if (!pages) - return NULL; - vmcs = page_address(pages); - memset(vmcs, 0, vmcs_config.size); - - /* KVM supports Enlightened VMCS v1 only */ - if (static_branch_unlikely(&enable_evmcs)) - vmcs->hdr.revision_id = KVM_EVMCS_VERSION; - else - vmcs->hdr.revision_id = vmcs_config.revision_id; - - if (shadow) - vmcs->hdr.shadow_vmcs = 1; - return vmcs; -} - -static void free_vmcs(struct vmcs *vmcs) -{ - free_pages((unsigned long)vmcs, vmcs_config.order); -} - -/* - * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded - */ -static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) -{ - if (!loaded_vmcs->vmcs) - return; - loaded_vmcs_clear(loaded_vmcs); - free_vmcs(loaded_vmcs->vmcs); - loaded_vmcs->vmcs = NULL; - if (loaded_vmcs->msr_bitmap) - free_page((unsigned long)loaded_vmcs->msr_bitmap); - WARN_ON(loaded_vmcs->shadow_vmcs != NULL); -} - -static struct vmcs *alloc_vmcs(bool shadow) -{ - return alloc_vmcs_cpu(shadow, raw_smp_processor_id()); -} - -static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) -{ - loaded_vmcs->vmcs = alloc_vmcs(false); - if (!loaded_vmcs->vmcs) - return -ENOMEM; - - loaded_vmcs->shadow_vmcs = NULL; - loaded_vmcs_init(loaded_vmcs); - - if (cpu_has_vmx_msr_bitmap()) { - loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!loaded_vmcs->msr_bitmap) - goto out_vmcs; - memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); - - if (IS_ENABLED(CONFIG_HYPERV) && - static_branch_unlikely(&enable_evmcs) && - (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { - struct hv_enlightened_vmcs *evmcs = - (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; - - evmcs->hv_enlightenments_control.msr_bitmap = 1; - } - } - - memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); - - return 0; - -out_vmcs: - free_loaded_vmcs(loaded_vmcs); - return -ENOMEM; -} - -static void free_kvm_area(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - free_vmcs(per_cpu(vmxarea, cpu)); - per_cpu(vmxarea, cpu) = NULL; - } -} - -enum vmcs_field_width { - VMCS_FIELD_WIDTH_U16 = 0, - VMCS_FIELD_WIDTH_U64 = 1, - VMCS_FIELD_WIDTH_U32 = 2, - VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3 -}; - -static inline int vmcs_field_width(unsigned long field) -{ - if (0x1 & field) /* the *_HIGH fields are all 32 bit */ - return VMCS_FIELD_WIDTH_U32; - return (field >> 13) & 0x3 ; -} - -static inline int vmcs_field_readonly(unsigned long field) -{ - return (((field >> 10) & 0x3) == 1); -} - -static void init_vmcs_shadow_fields(void) -{ - int i, j; - - for (i = j = 0; i < max_shadow_read_only_fields; i++) { - u16 field = shadow_read_only_fields[i]; - if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && - (i + 1 == max_shadow_read_only_fields || - shadow_read_only_fields[i + 1] != field + 1)) - pr_err("Missing field from shadow_read_only_field %x\n", - field + 1); - - clear_bit(field, vmx_vmread_bitmap); -#ifdef CONFIG_X86_64 - if (field & 1) - continue; -#endif - if (j < i) - shadow_read_only_fields[j] = field; - j++; - } - max_shadow_read_only_fields = j; - - for (i = j = 0; i < max_shadow_read_write_fields; i++) { - u16 field = shadow_read_write_fields[i]; - if (vmcs_field_width(field) == 
VMCS_FIELD_WIDTH_U64 && - (i + 1 == max_shadow_read_write_fields || - shadow_read_write_fields[i + 1] != field + 1)) - pr_err("Missing field from shadow_read_write_field %x\n", - field + 1); - - /* - * PML and the preemption timer can be emulated, but the - * processor cannot vmwrite to fields that don't exist - * on bare metal. - */ - switch (field) { - case GUEST_PML_INDEX: - if (!cpu_has_vmx_pml()) - continue; - break; - case VMX_PREEMPTION_TIMER_VALUE: - if (!cpu_has_vmx_preemption_timer()) - continue; - break; - case GUEST_INTR_STATUS: - if (!cpu_has_vmx_apicv()) - continue; - break; - default: - break; - } - - clear_bit(field, vmx_vmwrite_bitmap); - clear_bit(field, vmx_vmread_bitmap); -#ifdef CONFIG_X86_64 - if (field & 1) - continue; -#endif - if (j < i) - shadow_read_write_fields[j] = field; - j++; - } - max_shadow_read_write_fields = j; -} - -static __init int alloc_kvm_area(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct vmcs *vmcs; - - vmcs = alloc_vmcs_cpu(false, cpu); - if (!vmcs) { - free_kvm_area(); - return -ENOMEM; - } - - /* - * When eVMCS is enabled, alloc_vmcs_cpu() sets - * vmcs->revision_id to KVM_EVMCS_VERSION instead of - * revision_id reported by MSR_IA32_VMX_BASIC. - * - * However, even though not explicitly documented by - * TLFS, VMXArea passed as VMXON argument should - * still be marked with revision_id reported by - * physical CPU. - */ - if (static_branch_unlikely(&enable_evmcs)) - vmcs->hdr.revision_id = vmcs_config.revision_id; - - per_cpu(vmxarea, cpu) = vmcs; - } - return 0; -} - -static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, - struct kvm_segment *save) -{ - if (!emulate_invalid_guest_state) { - /* - * CS and SS RPL should be equal during guest entry according - * to VMX spec, but in reality it is not always so. Since vcpu - * is in the middle of the transition from real mode to - * protected mode it is safe to assume that RPL 0 is a good - * default value. - */ - if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) - save->selector &= ~SEGMENT_RPL_MASK; - save->dpl = save->selector & SEGMENT_RPL_MASK; - save->s = 1; - } - vmx_set_segment(vcpu, save, seg); -} - -static void enter_pmode(struct kvm_vcpu *vcpu) -{ - unsigned long flags; - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * Update real mode segment cache. It may not be up to date if a segment - * register was written while the vcpu was in guest mode.
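The shadow-field setup in the removed code relies on the architectural VMCS field encoding: bits 14:13 of the field number give the access width, bits 11:10 give the read-only class, and an odd field number denotes the 32-bit *_HIGH half of a 64-bit field. A minimal userspace sketch of that decoding follows; the field numbers used in main() are the SDM encodings, duplicated here for illustration, and this is not kernel code.

#include <stdio.h>

enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};

/* Width lives in bits 14:13; odd encodings are the 32-bit *_HIGH halves. */
static int field_width(unsigned long field)
{
	if (field & 0x1)
		return VMCS_FIELD_WIDTH_U32;
	return (field >> 13) & 0x3;
}

/* Read-only (exit-information) fields carry 01b in bits 11:10. */
static int field_readonly(unsigned long field)
{
	return ((field >> 10) & 0x3) == 1;
}

int main(void)
{
	unsigned long guest_rip = 0x681e;   /* natural-width guest-state field */
	unsigned long exit_qual = 0x6400;   /* natural-width read-only field   */

	printf("GUEST_RIP:          width=%d ro=%d\n",
	       field_width(guest_rip), field_readonly(guest_rip));
	printf("EXIT_QUALIFICATION: width=%d ro=%d\n",
	       field_width(exit_qual), field_readonly(exit_qual));
	return 0;
}

This is also why init_vmcs_shadow_fields() can warn when a 64-bit field appears in the table without its odd-numbered companion: the companion is always field + 1 by construction.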
- */ - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); - - vmx->rmode.vm86_active = 0; - - vmx_segment_cache_clear(vmx); - - vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); - - flags = vmcs_readl(GUEST_RFLAGS); - flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; - flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; - vmcs_writel(GUEST_RFLAGS, flags); - - vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | - (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); - - update_exception_bitmap(vcpu); - - fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); - fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); - fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); - fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); - fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); - fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); -} - -static void fix_rmode_seg(int seg, struct kvm_segment *save) -{ - const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - struct kvm_segment var = *save; - - var.dpl = 0x3; - if (seg == VCPU_SREG_CS) - var.type = 0x3; - - if (!emulate_invalid_guest_state) { - var.selector = var.base >> 4; - var.base = var.base & 0xffff0; - var.limit = 0xffff; - var.g = 0; - var.db = 0; - var.present = 1; - var.s = 1; - var.l = 0; - var.unusable = 0; - var.type = 0x3; - var.avl = 0; - if (save->base & 0xf) - printk_once(KERN_WARNING "kvm: segment base is not " - "paragraph aligned when entering " - "protected mode (seg=%d)", seg); - } - - vmcs_write16(sf->selector, var.selector); - vmcs_writel(sf->base, var.base); - vmcs_write32(sf->limit, var.limit); - vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); -} - -static void enter_rmode(struct kvm_vcpu *vcpu) -{ - unsigned long flags; - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); - - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); - - vmx->rmode.vm86_active = 1; - - /* - * Very old userspace does not call KVM_SET_TSS_ADDR before entering - * vcpu. Warn the user that an update is overdue. 
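fix_rmode_seg() above collapses a protected-mode segment into something vm86 can execute: the selector is re-derived from the base (real mode computes base = selector << 4), the limit becomes 64KiB, and the attributes become a present, DPL-3, read/write data segment, which packs to access-rights value 0xf3. A standalone sketch of that arithmetic, using a hypothetical struct rather than the kernel's kvm_segment:

#include <assert.h>
#include <stdint.h>

struct rm_seg {
	uint16_t selector;
	uint32_t base;
	uint32_t limit;
	uint8_t  type, dpl, s, present;
};

/* Derive vm86-compatible segment state the way fix_rmode_seg() does. */
static void make_vm86_seg(struct rm_seg *seg)
{
	seg->selector = seg->base >> 4;   /* real mode: base = selector << 4 */
	seg->base &= 0xffff0;             /* keep it paragraph aligned       */
	seg->limit = 0xffff;              /* 64KiB, byte granular            */
	seg->type = 0x3;                  /* read/write data, accessed       */
	seg->dpl = 3;
	seg->s = 1;                       /* code/data, not system           */
	seg->present = 1;
}

/* Pack into the VMX AR layout: type | s<<4 | dpl<<5 | p<<7 == 0xf3. */
static uint32_t pack_ar(const struct rm_seg *seg)
{
	return seg->type | (seg->s << 4) | (seg->dpl << 5) |
	       (seg->present << 7);
}

int main(void)
{
	struct rm_seg seg = { .base = 0x12340 };

	make_vm86_seg(&seg);
	assert(seg.selector == 0x1234);
	assert(pack_ar(&seg) == 0xf3);
	return 0;
}

rmode_segment_valid() later in this diff checks for exactly this shape: base == selector << 4, limit 0xffff, and access rights 0xf3.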
- */ - if (!kvm_vmx->tss_addr) - printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be " "called before entering vcpu\n"); - - vmx_segment_cache_clear(vmx); - - vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); - vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); - vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); - - flags = vmcs_readl(GUEST_RFLAGS); - vmx->rmode.save_rflags = flags; - - flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; - - vmcs_writel(GUEST_RFLAGS, flags); - vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); - update_exception_bitmap(vcpu); - - fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); - fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); - fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); - fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); - fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); - fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); - - kvm_mmu_reset_context(vcpu); -} - -static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); - - if (!msr) - return; - - vcpu->arch.efer = efer; - if (efer & EFER_LMA) { - vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); - msr->data = efer; - } else { - vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); - - msr->data = efer & ~EFER_LME; - } - setup_msrs(vmx); -} - -#ifdef CONFIG_X86_64 - -static void enter_lmode(struct kvm_vcpu *vcpu) -{ - u32 guest_tr_ar; - - vmx_segment_cache_clear(to_vmx(vcpu)); - - guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); - if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { - pr_debug_ratelimited("%s: tss fixup for long mode.\n", - __func__); - vmcs_write32(GUEST_TR_AR_BYTES, - (guest_tr_ar & ~VMX_AR_TYPE_MASK) - | VMX_AR_TYPE_BUSY_64_TSS); - } - vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); -} - -static void exit_lmode(struct kvm_vcpu *vcpu) -{ - vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); - vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); -} - -#endif - -static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid, - bool invalidate_gpa) -{ - if (enable_ept && (invalidate_gpa || !enable_vpid)) { - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - return; - ept_sync_context(construct_eptp(vcpu, - vcpu->arch.mmu->root_hpa)); - } else { - vpid_sync_context(vpid); - } -} - -static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) -{ - __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); -} - -static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) -{ - int vpid = to_vmx(vcpu)->vpid; - - if (!vpid_sync_vcpu_addr(vpid, addr)) - vpid_sync_context(vpid); - - /* - * If VPIDs are not supported or enabled, then the above is a no-op. - * But we don't really need a TLB flush in that case anyway, because - * each VM entry/exit includes an implicit flush when VPID is 0.
- */ -} - -static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) -{ - ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; - - vcpu->arch.cr0 &= ~cr0_guest_owned_bits; - vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; -} - -static void vmx_decache_cr3(struct kvm_vcpu *vcpu) -{ - if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); -} - -static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) -{ - ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; - - vcpu->arch.cr4 &= ~cr4_guest_owned_bits; - vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; -} - -static void ept_load_pdptrs(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - - if (!test_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_dirty)) - return; - - if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { - vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); - vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); - vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); - vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); - } -} - -static void ept_save_pdptrs(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - - if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { - mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); - mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); - mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); - mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); - } - - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_avail); - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_dirty); -} - -static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; - u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high & - SECONDARY_EXEC_UNRESTRICTED_GUEST && - nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) - fixed0 &= ~(X86_CR0_PE | X86_CR0_PG); - - return fixed_bits_valid(val, fixed0, fixed1); -} - -static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; - u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; - - return fixed_bits_valid(val, fixed0, fixed1); -} - -static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; - u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; - - return fixed_bits_valid(val, fixed0, fixed1); -} - -/* No difference in the restrictions on guest and host CR4 in VMX operation. 
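The nested CR0/CR4 checks above all reduce to the SDM's FIXED0/FIXED1 MSR convention: a bit may be 0 only if it is 0 in FIXED0, and may be 1 only if it is 1 in FIXED1 (note how nested_guest_cr0_valid() relaxes PE and PG when unrestricted guest is exposed to L1). A self-contained sketch of fixed_bits_valid() under that reading; the example MSR values are illustrative, not taken from real hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Valid iff every FIXED0 bit is set and no bit outside FIXED1 is set. */
static bool fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return ((val & fixed0) == fixed0) && ((val & ~fixed1) == 0);
}

int main(void)
{
	/* Illustrative: PG, NE and PE forced on; nothing forced off. */
	uint64_t cr0_fixed0 = 0x80000021;
	uint64_t cr0_fixed1 = 0xffffffff;

	printf("%d\n", fixed_bits_valid(0x80000031, cr0_fixed0, cr0_fixed1)); /* 1 */
	printf("%d\n", fixed_bits_valid(0x00000031, cr0_fixed0, cr0_fixed1)); /* 0: PG clear */
	return 0;
}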
*/ -#define nested_guest_cr4_valid nested_cr4_valid -#define nested_host_cr4_valid nested_cr4_valid - -static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); - -static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, - unsigned long cr0, - struct kvm_vcpu *vcpu) -{ - if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) - vmx_decache_cr3(vcpu); - if (!(cr0 & X86_CR0_PG)) { - /* From paging/starting to nonpaging */ - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, - vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | - (CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING)); - vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); - } else if (!is_paging(vcpu)) { - /* From nonpaging to paging */ - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, - vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & - ~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING)); - vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); - } - - if (!(cr0 & X86_CR0_WP)) - *hw_cr0 &= ~X86_CR0_WP; -} - -static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long hw_cr0; - - hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); - if (enable_unrestricted_guest) - hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; - else { - hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; - - if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) - enter_pmode(vcpu); - - if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) - enter_rmode(vcpu); - } - -#ifdef CONFIG_X86_64 - if (vcpu->arch.efer & EFER_LME) { - if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) - enter_lmode(vcpu); - if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) - exit_lmode(vcpu); - } -#endif - - if (enable_ept && !enable_unrestricted_guest) - ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); - - vmcs_writel(CR0_READ_SHADOW, cr0); - vmcs_writel(GUEST_CR0, hw_cr0); - vcpu->arch.cr0 = cr0; - - /* depends on vcpu->arch.cr0 to be set to a new value */ - vmx->emulation_required = emulation_required(vcpu); -} - -static int get_ept_level(struct kvm_vcpu *vcpu) -{ - if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) - return 5; - return 4; -} - -static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) -{ - u64 eptp = VMX_EPTP_MT_WB; - - eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; - - if (enable_ept_ad_bits && - (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) - eptp |= VMX_EPTP_AD_ENABLE_BIT; - eptp |= (root_hpa & PAGE_MASK); - - return eptp; -} - -static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) -{ - struct kvm *kvm = vcpu->kvm; - unsigned long guest_cr3; - u64 eptp; - - guest_cr3 = cr3; - if (enable_ept) { - eptp = construct_eptp(vcpu, cr3); - vmcs_write64(EPT_POINTER, eptp); - - if (kvm_x86_ops->tlb_remote_flush) { - spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); - to_vmx(vcpu)->ept_pointer = eptp; - to_kvm_vmx(kvm)->ept_pointers_match - = EPT_POINTERS_CHECK; - spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); - } - - if (enable_unrestricted_guest || is_paging(vcpu) || - is_guest_mode(vcpu)) - guest_cr3 = kvm_read_cr3(vcpu); - else - guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; - ept_load_pdptrs(vcpu); - } - - vmcs_writel(GUEST_CR3, guest_cr3); -} - -static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) -{ - /* - * Pass through host's Machine Check Enable value to hw_cr4, which - * is in force while we are in guest mode. Do not let guests control - * this bit, even if host CR4.MCE == 0. 
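construct_eptp() above packs the EPT pointer exactly as the SDM lays it out: memory type in bits 2:0 (6 = write-back), page-walk length minus one in bits 5:3, the accessed/dirty enable in bit 6, and the physical address of the root table above that. A userspace sketch of the same packing; the constants are duplicated here for illustration, not pulled from kernel headers:

#include <stdint.h>
#include <stdio.h>

#define EPTP_MT_WB      0x6ull          /* bits 2:0 - write-back memtype */
#define EPTP_PWL_4      (3ull << 3)     /* bits 5:3 - 4-level walk       */
#define EPTP_PWL_5      (4ull << 3)     /* bits 5:3 - 5-level walk       */
#define EPTP_AD_ENABLE  (1ull << 6)     /* bit 6 - A/D bits enabled      */
#define PAGE_MASK_4K    (~0xfffull)

static uint64_t make_eptp(uint64_t root_hpa, int levels, int ad_bits)
{
	uint64_t eptp = EPTP_MT_WB;

	eptp |= (levels == 5) ? EPTP_PWL_5 : EPTP_PWL_4;
	if (ad_bits)
		eptp |= EPTP_AD_ENABLE;
	eptp |= root_hpa & PAGE_MASK_4K;
	return eptp;
}

int main(void)
{
	/* 4-level walk, A/D enabled, root table at 0x1234000 -> 0x123405e. */
	printf("eptp = %#llx\n",
	       (unsigned long long)make_eptp(0x1234000, 4, 1));
	return 0;
}

The 5-level choice in get_ept_level() mirrors this: it is taken only when the CPU supports it and the guest's MAXPHYADDR exceeds what a 4-level walk can cover.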
- */ - unsigned long hw_cr4; - - hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); - if (enable_unrestricted_guest) - hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; - else if (to_vmx(vcpu)->rmode.vm86_active) - hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; - else - hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; - - if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { - if (cr4 & X86_CR4_UMIP) { - vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_DESC); - hw_cr4 &= ~X86_CR4_UMIP; - } else if (!is_guest_mode(vcpu) || - !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_DESC); - } - - if (cr4 & X86_CR4_VMXE) { - /* - * To use VMXON (and later other VMX instructions), a guest - * must first be able to turn on cr4.VMXE (see handle_vmon()). - * So basically the check on whether to allow nested VMX - * is here. We operate under the default treatment of SMM, - * so VMX cannot be enabled under SMM. - */ - if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) - return 1; - } - - if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) - return 1; - - vcpu->arch.cr4 = cr4; - - if (!enable_unrestricted_guest) { - if (enable_ept) { - if (!is_paging(vcpu)) { - hw_cr4 &= ~X86_CR4_PAE; - hw_cr4 |= X86_CR4_PSE; - } else if (!(cr4 & X86_CR4_PAE)) { - hw_cr4 &= ~X86_CR4_PAE; - } - } - - /* - * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in - * hardware. To emulate this behavior, SMEP/SMAP/PKU needs - * to be manually disabled when guest switches to non-paging - * mode. - * - * If !enable_unrestricted_guest, the CPU is always running - * with CR0.PG=1 and CR4 needs to be modified. - * If enable_unrestricted_guest, the CPU automatically - * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. - */ - if (!is_paging(vcpu)) - hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); - } - - vmcs_writel(CR4_READ_SHADOW, cr4); - vmcs_writel(GUEST_CR4, hw_cr4); - return 0; -} - -static void vmx_get_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 ar; - - if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { - *var = vmx->rmode.segs[seg]; - if (seg == VCPU_SREG_TR - || var->selector == vmx_read_guest_seg_selector(vmx, seg)) - return; - var->base = vmx_read_guest_seg_base(vmx, seg); - var->selector = vmx_read_guest_seg_selector(vmx, seg); - return; - } - var->base = vmx_read_guest_seg_base(vmx, seg); - var->limit = vmx_read_guest_seg_limit(vmx, seg); - var->selector = vmx_read_guest_seg_selector(vmx, seg); - ar = vmx_read_guest_seg_ar(vmx, seg); - var->unusable = (ar >> 16) & 1; - var->type = ar & 15; - var->s = (ar >> 4) & 1; - var->dpl = (ar >> 5) & 3; - /* - * Some userspaces do not preserve unusable property. Since usable - * segment has to be present according to VMX spec we can use present - * property to amend userspace bug by making unusable segment always - * nonpresent. vmx_segment_access_rights() already marks nonpresent - * segment as unusable. 
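vmx_get_segment() above unpacks the 32-bit VMX access-rights word, the inverse of vmx_segment_access_rights() shown a little further down: type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7, AVL in 12, L in 13, D/B in 14, G in 15, and the VMX-specific "unusable" flag in bit 16. A small sketch of the unpack, using a hypothetical attribute struct:

#include <assert.h>
#include <stdint.h>

struct seg_attr {
	unsigned type : 4, s : 1, dpl : 2, present : 1;
	unsigned avl : 1, l : 1, db : 1, g : 1, unusable : 1;
};

static struct seg_attr unpack_ar(uint32_t ar)
{
	struct seg_attr a = {
		.type     = ar & 15,
		.s        = (ar >> 4) & 1,
		.dpl      = (ar >> 5) & 3,
		.present  = (ar >> 7) & 1,
		.avl      = (ar >> 12) & 1,
		.l        = (ar >> 13) & 1,
		.db       = (ar >> 14) & 1,
		.g        = (ar >> 15) & 1,
		.unusable = (ar >> 16) & 1,
	};
	return a;
}

int main(void)
{
	/* 64-bit code segment: type=0xb, S=1, DPL=0, P=1, L=1 -> 0x209b. */
	struct seg_attr a = unpack_ar(0x209b);

	assert(a.type == 0xb && a.s && !a.dpl && a.present && a.l);
	return 0;
}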
- */ - var->present = !var->unusable; - var->avl = (ar >> 12) & 1; - var->l = (ar >> 13) & 1; - var->db = (ar >> 14) & 1; - var->g = (ar >> 15) & 1; -} - -static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) -{ - struct kvm_segment s; - - if (to_vmx(vcpu)->rmode.vm86_active) { - vmx_get_segment(vcpu, &s, seg); - return s.base; - } - return vmx_read_guest_seg_base(to_vmx(vcpu), seg); -} - -static int vmx_get_cpl(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (unlikely(vmx->rmode.vm86_active)) - return 0; - else { - int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); - return VMX_AR_DPL(ar); - } -} - -static u32 vmx_segment_access_rights(struct kvm_segment *var) -{ - u32 ar; - - if (var->unusable || !var->present) - ar = 1 << 16; - else { - ar = var->type & 15; - ar |= (var->s & 1) << 4; - ar |= (var->dpl & 3) << 5; - ar |= (var->present & 1) << 7; - ar |= (var->avl & 1) << 12; - ar |= (var->l & 1) << 13; - ar |= (var->db & 1) << 14; - ar |= (var->g & 1) << 15; - } - - return ar; -} - -static void vmx_set_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - - vmx_segment_cache_clear(vmx); - - if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { - vmx->rmode.segs[seg] = *var; - if (seg == VCPU_SREG_TR) - vmcs_write16(sf->selector, var->selector); - else if (var->s) - fix_rmode_seg(seg, &vmx->rmode.segs[seg]); - goto out; - } - - vmcs_writel(sf->base, var->base); - vmcs_write32(sf->limit, var->limit); - vmcs_write16(sf->selector, var->selector); - - /* - * Fix the "Accessed" bit in AR field of segment registers for older - * qemu binaries. - * IA32 arch specifies that at the time of processor reset the - * "Accessed" bit in the AR field of segment registers is 1. And qemu - * is setting it to 0 in the userland code. This causes invalid guest - * state vmexit when "unrestricted guest" mode is turned on. - * Fix for this setup issue in cpu_reset is being pushed in the qemu - * tree. Newer qemu binaries with that qemu fix would not need this - * kvm hack. 
- */ - if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) - var->type |= 0x1; /* Accessed */ - - vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); - -out: - vmx->emulation_required = emulation_required(vcpu); -} - -static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -{ - u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); - - *db = (ar >> 14) & 1; - *l = (ar >> 13) & 1; -} - -static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) -{ - dt->size = vmcs_read32(GUEST_IDTR_LIMIT); - dt->address = vmcs_readl(GUEST_IDTR_BASE); -} - -static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) -{ - vmcs_write32(GUEST_IDTR_LIMIT, dt->size); - vmcs_writel(GUEST_IDTR_BASE, dt->address); -} - -static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) -{ - dt->size = vmcs_read32(GUEST_GDTR_LIMIT); - dt->address = vmcs_readl(GUEST_GDTR_BASE); -} - -static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) -{ - vmcs_write32(GUEST_GDTR_LIMIT, dt->size); - vmcs_writel(GUEST_GDTR_BASE, dt->address); -} - -static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) -{ - struct kvm_segment var; - u32 ar; - - vmx_get_segment(vcpu, &var, seg); - var.dpl = 0x3; - if (seg == VCPU_SREG_CS) - var.type = 0x3; - ar = vmx_segment_access_rights(&var); - - if (var.base != (var.selector << 4)) - return false; - if (var.limit != 0xffff) - return false; - if (ar != 0xf3) - return false; - - return true; -} - -static bool code_segment_valid(struct kvm_vcpu *vcpu) -{ - struct kvm_segment cs; - unsigned int cs_rpl; - - vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); - cs_rpl = cs.selector & SEGMENT_RPL_MASK; - - if (cs.unusable) - return false; - if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) - return false; - if (!cs.s) - return false; - if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { - if (cs.dpl > cs_rpl) - return false; - } else { - if (cs.dpl != cs_rpl) - return false; - } - if (!cs.present) - return false; - - /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ - return true; -} - -static bool stack_segment_valid(struct kvm_vcpu *vcpu) -{ - struct kvm_segment ss; - unsigned int ss_rpl; - - vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); - ss_rpl = ss.selector & SEGMENT_RPL_MASK; - - if (ss.unusable) - return true; - if (ss.type != 3 && ss.type != 7) - return false; - if (!ss.s) - return false; - if (ss.dpl != ss_rpl) /* DPL != RPL */ - return false; - if (!ss.present) - return false; - - return true; -} - -static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) -{ - struct kvm_segment var; - unsigned int rpl; - - vmx_get_segment(vcpu, &var, seg); - rpl = var.selector & SEGMENT_RPL_MASK; - - if (var.unusable) - return true; - if (!var.s) - return false; - if (!var.present) - return false; - if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { - if (var.dpl < rpl) /* DPL < RPL */ - return false; - } - - /* TODO: Add other members to kvm_segment_field to allow checking for other access - * rights flags - */ - return true; -} - -static bool tr_valid(struct kvm_vcpu *vcpu) -{ - struct kvm_segment tr; - - vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); - - if (tr.unusable) - return false; - if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ - return false; - if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ - return false; - if (!tr.present) - return false; - - return true; -} - -static bool ldtr_valid(struct kvm_vcpu *vcpu) -{ - struct 
kvm_segment ldtr; - - vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); - - if (ldtr.unusable) - return true; - if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ - return false; - if (ldtr.type != 2) - return false; - if (!ldtr.present) - return false; - - return true; -} - -static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) -{ - struct kvm_segment cs, ss; - - vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); - vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); - - return ((cs.selector & SEGMENT_RPL_MASK) == - (ss.selector & SEGMENT_RPL_MASK)); -} - -/* - * Check if guest state is valid. Returns true if valid, false if - * not. - * We assume that registers are always usable - */ -static bool guest_state_valid(struct kvm_vcpu *vcpu) -{ - if (enable_unrestricted_guest) - return true; - - /* real mode guest state checks */ - if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { - if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) - return false; - if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) - return false; - if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) - return false; - if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) - return false; - if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) - return false; - if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) - return false; - } else { - /* protected mode guest state checks */ - if (!cs_ss_rpl_check(vcpu)) - return false; - if (!code_segment_valid(vcpu)) - return false; - if (!stack_segment_valid(vcpu)) - return false; - if (!data_segment_valid(vcpu, VCPU_SREG_DS)) - return false; - if (!data_segment_valid(vcpu, VCPU_SREG_ES)) - return false; - if (!data_segment_valid(vcpu, VCPU_SREG_FS)) - return false; - if (!data_segment_valid(vcpu, VCPU_SREG_GS)) - return false; - if (!tr_valid(vcpu)) - return false; - if (!ldtr_valid(vcpu)) - return false; - } - /* TODO: - * - Add checks on RIP - * - Add checks on RFLAGS - */ - - return true; -} - -static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); -} - -static int init_rmode_tss(struct kvm *kvm) -{ - gfn_t fn; - u16 data = 0; - int idx, r; - - idx = srcu_read_lock(&kvm->srcu); - fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; - r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); - if (r < 0) - goto out; - data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; - r = kvm_write_guest_page(kvm, fn++, &data, - TSS_IOPB_BASE_OFFSET, sizeof(u16)); - if (r < 0) - goto out; - r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); - if (r < 0) - goto out; - r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); - if (r < 0) - goto out; - data = ~0; - r = kvm_write_guest_page(kvm, fn, &data, - RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, - sizeof(u8)); -out: - srcu_read_unlock(&kvm->srcu, idx); - return r; -} - -static int init_rmode_identity_map(struct kvm *kvm) -{ - struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); - int i, idx, r = 0; - kvm_pfn_t identity_map_pfn; - u32 tmp; - - /* Protect kvm_vmx->ept_identity_pagetable_done. 
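page_address_valid() in the chunk above is simply "page aligned and within the guest's physical-address width". A sketch with MAXPHYADDR passed in explicitly, since in the kernel only the vCPU's CPUID knows that value:

#include <stdbool.h>
#include <stdint.h>

/* gpa must be 4KiB aligned and must not use bits at or above MAXPHYADDR. */
static bool page_address_valid(uint64_t gpa, unsigned int maxphyaddr)
{
	return (gpa & 0xfff) == 0 && (gpa >> maxphyaddr) == 0;
}

int main(void)
{
	return !(page_address_valid(0x1000, 39) &&      /* ok             */
		 !page_address_valid(0x1001, 39) &&     /* misaligned     */
		 !page_address_valid(1ull << 40, 39));  /* beyond MAXPA   */
}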
*/ - mutex_lock(&kvm->slots_lock); - - if (likely(kvm_vmx->ept_identity_pagetable_done)) - goto out2; - - if (!kvm_vmx->ept_identity_map_addr) - kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; - identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; - - r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, - kvm_vmx->ept_identity_map_addr, PAGE_SIZE); - if (r < 0) - goto out2; - - idx = srcu_read_lock(&kvm->srcu); - r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); - if (r < 0) - goto out; - /* Set up identity-mapping pagetable for EPT in real mode */ - for (i = 0; i < PT32_ENT_PER_PAGE; i++) { - tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); - r = kvm_write_guest_page(kvm, identity_map_pfn, - &tmp, i * sizeof(tmp), sizeof(tmp)); - if (r < 0) - goto out; - } - kvm_vmx->ept_identity_pagetable_done = true; - -out: - srcu_read_unlock(&kvm->srcu, idx); - -out2: - mutex_unlock(&kvm->slots_lock); - return r; -} - -static void seg_setup(int seg) -{ - const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - unsigned int ar; - - vmcs_write16(sf->selector, 0); - vmcs_writel(sf->base, 0); - vmcs_write32(sf->limit, 0xffff); - ar = 0x93; - if (seg == VCPU_SREG_CS) - ar |= 0x08; /* code segment */ - - vmcs_write32(sf->ar_bytes, ar); -} - -static int alloc_apic_access_page(struct kvm *kvm) -{ - struct page *page; - int r = 0; - - mutex_lock(&kvm->slots_lock); - if (kvm->arch.apic_access_page_done) - goto out; - r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, - APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); - if (r) - goto out; - - page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); - if (is_error_page(page)) { - r = -EFAULT; - goto out; - } - - /* - * Do not pin the page in memory, so that memory hot-unplug - * is able to migrate it. - */ - put_page(page); - kvm->arch.apic_access_page_done = true; -out: - mutex_unlock(&kvm->slots_lock); - return r; -} - -static int allocate_vpid(void) -{ - int vpid; - - if (!enable_vpid) - return 0; - spin_lock(&vmx_vpid_lock); - vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); - if (vpid < VMX_NR_VPIDS) - __set_bit(vpid, vmx_vpid_bitmap); - else - vpid = 0; - spin_unlock(&vmx_vpid_lock); - return vpid; -} - -static void free_vpid(int vpid) -{ - if (!enable_vpid || vpid == 0) - return; - spin_lock(&vmx_vpid_lock); - __clear_bit(vpid, vmx_vpid_bitmap); - spin_unlock(&vmx_vpid_lock); -} - -static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type) -{ - int f = sizeof(unsigned long); - - if (!cpu_has_vmx_msr_bitmap()) - return; - - if (static_branch_unlikely(&enable_evmcs)) - evmcs_touch_msr_bitmap(); - - /* - * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals - * have the write-low and read-high bitmap offsets the wrong way round. - * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
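The identity-map loop above builds 1024 four-megabyte page-directory entries so that real mode can run with paging enabled when EPT is not available: entry i maps guest physical address i << 22 to itself with present, writable, user, accessed, dirty, and PSE set. A sketch of the entry encoding; the flag values are the standard x86 page-table bits, duplicated for illustration:

#include <stdint.h>
#include <stdio.h>

#define PDE_P    (1u << 0)
#define PDE_RW   (1u << 1)
#define PDE_USER (1u << 2)
#define PDE_A    (1u << 5)
#define PDE_D    (1u << 6)
#define PDE_PSE  (1u << 7)   /* 4MiB page */

int main(void)
{
	uint32_t pde[1024];

	/* Entry i identity-maps the 4MiB region starting at i << 22. */
	for (int i = 0; i < 1024; i++)
		pde[i] = ((uint32_t)i << 22) | (PDE_P | PDE_RW | PDE_USER |
						PDE_A | PDE_D | PDE_PSE);

	printf("pde[1] = %#x\n", pde[1]);   /* 0x4000e7 */
	return 0;
}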
- */ - if (msr <= 0x1fff) { - if (type & MSR_TYPE_R) - /* read-low */ - __clear_bit(msr, msr_bitmap + 0x000 / f); - - if (type & MSR_TYPE_W) - /* write-low */ - __clear_bit(msr, msr_bitmap + 0x800 / f); - - } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { - msr &= 0x1fff; - if (type & MSR_TYPE_R) - /* read-high */ - __clear_bit(msr, msr_bitmap + 0x400 / f); - - if (type & MSR_TYPE_W) - /* write-high */ - __clear_bit(msr, msr_bitmap + 0xc00 / f); - - } -} - -static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type) -{ - int f = sizeof(unsigned long); - - if (!cpu_has_vmx_msr_bitmap()) - return; - - if (static_branch_unlikely(&enable_evmcs)) - evmcs_touch_msr_bitmap(); - - /* - * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals - * have the write-low and read-high bitmap offsets the wrong way round. - * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. - */ - if (msr <= 0x1fff) { - if (type & MSR_TYPE_R) - /* read-low */ - __set_bit(msr, msr_bitmap + 0x000 / f); - - if (type & MSR_TYPE_W) - /* write-low */ - __set_bit(msr, msr_bitmap + 0x800 / f); - - } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { - msr &= 0x1fff; - if (type & MSR_TYPE_R) - /* read-high */ - __set_bit(msr, msr_bitmap + 0x400 / f); - - if (type & MSR_TYPE_W) - /* write-high */ - __set_bit(msr, msr_bitmap + 0xc00 / f); - - } -} - -static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type, bool value) -{ - if (value) - vmx_enable_intercept_for_msr(msr_bitmap, msr, type); - else - vmx_disable_intercept_for_msr(msr_bitmap, msr, type); -} - -/* - * If a msr is allowed by L0, we should check whether it is allowed by L1. - * The corresponding bit will be cleared unless both of L0 and L1 allow it. - */ -static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, - unsigned long *msr_bitmap_nested, - u32 msr, int type) -{ - int f = sizeof(unsigned long); - - /* - * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals - * have the write-low and read-high bitmap offsets the wrong way round. - * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
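The intercept helpers above implement the SDM's MSR-bitmap layout: a 4KiB page split into four 1KiB quarters, with read-low at offset 0x000, read-high at 0x400, write-low at 0x800 and write-high at 0xc00, where "low" covers MSRs 0..0x1fff and "high" covers 0xc0000000..0xc0001fff after masking off the top bits. A sketch mapping an MSR and access type to its bit position, using plain byte arithmetic rather than the kernel's unsigned-long bitmap helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit offset inside the 4KiB bitmap, or -1 if the MSR always exits. */
static long msr_bitmap_bit(uint32_t msr, bool write)
{
	long base;

	if (msr <= 0x1fff)
		base = write ? 0x800 : 0x000;           /* low quarter   */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = write ? 0xc00 : 0x400;           /* high quarter  */
	else
		return -1;                              /* uncontrolled  */

	return base * 8 + (msr & 0x1fff);
}

int main(void)
{
	printf("%ld\n", msr_bitmap_bit(0x10, false));       /* TSC read   */
	printf("%ld\n", msr_bitmap_bit(0xc0000080, true));  /* EFER write */
	return 0;
}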
- */ - if (msr <= 0x1fff) { - if (type & MSR_TYPE_R && - !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) - /* read-low */ - __clear_bit(msr, msr_bitmap_nested + 0x000 / f); - - if (type & MSR_TYPE_W && - !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) - /* write-low */ - __clear_bit(msr, msr_bitmap_nested + 0x800 / f); - - } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { - msr &= 0x1fff; - if (type & MSR_TYPE_R && - !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) - /* read-high */ - __clear_bit(msr, msr_bitmap_nested + 0x400 / f); - - if (type & MSR_TYPE_W && - !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) - /* write-high */ - __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); - - } -} - -static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) -{ - u8 mode = 0; - - if (cpu_has_secondary_exec_ctrls() && - (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { - mode |= MSR_BITMAP_MODE_X2APIC; - if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) - mode |= MSR_BITMAP_MODE_X2APIC_APICV; - } - - return mode; -} - -#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) - -static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, - u8 mode) -{ - int msr; - - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; - msr_bitmap[word + (0x800 / sizeof(long))] = ~0; - } - - if (mode & MSR_BITMAP_MODE_X2APIC) { - /* - * TPR reads and writes can be virtualized even if virtual interrupt - * delivery is not in use. - */ - vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); - if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { - vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); - vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); - vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); - } - } -} - -static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; - u8 mode = vmx_msr_bitmap_mode(vcpu); - u8 changed = mode ^ vmx->msr_bitmap_mode; - - if (!changed) - return; - - if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) - vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); - - vmx->msr_bitmap_mode = mode; -} - -static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) -{ - return enable_apicv; -} - -static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - gfn_t gfn; - - /* - * Don't need to mark the APIC access page dirty; it is never - * written to by the CPU during APIC virtualization. 
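The x2APIC bitmap update above leans on the architectural mapping between MMIO APIC register offsets and x2APIC MSRs: MSR 0x800 + (offset >> 4), which puts the whole register file in 0x800..0x8ff, the exact range the loop in vmx_update_msr_bitmap_x2apic() walks. A sketch of the X2APIC_MSR() arithmetic, with two real register offsets duplicated for illustration:

#include <assert.h>
#include <stdint.h>

#define APIC_BASE_MSR 0x800
#define APIC_TASKPRI  0x80      /* TPR MMIO offset */
#define APIC_EOI      0xb0      /* EOI MMIO offset */

/* Same arithmetic as the X2APIC_MSR() macro in the code above. */
static uint32_t x2apic_msr(uint32_t mmio_offset)
{
	return APIC_BASE_MSR + (mmio_offset >> 4);
}

int main(void)
{
	assert(x2apic_msr(APIC_TASKPRI) == 0x808);  /* x2APIC TPR MSR */
	assert(x2apic_msr(APIC_EOI) == 0x80b);      /* x2APIC EOI MSR */
	return 0;
}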
- */ - - if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { - gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; - kvm_vcpu_mark_page_dirty(vcpu, gfn); - } - - if (nested_cpu_has_posted_intr(vmcs12)) { - gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; - kvm_vcpu_mark_page_dirty(vcpu, gfn); - } -} - - -static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int max_irr; - void *vapic_page; - u16 status; - - if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) - return; - - vmx->nested.pi_pending = false; - if (!pi_test_and_clear_on(vmx->nested.pi_desc)) - return; - - max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); - if (max_irr != 256) { - vapic_page = kmap(vmx->nested.virtual_apic_page); - __kvm_apic_update_irr(vmx->nested.pi_desc->pir, - vapic_page, &max_irr); - kunmap(vmx->nested.virtual_apic_page); - - status = vmcs_read16(GUEST_INTR_STATUS); - if ((u8)max_irr > ((u8)status & 0xff)) { - status &= ~0xff; - status |= (u8)max_irr; - vmcs_write16(GUEST_INTR_STATUS, status); - } - } - - nested_mark_vmcs12_pages_dirty(vcpu); -} - -static u8 vmx_get_rvi(void) -{ - return vmcs_read16(GUEST_INTR_STATUS) & 0xff; -} - -static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - void *vapic_page; - u32 vppr; - int rvi; - - if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || - !nested_cpu_has_vid(get_vmcs12(vcpu)) || - WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) - return false; - - rvi = vmx_get_rvi(); - - vapic_page = kmap(vmx->nested.virtual_apic_page); - vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); - kunmap(vmx->nested.virtual_apic_page); - - return ((rvi & 0xf0) > (vppr & 0xf0)); -} - -static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, - bool nested) -{ -#ifdef CONFIG_SMP - int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; - - if (vcpu->mode == IN_GUEST_MODE) { - /* - * The vector of interrupt to be delivered to vcpu had - * been set in PIR before this function. - * - * Following cases will be reached in this block, and - * we always send a notification event in all cases as - * explained below. - * - * Case 1: vcpu keeps in non-root mode. Sending a - * notification event posts the interrupt to vcpu. - * - * Case 2: vcpu exits to root mode and is still - * runnable. PIR will be synced to vIRR before the - * next vcpu entry. Sending a notification event in - * this case has no effect, as vcpu is not in root - * mode. - * - * Case 3: vcpu exits to root mode and is blocked. - * vcpu_block() has already synced PIR to vIRR and - * never blocks vcpu if vIRR is not cleared. Therefore, - * a blocked vcpu here does not wait for any requested - * interrupts in PIR, and sending a notification event - * which has no effect is safe here. - */ - - apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); - return true; - } -#endif - return false; -} - -static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, - int vector) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (is_guest_mode(vcpu) && - vector == vmx->nested.posted_intr_nv) { - /* - * If a posted intr is not recognized by hardware, - * we will accomplish it in the next vmentry. - */ - vmx->nested.pi_pending = true; - kvm_make_request(KVM_REQ_EVENT, vcpu); - /* the PIR and ON have been set by L1. */ - if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) - kvm_vcpu_kick(vcpu); - return 0; - } - return -1; -} -/* - * Send interrupt to vcpu via posted interrupt way. - * 1. 
If target vcpu is running(non-root mode), send posted interrupt - * notification to vcpu and hardware will sync PIR to vIRR atomically. - * 2. If target vcpu isn't running(root mode), kick it to pick up the - * interrupt from PIR in next vmentry. - */ -static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int r; - - r = vmx_deliver_nested_posted_interrupt(vcpu, vector); - if (!r) - return; - - if (pi_test_and_set_pir(vector, &vmx->pi_desc)) - return; - - /* If a previous notification has sent the IPI, nothing to do. */ - if (pi_test_and_set_on(&vmx->pi_desc)) - return; - - if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) - kvm_vcpu_kick(vcpu); -} - -/* - * Set up the vmcs's constant host-state fields, i.e., host-state fields that - * will not change in the lifetime of the guest. - * Note that host-state that does change is set elsewhere. E.g., host-state - * that is set differently for each CPU is set in vmx_vcpu_load(), not here. - */ -static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) -{ - u32 low32, high32; - unsigned long tmpl; - struct desc_ptr dt; - unsigned long cr0, cr3, cr4; - - cr0 = read_cr0(); - WARN_ON(cr0 & X86_CR0_TS); - vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ - - /* - * Save the most likely value for this task's CR3 in the VMCS. - * We can't use __get_current_cr3_fast() because we're not atomic. - */ - cr3 = __read_cr3(); - vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ - vmx->loaded_vmcs->host_state.cr3 = cr3; - - /* Save the most likely value for this task's CR4 in the VMCS. */ - cr4 = cr4_read_shadow(); - vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ - vmx->loaded_vmcs->host_state.cr4 = cr4; - - vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ -#ifdef CONFIG_X86_64 - /* - * Load null selectors, so we can avoid reloading them in - * vmx_prepare_switch_to_host(), in case userspace uses - * the null selectors too (the expected case). 
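Two pieces of posted-interrupt arithmetic above are worth pulling out: vmx_complete_nested_posted_interrupt() scans the 256-bit PIR for the highest pending vector, and vmx_guest_apic_has_interrupt() compares only the priority classes (high nibbles) of that vector and the virtual PPR, mirroring the APIC's delivery rule. A sketch of both, assuming a GCC-style __builtin_clzll; unlike the kernel's find_last_bit(), this helper returns -1 when nothing is pending rather than the bitmap size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Highest vector set in a 256-bit posted-interrupt request bitmap. */
static int pir_max_irr(const uint64_t pir[4])
{
	for (int w = 3; w >= 0; w--)
		if (pir[w])
			return w * 64 + 63 - __builtin_clzll(pir[w]);
	return -1;                       /* nothing pending */
}

/* Deliverable iff the vector's priority class beats VPPR's class. */
static bool apic_has_interrupt(uint8_t rvi, uint8_t vppr)
{
	return (rvi & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
	uint64_t pir[4] = { 0, 1ull << 3, 0, 0 };   /* vector 67 pending */

	printf("max_irr = %d\n", pir_max_irr(pir));                 /* 67 */
	printf("deliverable = %d\n", apic_has_interrupt(67, 0x30)); /* 1  */
	return 0;
}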
- */ - vmcs_write16(HOST_DS_SELECTOR, 0); - vmcs_write16(HOST_ES_SELECTOR, 0); -#else - vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ -#endif - vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ - - store_idt(&dt); - vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ - vmx->host_idt_base = dt.address; - - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ - - rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); - vmcs_write32(HOST_IA32_SYSENTER_CS, low32); - rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); - vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ - - if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { - rdmsr(MSR_IA32_CR_PAT, low32, high32); - vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); - } - - if (cpu_has_load_ia32_efer) - vmcs_write64(HOST_IA32_EFER, host_efer); -} - -static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) -{ - vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; - if (enable_ept) - vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; - if (is_guest_mode(&vmx->vcpu)) - vmx->vcpu.arch.cr4_guest_owned_bits &= - ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; - vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); -} - -static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) -{ - u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; - - if (!kvm_vcpu_apicv_active(&vmx->vcpu)) - pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; - - if (!enable_vnmi) - pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; - - /* Enable the preemption timer dynamically */ - pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; - return pin_based_exec_ctrl; -} - -static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); - if (cpu_has_secondary_exec_ctrls()) { - if (kvm_vcpu_apicv_active(vcpu)) - vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); - else - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); - } - - if (cpu_has_vmx_msr_bitmap()) - vmx_update_msr_bitmap(vcpu); -} - -static u32 vmx_exec_control(struct vcpu_vmx *vmx) -{ - u32 exec_control = vmcs_config.cpu_based_exec_ctrl; - - if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) - exec_control &= ~CPU_BASED_MOV_DR_EXITING; - - if (!cpu_need_tpr_shadow(&vmx->vcpu)) { - exec_control &= ~CPU_BASED_TPR_SHADOW; -#ifdef CONFIG_X86_64 - exec_control |= CPU_BASED_CR8_STORE_EXITING | - CPU_BASED_CR8_LOAD_EXITING; -#endif - } - if (!enable_ept) - exec_control |= CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_INVLPG_EXITING; - if (kvm_mwait_in_guest(vmx->vcpu.kvm)) - exec_control &= ~(CPU_BASED_MWAIT_EXITING | - CPU_BASED_MONITOR_EXITING); - if (kvm_hlt_in_guest(vmx->vcpu.kvm)) - exec_control &= ~CPU_BASED_HLT_EXITING; - return exec_control; -} - -static bool vmx_rdrand_supported(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_RDRAND_EXITING; -} - -static bool vmx_rdseed_supported(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_RDSEED_EXITING; -} - -static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) -{ - struct kvm_vcpu *vcpu = &vmx->vcpu; - - u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; - - if 
(!cpu_need_virtualize_apic_accesses(vcpu)) - exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - if (vmx->vpid == 0) - exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; - if (!enable_ept) { - exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; - enable_unrestricted_guest = 0; - } - if (!enable_unrestricted_guest) - exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; - if (kvm_pause_in_guest(vmx->vcpu.kvm)) - exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - if (!kvm_vcpu_apicv_active(vcpu)) - exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); - exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - - /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, - * in vmx_set_cr4. */ - exec_control &= ~SECONDARY_EXEC_DESC; - - /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD - (handle_vmptrld). - We can NOT enable shadow_vmcs here because we don't have yet - a current VMCS12 - */ - exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; - - if (!enable_pml) - exec_control &= ~SECONDARY_EXEC_ENABLE_PML; - - if (vmx_xsaves_supported()) { - /* Exposing XSAVES only when XSAVE is exposed */ - bool xsaves_enabled = - guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); - - if (!xsaves_enabled) - exec_control &= ~SECONDARY_EXEC_XSAVES; - - if (nested) { - if (xsaves_enabled) - vmx->nested.msrs.secondary_ctls_high |= - SECONDARY_EXEC_XSAVES; - else - vmx->nested.msrs.secondary_ctls_high &= - ~SECONDARY_EXEC_XSAVES; - } - } - - if (vmx_rdtscp_supported()) { - bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); - if (!rdtscp_enabled) - exec_control &= ~SECONDARY_EXEC_RDTSCP; - - if (nested) { - if (rdtscp_enabled) - vmx->nested.msrs.secondary_ctls_high |= - SECONDARY_EXEC_RDTSCP; - else - vmx->nested.msrs.secondary_ctls_high &= - ~SECONDARY_EXEC_RDTSCP; - } - } - - if (vmx_invpcid_supported()) { - /* Exposing INVPCID only when PCID is exposed */ - bool invpcid_enabled = - guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && - guest_cpuid_has(vcpu, X86_FEATURE_PCID); - - if (!invpcid_enabled) { - exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; - guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); - } - - if (nested) { - if (invpcid_enabled) - vmx->nested.msrs.secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_INVPCID; - else - vmx->nested.msrs.secondary_ctls_high &= - ~SECONDARY_EXEC_ENABLE_INVPCID; - } - } - - if (vmx_rdrand_supported()) { - bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); - if (rdrand_enabled) - exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING; - - if (nested) { - if (rdrand_enabled) - vmx->nested.msrs.secondary_ctls_high |= - SECONDARY_EXEC_RDRAND_EXITING; - else - vmx->nested.msrs.secondary_ctls_high &= - ~SECONDARY_EXEC_RDRAND_EXITING; - } - } - - if (vmx_rdseed_supported()) { - bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); - if (rdseed_enabled) - exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING; - - if (nested) { - if (rdseed_enabled) - vmx->nested.msrs.secondary_ctls_high |= - SECONDARY_EXEC_RDSEED_EXITING; - else - vmx->nested.msrs.secondary_ctls_high &= - ~SECONDARY_EXEC_RDSEED_EXITING; - } - } - - vmx->secondary_exec_control = exec_control; -} - -static void ept_set_mmio_spte_mask(void) -{ - /* - * EPT Misconfigurations can be generated if the value of bits 2:0 - * of an EPT paging-structure entry is 110b (write/execute). 
- */ - kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, - VMX_EPT_MISCONFIG_WX_VALUE); -} - -#define VMX_XSS_EXIT_BITMAP 0 -/* - * Sets up the vmcs for emulated real mode. - */ -static void vmx_vcpu_setup(struct vcpu_vmx *vmx) -{ - int i; - - if (enable_shadow_vmcs) { - /* - * At vCPU creation, "VMWRITE to any supported field - * in the VMCS" is supported, so use the more - * permissive vmx_vmread_bitmap to specify both read - * and write permissions for the shadow VMCS. - */ - vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); - vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap)); - } - if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); - - vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ - - /* Control */ - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); - vmx->hv_deadline_tsc = -1; - - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); - - if (cpu_has_secondary_exec_ctrls()) { - vmx_compute_secondary_exec_control(vmx); - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, - vmx->secondary_exec_control); - } - - if (kvm_vcpu_apicv_active(&vmx->vcpu)) { - vmcs_write64(EOI_EXIT_BITMAP0, 0); - vmcs_write64(EOI_EXIT_BITMAP1, 0); - vmcs_write64(EOI_EXIT_BITMAP2, 0); - vmcs_write64(EOI_EXIT_BITMAP3, 0); - - vmcs_write16(GUEST_INTR_STATUS, 0); - - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); - vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); - } - - if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { - vmcs_write32(PLE_GAP, ple_gap); - vmx->ple_window = ple_window; - vmx->ple_window_dirty = true; - } - - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); - vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ - - vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ - vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ - vmx_set_constant_host_state(vmx); - vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ - vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ - - if (cpu_has_vmx_vmfunc()) - vmcs_write64(VM_FUNCTION_CONTROL, 0); - - vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); - - if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) - vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); - - for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { - u32 index = vmx_msr_index[i]; - u32 data_low, data_high; - int j = vmx->nmsrs; - - if (rdmsr_safe(index, &data_low, &data_high) < 0) - continue; - if (wrmsr_safe(index, data_low, data_high) < 0) - continue; - vmx->guest_msrs[j].index = i; - vmx->guest_msrs[j].data = 0; - vmx->guest_msrs[j].mask = -1ull; - ++vmx->nmsrs; - } - - vmx->arch_capabilities = kvm_get_arch_capabilities(); - - vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); - - /* 22.2.1, 20.8.1 */ - vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); - - vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; - vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); - - set_cr4_guest_host_mask(vmx); - - if (vmx_xsaves_supported()) - vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); - - if (enable_pml) { - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); - } - - if (cpu_has_vmx_encls_vmexit()) - vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); -} - -static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) -{ - struct vcpu_vmx *vmx = 
to_vmx(vcpu); - struct msr_data apic_base_msr; - u64 cr0; - - vmx->rmode.vm86_active = 0; - vmx->spec_ctrl = 0; - - vcpu->arch.microcode_version = 0x100000000ULL; - vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); - kvm_set_cr8(vcpu, 0); - - if (!init_event) { - apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | - MSR_IA32_APICBASE_ENABLE; - if (kvm_vcpu_is_reset_bsp(vcpu)) - apic_base_msr.data |= MSR_IA32_APICBASE_BSP; - apic_base_msr.host_initiated = true; - kvm_set_apic_base(vcpu, &apic_base_msr); - } - - vmx_segment_cache_clear(vmx); - - seg_setup(VCPU_SREG_CS); - vmcs_write16(GUEST_CS_SELECTOR, 0xf000); - vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); - - seg_setup(VCPU_SREG_DS); - seg_setup(VCPU_SREG_ES); - seg_setup(VCPU_SREG_FS); - seg_setup(VCPU_SREG_GS); - seg_setup(VCPU_SREG_SS); - - vmcs_write16(GUEST_TR_SELECTOR, 0); - vmcs_writel(GUEST_TR_BASE, 0); - vmcs_write32(GUEST_TR_LIMIT, 0xffff); - vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); - - vmcs_write16(GUEST_LDTR_SELECTOR, 0); - vmcs_writel(GUEST_LDTR_BASE, 0); - vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); - vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); - - if (!init_event) { - vmcs_write32(GUEST_SYSENTER_CS, 0); - vmcs_writel(GUEST_SYSENTER_ESP, 0); - vmcs_writel(GUEST_SYSENTER_EIP, 0); - vmcs_write64(GUEST_IA32_DEBUGCTL, 0); - } - - kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); - kvm_rip_write(vcpu, 0xfff0); - - vmcs_writel(GUEST_GDTR_BASE, 0); - vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); - - vmcs_writel(GUEST_IDTR_BASE, 0); - vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); - - vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); - if (kvm_mpx_supported()) - vmcs_write64(GUEST_BNDCFGS, 0); - - setup_msrs(vmx); - - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ - - if (cpu_has_vmx_tpr_shadow() && !init_event) { - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); - if (cpu_need_tpr_shadow(vcpu)) - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, - __pa(vcpu->arch.apic->regs)); - vmcs_write32(TPR_THRESHOLD, 0); - } - - kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - - if (vmx->vpid != 0) - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); - - cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; - vmx->vcpu.arch.cr0 = cr0; - vmx_set_cr0(vcpu, cr0); /* enter rmode */ - vmx_set_cr4(vcpu, 0); - vmx_set_efer(vcpu, 0); - - update_exception_bitmap(vcpu); - - vpid_sync_context(vmx->vpid); - if (init_event) - vmx_clear_hlt(vcpu); -} - -/* - * In nested virtualization, check if L1 asked to exit on external interrupts. - * For most existing hypervisors, this will always return true. 
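vmx_vcpu_reset() above encodes the architectural reset state: CS selector 0xf000 with base 0xffff0000 and RIP 0xfff0, which together place the first instruction fetch at the top-of-4GiB reset vector. A one-line check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t cs_base = 0xffff0000ull;
	uint64_t rip = 0xfff0;

	/* First fetch happens 16 bytes below the 4GiB boundary. */
	assert(cs_base + rip == 0xfffffff0ull);
	return 0;
}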
- */ -static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) -{ - return get_vmcs12(vcpu)->pin_based_vm_exec_control & - PIN_BASED_EXT_INTR_MASK; -} - -/* - * In nested virtualization, check if L1 has set - * VM_EXIT_ACK_INTR_ON_EXIT - */ -static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) -{ - return get_vmcs12(vcpu)->vm_exit_controls & - VM_EXIT_ACK_INTR_ON_EXIT; -} - -static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) -{ - return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu)); -} - -static void enable_irq_window(struct kvm_vcpu *vcpu) -{ - vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_VIRTUAL_INTR_PENDING); -} - -static void enable_nmi_window(struct kvm_vcpu *vcpu) -{ - if (!enable_vnmi || - vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { - enable_irq_window(vcpu); - return; - } - - vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_VIRTUAL_NMI_PENDING); -} - -static void vmx_inject_irq(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - uint32_t intr; - int irq = vcpu->arch.interrupt.nr; - - trace_kvm_inj_virq(irq); - - ++vcpu->stat.irq_injections; - if (vmx->rmode.vm86_active) { - int inc_eip = 0; - if (vcpu->arch.interrupt.soft) - inc_eip = vcpu->arch.event_exit_inst_len; - if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return; - } - intr = irq | INTR_INFO_VALID_MASK; - if (vcpu->arch.interrupt.soft) { - intr |= INTR_TYPE_SOFT_INTR; - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, - vmx->vcpu.arch.event_exit_inst_len); - } else - intr |= INTR_TYPE_EXT_INTR; - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); - - vmx_clear_hlt(vcpu); -} - -static void vmx_inject_nmi(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (!enable_vnmi) { - /* - * Tracking the NMI-blocked state in software is built upon - * finding the next open IRQ window. This, in turn, depends on - * well-behaving guests: They have to keep IRQs disabled at - * least as long as the NMI handler runs. Otherwise we may - * cause NMI nesting, maybe breaking the guest. But as this is - * highly unlikely, we can live with the residual risk. 
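vmx_inject_irq() above assembles the VM-entry interruption-information field by hand: vector in bits 7:0, delivery type in bits 10:8 (0 = external interrupt, 2 = NMI, 4 = software interrupt), and the valid bit at 31. A sketch of that encoding; the constants follow the SDM layout and are duplicated here for illustration:

#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VALID (1u << 31)
#define INTR_TYPE_EXT   (0u << 8)
#define INTR_TYPE_NMI   (2u << 8)
#define INTR_TYPE_SOFT  (4u << 8)

static uint32_t entry_intr_info(uint8_t vector, uint32_t type)
{
	return vector | type | INTR_INFO_VALID;
}

int main(void)
{
	printf("ext irq 32: %#x\n", entry_intr_info(32, INTR_TYPE_EXT)); /* 0x80000020 */
	printf("nmi:        %#x\n", entry_intr_info(2, INTR_TYPE_NMI));  /* 0x80000202 */
	return 0;
}

For software interrupts the code above additionally writes VM_ENTRY_INSTRUCTION_LEN, since the CPU needs the injected instruction's length to compute the return RIP.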
- */ - vmx->loaded_vmcs->soft_vnmi_blocked = 1; - vmx->loaded_vmcs->vnmi_blocked_time = 0; - } - - ++vcpu->stat.nmi_injections; - vmx->loaded_vmcs->nmi_known_unmasked = false; - - if (vmx->rmode.vm86_active) { - if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return; - } - - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); - - vmx_clear_hlt(vcpu); -} - -static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - bool masked; - - if (!enable_vnmi) - return vmx->loaded_vmcs->soft_vnmi_blocked; - if (vmx->loaded_vmcs->nmi_known_unmasked) - return false; - masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; - vmx->loaded_vmcs->nmi_known_unmasked = !masked; - return masked; -} - -static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (!enable_vnmi) { - if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { - vmx->loaded_vmcs->soft_vnmi_blocked = masked; - vmx->loaded_vmcs->vnmi_blocked_time = 0; - } - } else { - vmx->loaded_vmcs->nmi_known_unmasked = !masked; - if (masked) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - } -} - -static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) -{ - if (to_vmx(vcpu)->nested.nested_run_pending) - return 0; - - if (!enable_vnmi && - to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) - return 0; - - return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & - (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI - | GUEST_INTR_STATE_NMI)); -} - -static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) -{ - return (!to_vmx(vcpu)->nested.nested_run_pending && - vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & - (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); -} - -static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) -{ - int ret; - - if (enable_unrestricted_guest) - return 0; - - ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, - PAGE_SIZE * 3); - if (ret) - return ret; - to_kvm_vmx(kvm)->tss_addr = addr; - return init_rmode_tss(kvm); -} - -static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) -{ - to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; - return 0; -} - -static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) -{ - switch (vec) { - case BP_VECTOR: - /* - * Update instruction length as we may reinject the exception - * from user space while in guest debugging mode. - */ - to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = - vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) - return false; - /* fall through */ - case DB_VECTOR: - if (vcpu->guest_debug & - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) - return false; - /* fall through */ - case DE_VECTOR: - case OF_VECTOR: - case BR_VECTOR: - case UD_VECTOR: - case DF_VECTOR: - case SS_VECTOR: - case GP_VECTOR: - case MF_VECTOR: - return true; - break; - } - return false; -} - -static int handle_rmode_exception(struct kvm_vcpu *vcpu, - int vec, u32 err_code) -{ - /* - * Instruction with address size override prefix opcode 0x67 - * Cause the #SS fault with 0 error code in VM86 mode. 
- */ - if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { - if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { - if (vcpu->arch.halt_request) { - vcpu->arch.halt_request = 0; - return kvm_vcpu_halt(vcpu); - } - return 1; - } - return 0; - } - - /* - * Forward all other exceptions that are valid in real mode. - * FIXME: Breaks guest debugging in real mode, needs to be fixed with - * the required debugging infrastructure rework. - */ - kvm_queue_exception(vcpu, vec); - return 1; -} - -/* - * Trigger machine check on the host. We assume all the MSRs are already set up - * by the CPU and that we still run on the same CPU as the MCE occurred on. - * We pass a fake environment to the machine check handler because we want - * the guest to be always treated like user space, no matter what context - * it used internally. - */ -static void kvm_machine_check(void) -{ -#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) - struct pt_regs regs = { - .cs = 3, /* Fake ring 3 no matter what the guest ran on */ - .flags = X86_EFLAGS_IF, - }; - - do_machine_check(&regs, 0); -#endif -} - -static int handle_machine_check(struct kvm_vcpu *vcpu) -{ - /* already handled by vcpu_run */ - return 1; -} - -static int handle_exception(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_run *kvm_run = vcpu->run; - u32 intr_info, ex_no, error_code; - unsigned long cr2, rip, dr6; - u32 vect_info; - enum emulation_result er; - - vect_info = vmx->idt_vectoring_info; - intr_info = vmx->exit_intr_info; - - if (is_machine_check(intr_info)) - return handle_machine_check(vcpu); - - if (is_nmi(intr_info)) - return 1; /* already handled by vmx_vcpu_run() */ - - if (is_invalid_opcode(intr_info)) - return handle_ud(vcpu); - - error_code = 0; - if (intr_info & INTR_INFO_DELIVER_CODE_MASK) - error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); - - if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { - WARN_ON_ONCE(!enable_vmware_backdoor); - er = kvm_emulate_instruction(vcpu, - EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); - if (er == EMULATE_USER_EXIT) - return 0; - else if (er != EMULATE_DONE) - kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); - return 1; - } - - /* - * The #PF with PFEC.RSVD = 1 indicates the guest is accessing - * MMIO, it is better to report an internal error. - * See the comments in vmx_handle_exit. 
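handle_exception() above keys everything off the VM-exit interruption-information field. A standalone decode of that layout (bit positions per the SDM: 7:0 vector, 10:8 type, bit 11 "error code valid", bit 31 "valid"; the mask names match the kernel's but are redefined locally so the sketch compiles on its own):

	#include <stdint.h>
	#include <stdio.h>

	#define INTR_INFO_VECTOR_MASK		0xffu
	#define INTR_INFO_INTR_TYPE_MASK	0x700u
	#define INTR_INFO_DELIVER_CODE_MASK	(1u << 11)
	#define INTR_INFO_VALID_MASK		(1u << 31)

	int main(void)
	{
		uint32_t intr_info = 0x80000b0e; /* valid #PF, error code present */

		if (intr_info & INTR_INFO_VALID_MASK)
			printf("vector %u, type %#x, error code %s\n",
			       intr_info & INTR_INFO_VECTOR_MASK,
			       intr_info & INTR_INFO_INTR_TYPE_MASK,
			       (intr_info & INTR_INFO_DELIVER_CODE_MASK) ?
					"present" : "absent");
		return 0;
	}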
- */ - if ((vect_info & VECTORING_INFO_VALID_MASK) && - !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; - vcpu->run->internal.ndata = 3; - vcpu->run->internal.data[0] = vect_info; - vcpu->run->internal.data[1] = intr_info; - vcpu->run->internal.data[2] = error_code; - return 0; - } - - if (is_page_fault(intr_info)) { - cr2 = vmcs_readl(EXIT_QUALIFICATION); - /* EPT won't cause page fault directly */ - WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); - return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); - } - - ex_no = intr_info & INTR_INFO_VECTOR_MASK; - - if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) - return handle_rmode_exception(vcpu, ex_no, error_code); - - switch (ex_no) { - case AC_VECTOR: - kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); - return 1; - case DB_VECTOR: - dr6 = vmcs_readl(EXIT_QUALIFICATION); - if (!(vcpu->guest_debug & - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= dr6 | DR6_RTM; - if (is_icebp(intr_info)) - skip_emulated_instruction(vcpu); - - kvm_queue_exception(vcpu, DB_VECTOR); - return 1; - } - kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; - kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); - /* fall through */ - case BP_VECTOR: - /* - * Update instruction length as we may reinject #BP from - * user space while in guest debugging mode. Reading it for - * #DB as well causes no harm, it is not used in that case. - */ - vmx->vcpu.arch.event_exit_inst_len = - vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - kvm_run->exit_reason = KVM_EXIT_DEBUG; - rip = kvm_rip_read(vcpu); - kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; - kvm_run->debug.arch.exception = ex_no; - break; - default: - kvm_run->exit_reason = KVM_EXIT_EXCEPTION; - kvm_run->ex.exception = ex_no; - kvm_run->ex.error_code = error_code; - break; - } - return 0; -} - -static int handle_external_interrupt(struct kvm_vcpu *vcpu) -{ - ++vcpu->stat.irq_exits; - return 1; -} - -static int handle_triple_fault(struct kvm_vcpu *vcpu) -{ - vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; - vcpu->mmio_needed = 0; - return 0; -} - -static int handle_io(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification; - int size, in, string; - unsigned port; - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - string = (exit_qualification & 16) != 0; - - ++vcpu->stat.io_exits; - - if (string) - return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; - - port = exit_qualification >> 16; - size = (exit_qualification & 7) + 1; - in = (exit_qualification & 8) != 0; - - return kvm_fast_pio(vcpu, size, port, in); -} - -static void -vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) -{ - /* - * Patch in the VMCALL instruction: - */ - hypercall[0] = 0x0f; - hypercall[1] = 0x01; - hypercall[2] = 0xc1; -} - -/* called to set cr0 as appropriate for a mov-to-cr0 exit. */ -static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) -{ - if (is_guest_mode(vcpu)) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - unsigned long orig_val = val; - - /* - * We get here when L2 changed cr0 in a way that did not change - * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), - * but did change L0 shadowed bits. So we first calculate the - * effective cr0 value that L1 would like to write into the - * hardware. 
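The guest/host-mask arithmetic described in the comment above is worth seeing in isolation: every bit set in cr0_guest_host_mask is owned by L1 and taken from vmcs12->guest_cr0, everything else comes from L2's write. A small compilable sketch with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t effective_cr0(uint64_t l2_val, uint64_t guest_cr0,
				      uint64_t guest_host_mask)
	{
		return (l2_val & ~guest_host_mask) |
		       (guest_cr0 & guest_host_mask);
	}

	int main(void)
	{
		/* L1 owns PE (bit 0); L2 sets TS (bit 3) and tries to clear PE */
		uint64_t v = effective_cr0(0x8, 0x1, 0x1);

		printf("effective cr0 = %#llx\n", (unsigned long long)v); /* 0x9 */
		return 0;
	}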
It consists of the L2-owned bits from the new - * value combined with the L1-owned bits from L1's guest_cr0. - */ - val = (val & ~vmcs12->cr0_guest_host_mask) | - (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); - - if (!nested_guest_cr0_valid(vcpu, val)) - return 1; - - if (kvm_set_cr0(vcpu, val)) - return 1; - vmcs_writel(CR0_READ_SHADOW, orig_val); - return 0; - } else { - if (to_vmx(vcpu)->nested.vmxon && - !nested_host_cr0_valid(vcpu, val)) - return 1; - - return kvm_set_cr0(vcpu, val); - } -} - -static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) -{ - if (is_guest_mode(vcpu)) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - unsigned long orig_val = val; - - /* analogously to handle_set_cr0 */ - val = (val & ~vmcs12->cr4_guest_host_mask) | - (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); - if (kvm_set_cr4(vcpu, val)) - return 1; - vmcs_writel(CR4_READ_SHADOW, orig_val); - return 0; - } else - return kvm_set_cr4(vcpu, val); -} - -static int handle_desc(struct kvm_vcpu *vcpu) -{ - WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); - return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; -} - -static int handle_cr(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification, val; - int cr; - int reg; - int err; - int ret; - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - cr = exit_qualification & 15; - reg = (exit_qualification >> 8) & 15; - switch ((exit_qualification >> 4) & 3) { - case 0: /* mov to cr */ - val = kvm_register_readl(vcpu, reg); - trace_kvm_cr_write(cr, val); - switch (cr) { - case 0: - err = handle_set_cr0(vcpu, val); - return kvm_complete_insn_gp(vcpu, err); - case 3: - WARN_ON_ONCE(enable_unrestricted_guest); - err = kvm_set_cr3(vcpu, val); - return kvm_complete_insn_gp(vcpu, err); - case 4: - err = handle_set_cr4(vcpu, val); - return kvm_complete_insn_gp(vcpu, err); - case 8: { - u8 cr8_prev = kvm_get_cr8(vcpu); - u8 cr8 = (u8)val; - err = kvm_set_cr8(vcpu, cr8); - ret = kvm_complete_insn_gp(vcpu, err); - if (lapic_in_kernel(vcpu)) - return ret; - if (cr8_prev <= cr8) - return ret; - /* - * TODO: we might be squashing a - * KVM_GUESTDBG_SINGLESTEP-triggered - * KVM_EXIT_DEBUG here. 
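handle_cr() below decodes its exit qualification inline; per the SDM the layout is: bits 3:0 the CR number, bits 5:4 the access type (0 mov-to-cr, 1 mov-from-cr, 2 clts, 3 lmsw), bits 11:8 the GPR. A standalone restatement of that decode:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* MOV to CR3 from GPR 4 (rsp in the SDM's register encoding) */
		uint64_t qual = (4ull << 8) | (0ull << 4) | 3;

		printf("cr%llu, access type %llu, reg %llu\n",
		       (unsigned long long)(qual & 15),
		       (unsigned long long)((qual >> 4) & 3),
		       (unsigned long long)((qual >> 8) & 15));
		return 0;
	}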
- */ - vcpu->run->exit_reason = KVM_EXIT_SET_TPR; - return 0; - } - } - break; - case 2: /* clts */ - WARN_ONCE(1, "Guest should always own CR0.TS"); - vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); - trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); - return kvm_skip_emulated_instruction(vcpu); - case 1: /*mov from cr*/ - switch (cr) { - case 3: - WARN_ON_ONCE(enable_unrestricted_guest); - val = kvm_read_cr3(vcpu); - kvm_register_write(vcpu, reg, val); - trace_kvm_cr_read(cr, val); - return kvm_skip_emulated_instruction(vcpu); - case 8: - val = kvm_get_cr8(vcpu); - kvm_register_write(vcpu, reg, val); - trace_kvm_cr_read(cr, val); - return kvm_skip_emulated_instruction(vcpu); - } - break; - case 3: /* lmsw */ - val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; - trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); - kvm_lmsw(vcpu, val); - - return kvm_skip_emulated_instruction(vcpu); - default: - break; - } - vcpu->run->exit_reason = 0; - vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", - (int)(exit_qualification >> 4) & 3, cr); - return 0; -} - -static int handle_dr(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification; - int dr, dr7, reg; - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - dr = exit_qualification & DEBUG_REG_ACCESS_NUM; - - /* First, if DR does not exist, trigger UD */ - if (!kvm_require_dr(vcpu, dr)) - return 1; - - /* Do not handle if the CPL > 0, will trigger GP on re-entry */ - if (!kvm_require_cpl(vcpu, 0)) - return 1; - dr7 = vmcs_readl(GUEST_DR7); - if (dr7 & DR7_GD) { - /* - * As the vm-exit takes precedence over the debug trap, we - * need to emulate the latter, either for the host or the - * guest debugging itself. - */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { - vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; - vcpu->run->debug.arch.dr7 = dr7; - vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); - vcpu->run->debug.arch.exception = DB_VECTOR; - vcpu->run->exit_reason = KVM_EXIT_DEBUG; - return 0; - } else { - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BD | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); - return 1; - } - } - - if (vcpu->guest_debug == 0) { - vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_MOV_DR_EXITING); - - /* - * No more DR vmexits; force a reload of the debug registers - * and reenter on this instruction. The next vmexit will - * retrieve the full state of the debug registers. 
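handle_dr()'s lazy path above is a small state machine: the first intercepted guest DR access turns MOV-DR exiting off and hands the debug registers to the guest, and vmx_sync_dirty_debug_regs() later pulls the values back out and re-arms the intercept. Modeled standalone (all names are local; only the shape follows the deleted code):

	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu_state {
		bool mov_dr_exiting;	/* intercept MOV DR?           */
		bool guest_owns_drs;	/* KVM_DEBUGREG_WONT_EXIT-like */
	};

	static void on_dr_exit(struct vcpu_state *v)
	{
		v->mov_dr_exiting = false; /* stop exiting on every access */
		v->guest_owns_drs = true;  /* reload and reenter the guest */
	}

	static void sync_dirty_debug_regs(struct vcpu_state *v)
	{
		/* ...copy DR0-DR3 and DR6 out of hardware here... */
		v->guest_owns_drs = false;
		v->mov_dr_exiting = true;  /* intercept again */
	}

	int main(void)
	{
		struct vcpu_state v = { .mov_dr_exiting = true };

		on_dr_exit(&v);
		sync_dirty_debug_regs(&v);
		printf("intercepting: %d\n", v.mov_dr_exiting);
		return 0;
	}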
- */ - vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; - return 1; - } - - reg = DEBUG_REG_ACCESS_REG(exit_qualification); - if (exit_qualification & TYPE_MOV_FROM_DR) { - unsigned long val; - - if (kvm_get_dr(vcpu, dr, &val)) - return 1; - kvm_register_write(vcpu, reg, val); - } else - if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) - return 1; - - return kvm_skip_emulated_instruction(vcpu); -} - -static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.dr6; -} - -static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) -{ -} - -static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) -{ - get_debugreg(vcpu->arch.db[0], 0); - get_debugreg(vcpu->arch.db[1], 1); - get_debugreg(vcpu->arch.db[2], 2); - get_debugreg(vcpu->arch.db[3], 3); - get_debugreg(vcpu->arch.dr6, 6); - vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); - - vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; - vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); -} - -static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) -{ - vmcs_writel(GUEST_DR7, val); -} - -static int handle_cpuid(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_cpuid(vcpu); -} - -static int handle_rdmsr(struct kvm_vcpu *vcpu) -{ - u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; - struct msr_data msr_info; - - msr_info.index = ecx; - msr_info.host_initiated = false; - if (vmx_get_msr(vcpu, &msr_info)) { - trace_kvm_msr_read_ex(ecx); - kvm_inject_gp(vcpu, 0); - return 1; - } - - trace_kvm_msr_read(ecx, msr_info.data); - - /* FIXME: handling of bits 32:63 of rax, rdx */ - vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; - vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_wrmsr(struct kvm_vcpu *vcpu) -{ - struct msr_data msr; - u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; - u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) - | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); - - msr.data = data; - msr.index = ecx; - msr.host_initiated = false; - if (kvm_set_msr(vcpu, &msr) != 0) { - trace_kvm_msr_write_ex(ecx, data); - kvm_inject_gp(vcpu, 0); - return 1; - } - - trace_kvm_msr_write(ecx, data); - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) -{ - kvm_apic_update_ppr(vcpu); - return 1; -} - -static int handle_interrupt_window(struct kvm_vcpu *vcpu) -{ - vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_VIRTUAL_INTR_PENDING); - - kvm_make_request(KVM_REQ_EVENT, vcpu); - - ++vcpu->stat.irq_window_exits; - return 1; -} - -static int handle_halt(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_halt(vcpu); -} - -static int handle_vmcall(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_hypercall(vcpu); -} - -static int handle_invd(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; -} - -static int handle_invlpg(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - kvm_mmu_invlpg(vcpu, exit_qualification); - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_rdpmc(struct kvm_vcpu *vcpu) -{ - int err; - - err = kvm_rdpmc(vcpu); - return kvm_complete_insn_gp(vcpu, err); -} - -static int handle_wbinvd(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_wbinvd(vcpu); -} - -static int handle_xsetbv(struct kvm_vcpu *vcpu) -{ - u64 new_bv = kvm_read_edx_eax(vcpu); - u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); - - if (kvm_set_xcr(vcpu, index, new_bv) == 0) - return 
kvm_skip_emulated_instruction(vcpu); - return 1; -} - -static int handle_xsaves(struct kvm_vcpu *vcpu) -{ - kvm_skip_emulated_instruction(vcpu); - WARN(1, "this should never happen\n"); - return 1; -} - -static int handle_xrstors(struct kvm_vcpu *vcpu) -{ - kvm_skip_emulated_instruction(vcpu); - WARN(1, "this should never happen\n"); - return 1; -} - -static int handle_apic_access(struct kvm_vcpu *vcpu) -{ - if (likely(fasteoi)) { - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - int access_type, offset; - - access_type = exit_qualification & APIC_ACCESS_TYPE; - offset = exit_qualification & APIC_ACCESS_OFFSET; - /* - * Sane guest uses MOV to write EOI, with written value - * not cared. So make a short-circuit here by avoiding - * heavy instruction emulation. - */ - if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && - (offset == APIC_EOI)) { - kvm_lapic_set_eoi(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } - } - return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; -} - -static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - int vector = exit_qualification & 0xff; - - /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ - kvm_apic_set_eoi_accelerated(vcpu, vector); - return 1; -} - -static int handle_apic_write(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 offset = exit_qualification & 0xfff; - - /* APIC-write VM exit is trap-like and thus no need to adjust IP */ - kvm_apic_write_nodecode(vcpu, offset); - return 1; -} - -static int handle_task_switch(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long exit_qualification; - bool has_error_code = false; - u32 error_code = 0; - u16 tss_selector; - int reason, type, idt_v, idt_index; - - idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); - idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); - type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - reason = (u32)exit_qualification >> 30; - if (reason == TASK_SWITCH_GATE && idt_v) { - switch (type) { - case INTR_TYPE_NMI_INTR: - vcpu->arch.nmi_injected = false; - vmx_set_nmi_mask(vcpu, true); - break; - case INTR_TYPE_EXT_INTR: - case INTR_TYPE_SOFT_INTR: - kvm_clear_interrupt_queue(vcpu); - break; - case INTR_TYPE_HARD_EXCEPTION: - if (vmx->idt_vectoring_info & - VECTORING_INFO_DELIVER_CODE_MASK) { - has_error_code = true; - error_code = - vmcs_read32(IDT_VECTORING_ERROR_CODE); - } - /* fall through */ - case INTR_TYPE_SOFT_EXCEPTION: - kvm_clear_exception_queue(vcpu); - break; - default: - break; - } - } - tss_selector = exit_qualification; - - if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && - type != INTR_TYPE_EXT_INTR && - type != INTR_TYPE_NMI_INTR)) - skip_emulated_instruction(vcpu); - - if (kvm_task_switch(vcpu, tss_selector, - type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, - has_error_code, error_code) == EMULATE_FAIL) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; - return 0; - } - - /* - * TODO: What about debug traps on tss switch? - * Are we supposed to inject them and update dr6? 
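The fast-EOI short-circuit in handle_apic_access() above leans on the exit qualification carrying both the access type and the APIC register offset, so a plain linear write to the EOI register (offset 0xb0) can complete without decoding the guest instruction. Sketched standalone (constants redeclared locally with the architectural values; an assumption-labeled model, not the kernel's headers):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define APIC_ACCESS_OFFSET		0xfff	/* bits 11:0  */
	#define APIC_ACCESS_TYPE		0xf000	/* bits 15:12 */
	#define TYPE_LINEAR_APIC_INST_WRITE	(1 << 12)
	#define APIC_EOI			0xb0

	static bool eoi_fast_path(uint64_t exit_qualification)
	{
		int access_type = exit_qualification & APIC_ACCESS_TYPE;
		int offset	= exit_qualification & APIC_ACCESS_OFFSET;

		/* a linear write to the EOI register needs no emulation */
		return access_type == TYPE_LINEAR_APIC_INST_WRITE &&
		       offset == APIC_EOI;
	}

	int main(void)
	{
		printf("%d\n", eoi_fast_path((1 << 12) | APIC_EOI)); /* 1 */
		return 0;
	}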
- */ - - return 1; -} - -static int handle_ept_violation(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification; - gpa_t gpa; - u64 error_code; - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - /* - * EPT violation happened while executing iret from NMI, - * "blocked by NMI" bit has to be set before next VM entry. - * There are errata that may cause this bit to not be set: - * AAK134, BY25. - */ - if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && - enable_vnmi && - (exit_qualification & INTR_INFO_UNBLOCK_NMI)) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); - - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - trace_kvm_page_fault(gpa, exit_qualification); - - /* Is it a read fault? */ - error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) - ? PFERR_USER_MASK : 0; - /* Is it a write fault? */ - error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) - ? PFERR_WRITE_MASK : 0; - /* Is it a fetch fault? */ - error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) - ? PFERR_FETCH_MASK : 0; - /* ept page table entry is present? */ - error_code |= (exit_qualification & - (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | - EPT_VIOLATION_EXECUTABLE)) - ? PFERR_PRESENT_MASK : 0; - - error_code |= (exit_qualification & 0x100) != 0 ? - PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; - - vcpu->arch.exit_qualification = exit_qualification; - return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); -} - -static int handle_ept_misconfig(struct kvm_vcpu *vcpu) -{ - gpa_t gpa; - - /* - * A nested guest cannot optimize MMIO vmexits, because we have an - * nGPA here instead of the required GPA. - */ - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - if (!is_guest_mode(vcpu) && - !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { - trace_kvm_fast_mmio(gpa); - /* - * Doing kvm_skip_emulated_instruction() depends on undefined - * behavior: Intel's manual doesn't mandate - * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG - * occurs and while on real hardware it was observed to be set, - * other hypervisors (namely Hyper-V) don't set it, we end up - * advancing IP with some random value. Disable fast mmio when - * running nested and keep it for real hardware in hope that - * VM_EXIT_INSTRUCTION_LEN will always be set correctly. - */ - if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) - return kvm_skip_emulated_instruction(vcpu); - else - return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == - EMULATE_DONE; - } - - return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); -} - -static int handle_nmi_window(struct kvm_vcpu *vcpu) -{ - WARN_ON_ONCE(!enable_vnmi); - vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_VIRTUAL_NMI_PENDING); - ++vcpu->stat.nmi_window_exits; - kvm_make_request(KVM_REQ_EVENT, vcpu); - - return 1; -} - -static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - enum emulation_result err = EMULATE_DONE; - int ret = 1; - u32 cpu_exec_ctrl; - bool intr_window_requested; - unsigned count = 130; - - /* - * We should never reach the point where we are emulating L2 - * due to invalid guest state as that means we incorrectly - * allowed a nested VMEntry with an invalid vmcs12. 
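handle_ept_violation() above folds exit-qualification bits into an x86 #PF-style error code for the MMU. The mapping, lifted into a standalone compilable helper (bit names local to the sketch; values follow the deleted code):

	#include <stdint.h>
	#include <stdio.h>

	#define EPT_ACC_READ	(1u << 0)
	#define EPT_ACC_WRITE	(1u << 1)
	#define EPT_ACC_INSTR	(1u << 2)
	#define EPT_READABLE	(1u << 3)
	#define EPT_WRITABLE	(1u << 4)
	#define EPT_EXECUTABLE	(1u << 5)

	#define PFERR_PRESENT	(1u << 0)
	#define PFERR_WRITE	(1u << 1)
	#define PFERR_USER	(1u << 2)
	#define PFERR_FETCH	(1u << 4)

	static uint64_t ept_error_code(uint64_t qual)
	{
		uint64_t ec = 0;

		ec |= (qual & EPT_ACC_READ)  ? PFERR_USER  : 0;
		ec |= (qual & EPT_ACC_WRITE) ? PFERR_WRITE : 0;
		ec |= (qual & EPT_ACC_INSTR) ? PFERR_FETCH : 0;
		/* any RWX permission present => the final page was reachable */
		ec |= (qual & (EPT_READABLE | EPT_WRITABLE | EPT_EXECUTABLE))
			? PFERR_PRESENT : 0;
		return ec;
	}

	int main(void)
	{
		/* write fault on a readable mapping: write | present = 0x3 */
		printf("%#llx\n", (unsigned long long)
		       ept_error_code(EPT_ACC_WRITE | EPT_READABLE));
		return 0;
	}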
- */ - WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); - - cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; - - while (vmx->emulation_required && count-- != 0) { - if (intr_window_requested && vmx_interrupt_allowed(vcpu)) - return handle_interrupt_window(&vmx->vcpu); - - if (kvm_test_request(KVM_REQ_EVENT, vcpu)) - return 1; - - err = kvm_emulate_instruction(vcpu, 0); - - if (err == EMULATE_USER_EXIT) { - ++vcpu->stat.mmio_exits; - ret = 0; - goto out; - } - - if (err != EMULATE_DONE) - goto emulation_error; - - if (vmx->emulation_required && !vmx->rmode.vm86_active && - vcpu->arch.exception.pending) - goto emulation_error; - - if (vcpu->arch.halt_request) { - vcpu->arch.halt_request = 0; - ret = kvm_vcpu_halt(vcpu); - goto out; - } - - if (signal_pending(current)) - goto out; - if (need_resched()) - schedule(); - } - -out: - return ret; - -emulation_error: - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; - return 0; -} - -static void grow_ple_window(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int old = vmx->ple_window; - - vmx->ple_window = __grow_ple_window(old, ple_window, - ple_window_grow, - ple_window_max); - - if (vmx->ple_window != old) - vmx->ple_window_dirty = true; - - trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); -} - -static void shrink_ple_window(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int old = vmx->ple_window; - - vmx->ple_window = __shrink_ple_window(old, ple_window, - ple_window_shrink, - ple_window); - - if (vmx->ple_window != old) - vmx->ple_window_dirty = true; - - trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); -} - -/* - * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. - */ -static void wakeup_handler(void) -{ - struct kvm_vcpu *vcpu; - int cpu = smp_processor_id(); - - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), - blocked_vcpu_list) { - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - - if (pi_test_on(pi_desc) == 1) - kvm_vcpu_kick(vcpu); - } - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); -} - -static void vmx_enable_tdp(void) -{ - kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, - enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, - enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, - 0ull, VMX_EPT_EXECUTABLE_MASK, - cpu_has_vmx_ept_execute_only() ? 
0ull : VMX_EPT_READABLE_MASK, - VMX_EPT_RWX_MASK, 0ull); - - ept_set_mmio_spte_mask(); - kvm_enable_tdp(); -} - -static __init int hardware_setup(void) -{ - unsigned long host_bndcfgs; - int r = -ENOMEM, i; - - rdmsrl_safe(MSR_EFER, &host_efer); - - for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) - kvm_define_shared_msr(i, vmx_msr_index[i]); - - for (i = 0; i < VMX_BITMAP_NR; i++) { - vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx_bitmap[i]) - goto out; - } - - memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); - memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - - if (setup_vmcs_config(&vmcs_config) < 0) { - r = -EIO; - goto out; - } - - if (boot_cpu_has(X86_FEATURE_NX)) - kvm_enable_efer_bits(EFER_NX); - - if (boot_cpu_has(X86_FEATURE_MPX)) { - rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); - WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); - } - - if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || - !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) - enable_vpid = 0; - - if (!cpu_has_vmx_ept() || - !cpu_has_vmx_ept_4levels() || - !cpu_has_vmx_ept_mt_wb() || - !cpu_has_vmx_invept_global()) - enable_ept = 0; - - if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) - enable_ept_ad_bits = 0; - - if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) - enable_unrestricted_guest = 0; - - if (!cpu_has_vmx_flexpriority()) - flexpriority_enabled = 0; - - if (!cpu_has_virtual_nmis()) - enable_vnmi = 0; - - /* - * set_apic_access_page_addr() is used to reload apic access - * page upon invalidation. No need to do anything if not - * using the APIC_ACCESS_ADDR VMCS field. - */ - if (!flexpriority_enabled) - kvm_x86_ops->set_apic_access_page_addr = NULL; - - if (!cpu_has_vmx_tpr_shadow()) - kvm_x86_ops->update_cr8_intercept = NULL; - - if (enable_ept && !cpu_has_vmx_ept_2m_page()) - kvm_disable_largepages(); - -#if IS_ENABLED(CONFIG_HYPERV) - if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH - && enable_ept) - kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb; -#endif - - if (!cpu_has_vmx_ple()) { - ple_gap = 0; - ple_window = 0; - ple_window_grow = 0; - ple_window_max = 0; - ple_window_shrink = 0; - } - - if (!cpu_has_vmx_apicv()) { - enable_apicv = 0; - kvm_x86_ops->sync_pir_to_irr = NULL; - } - - if (cpu_has_vmx_tsc_scaling()) { - kvm_has_tsc_control = true; - kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; - kvm_tsc_scaling_ratio_frac_bits = 48; - } - - set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ - - if (enable_ept) - vmx_enable_tdp(); - else - kvm_disable_tdp(); - - if (!nested) { - kvm_x86_ops->get_nested_state = NULL; - kvm_x86_ops->set_nested_state = NULL; - } - - /* - * Only enable PML when hardware supports PML feature, and both EPT - * and EPT A/D bit features are enabled -- PML depends on them to work. 
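grow_ple_window() earlier in this hunk delegates to a common helper; one plausible shape for it, matching the grow-then-clamp behavior the callers rely on (an assumption-laden sketch, not the exact kvm helper):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int grow_window(unsigned int val, unsigned int base,
					unsigned int modifier, unsigned int max)
	{
		uint64_t ret = val;

		if (modifier < 1)
			return base;
		if (modifier < base)
			ret *= modifier;	/* small modifier: scale up   */
		else
			ret += modifier;	/* large modifier: fixed step */
		return ret > max ? max : (unsigned int)ret;
	}

	int main(void)
	{
		unsigned int w = 4096;

		w = grow_window(w, 4096, 2, 16384);	/* 8192  */
		w = grow_window(w, 4096, 2, 16384);	/* 16384 */
		w = grow_window(w, 4096, 2, 16384);	/* stays clamped */
		printf("ple window: %u\n", w);
		return 0;
	}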
- */ - if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) - enable_pml = 0; - - if (!enable_pml) { - kvm_x86_ops->slot_enable_log_dirty = NULL; - kvm_x86_ops->slot_disable_log_dirty = NULL; - kvm_x86_ops->flush_log_dirty = NULL; - kvm_x86_ops->enable_log_dirty_pt_masked = NULL; - } - - if (!cpu_has_vmx_preemption_timer()) - kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; - - if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { - u64 vmx_msr; - - rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); - cpu_preemption_timer_multi = - vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; - } else { - kvm_x86_ops->set_hv_timer = NULL; - kvm_x86_ops->cancel_hv_timer = NULL; - } - - if (!cpu_has_vmx_shadow_vmcs()) - enable_shadow_vmcs = 0; - if (enable_shadow_vmcs) - init_vmcs_shadow_fields(); - - kvm_set_posted_intr_wakeup_handler(wakeup_handler); - nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv); - - kvm_mce_cap_supported |= MCG_LMCE_P; - - return alloc_kvm_area(); - -out: - for (i = 0; i < VMX_BITMAP_NR; i++) - free_page((unsigned long)vmx_bitmap[i]); - - return r; -} - -static __exit void hardware_unsetup(void) -{ - int i; - - for (i = 0; i < VMX_BITMAP_NR; i++) - free_page((unsigned long)vmx_bitmap[i]); - - free_kvm_area(); -} - -/* - * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE - * exiting, so only get here on cpu with PAUSE-Loop-Exiting. - */ -static int handle_pause(struct kvm_vcpu *vcpu) -{ - if (!kvm_pause_in_guest(vcpu->kvm)) - grow_ple_window(vcpu); - - /* - * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" - * VM-execution control is ignored if CPL > 0. OTOH, KVM - * never set PAUSE_EXITING and just set PLE if supported, - * so the vcpu must be CPL=0 if it gets a PAUSE exit. - */ - kvm_vcpu_on_spin(vcpu, true); - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_nop(struct kvm_vcpu *vcpu) -{ - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_mwait(struct kvm_vcpu *vcpu) -{ - printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); - return handle_nop(vcpu); -} - -static int handle_invalid_op(struct kvm_vcpu *vcpu) -{ - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; -} - -static int handle_monitor_trap(struct kvm_vcpu *vcpu) -{ - return 1; -} - -static int handle_monitor(struct kvm_vcpu *vcpu) -{ - printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); - return handle_nop(vcpu); -} - -/* - * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), - * set the success or error code of an emulated VMX instruction (as specified - * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated - * instruction. - */ -static int nested_vmx_succeed(struct kvm_vcpu *vcpu) -{ - vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | - X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); - return kvm_skip_emulated_instruction(vcpu); -} - -static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) -{ - vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | - X86_EFLAGS_SF | X86_EFLAGS_OF)) - | X86_EFLAGS_CF); - return kvm_skip_emulated_instruction(vcpu); -} - -static int nested_vmx_failValid(struct kvm_vcpu *vcpu, - u32 vm_instruction_error) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * failValid writes the error number to the current VMCS, which - * can't be done if there isn't a current VMCS. 
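The failValid/failInvalid/succeed trio above implements the SDM's VMX "Conventions": success clears all six arithmetic flags, VMfailInvalid sets only CF, and VMfailValid sets only ZF while also storing an error number in the current VMCS, which is why it needs one. As standalone helpers (flag bit positions architectural, names local):

	#include <stdint.h>
	#include <stdio.h>

	#define CF (1u << 0)
	#define PF (1u << 2)
	#define AF (1u << 4)
	#define ZF (1u << 6)
	#define SF (1u << 7)
	#define OF (1u << 11)
	#define ARITH_FLAGS (CF | PF | AF | ZF | SF | OF)

	static uint64_t vmx_succeed(uint64_t rflags)
	{
		return rflags & ~(uint64_t)ARITH_FLAGS;
	}

	static uint64_t vmx_fail_invalid(uint64_t rflags)
	{
		return vmx_succeed(rflags) | CF;
	}

	static uint64_t vmx_fail_valid(uint64_t rflags)
	{
		return vmx_succeed(rflags) | ZF;
	}

	int main(void)
	{
		printf("invalid: %#llx, valid: %#llx\n",
		       (unsigned long long)vmx_fail_invalid(0),
		       (unsigned long long)vmx_fail_valid(0));
		return 0;
	}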
- */ - if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) - return nested_vmx_failInvalid(vcpu); - - vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | - X86_EFLAGS_SF | X86_EFLAGS_OF)) - | X86_EFLAGS_ZF); - get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; - /* - * We don't need to force a shadow sync because - * VM_INSTRUCTION_ERROR is not shadowed - */ - return kvm_skip_emulated_instruction(vcpu); -} - -static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) -{ - /* TODO: not to reset guest simply here. */ - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); -} - -static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) -{ - struct vcpu_vmx *vmx = - container_of(timer, struct vcpu_vmx, nested.preemption_timer); - - vmx->nested.preemption_timer_expired = true; - kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); - kvm_vcpu_kick(&vmx->vcpu); - - return HRTIMER_NORESTART; -} - -/* - * Decode the memory-address operand of a vmx instruction, as recorded on an - * exit caused by such an instruction (run by a guest hypervisor). - * On success, returns 0. When the operand is invalid, returns 1 and throws - * #UD or #GP. - */ -static int get_vmx_mem_address(struct kvm_vcpu *vcpu, - unsigned long exit_qualification, - u32 vmx_instruction_info, bool wr, gva_t *ret) -{ - gva_t off; - bool exn; - struct kvm_segment s; - - /* - * According to Vol. 3B, "Information for VM Exits Due to Instruction - * Execution", on an exit, vmx_instruction_info holds most of the - * addressing components of the operand. Only the displacement part - * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). - * For how an actual address is calculated from all these components, - * refer to Vol. 1, "Operand Addressing". - */ - int scaling = vmx_instruction_info & 3; - int addr_size = (vmx_instruction_info >> 7) & 7; - bool is_reg = vmx_instruction_info & (1u << 10); - int seg_reg = (vmx_instruction_info >> 15) & 7; - int index_reg = (vmx_instruction_info >> 18) & 0xf; - bool index_is_valid = !(vmx_instruction_info & (1u << 22)); - int base_reg = (vmx_instruction_info >> 23) & 0xf; - bool base_is_valid = !(vmx_instruction_info & (1u << 27)); - - if (is_reg) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - /* Addr = segment_base + offset */ - /* offset = base + [index * scale] + displacement */ - off = exit_qualification; /* holds the displacement */ - if (base_is_valid) - off += kvm_register_read(vcpu, base_reg); - if (index_is_valid) - off += kvm_register_read(vcpu, index_reg)<<scaling; - vmx_get_segment(vcpu, &s, seg_reg); - *ret = s.base + off; - - if (addr_size == 1) /* 32 bit */ - *ret &= 0xffffffff; - - /* Checks for #GP/#SS exceptions. */ - exn = false; - if (is_long_mode(vcpu)) { - /* Long mode: #GP(0)/#SS(0) if the memory address is in a - * non-canonical form. This is the only check on the memory - * destination for long mode! - */ - exn = is_noncanonical_address(*ret, vcpu); - } else if (is_protmode(vcpu)) { - /* Protected mode: apply checks for segment validity in the - * following order: - * - segment type check (#GP(0) may be thrown) - * - usability check (#GP(0)/#SS(0)) - * - limit check (#GP(0)/#SS(0)) - */ - if (wr) - /* #GP(0) if the destination operand is located in a - * read-only data segment or any code segment. 
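get_vmx_mem_address() above unpacks the VM-exit instruction-information field; the bitfield layout is easier to read pulled out into a struct (a compilable restatement of the decode in the deleted code, with the displacement arriving separately in the exit qualification):

	#include <stdbool.h>
	#include <stdint.h>

	struct vmx_operand {
		int scaling, addr_size, seg_reg, index_reg, base_reg;
		bool is_reg, index_valid, base_valid;
	};

	static struct vmx_operand decode(uint32_t info)
	{
		struct vmx_operand op = {
			.scaling     = info & 3,
			.addr_size   = (info >> 7) & 7,
			.is_reg	     = info & (1u << 10),
			.seg_reg     = (info >> 15) & 7,
			.index_reg   = (info >> 18) & 0xf,
			.index_valid = !(info & (1u << 22)),
			.base_reg    = (info >> 23) & 0xf,
			.base_valid  = !(info & (1u << 27)),
		};
		return op;
	}

	int main(void)
	{
		/* register-operand form: bit 10 set, no address to build */
		return decode(1u << 10).is_reg ? 0 : 1;
	}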
- */ - exn = ((s.type & 0xa) == 0 || (s.type & 8)); - else - /* #GP(0) if the source operand is located in an - * execute-only code segment - */ - exn = ((s.type & 0xa) == 8); - if (exn) { - kvm_queue_exception_e(vcpu, GP_VECTOR, 0); - return 1; - } - /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. - */ - exn = (s.unusable != 0); - /* Protected mode: #GP(0)/#SS(0) if the memory - * operand is outside the segment limit. - */ - exn = exn || (off + sizeof(u64) > s.limit); - } - if (exn) { - kvm_queue_exception_e(vcpu, - seg_reg == VCPU_SREG_SS ? - SS_VECTOR : GP_VECTOR, - 0); - return 1; - } - - return 0; -} - -static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) -{ - gva_t gva; - struct x86_exception e; - - if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) - return 1; - - if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - - return 0; -} - -/* - * Allocate a shadow VMCS and associate it with the currently loaded - * VMCS, unless such a shadow VMCS already exists. The newly allocated - * VMCS is also VMCLEARed, so that it is ready for use. - */ -static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; - - /* - * We should allocate a shadow vmcs for vmcs01 only when L1 - * executes VMXON and free it when L1 executes VMXOFF. - * As it is invalid to execute VMXON twice, we shouldn't reach - * here when vmcs01 already have an allocated shadow vmcs. - */ - WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); - - if (!loaded_vmcs->shadow_vmcs) { - loaded_vmcs->shadow_vmcs = alloc_vmcs(true); - if (loaded_vmcs->shadow_vmcs) - vmcs_clear(loaded_vmcs->shadow_vmcs); - } - return loaded_vmcs->shadow_vmcs; -} - -static int enter_vmx_operation(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int r; - - r = alloc_loaded_vmcs(&vmx->nested.vmcs02); - if (r < 0) - goto out_vmcs02; - - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); - if (!vmx->nested.cached_vmcs12) - goto out_cached_vmcs12; - - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); - if (!vmx->nested.cached_shadow_vmcs12) - goto out_cached_shadow_vmcs12; - - if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) - goto out_shadow_vmcs; - - hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; - - vmx->nested.vpid02 = allocate_vpid(); - - vmx->nested.vmcs02_initialized = false; - vmx->nested.vmxon = true; - return 0; - -out_shadow_vmcs: - kfree(vmx->nested.cached_shadow_vmcs12); - -out_cached_shadow_vmcs12: - kfree(vmx->nested.cached_vmcs12); - -out_cached_vmcs12: - free_loaded_vmcs(&vmx->nested.vmcs02); - -out_vmcs02: - return -ENOMEM; -} - -/* - * Emulate the VMXON instruction. - * Currently, we just remember that VMX is active, and do not save or even - * inspect the argument to VMXON (the so-called "VMXON pointer") because we - * do not currently need to store anything in that guest-allocated memory - * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their - * argument is different from the VMXON pointer (which the spec says they do). 
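enter_vmx_operation() below is a textbook reverse-order unwind: each allocation gets a label, and a failure jumps to the label that frees everything allocated so far. The pattern in miniature (stand-in resources, userspace malloc for illustration only):

	#include <stdlib.h>

	struct nested_state {
		void *vmcs02, *cached_vmcs12, *cached_shadow_vmcs12;
	};

	static int setup_nested(struct nested_state *n)
	{
		n->vmcs02 = malloc(4096);
		if (!n->vmcs02)
			goto out;
		n->cached_vmcs12 = malloc(4096);
		if (!n->cached_vmcs12)
			goto out_vmcs02;
		n->cached_shadow_vmcs12 = malloc(4096);
		if (!n->cached_shadow_vmcs12)
			goto out_cached_vmcs12;
		return 0;

	out_cached_vmcs12:
		free(n->cached_vmcs12);
	out_vmcs02:
		free(n->vmcs02);
	out:
		return -1;	/* -ENOMEM in the kernel */
	}

	int main(void)
	{
		struct nested_state n = { 0 };

		if (setup_nested(&n) == 0) {
			free(n.cached_shadow_vmcs12);
			free(n.cached_vmcs12);
			free(n.vmcs02);
		}
		return 0;
	}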
- */ -static int handle_vmon(struct kvm_vcpu *vcpu) -{ - int ret; - gpa_t vmptr; - struct page *page; - struct vcpu_vmx *vmx = to_vmx(vcpu); - const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED - | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; - - /* - * The Intel VMX Instruction Reference lists a bunch of bits that are - * prerequisite to running VMXON, most notably cr4.VMXE must be set to - * 1 (see vmx_set_cr4() for when we allow the guest to set this). - * Otherwise, we should fail with #UD. But most faulting conditions - * have already been checked by hardware, prior to the VM-exit for - * VMXON. We do test guest cr4.VMXE because processor CR4 always has - * that bit set to 1 in non-root mode. - */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - /* CPL=0 must be checked manually. */ - if (vmx_get_cpl(vcpu)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - if (vmx->nested.vmxon) - return nested_vmx_failValid(vcpu, - VMXERR_VMXON_IN_VMX_ROOT_OPERATION); - - if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) - != VMXON_NEEDED_FEATURES) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; - - /* - * SDM 3: 24.11.5 - * The first 4 bytes of VMXON region contain the supported - * VMCS revision identifier - * - * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; - * which replaces physical address width with 32 - */ - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) - return nested_vmx_failInvalid(vcpu); - - page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) - return nested_vmx_failInvalid(vcpu); - - if (*(u32 *)kmap(page) != VMCS12_REVISION) { - kunmap(page); - kvm_release_page_clean(page); - return nested_vmx_failInvalid(vcpu); - } - kunmap(page); - kvm_release_page_clean(page); - - vmx->nested.vmxon_ptr = vmptr; - ret = enter_vmx_operation(vcpu); - if (ret) - return ret; - - return nested_vmx_succeed(vcpu); -} - -/* - * Intel's VMX Instruction Reference specifies a common set of prerequisites - * for running VMX instructions (except VMXON, whose prerequisites are - * slightly different). It also specifies what exception to inject otherwise. - * Note that many of these exceptions have priority over VM exits, so they - * don't have to be checked again here. 
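handle_vmon() above gates on IA32_FEATURE_CONTROL: the lock bit and the VMXON-outside-SMX enable must both be set before VMXON is allowed. As a tiny predicate (bit positions architectural; the macro names are shortened here):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FC_LOCKED		(1ull << 0)
	#define FC_VMXON_OUTSIDE_SMX	(1ull << 2)

	static bool vmxon_allowed(uint64_t feature_control)
	{
		const uint64_t needed = FC_LOCKED | FC_VMXON_OUTSIDE_SMX;

		/* both bits, not either: an unlocked MSR could still change */
		return (feature_control & needed) == needed;
	}

	int main(void)
	{
		printf("%d %d\n", vmxon_allowed(FC_LOCKED),
		       vmxon_allowed(FC_LOCKED | FC_VMXON_OUTSIDE_SMX));
		return 0;
	}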
- */ -static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) -{ - if (!to_vmx(vcpu)->nested.vmxon) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 0; - } - - if (vmx_get_cpl(vcpu)) { - kvm_inject_gp(vcpu, 0); - return 0; - } - - return 1; -} - -static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) -{ - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); - vmcs_write64(VMCS_LINK_POINTER, -1ull); -} - -static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (!vmx->nested.hv_evmcs) - return; - - kunmap(vmx->nested.hv_evmcs_page); - kvm_release_page_dirty(vmx->nested.hv_evmcs_page); - vmx->nested.hv_evmcs_vmptr = -1ull; - vmx->nested.hv_evmcs_page = NULL; - vmx->nested.hv_evmcs = NULL; -} - -static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (vmx->nested.current_vmptr == -1ull) - return; - - if (enable_shadow_vmcs) { - /* copy to memory all shadowed fields in case - they were modified */ - copy_shadow_to_vmcs12(vmx); - vmx->nested.need_vmcs12_sync = false; - vmx_disable_shadow_vmcs(vmx); - } - vmx->nested.posted_intr_nv = -1; - - /* Flush VMCS12 to guest memory */ - kvm_vcpu_write_guest_page(vcpu, - vmx->nested.current_vmptr >> PAGE_SHIFT, - vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); - - kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - - vmx->nested.current_vmptr = -1ull; -} - -/* - * Free whatever needs to be freed from vmx->nested when L1 goes down, or - * just stops using VMX. - */ -static void free_nested(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) - return; - - vmx->nested.vmxon = false; - vmx->nested.smm.vmxon = false; - free_vpid(vmx->nested.vpid02); - vmx->nested.posted_intr_nv = -1; - vmx->nested.current_vmptr = -1ull; - if (enable_shadow_vmcs) { - vmx_disable_shadow_vmcs(vmx); - vmcs_clear(vmx->vmcs01.shadow_vmcs); - free_vmcs(vmx->vmcs01.shadow_vmcs); - vmx->vmcs01.shadow_vmcs = NULL; - } - kfree(vmx->nested.cached_vmcs12); - kfree(vmx->nested.cached_shadow_vmcs12); - /* Unpin physical memory we referred to in the vmcs02 */ - if (vmx->nested.apic_access_page) { - kvm_release_page_dirty(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = NULL; - } - if (vmx->nested.virtual_apic_page) { - kvm_release_page_dirty(vmx->nested.virtual_apic_page); - vmx->nested.virtual_apic_page = NULL; - } - if (vmx->nested.pi_desc_page) { - kunmap(vmx->nested.pi_desc_page); - kvm_release_page_dirty(vmx->nested.pi_desc_page); - vmx->nested.pi_desc_page = NULL; - vmx->nested.pi_desc = NULL; - } - - kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - - nested_release_evmcs(vcpu); - - free_loaded_vmcs(&vmx->nested.vmcs02); -} - -/* Emulate the VMXOFF instruction */ -static int handle_vmoff(struct kvm_vcpu *vcpu) -{ - if (!nested_vmx_check_permission(vcpu)) - return 1; - free_nested(vcpu); - return nested_vmx_succeed(vcpu); -} - -/* Emulate the VMCLEAR instruction */ -static int handle_vmclear(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 zero = 0; - gpa_t vmptr; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; - - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) - return nested_vmx_failValid(vcpu, - VMXERR_VMCLEAR_INVALID_ADDRESS); - - if (vmptr == vmx->nested.vmxon_ptr) - return nested_vmx_failValid(vcpu, - 
VMXERR_VMCLEAR_VMXON_POINTER); - - if (vmx->nested.hv_evmcs_page) { - if (vmptr == vmx->nested.hv_evmcs_vmptr) - nested_release_evmcs(vcpu); - } else { - if (vmptr == vmx->nested.current_vmptr) - nested_release_vmcs12(vcpu); - - kvm_vcpu_write_guest(vcpu, - vmptr + offsetof(struct vmcs12, - launch_state), - &zero, sizeof(zero)); - } - - return nested_vmx_succeed(vcpu); -} - -static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); - -/* Emulate the VMLAUNCH instruction */ -static int handle_vmlaunch(struct kvm_vcpu *vcpu) -{ - return nested_vmx_run(vcpu, true); -} - -/* Emulate the VMRESUME instruction */ -static int handle_vmresume(struct kvm_vcpu *vcpu) -{ - - return nested_vmx_run(vcpu, false); -} - -/* - * Read a vmcs12 field. Since these can have varying lengths and we return - * one type, we chose the biggest type (u64) and zero-extend the return value - * to that size. Note that the caller, handle_vmread, might need to use only - * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of - * 64-bit fields are to be returned). - */ -static inline int vmcs12_read_any(struct vmcs12 *vmcs12, - unsigned long field, u64 *ret) -{ - short offset = vmcs_field_to_offset(field); - char *p; - - if (offset < 0) - return offset; - - p = (char *)vmcs12 + offset; - - switch (vmcs_field_width(field)) { - case VMCS_FIELD_WIDTH_NATURAL_WIDTH: - *ret = *((natural_width *)p); - return 0; - case VMCS_FIELD_WIDTH_U16: - *ret = *((u16 *)p); - return 0; - case VMCS_FIELD_WIDTH_U32: - *ret = *((u32 *)p); - return 0; - case VMCS_FIELD_WIDTH_U64: - *ret = *((u64 *)p); - return 0; - default: - WARN_ON(1); - return -ENOENT; - } -} - - -static inline int vmcs12_write_any(struct vmcs12 *vmcs12, - unsigned long field, u64 field_value){ - short offset = vmcs_field_to_offset(field); - char *p = (char *)vmcs12 + offset; - if (offset < 0) - return offset; - - switch (vmcs_field_width(field)) { - case VMCS_FIELD_WIDTH_U16: - *(u16 *)p = field_value; - return 0; - case VMCS_FIELD_WIDTH_U32: - *(u32 *)p = field_value; - return 0; - case VMCS_FIELD_WIDTH_U64: - *(u64 *)p = field_value; - return 0; - case VMCS_FIELD_WIDTH_NATURAL_WIDTH: - *(natural_width *)p = field_value; - return 0; - default: - WARN_ON(1); - return -ENOENT; - } - -} - -static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) -{ - struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; - struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; - - /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ - vmcs12->tpr_threshold = evmcs->tpr_threshold; - vmcs12->guest_rip = evmcs->guest_rip; - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { - vmcs12->guest_rsp = evmcs->guest_rsp; - vmcs12->guest_rflags = evmcs->guest_rflags; - vmcs12->guest_interruptibility_info = - evmcs->guest_interruptibility_info; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { - vmcs12->cpu_based_vm_exec_control = - evmcs->cpu_based_vm_exec_control; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { - vmcs12->exception_bitmap = evmcs->exception_bitmap; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { - vmcs12->vm_entry_controls = evmcs->vm_entry_controls; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { - vmcs12->vm_entry_intr_info_field = - evmcs->vm_entry_intr_info_field; - vmcs12->vm_entry_exception_error_code = - 
evmcs->vm_entry_exception_error_code; - vmcs12->vm_entry_instruction_len = - evmcs->vm_entry_instruction_len; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { - vmcs12->host_ia32_pat = evmcs->host_ia32_pat; - vmcs12->host_ia32_efer = evmcs->host_ia32_efer; - vmcs12->host_cr0 = evmcs->host_cr0; - vmcs12->host_cr3 = evmcs->host_cr3; - vmcs12->host_cr4 = evmcs->host_cr4; - vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; - vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; - vmcs12->host_rip = evmcs->host_rip; - vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; - vmcs12->host_es_selector = evmcs->host_es_selector; - vmcs12->host_cs_selector = evmcs->host_cs_selector; - vmcs12->host_ss_selector = evmcs->host_ss_selector; - vmcs12->host_ds_selector = evmcs->host_ds_selector; - vmcs12->host_fs_selector = evmcs->host_fs_selector; - vmcs12->host_gs_selector = evmcs->host_gs_selector; - vmcs12->host_tr_selector = evmcs->host_tr_selector; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { - vmcs12->pin_based_vm_exec_control = - evmcs->pin_based_vm_exec_control; - vmcs12->vm_exit_controls = evmcs->vm_exit_controls; - vmcs12->secondary_vm_exec_control = - evmcs->secondary_vm_exec_control; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { - vmcs12->io_bitmap_a = evmcs->io_bitmap_a; - vmcs12->io_bitmap_b = evmcs->io_bitmap_b; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { - vmcs12->msr_bitmap = evmcs->msr_bitmap; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { - vmcs12->guest_es_base = evmcs->guest_es_base; - vmcs12->guest_cs_base = evmcs->guest_cs_base; - vmcs12->guest_ss_base = evmcs->guest_ss_base; - vmcs12->guest_ds_base = evmcs->guest_ds_base; - vmcs12->guest_fs_base = evmcs->guest_fs_base; - vmcs12->guest_gs_base = evmcs->guest_gs_base; - vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; - vmcs12->guest_tr_base = evmcs->guest_tr_base; - vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; - vmcs12->guest_idtr_base = evmcs->guest_idtr_base; - vmcs12->guest_es_limit = evmcs->guest_es_limit; - vmcs12->guest_cs_limit = evmcs->guest_cs_limit; - vmcs12->guest_ss_limit = evmcs->guest_ss_limit; - vmcs12->guest_ds_limit = evmcs->guest_ds_limit; - vmcs12->guest_fs_limit = evmcs->guest_fs_limit; - vmcs12->guest_gs_limit = evmcs->guest_gs_limit; - vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; - vmcs12->guest_tr_limit = evmcs->guest_tr_limit; - vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; - vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; - vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; - vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; - vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; - vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; - vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; - vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; - vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; - vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; - vmcs12->guest_es_selector = evmcs->guest_es_selector; - vmcs12->guest_cs_selector = evmcs->guest_cs_selector; - vmcs12->guest_ss_selector = evmcs->guest_ss_selector; - vmcs12->guest_ds_selector = evmcs->guest_ds_selector; - vmcs12->guest_fs_selector = evmcs->guest_fs_selector; - vmcs12->guest_gs_selector = evmcs->guest_gs_selector; - 
vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; - vmcs12->guest_tr_selector = evmcs->guest_tr_selector; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { - vmcs12->tsc_offset = evmcs->tsc_offset; - vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; - vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { - vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; - vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; - vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; - vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; - vmcs12->guest_cr0 = evmcs->guest_cr0; - vmcs12->guest_cr3 = evmcs->guest_cr3; - vmcs12->guest_cr4 = evmcs->guest_cr4; - vmcs12->guest_dr7 = evmcs->guest_dr7; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { - vmcs12->host_fs_base = evmcs->host_fs_base; - vmcs12->host_gs_base = evmcs->host_gs_base; - vmcs12->host_tr_base = evmcs->host_tr_base; - vmcs12->host_gdtr_base = evmcs->host_gdtr_base; - vmcs12->host_idtr_base = evmcs->host_idtr_base; - vmcs12->host_rsp = evmcs->host_rsp; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { - vmcs12->ept_pointer = evmcs->ept_pointer; - vmcs12->virtual_processor_id = evmcs->virtual_processor_id; - } - - if (unlikely(!(evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { - vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; - vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; - vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; - vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; - vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; - vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; - vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; - vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; - vmcs12->guest_pending_dbg_exceptions = - evmcs->guest_pending_dbg_exceptions; - vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; - vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; - vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; - vmcs12->guest_activity_state = evmcs->guest_activity_state; - vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; - } - - /* - * Not used? 
- * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; - * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; - * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; - * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; - * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; - * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; - * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; - * vmcs12->page_fault_error_code_mask = - * evmcs->page_fault_error_code_mask; - * vmcs12->page_fault_error_code_match = - * evmcs->page_fault_error_code_match; - * vmcs12->cr3_target_count = evmcs->cr3_target_count; - * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; - * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; - * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; - */ - - /* - * Read only fields: - * vmcs12->guest_physical_address = evmcs->guest_physical_address; - * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; - * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; - * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; - * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; - * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; - * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; - * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; - * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; - * vmcs12->exit_qualification = evmcs->exit_qualification; - * vmcs12->guest_linear_address = evmcs->guest_linear_address; - * - * Not present in struct vmcs12: - * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; - * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; - * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; - * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; - */ - - return 0; -} - -static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) -{ - struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; - struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; - - /* - * Should not be changed by KVM: - * - * evmcs->host_es_selector = vmcs12->host_es_selector; - * evmcs->host_cs_selector = vmcs12->host_cs_selector; - * evmcs->host_ss_selector = vmcs12->host_ss_selector; - * evmcs->host_ds_selector = vmcs12->host_ds_selector; - * evmcs->host_fs_selector = vmcs12->host_fs_selector; - * evmcs->host_gs_selector = vmcs12->host_gs_selector; - * evmcs->host_tr_selector = vmcs12->host_tr_selector; - * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; - * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; - * evmcs->host_cr0 = vmcs12->host_cr0; - * evmcs->host_cr3 = vmcs12->host_cr3; - * evmcs->host_cr4 = vmcs12->host_cr4; - * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; - * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; - * evmcs->host_rip = vmcs12->host_rip; - * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; - * evmcs->host_fs_base = vmcs12->host_fs_base; - * evmcs->host_gs_base = vmcs12->host_gs_base; - * evmcs->host_tr_base = vmcs12->host_tr_base; - * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; - * evmcs->host_idtr_base = vmcs12->host_idtr_base; - * evmcs->host_rsp = vmcs12->host_rsp; - * sync_vmcs12() doesn't read these: - * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; - * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; - * evmcs->msr_bitmap = vmcs12->msr_bitmap; - * evmcs->ept_pointer = vmcs12->ept_pointer; - * 
evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; - * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; - * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; - * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; - * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; - * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; - * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; - * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; - * evmcs->tpr_threshold = vmcs12->tpr_threshold; - * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; - * evmcs->exception_bitmap = vmcs12->exception_bitmap; - * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; - * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; - * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; - * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; - * evmcs->page_fault_error_code_mask = - * vmcs12->page_fault_error_code_mask; - * evmcs->page_fault_error_code_match = - * vmcs12->page_fault_error_code_match; - * evmcs->cr3_target_count = vmcs12->cr3_target_count; - * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; - * evmcs->tsc_offset = vmcs12->tsc_offset; - * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; - * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; - * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; - * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; - * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; - * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; - * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; - * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; - * - * Not present in struct vmcs12: - * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; - * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; - * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; - * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; - */ - - evmcs->guest_es_selector = vmcs12->guest_es_selector; - evmcs->guest_cs_selector = vmcs12->guest_cs_selector; - evmcs->guest_ss_selector = vmcs12->guest_ss_selector; - evmcs->guest_ds_selector = vmcs12->guest_ds_selector; - evmcs->guest_fs_selector = vmcs12->guest_fs_selector; - evmcs->guest_gs_selector = vmcs12->guest_gs_selector; - evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; - evmcs->guest_tr_selector = vmcs12->guest_tr_selector; - - evmcs->guest_es_limit = vmcs12->guest_es_limit; - evmcs->guest_cs_limit = vmcs12->guest_cs_limit; - evmcs->guest_ss_limit = vmcs12->guest_ss_limit; - evmcs->guest_ds_limit = vmcs12->guest_ds_limit; - evmcs->guest_fs_limit = vmcs12->guest_fs_limit; - evmcs->guest_gs_limit = vmcs12->guest_gs_limit; - evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; - evmcs->guest_tr_limit = vmcs12->guest_tr_limit; - evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; - evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; - - evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; - evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; - evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; - evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; - evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; - evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; - evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; - evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; - - evmcs->guest_es_base = vmcs12->guest_es_base; - 
evmcs->guest_cs_base = vmcs12->guest_cs_base; - evmcs->guest_ss_base = vmcs12->guest_ss_base; - evmcs->guest_ds_base = vmcs12->guest_ds_base; - evmcs->guest_fs_base = vmcs12->guest_fs_base; - evmcs->guest_gs_base = vmcs12->guest_gs_base; - evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; - evmcs->guest_tr_base = vmcs12->guest_tr_base; - evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; - evmcs->guest_idtr_base = vmcs12->guest_idtr_base; - - evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; - evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; - - evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; - evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; - evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; - evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; - - evmcs->guest_pending_dbg_exceptions = - vmcs12->guest_pending_dbg_exceptions; - evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; - evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; - - evmcs->guest_activity_state = vmcs12->guest_activity_state; - evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; - - evmcs->guest_cr0 = vmcs12->guest_cr0; - evmcs->guest_cr3 = vmcs12->guest_cr3; - evmcs->guest_cr4 = vmcs12->guest_cr4; - evmcs->guest_dr7 = vmcs12->guest_dr7; - - evmcs->guest_physical_address = vmcs12->guest_physical_address; - - evmcs->vm_instruction_error = vmcs12->vm_instruction_error; - evmcs->vm_exit_reason = vmcs12->vm_exit_reason; - evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; - evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; - evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; - evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; - evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; - evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; - - evmcs->exit_qualification = vmcs12->exit_qualification; - - evmcs->guest_linear_address = vmcs12->guest_linear_address; - evmcs->guest_rsp = vmcs12->guest_rsp; - evmcs->guest_rflags = vmcs12->guest_rflags; - - evmcs->guest_interruptibility_info = - vmcs12->guest_interruptibility_info; - evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; - evmcs->vm_entry_controls = vmcs12->vm_entry_controls; - evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; - evmcs->vm_entry_exception_error_code = - vmcs12->vm_entry_exception_error_code; - evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; - - evmcs->guest_rip = vmcs12->guest_rip; - - evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; - - return 0; -} - -/* - * Copy the writable VMCS shadow fields back to the VMCS12, in case - * they have been modified by the L1 guest. Note that the "read-only" - * VM-exit information fields are actually writable if the vCPU is - * configured to support "VMWRITE to any supported field in the VMCS." - */ -static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) -{ - const u16 *fields[] = { - shadow_read_write_fields, - shadow_read_only_fields - }; - const int max_fields[] = { - max_shadow_read_write_fields, - max_shadow_read_only_fields - }; - int i, q; - unsigned long field; - u64 field_value; - struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; - - preempt_disable(); - - vmcs_load(shadow_vmcs); - - for (q = 0; q < ARRAY_SIZE(fields); q++) { - for (i = 0; i < max_fields[q]; i++) { - field = fields[q][i]; - field_value = __vmcs_readl(field); - vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value); - } - /* - * Skip the VM-exit information fields if they are read-only. 
- */ - if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) - break; - } - - vmcs_clear(shadow_vmcs); - vmcs_load(vmx->loaded_vmcs->vmcs); - - preempt_enable(); -} - -static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) -{ - const u16 *fields[] = { - shadow_read_write_fields, - shadow_read_only_fields - }; - const int max_fields[] = { - max_shadow_read_write_fields, - max_shadow_read_only_fields - }; - int i, q; - unsigned long field; - u64 field_value = 0; - struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; - - vmcs_load(shadow_vmcs); - - for (q = 0; q < ARRAY_SIZE(fields); q++) { - for (i = 0; i < max_fields[q]; i++) { - field = fields[q][i]; - vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value); - __vmcs_writel(field, field_value); - } - } - - vmcs_clear(shadow_vmcs); - vmcs_load(vmx->loaded_vmcs->vmcs); -} - -static int handle_vmread(struct kvm_vcpu *vcpu) -{ - unsigned long field; - u64 field_value; - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - gva_t gva = 0; - struct vmcs12 *vmcs12; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) - return nested_vmx_failInvalid(vcpu); - - if (!is_guest_mode(vcpu)) - vmcs12 = get_vmcs12(vcpu); - else { - /* - * When vmcs->vmcs_link_pointer is -1ull, any VMREAD - * to shadowed-field sets the ALU flags for VMfailInvalid. - */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) - return nested_vmx_failInvalid(vcpu); - vmcs12 = get_shadow_vmcs12(vcpu); - } - - /* Decode instruction info and find the field to read */ - field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); - /* Read the field, zero-extended to a u64 field_value */ - if (vmcs12_read_any(vmcs12, field, &field_value) < 0) - return nested_vmx_failValid(vcpu, - VMXERR_UNSUPPORTED_VMCS_COMPONENT); - - /* - * Now copy part of this value to register or memory, as requested. - * Note that the number of bits actually copied is 32 or 64 depending - * on the guest's mode (32 or 64 bit), not on the given field's length. - */ - if (vmx_instruction_info & (1u << 10)) { - kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), - field_value); - } else { - if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, true, &gva)) - return 1; - /* _system ok, nested_vmx_check_permission has verified cpl=0 */ - kvm_write_guest_virt_system(vcpu, gva, &field_value, - (is_long_mode(vcpu) ? 8 : 4), NULL); - } - - return nested_vmx_succeed(vcpu); -} - - -static int handle_vmwrite(struct kvm_vcpu *vcpu) -{ - unsigned long field; - gva_t gva; - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - - /* The value to write might be 32 or 64 bits, depending on L1's long - * mode, and eventually we need to write that into a field of several - * possible lengths. The code below first zero-extends the value to 64 - * bit (field_value), and then copies only the appropriate number of - * bits into the vmcs12 field. 
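[Illustrative aside: handle_vmread() above and handle_vmwrite() below decode the same VMX instruction-information word. As a reading aid, here is the bit layout they rely on, expressed as a sketch; the struct and helper are not part of the patch, only the bit positions are taken from the code itself.]

/* Illustrative: instruction-info bits used by the VMREAD/VMWRITE
 * emulation. Bit 10 selects a register operand over a memory operand,
 * bits 3..6 name the register carrying the data, and bits 28..31 name
 * the register carrying the VMCS field encoding. */
struct vmx_instr_operands {
	bool reg_operand;       /* bit 10 */
	unsigned int data_reg;  /* bits 3..6 */
	unsigned int field_reg; /* bits 28..31 */
};

static struct vmx_instr_operands decode_vmx_instr_info(u32 info)
{
	return (struct vmx_instr_operands) {
		.reg_operand = info & (1u << 10),
		.data_reg    = (info >> 3) & 0xf,
		.field_reg   = (info >> 28) & 0xf,
	};
}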
- */ - u64 field_value = 0; - struct x86_exception e; - struct vmcs12 *vmcs12; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (vmx->nested.current_vmptr == -1ull) - return nested_vmx_failInvalid(vcpu); - - if (vmx_instruction_info & (1u << 10)) - field_value = kvm_register_readl(vcpu, - (((vmx_instruction_info) >> 3) & 0xf)); - else { - if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, false, &gva)) - return 1; - if (kvm_read_guest_virt(vcpu, gva, &field_value, - (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - } - - - field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); - /* - * If the vCPU supports "VMWRITE to any supported field in the - * VMCS," then the "read-only" fields are actually read/write. - */ - if (vmcs_field_readonly(field) && - !nested_cpu_has_vmwrite_any_field(vcpu)) - return nested_vmx_failValid(vcpu, - VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); - - if (!is_guest_mode(vcpu)) - vmcs12 = get_vmcs12(vcpu); - else { - /* - * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE - * to shadowed-field sets the ALU flags for VMfailInvalid. - */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) - return nested_vmx_failInvalid(vcpu); - vmcs12 = get_shadow_vmcs12(vcpu); - } - - if (vmcs12_write_any(vmcs12, field, field_value) < 0) - return nested_vmx_failValid(vcpu, - VMXERR_UNSUPPORTED_VMCS_COMPONENT); - - /* - * Do not track vmcs12 dirty-state if in guest-mode - * as we actually dirty shadow vmcs12 instead of vmcs12. - */ - if (!is_guest_mode(vcpu)) { - switch (field) { -#define SHADOW_FIELD_RW(x) case x: -#include "vmx_shadow_fields.h" - /* - * The fields that can be updated by L1 without a vmexit are - * always updated in the vmcs02, the others go down the slow - * path of prepare_vmcs02. - */ - break; - default: - vmx->nested.dirty_vmcs12 = true; - break; - } - } - - return nested_vmx_succeed(vcpu); -} - -static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) -{ - vmx->nested.current_vmptr = vmptr; - if (enable_shadow_vmcs) { - vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_SHADOW_VMCS); - vmcs_write64(VMCS_LINK_POINTER, - __pa(vmx->vmcs01.shadow_vmcs)); - vmx->nested.need_vmcs12_sync = true; - } - vmx->nested.dirty_vmcs12 = true; -} - -/* Emulate the VMPTRLD instruction */ -static int handle_vmptrld(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t vmptr; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; - - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) - return nested_vmx_failValid(vcpu, - VMXERR_VMPTRLD_INVALID_ADDRESS); - - if (vmptr == vmx->nested.vmxon_ptr) - return nested_vmx_failValid(vcpu, - VMXERR_VMPTRLD_VMXON_POINTER); - - /* Forbid normal VMPTRLD if Enlightened version was used */ - if (vmx->nested.hv_evmcs) - return 1; - - if (vmx->nested.current_vmptr != vmptr) { - struct vmcs12 *new_vmcs12; - struct page *page; - page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) - return nested_vmx_failInvalid(vcpu); - - new_vmcs12 = kmap(page); - if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || - (new_vmcs12->hdr.shadow_vmcs && - !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { - kunmap(page); - kvm_release_page_clean(page); - return nested_vmx_failValid(vcpu, - VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); - } - - nested_release_vmcs12(vcpu); - - /* - * Load VMCS12 from guest memory since it is not already - * cached. 
- */ - memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); - kunmap(page); - kvm_release_page_clean(page); - - set_current_vmptr(vmx, vmptr); - } - - return nested_vmx_succeed(vcpu); -} - -/* - * This is the equivalent of the nested hypervisor executing the vmptrld - * instruction. - */ -static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, - bool from_launch) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct hv_vp_assist_page assist_page; - - if (likely(!vmx->nested.enlightened_vmcs_enabled)) - return 1; - - if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page))) - return 1; - - if (unlikely(!assist_page.enlighten_vmentry)) - return 1; - - if (unlikely(assist_page.current_nested_vmcs != - vmx->nested.hv_evmcs_vmptr)) { - - if (!vmx->nested.hv_evmcs) - vmx->nested.current_vmptr = -1ull; - - nested_release_evmcs(vcpu); - - vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page( - vcpu, assist_page.current_nested_vmcs); - - if (unlikely(is_error_page(vmx->nested.hv_evmcs_page))) - return 0; - - vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); - - /* - * Currently, KVM only supports eVMCS version 1 - * (== KVM_EVMCS_VERSION) and thus we expect the guest to set - * the first u32 field of the eVMCS, which specifies the eVMCS - * VersionNumber, to this value. - * - * The guest can discover the eVMCS versions supported by the - * host by examining CPUID.0x4000000A.EAX[0:15]. The host - * userspace VMM is expected to set this CPUID leaf according - * to the value returned in vmcs_version from - * nested_enable_evmcs(). - * - * However, it turns out that Microsoft Hyper-V fails to comply - * with its own invented interface: when Hyper-V uses eVMCS, it - * sets the first u32 field of the eVMCS to the revision_id - * specified in MSR_IA32_VMX_BASIC, instead of to an eVMCS - * version number from the supported versions specified in - * CPUID.0x4000000A.EAX[0:15]. - * - * To work around this Hyper-V bug, we accept here either a - * supported eVMCS version or the VMCS12 revision_id as valid - * values for the first u32 field of the eVMCS. - */ - if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && - (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { - nested_release_evmcs(vcpu); - return 0; - } - - vmx->nested.dirty_vmcs12 = true; - /* - * As we keep L2 state for one guest only, the 'hv_clean_fields' - * mask can't be used when we switch between them. Reset it here - * for simplicity. - */ - vmx->nested.hv_evmcs->hv_clean_fields &= - ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
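[Illustrative aside: the version-acceptance rule described in the comment above boils down to a two-value check. A minimal restatement, using only constants already referenced in this patch; the helper name is not from the code.]

/* Illustrative only: accept the genuine eVMCS version, or tolerate
 * the Hyper-V quirk of writing the VMCS12 revision_id instead. */
static bool evmcs_revision_acceptable(u32 revision_id)
{
	return revision_id == KVM_EVMCS_VERSION ||
	       revision_id == VMCS12_REVISION;
}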
- - /* - * Unlike normal vmcs12, enlightened vmcs12 is not fully - * reloaded from guest's memory (read only fields, fields not - * present in struct hv_enlightened_vmcs, ...). Make sure there - * are no leftovers. - */ - if (from_launch) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - memset(vmcs12, 0, sizeof(*vmcs12)); - vmcs12->hdr.revision_id = VMCS12_REVISION; - } - - } - return 1; -} - -/* Emulate the VMPTRST instruction */ -static int handle_vmptrst(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); - u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); - gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; - struct x86_exception e; - gva_t gva; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) - return 1; - - if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) - return 1; - /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr, - sizeof(gpa_t), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - return nested_vmx_succeed(vcpu); -} - -/* Emulate the INVEPT instruction */ -static int handle_invept(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 vmx_instruction_info, types; - unsigned long type; - gva_t gva; - struct x86_exception e; - struct { - u64 eptp, gpa; - } operand; - - if (!(vmx->nested.msrs.secondary_ctls_high & - SECONDARY_EXEC_ENABLE_EPT) || - !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); - - types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; - - if (type >= 32 || !(types & (1 << type))) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - - /* According to the Intel VMX instruction reference, the memory - * operand is read even if it isn't needed (e.g., for type==global) - */ - if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, &gva)) - return 1; - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - - switch (type) { - case VMX_EPT_EXTENT_GLOBAL: - /* - * TODO: track mappings and invalidate - * single context requests appropriately - */ - case VMX_EPT_EXTENT_CONTEXT: - kvm_mmu_sync_roots(vcpu); - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - break; - default: - BUG_ON(1); - break; - } - - return nested_vmx_succeed(vcpu); -} - -static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - return vmx->nested.vpid02 ? 
vmx->nested.vpid02 : vmx->vpid; -} - -static int handle_invvpid(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 vmx_instruction_info; - unsigned long type, types; - gva_t gva; - struct x86_exception e; - struct { - u64 vpid; - u64 gla; - } operand; - u16 vpid02; - - if (!(vmx->nested.msrs.secondary_ctls_high & - SECONDARY_EXEC_ENABLE_VPID) || - !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); - - types = (vmx->nested.msrs.vpid_caps & - VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; - - if (type >= 32 || !(types & (1 << type))) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - - /* according to the intel vmx instruction reference, the memory - * operand is read even if it isn't needed (e.g., for type==global) - */ - if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, &gva)) - return 1; - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - if (operand.vpid >> 16) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - - vpid02 = nested_get_vpid02(vcpu); - switch (type) { - case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: - if (!operand.vpid || - is_noncanonical_address(operand.gla, vcpu)) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - if (cpu_has_vmx_invvpid_individual_addr()) { - __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, - vpid02, operand.gla); - } else - __vmx_flush_tlb(vcpu, vpid02, false); - break; - case VMX_VPID_EXTENT_SINGLE_CONTEXT: - case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: - if (!operand.vpid) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - __vmx_flush_tlb(vcpu, vpid02, false); - break; - case VMX_VPID_EXTENT_ALL_CONTEXT: - __vmx_flush_tlb(vcpu, vpid02, false); - break; - default: - WARN_ON_ONCE(1); - return kvm_skip_emulated_instruction(vcpu); - } - - return nested_vmx_succeed(vcpu); -} - -static int handle_invpcid(struct kvm_vcpu *vcpu) -{ - u32 vmx_instruction_info; - unsigned long type; - bool pcid_enabled; - gva_t gva; - struct x86_exception e; - unsigned i; - unsigned long roots_to_free = 0; - struct { - u64 pcid; - u64 gla; - } operand; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); - - if (type > 3) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - /* According to the Intel instruction reference, the memory operand - * is read even if it isn't needed (e.g., for type==all) - */ - if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, &gva)) - return 1; - - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_page_fault(vcpu, &e); - return 1; - } - - if (operand.pcid >> 12 != 0) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); - - switch (type) { - case INVPCID_TYPE_INDIV_ADDR: - if ((!pcid_enabled && (operand.pcid != 0)) || - is_noncanonical_address(operand.gla, vcpu)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); - return 
kvm_skip_emulated_instruction(vcpu); - - case INVPCID_TYPE_SINGLE_CTXT: - if (!pcid_enabled && (operand.pcid != 0)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - if (kvm_get_active_pcid(vcpu) == operand.pcid) { - kvm_mmu_sync_roots(vcpu); - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) - == operand.pcid) - roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); - - kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); - /* - * If neither the current cr3 nor any of the prev_roots use the - * given PCID, then nothing needs to be done here because a - * resync will happen anyway before switching to any other CR3. - */ - - return kvm_skip_emulated_instruction(vcpu); - - case INVPCID_TYPE_ALL_NON_GLOBAL: - /* - * Currently, KVM doesn't mark global entries in the shadow - * page tables, so a non-global flush just degenerates to a - * global flush. If needed, we could optimize this later by - * keeping track of global entries in shadow page tables. - */ - - /* fall-through */ - case INVPCID_TYPE_ALL_INCL_GLOBAL: - kvm_mmu_unload(vcpu); - return kvm_skip_emulated_instruction(vcpu); - - default: - BUG(); /* We have already checked above that type <= 3 */ - } -} - -static int handle_pml_full(struct kvm_vcpu *vcpu) -{ - unsigned long exit_qualification; - - trace_kvm_pml_full(vcpu->vcpu_id); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - /* - * If the PML buffer became full while executing an IRET from NMI, - * the "blocked by NMI" bit has to be set before the next VM entry. - */ - if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && - enable_vnmi && - (exit_qualification & INTR_INFO_UNBLOCK_NMI)) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - - /* - * The PML buffer was already flushed at the beginning of the VMEXIT, - * so there is nothing to do here, and no userspace involvement is - * needed for PML. 
- */ - return 1; -} - -static int handle_preemption_timer(struct kvm_vcpu *vcpu) -{ - if (!to_vmx(vcpu)->req_immediate_exit) - kvm_lapic_expired_hv_timer(vcpu); - return 1; -} - -static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int maxphyaddr = cpuid_maxphyaddr(vcpu); - - /* Check for memory type validity */ - switch (address & VMX_EPTP_MT_MASK) { - case VMX_EPTP_MT_UC: - if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)) - return false; - break; - case VMX_EPTP_MT_WB: - if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)) - return false; - break; - default: - return false; - } - - /* only a page-walk length of 4 is valid */ - if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) - return false; - - /* Reserved bits should not be set */ - if (address >> maxphyaddr || ((address >> 7) & 0x1f)) - return false; - - /* AD, if set, should be supported */ - if (address & VMX_EPTP_AD_ENABLE_BIT) { - if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)) - return false; - } - - return true; -} - -static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; - u64 address; - bool accessed_dirty; - struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - - if (!nested_cpu_has_eptp_switching(vmcs12) || - !nested_cpu_has_ept(vmcs12)) - return 1; - - if (index >= VMFUNC_EPTP_ENTRIES) - return 1; - - - if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, - &address, index * 8, 8)) - return 1; - - accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); - - /* - * If the (L2) guest does a vmfunc to the currently - * active ept pointer, we don't have to do anything else - */ - if (vmcs12->ept_pointer != address) { - if (!valid_ept_address(vcpu, address)) - return 1; - - kvm_mmu_unload(vcpu); - mmu->ept_ad = accessed_dirty; - mmu->mmu_role.base.ad_disabled = !accessed_dirty; - vmcs12->ept_pointer = address; - /* - * TODO: Check what's the correct approach in case - * mmu reload fails. Currently, we just let the next - * reload potentially fail - */ - kvm_mmu_reload(vcpu); - } - - return 0; -} - -static int handle_vmfunc(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs12 *vmcs12; - u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; - - /* - * VMFUNC is only supported for nested guests, but we always enable the - * secondary control for simplicity; for non-nested mode, fake that we - * didn't enable it by injecting #UD. - */ - if (!is_guest_mode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - - vmcs12 = get_vmcs12(vcpu); - if ((vmcs12->vm_function_control & (1 << function)) == 0) - goto fail; - - switch (function) { - case 0: - if (nested_vmx_eptp_switching(vcpu, vmcs12)) - goto fail; - break; - default: - goto fail; - } - return kvm_skip_emulated_instruction(vcpu); - -fail: - nested_vmx_vmexit(vcpu, vmx->exit_reason, - vmcs_read32(VM_EXIT_INTR_INFO), - vmcs_readl(EXIT_QUALIFICATION)); - return 1; -} - -static int handle_encls(struct kvm_vcpu *vcpu) -{ - /* - * SGX virtualization is not yet supported. There is no software - * enable bit for SGX, so we have to trap ENCLS and inject a #UD - * to prevent the guest from executing ENCLS. - */ - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; -} - -/* - * The exit handlers return 1 if the exit was handled fully and guest execution - * may resume. Otherwise they set the kvm_run parameter to indicate what needs - * to be done to userspace and return 0. - */
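[Illustrative aside on valid_ept_address() above: the EPTP fields it validates can be decoded as below. The masks are the ones used in the function; the helper and variable names are hypothetical.]

/* Illustrative decode of the EPTP layout checked by valid_ept_address():
 * memory type in bits 2:0, page-walk length in bits 5:3, the A/D-enable
 * bit at bit 6, and reserved bits 11:7, which must be zero. */
static void decode_eptp(u64 eptp)
{
	u64 mt = eptp & VMX_EPTP_MT_MASK;         /* UC or WB accepted   */
	u64 pwl = eptp & VMX_EPTP_PWL_MASK;       /* must be VMX_EPTP_PWL_4 */
	bool ad = eptp & VMX_EPTP_AD_ENABLE_BIT;  /* needs VMX_EPT_AD_BIT */
	u64 rsvd = (eptp >> 7) & 0x1f;            /* must be zero        */

	(void)mt; (void)pwl; (void)ad; (void)rsvd;
}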
-static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { - [EXIT_REASON_EXCEPTION_NMI] = handle_exception, - [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, - [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, - [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, - [EXIT_REASON_IO_INSTRUCTION] = handle_io, - [EXIT_REASON_CR_ACCESS] = handle_cr, - [EXIT_REASON_DR_ACCESS] = handle_dr, - [EXIT_REASON_CPUID] = handle_cpuid, - [EXIT_REASON_MSR_READ] = handle_rdmsr, - [EXIT_REASON_MSR_WRITE] = handle_wrmsr, - [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, - [EXIT_REASON_HLT] = handle_halt, - [EXIT_REASON_INVD] = handle_invd, - [EXIT_REASON_INVLPG] = handle_invlpg, - [EXIT_REASON_RDPMC] = handle_rdpmc, - [EXIT_REASON_VMCALL] = handle_vmcall, - [EXIT_REASON_VMCLEAR] = handle_vmclear, - [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, - [EXIT_REASON_VMPTRLD] = handle_vmptrld, - [EXIT_REASON_VMPTRST] = handle_vmptrst, - [EXIT_REASON_VMREAD] = handle_vmread, - [EXIT_REASON_VMRESUME] = handle_vmresume, - [EXIT_REASON_VMWRITE] = handle_vmwrite, - [EXIT_REASON_VMOFF] = handle_vmoff, - [EXIT_REASON_VMON] = handle_vmon, - [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, - [EXIT_REASON_APIC_ACCESS] = handle_apic_access, - [EXIT_REASON_APIC_WRITE] = handle_apic_write, - [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, - [EXIT_REASON_WBINVD] = handle_wbinvd, - [EXIT_REASON_XSETBV] = handle_xsetbv, - [EXIT_REASON_TASK_SWITCH] = handle_task_switch, - [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, - [EXIT_REASON_GDTR_IDTR] = handle_desc, - [EXIT_REASON_LDTR_TR] = handle_desc, - [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, - [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, - [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, - [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, - [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, - [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, - [EXIT_REASON_INVEPT] = handle_invept, - [EXIT_REASON_INVVPID] = handle_invvpid, - [EXIT_REASON_RDRAND] = handle_invalid_op, - [EXIT_REASON_RDSEED] = handle_invalid_op, - [EXIT_REASON_XSAVES] = handle_xsaves, - [EXIT_REASON_XRSTORS] = handle_xrstors, - [EXIT_REASON_PML_FULL] = handle_pml_full, - [EXIT_REASON_INVPCID] = handle_invpcid, - [EXIT_REASON_VMFUNC] = handle_vmfunc, - [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, - [EXIT_REASON_ENCLS] = handle_encls, -}; - -static const int kvm_vmx_max_exit_handlers = - ARRAY_SIZE(kvm_vmx_exit_handlers); - -static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - unsigned long exit_qualification; - gpa_t bitmap, last_bitmap; - unsigned int port; - int size; - u8 b; - - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - port = exit_qualification >> 16; - size = (exit_qualification & 7) + 1; - - last_bitmap = (gpa_t)-1; - b = -1; - - while (size > 0) { - if (port < 0x8000) - bitmap = vmcs12->io_bitmap_a; - else if (port < 0x10000) - bitmap = vmcs12->io_bitmap_b; - else - return true; - bitmap += (port & 0x7fff) / 8; - - if (last_bitmap != bitmap) - if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) - return true; - if (b & (1 << (port & 7))) - return true; - - port++; - size--; - last_bitmap = bitmap; - } - - return false; -} - -/* - * Return 1 if we should exit from L2 to L1 to handle an MSR access, - * rather than handle it ourselves in L0. 
I.e., check whether L1 expressed - * disinterest in the current event (read or write a specific MSR) by using an - * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. - */ -static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, u32 exit_reason) -{ - u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; - gpa_t bitmap; - - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) - return true; - - /* - * The MSR_BITMAP page is divided into four 1024-byte bitmaps, - * for the four combinations of read/write and low/high MSR numbers. - * First we need to figure out which of the four to use: - */ - bitmap = vmcs12->msr_bitmap; - if (exit_reason == EXIT_REASON_MSR_WRITE) - bitmap += 2048; - if (msr_index >= 0xc0000000) { - msr_index -= 0xc0000000; - bitmap += 1024; - } - - /* Then read the msr_index'th bit from this bitmap: */ - if (msr_index < 1024*8) { - unsigned char b; - if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) - return true; - return 1 & (b >> (msr_index & 7)); - } else - return true; /* let L1 handle the wrong parameter */ -} - -/* - * Return 1 if we should exit from L2 to L1 to handle a CR access exit, - * rather than handle it ourselves in L0. I.e., check if L1 wanted to - * intercept (via guest_host_mask etc.) the current event. - */ -static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - int cr = exit_qualification & 15; - int reg; - unsigned long val; - - switch ((exit_qualification >> 4) & 3) { - case 0: /* mov to cr */ - reg = (exit_qualification >> 8) & 15; - val = kvm_register_readl(vcpu, reg); - switch (cr) { - case 0: - if (vmcs12->cr0_guest_host_mask & - (val ^ vmcs12->cr0_read_shadow)) - return true; - break; - case 3: - if ((vmcs12->cr3_target_count >= 1 && - vmcs12->cr3_target_value0 == val) || - (vmcs12->cr3_target_count >= 2 && - vmcs12->cr3_target_value1 == val) || - (vmcs12->cr3_target_count >= 3 && - vmcs12->cr3_target_value2 == val) || - (vmcs12->cr3_target_count >= 4 && - vmcs12->cr3_target_value3 == val)) - return false; - if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) - return true; - break; - case 4: - if (vmcs12->cr4_guest_host_mask & - (vmcs12->cr4_read_shadow ^ val)) - return true; - break; - case 8: - if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) - return true; - break; - } - break; - case 2: /* clts */ - if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && - (vmcs12->cr0_read_shadow & X86_CR0_TS)) - return true; - break; - case 1: /* mov from cr */ - switch (cr) { - case 3: - if (vmcs12->cpu_based_vm_exec_control & - CPU_BASED_CR3_STORE_EXITING) - return true; - break; - case 8: - if (vmcs12->cpu_based_vm_exec_control & - CPU_BASED_CR8_STORE_EXITING) - return true; - break; - } - break; - case 3: /* lmsw */ - /* - * lmsw can change bits 1..3 of cr0, and only set bit 0 of - * cr0. Other attempted changes are ignored, with no exit. 
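[Illustrative aside, returning to nested_vmx_exit_handled_msr() above: the bitmap walk it performs condenses into a single offset computation. The helper below is a sketch, not part of the patch; the layout comes from the comment in the function.]

/* Illustrative: locate the interception bit for an MSR inside the
 * vmcs12 MSR-bitmap page. Four 1024-byte bitmaps: reads in the first
 * half, writes in the second; low MSRs (0x00000000..) come before
 * high MSRs (0xc0000000..). */
static gpa_t nested_msr_bitmap_byte(gpa_t bitmap, u32 msr, bool write)
{
	gpa_t off = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		off += 1024;
	}
	return bitmap + off + msr / 8; /* then test bit (msr & 7) */
}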
- */ - val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; - if (vmcs12->cr0_guest_host_mask & 0xe & - (val ^ vmcs12->cr0_read_shadow)) - return true; - if ((vmcs12->cr0_guest_host_mask & 0x1) && - !(vmcs12->cr0_read_shadow & 0x1) && - (val & 0x1)) - return true; - break; - } - return false; -} - -static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, gpa_t bitmap) -{ - u32 vmx_instruction_info; - unsigned long field; - u8 b; - - if (!nested_cpu_has_shadow_vmcs(vmcs12)) - return true; - - /* Decode instruction info and find the field to access */ - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); - - /* Out-of-range fields always cause a VM exit from L2 to L1 */ - if (field >> 15) - return true; - - if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) - return true; - - return 1 & (b >> (field & 7)); -} - -/* - * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we - * should handle it ourselves in L0 (and then continue L2). Only call this - * when in is_guest_mode (L2). - */ -static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) -{ - u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (vmx->nested.nested_run_pending) - return false; - - if (unlikely(vmx->fail)) { - pr_info_ratelimited("%s failed vm entry %x\n", __func__, - vmcs_read32(VM_INSTRUCTION_ERROR)); - return true; - } - - /* - * The host physical addresses of some pages of guest memory - * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC - * Page). The CPU may write to these pages via their host - * physical address while L2 is running, bypassing any - * address-translation-based dirty tracking (e.g. EPT write - * protection). - * - * Mark them dirty on every exit from L2 to prevent them from - * getting out of sync with dirty tracking. 
- */ - nested_mark_vmcs12_pages_dirty(vcpu); - - trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, - vmcs_readl(EXIT_QUALIFICATION), - vmx->idt_vectoring_info, - intr_info, - vmcs_read32(VM_EXIT_INTR_ERROR_CODE), - KVM_ISA_VMX); - - switch (exit_reason) { - case EXIT_REASON_EXCEPTION_NMI: - if (is_nmi(intr_info)) - return false; - else if (is_page_fault(intr_info)) - return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; - else if (is_debug(intr_info) && - vcpu->guest_debug & - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) - return false; - else if (is_breakpoint(intr_info) && - vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) - return false; - return vmcs12->exception_bitmap & - (1u << (intr_info & INTR_INFO_VECTOR_MASK)); - case EXIT_REASON_EXTERNAL_INTERRUPT: - return false; - case EXIT_REASON_TRIPLE_FAULT: - return true; - case EXIT_REASON_PENDING_INTERRUPT: - return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); - case EXIT_REASON_NMI_WINDOW: - return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); - case EXIT_REASON_TASK_SWITCH: - return true; - case EXIT_REASON_CPUID: - return true; - case EXIT_REASON_HLT: - return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); - case EXIT_REASON_INVD: - return true; - case EXIT_REASON_INVLPG: - return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); - case EXIT_REASON_RDPMC: - return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); - case EXIT_REASON_RDRAND: - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); - case EXIT_REASON_RDSEED: - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); - case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: - return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); - case EXIT_REASON_VMREAD: - return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, - vmcs12->vmread_bitmap); - case EXIT_REASON_VMWRITE: - return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, - vmcs12->vmwrite_bitmap); - case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: - case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: - case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: - case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: - case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: - /* - * VMX instructions trap unconditionally. This allows L1 to - * emulate them for its L2 guest, i.e., allows 3-level nesting! 
- */ - return true; - case EXIT_REASON_CR_ACCESS: - return nested_vmx_exit_handled_cr(vcpu, vmcs12); - case EXIT_REASON_DR_ACCESS: - return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); - case EXIT_REASON_IO_INSTRUCTION: - return nested_vmx_exit_handled_io(vcpu, vmcs12); - case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); - case EXIT_REASON_MSR_READ: - case EXIT_REASON_MSR_WRITE: - return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); - case EXIT_REASON_INVALID_STATE: - return true; - case EXIT_REASON_MWAIT_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); - case EXIT_REASON_MONITOR_TRAP_FLAG: - return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); - case EXIT_REASON_MONITOR_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); - case EXIT_REASON_PAUSE_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || - nested_cpu_has2(vmcs12, - SECONDARY_EXEC_PAUSE_LOOP_EXITING); - case EXIT_REASON_MCE_DURING_VMENTRY: - return false; - case EXIT_REASON_TPR_BELOW_THRESHOLD: - return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); - case EXIT_REASON_APIC_ACCESS: - case EXIT_REASON_APIC_WRITE: - case EXIT_REASON_EOI_INDUCED: - /* - * The controls for "virtualize APIC accesses," "APIC- - * register virtualization," and "virtual-interrupt - * delivery" only come from vmcs12. - */ - return true; - case EXIT_REASON_EPT_VIOLATION: - /* - * L0 always deals with the EPT violation. If nested EPT is - * used, and the nested mmu code discovers that the address is - * missing in the guest EPT table (EPT12), the EPT violation - * will be injected with nested_ept_inject_page_fault() - */ - return false; - case EXIT_REASON_EPT_MISCONFIG: - /* - * L2 never directly uses L1's EPT, but rather L0's own EPT - * table (shadow on EPT) or a merged EPT table that L0 built - * (EPT on EPT). So any problems with the structure of the - * table are L0's fault. - */ - return false; - case EXIT_REASON_INVPCID: - return - nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && - nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); - case EXIT_REASON_WBINVD: - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); - case EXIT_REASON_XSETBV: - return true; - case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: - /* - * This should never happen, since it is not possible to - * set XSS to a non-zero value---neither in L1 nor in L2. - * If it were, XSS would have to be checked against - * the XSS exit bitmap in vmcs12. - */ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); - case EXIT_REASON_PREEMPTION_TIMER: - return false; - case EXIT_REASON_PML_FULL: - /* We emulate PML support to L1. */ - return false; - case EXIT_REASON_VMFUNC: - /* VM functions are emulated through L2->L0 vmexits. */ - return false; - case EXIT_REASON_ENCLS: - /* SGX is never exposed to L1 */ - return false; - default: - return true; - } -} - -static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason) -{ - u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - - /* - * At this point, the exit interruption info in exit_intr_info - * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT - * we need to query the in-kernel LAPIC. 
- */ - WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT); - if ((exit_intr_info & - (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == - (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - vmcs12->vm_exit_intr_error_code = - vmcs_read32(VM_EXIT_INTR_ERROR_CODE); - } - - nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, - vmcs_readl(EXIT_QUALIFICATION)); - return 1; -} - -static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) -{ - *info1 = vmcs_readl(EXIT_QUALIFICATION); - *info2 = vmcs_read32(VM_EXIT_INTR_INFO); -} - -static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) -{ - if (vmx->pml_pg) { - __free_page(vmx->pml_pg); - vmx->pml_pg = NULL; - } -} - -static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u64 *pml_buf; - u16 pml_idx; - - pml_idx = vmcs_read16(GUEST_PML_INDEX); - - /* Do nothing if PML buffer is empty */ - if (pml_idx == (PML_ENTITY_NUM - 1)) - return; - - /* The PML index always points to the next available PML buffer entity */ - if (pml_idx >= PML_ENTITY_NUM) - pml_idx = 0; - else - pml_idx++; - - pml_buf = page_address(vmx->pml_pg); - for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { - u64 gpa; - - gpa = pml_buf[pml_idx]; - WARN_ON(gpa & (PAGE_SIZE - 1)); - kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); - } - - /* reset PML index */ - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); -} - -/* - * Flush all vcpus' PML buffers and update logged GPAs to dirty_bitmap. - * Called before reporting dirty_bitmap to userspace. - */ -static void kvm_flush_pml_buffers(struct kvm *kvm) -{ - int i; - struct kvm_vcpu *vcpu; - /* - * We only need to kick each vcpu out of guest mode here: the PML - * buffer is flushed at the beginning of every VMEXIT, so only vcpus - * currently running in guest mode can have unflushed GPAs in their - * PML buffers. 
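[Illustrative aside: one detail of vmx_flush_pml_buffer() above is easy to misread. Hardware fills the PML page downward, so the index arithmetic selects the still-valid tail. A condensed sketch; the helper name is not from the patch.]

/* Illustrative: GUEST_PML_INDEX starts at PML_ENTITY_NUM - 1 and is
 * decremented by hardware after each logged GPA, so it names the next
 * free slot. Valid entries therefore span pml_idx + 1 through
 * PML_ENTITY_NUM - 1, or the whole buffer once the index has wrapped
 * past zero (pml_idx >= PML_ENTITY_NUM after the u16 wrap). */
static u16 pml_first_valid_index(u16 pml_idx)
{
	return (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;
}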
- */ - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vcpu_kick(vcpu); -} - -static void vmx_dump_sel(char *name, uint32_t sel) -{ - pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", - name, vmcs_read16(sel), - vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), - vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), - vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); -} - -static void vmx_dump_dtsel(char *name, uint32_t limit) -{ - pr_err("%s limit=0x%08x, base=0x%016lx\n", - name, vmcs_read32(limit), - vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); -} - -static void dump_vmcs(void) -{ - u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); - u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); - u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); - u32 secondary_exec_control = 0; - unsigned long cr4 = vmcs_readl(GUEST_CR4); - u64 efer = vmcs_read64(GUEST_IA32_EFER); - int i, n; - - if (cpu_has_secondary_exec_ctrls()) - secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); - - pr_err("*** Guest State ***\n"); - pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", - vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), - vmcs_readl(CR0_GUEST_HOST_MASK)); - pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", - cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); - pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); - if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && - (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) - { - pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", - vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); - pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", - vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); - } - pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", - vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); - pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", - vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); - pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", - vmcs_readl(GUEST_SYSENTER_ESP), - vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); - vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); - vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); - vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); - vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); - vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); - vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); - vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); - vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); - vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); - vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); - if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || - (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) - pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", - efer, vmcs_read64(GUEST_IA32_PAT)); - pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", - vmcs_read64(GUEST_IA32_DEBUGCTL), - vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); - if (cpu_has_load_perf_global_ctrl && - vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) - pr_err("PerfGlobCtl = 0x%016llx\n", - vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); - if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) - pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); - pr_err("Interruptibility = %08x ActivityState = %08x\n", - vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), - vmcs_read32(GUEST_ACTIVITY_STATE)); - if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) - pr_err("InterruptStatus = %04x\n", - vmcs_read16(GUEST_INTR_STATUS)); - - pr_err("*** 
Host State ***\n"); - pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", - vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); - pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", - vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), - vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), - vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), - vmcs_read16(HOST_TR_SELECTOR)); - pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", - vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), - vmcs_readl(HOST_TR_BASE)); - pr_err("GDTBase=%016lx IDTBase=%016lx\n", - vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); - pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", - vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), - vmcs_readl(HOST_CR4)); - pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", - vmcs_readl(HOST_IA32_SYSENTER_ESP), - vmcs_read32(HOST_IA32_SYSENTER_CS), - vmcs_readl(HOST_IA32_SYSENTER_EIP)); - if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) - pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", - vmcs_read64(HOST_IA32_EFER), - vmcs_read64(HOST_IA32_PAT)); - if (cpu_has_load_perf_global_ctrl && - vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) - pr_err("PerfGlobCtl = 0x%016llx\n", - vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); - - pr_err("*** Control State ***\n"); - pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", - pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); - pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); - pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", - vmcs_read32(EXCEPTION_BITMAP), - vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), - vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); - pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", - vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), - vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), - vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); - pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", - vmcs_read32(VM_EXIT_INTR_INFO), - vmcs_read32(VM_EXIT_INTR_ERROR_CODE), - vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); - pr_err(" reason=%08x qualification=%016lx\n", - vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); - pr_err("IDTVectoring: info=%08x errcode=%08x\n", - vmcs_read32(IDT_VECTORING_INFO_FIELD), - vmcs_read32(IDT_VECTORING_ERROR_CODE)); - pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); - if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) - pr_err("TSC Multiplier = 0x%016llx\n", - vmcs_read64(TSC_MULTIPLIER)); - if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) - pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); - if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) - pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); - if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) - pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); - n = vmcs_read32(CR3_TARGET_COUNT); - for (i = 0; i + 1 < n; i += 4) - pr_err("CR3 target%u=%016lx target%u=%016lx\n", - i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2), - i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2)); - if (i < n) - pr_err("CR3 target%u=%016lx\n", - i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2)); - if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) - pr_err("PLE Gap=%08x Window=%08x\n", - vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); - if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) - pr_err("Virtual processor ID = 0x%04x\n", - vmcs_read16(VIRTUAL_PROCESSOR_ID)); -} - -/* - * The guest has exited. 
See if we can fix it or if we need userspace - * assistance. - */ -static int vmx_handle_exit(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason = vmx->exit_reason; - u32 vectoring_info = vmx->idt_vectoring_info; - - trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); - - /* - * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap up to - * date. A further benefit: in kvm_vm_ioctl_get_dirty_log, before - * querying dirty_bitmap, we only need to kick all vcpus out of guest - * mode, since once a vcpu is in root mode its PML buffer must already - * have been flushed. - */ - if (enable_pml) - vmx_flush_pml_buffer(vcpu); - - /* If guest state is invalid, start emulating */ - if (vmx->emulation_required) - return handle_invalid_guest_state(vcpu); - - if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason)) - return nested_vmx_reflect_vmexit(vcpu, exit_reason); - - if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { - dump_vmcs(); - vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; - vcpu->run->fail_entry.hardware_entry_failure_reason - = exit_reason; - return 0; - } - - if (unlikely(vmx->fail)) { - vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; - vcpu->run->fail_entry.hardware_entry_failure_reason - = vmcs_read32(VM_INSTRUCTION_ERROR); - return 0; - } - - /* - * Note: - * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a - * delivery event, since it indicates the guest is accessing MMIO. - * The vm-exit can be triggered again after returning to the guest, - * which would cause an infinite loop. - */ - if ((vectoring_info & VECTORING_INFO_VALID_MASK) && - (exit_reason != EXIT_REASON_EXCEPTION_NMI && - exit_reason != EXIT_REASON_EPT_VIOLATION && - exit_reason != EXIT_REASON_PML_FULL && - exit_reason != EXIT_REASON_TASK_SWITCH)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; - vcpu->run->internal.ndata = 3; - vcpu->run->internal.data[0] = vectoring_info; - vcpu->run->internal.data[1] = exit_reason; - vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; - if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { - vcpu->run->internal.ndata++; - vcpu->run->internal.data[3] = - vmcs_read64(GUEST_PHYSICAL_ADDRESS); - } - return 0; - } - - if (unlikely(!enable_vnmi && - vmx->loaded_vmcs->soft_vnmi_blocked)) { - if (vmx_interrupt_allowed(vcpu)) { - vmx->loaded_vmcs->soft_vnmi_blocked = 0; - } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && - vcpu->arch.nmi_pending) { - /* - * This CPU doesn't support us in finding the end of an - * NMI-blocked window if the guest runs with IRQs - * disabled. So we pull the trigger after 1 s of - * futile waiting, but inform the user about this. - */ - printk(KERN_WARNING "%s: Breaking out of NMI-blocked " - "state on VCPU %d after 1 s timeout\n", - __func__, vcpu->vcpu_id); - vmx->loaded_vmcs->soft_vnmi_blocked = 0; - } - } - - if (exit_reason < kvm_vmx_max_exit_handlers - && kvm_vmx_exit_handlers[exit_reason]) - return kvm_vmx_exit_handlers[exit_reason](vcpu); - else { - vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", - exit_reason); - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } -} - -/* - * Software-based L1D cache flush, used when microcode providing - * the cache control MSR is not loaded. - * - * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but - * flushing it requires reading in 64 KiB because the replacement algorithm - * is not exactly LRU. This could be sized at runtime via topology - * information, but as all relevant affected CPUs have a 32 KiB L1D cache, - * there is no point in doing so. - */
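[Illustrative aside: for readability, here is a C rendering of the asm flush sequence in vmx_l1d_flush() below. The real code stays in inline asm so the exact access pattern survives compilation; the helper name is hypothetical, and barrier() only stands in for the serializing cpuid.]

/* Illustrative C equivalent of the asm loop below: touch one byte per
 * 4 KiB page so the buffer is TLB-resident, serialize, then read one
 * byte per 64-byte line to displace the L1D contents. */
static void l1d_flush_sw_sketch(const char *flush_pages, int size)
{
	int i;

	for (i = 0; i < size; i += 4096) /* populate the TLB */
		READ_ONCE(flush_pages[i]);
	barrier(); /* the asm uses cpuid here */
	for (i = 0; i < size; i += 64) /* now fill the cache */
		READ_ONCE(flush_pages[i]);
}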
-static void vmx_l1d_flush(struct kvm_vcpu *vcpu) -{ - int size = PAGE_SIZE << L1D_CACHE_ORDER; - - /* - * This code is only executed when the flush mode is 'cond' or - * 'always' - */ - if (static_branch_likely(&vmx_l1d_flush_cond)) { - bool flush_l1d; - - /* - * Clear the per-vcpu flush bit, it gets set again - * either from vcpu_run() or from one of the unsafe - * VMEXIT handlers. - */ - flush_l1d = vcpu->arch.l1tf_flush_l1d; - vcpu->arch.l1tf_flush_l1d = false; - - /* - * Clear the per-cpu flush bit, it gets set again from - * the interrupt handlers. - */ - flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); - kvm_clear_cpu_l1tf_flush_l1d(); - - if (!flush_l1d) - return; - } - - vcpu->stat.l1d_flush++; - - if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { - wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); - return; - } - - asm volatile( - /* First ensure the pages are in the TLB */ - "xorl %%eax, %%eax\n" - ".Lpopulate_tlb:\n\t" - "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" - "addl $4096, %%eax\n\t" - "cmpl %%eax, %[size]\n\t" - "jne .Lpopulate_tlb\n\t" - "xorl %%eax, %%eax\n\t" - "cpuid\n\t" - /* Now fill the cache */ - "xorl %%eax, %%eax\n" - ".Lfill_cache:\n" - "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" - "addl $64, %%eax\n\t" - "cmpl %%eax, %[size]\n\t" - "jne .Lfill_cache\n\t" - "lfence\n" - :: [flush_pages] "r" (vmx_l1d_flush_pages), - [size] "r" (size) - : "eax", "ebx", "ecx", "edx"); -} - -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (is_guest_mode(vcpu) && - nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) - return; - - if (irr == -1 || tpr < irr) { - vmcs_write32(TPR_THRESHOLD, 0); - return; - } - - vmcs_write32(TPR_THRESHOLD, irr); -} - -static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) -{ - u32 sec_exec_control; - - if (!lapic_in_kernel(vcpu)) - return; - - if (!flexpriority_enabled && - !cpu_has_vmx_virtualize_x2apic_mode()) - return; - - /* Postpone execution until vmcs01 is the current VMCS. 
*/ - if (is_guest_mode(vcpu)) { - to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; - return; - } - - sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); - sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); - - switch (kvm_get_apic_mode(vcpu)) { - case LAPIC_MODE_INVALID: - WARN_ONCE(true, "Invalid local APIC state"); - case LAPIC_MODE_DISABLED: - break; - case LAPIC_MODE_XAPIC: - if (flexpriority_enabled) { - sec_exec_control |= - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmx_flush_tlb(vcpu, true); - } - break; - case LAPIC_MODE_X2APIC: - if (cpu_has_vmx_virtualize_x2apic_mode()) - sec_exec_control |= - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - break; - } - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); - - vmx_update_msr_bitmap(vcpu); -} - -static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) -{ - if (!is_guest_mode(vcpu)) { - vmcs_write64(APIC_ACCESS_ADDR, hpa); - vmx_flush_tlb(vcpu, true); - } -} - -static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) -{ - u16 status; - u8 old; - - if (max_isr == -1) - max_isr = 0; - - status = vmcs_read16(GUEST_INTR_STATUS); - old = status >> 8; - if (max_isr != old) { - status &= 0xff; - status |= max_isr << 8; - vmcs_write16(GUEST_INTR_STATUS, status); - } -} - -static void vmx_set_rvi(int vector) -{ - u16 status; - u8 old; - - if (vector == -1) - vector = 0; - - status = vmcs_read16(GUEST_INTR_STATUS); - old = (u8)status & 0xff; - if ((u8)vector != old) { - status &= ~0xff; - status |= (u8)vector; - vmcs_write16(GUEST_INTR_STATUS, status); - } -} - -static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) -{ - /* - * When running L2, updating RVI is only relevant when - * vmcs12 virtual-interrupt-delivery enabled. - * However, it can be enabled only when L1 also - * intercepts external-interrupts and in that case - * we should not update vmcs02 RVI but instead intercept - * interrupt. Therefore, do nothing when running L2. - */ - if (!is_guest_mode(vcpu)) - vmx_set_rvi(max_irr); -} - -static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int max_irr; - bool max_irr_updated; - - WARN_ON(!vcpu->arch.apicv_active); - if (pi_test_on(&vmx->pi_desc)) { - pi_clear_on(&vmx->pi_desc); - /* - * IOMMU can write to PIR.ON, so the barrier matters even on UP. - * But on x86 this is just a compiler barrier anyway. - */ - smp_mb__after_atomic(); - max_irr_updated = - kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); - - /* - * If we are running L2 and L1 has a new pending interrupt - * which can be injected, we should re-evaluate - * what should be done with this new L1 interrupt. - * If L1 intercepts external-interrupts, we should - * exit from L2 to L1. Otherwise, interrupt should be - * delivered directly to L2. 
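[Illustrative aside on vmx_set_rvi() and vmx_hwapic_isr_update() above: both manipulate halves of the same 16-bit guest interrupt-status field. A sketch of that packing; the helper is not part of the patch.]

/* Illustrative: GUEST_INTR_STATUS packs the Requesting Virtual
 * Interrupt (RVI) in its low byte and the Servicing Virtual
 * Interrupt (SVI) in its high byte. */
static u16 pack_guest_intr_status(u8 rvi, u8 svi)
{
	return ((u16)svi << 8) | rvi;
}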
- */ - if (is_guest_mode(vcpu) && max_irr_updated) { - if (nested_exit_on_intr(vcpu)) - kvm_vcpu_exiting_guest_mode(vcpu); - else - kvm_make_request(KVM_REQ_EVENT, vcpu); - } - } else { - max_irr = kvm_lapic_find_highest_irr(vcpu); - } - vmx_hwapic_irr_update(vcpu, max_irr); - return max_irr; -} - -static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) -{ - u8 rvi = vmx_get_rvi(); - u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); - - return ((rvi & 0xf0) > (vppr & 0xf0)); -} - -static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) -{ - if (!kvm_vcpu_apicv_active(vcpu)) - return; - - vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); - vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); - vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); - vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); -} - -static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - pi_clear_on(&vmx->pi_desc); - memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); -} - -static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) -{ - u32 exit_intr_info = 0; - u16 basic_exit_reason = (u16)vmx->exit_reason; - - if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY - || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) - return; - - if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - vmx->exit_intr_info = exit_intr_info; - - /* if exit due to PF check for async PF */ - if (is_page_fault(exit_intr_info)) - vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); - - /* Handle machine checks before interrupts are enabled */ - if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || - is_machine_check(exit_intr_info)) - kvm_machine_check(); - - /* We need to handle NMIs before interrupts are enabled */ - if (is_nmi(exit_intr_info)) { - kvm_before_interrupt(&vmx->vcpu); - asm("int $2"); - kvm_after_interrupt(&vmx->vcpu); - } -} - -static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) -{ - u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - - if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) - == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { - unsigned int vector; - unsigned long entry; - gate_desc *desc; - struct vcpu_vmx *vmx = to_vmx(vcpu); -#ifdef CONFIG_X86_64 - unsigned long tmp; -#endif - - vector = exit_intr_info & INTR_INFO_VECTOR_MASK; - desc = (gate_desc *)vmx->host_idt_base + vector; - entry = gate_offset(desc); - asm volatile( -#ifdef CONFIG_X86_64 - "mov %%" _ASM_SP ", %[sp]\n\t" - "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" - "push $%c[ss]\n\t" - "push %[sp]\n\t" -#endif - "pushf\n\t" - __ASM_SIZE(push) " $%c[cs]\n\t" - CALL_NOSPEC - : -#ifdef CONFIG_X86_64 - [sp]"=&r"(tmp), -#endif - ASM_CALL_CONSTRAINT - : - THUNK_TARGET(entry), - [ss]"i"(__KERNEL_DS), - [cs]"i"(__KERNEL_CS) - ); - } -} -STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); - -static bool vmx_has_emulated_msr(int index) -{ - switch (index) { - case MSR_IA32_SMBASE: - /* - * We cannot do SMM unless we can run the guest in big - * real mode. - */ - return enable_unrestricted_guest || emulate_invalid_guest_state; - case MSR_AMD64_VIRT_SPEC_CTRL: - /* This is AMD only. 
*/ - return false; - default: - return true; - } -} - -static bool vmx_mpx_supported(void) -{ - return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && - (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); -} - -static bool vmx_xsaves_supported(void) -{ - return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_XSAVES; -} - -static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) -{ - u32 exit_intr_info; - bool unblock_nmi; - u8 vector; - bool idtv_info_valid; - - idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; - - if (enable_vnmi) { - if (vmx->loaded_vmcs->nmi_known_unmasked) - return; - /* - * Can't use vmx->exit_intr_info since we're not sure what - * the exit reason is. - */ - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; - vector = exit_intr_info & INTR_INFO_VECTOR_MASK; - /* - * SDM 3: 27.7.1.2 (September 2008) - * Re-set bit "block by NMI" before VM entry if vmexit caused by - * a guest IRET fault. - * SDM 3: 23.2.2 (September 2008) - * Bit 12 is undefined in any of the following cases: - * If the VM exit sets the valid bit in the IDT-vectoring - * information field. - * If the VM exit is due to a double fault. - */ - if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && - vector != DF_VECTOR && !idtv_info_valid) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) - & GUEST_INTR_STATE_NMI); - } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) - vmx->loaded_vmcs->vnmi_blocked_time += - ktime_to_ns(ktime_sub(ktime_get(), - vmx->loaded_vmcs->entry_time)); -} - -static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, - u32 idt_vectoring_info, - int instr_len_field, - int error_code_field) -{ - u8 vector; - int type; - bool idtv_info_valid; - - idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; - - vcpu->arch.nmi_injected = false; - kvm_clear_exception_queue(vcpu); - kvm_clear_interrupt_queue(vcpu); - - if (!idtv_info_valid) - return; - - kvm_make_request(KVM_REQ_EVENT, vcpu); - - vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; - type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; - - switch (type) { - case INTR_TYPE_NMI_INTR: - vcpu->arch.nmi_injected = true; - /* - * SDM 3: 27.7.1.2 (September 2008) - * Clear bit "block by NMI" before VM entry if a NMI - * delivery faulted. 
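/*
 * Illustrative sketch, not part of the original patch: the decoding in
 * __vmx_complete_interrupts() above follows the SDM layout of the
 * IDT-vectoring information field: vector in bits 7:0, event type in bits
 * 10:8, "error code delivered" in bit 11 and "valid" in bit 31. The mask
 * values below mirror that layout.
 */
#include <assert.h>
#include <stdint.h>

#define VECTOR_MASK       0x000000ffu
#define TYPE_MASK         0x00000700u
#define DELIVER_CODE_MASK 0x00000800u
#define VALID_MASK        0x80000000u

int main(void)
{
	uint32_t info = 0x80000b0eu;   /* valid, hardware exception, #PF, error code pushed */

	assert(info & VALID_MASK);
	assert((info & VECTOR_MASK) == 14);        /* #PF vector */
	assert(((info & TYPE_MASK) >> 8) == 3);    /* hardware exception type */
	assert(info & DELIVER_CODE_MASK);
	return 0;
}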
- */ - vmx_set_nmi_mask(vcpu, false); - break; - case INTR_TYPE_SOFT_EXCEPTION: - vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ - case INTR_TYPE_HARD_EXCEPTION: - if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { - u32 err = vmcs_read32(error_code_field); - kvm_requeue_exception_e(vcpu, vector, err); - } else - kvm_requeue_exception(vcpu, vector); - break; - case INTR_TYPE_SOFT_INTR: - vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ - case INTR_TYPE_EXT_INTR: - kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); - break; - default: - break; - } -} - -static void vmx_complete_interrupts(struct vcpu_vmx *vmx) -{ - __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, - VM_EXIT_INSTRUCTION_LEN, - IDT_VECTORING_ERROR_CODE); -} - -static void vmx_cancel_injection(struct kvm_vcpu *vcpu) -{ - __vmx_complete_interrupts(vcpu, - vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), - VM_ENTRY_INSTRUCTION_LEN, - VM_ENTRY_EXCEPTION_ERROR_CODE); - - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); -} - -static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) -{ - int i, nr_msrs; - struct perf_guest_switch_msr *msrs; - - msrs = perf_guest_get_msrs(&nr_msrs); - - if (!msrs) - return; - - for (i = 0; i < nr_msrs; i++) - if (msrs[i].host == msrs[i].guest) - clear_atomic_switch_msr(vmx, msrs[i].msr); - else - add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, - msrs[i].host, false); -} - -static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) -{ - vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); - if (!vmx->loaded_vmcs->hv_timer_armed) - vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - vmx->loaded_vmcs->hv_timer_armed = true; -} - -static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - u64 tscl; - u32 delta_tsc; - - if (vmx->req_immediate_exit) { - vmx_arm_hv_timer(vmx, 0); - return; - } - - if (vmx->hv_deadline_tsc != -1) { - tscl = rdtsc(); - if (vmx->hv_deadline_tsc > tscl) - /* set_hv_timer ensures the delta fits in 32-bits */ - delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> - cpu_preemption_timer_multi); - else - delta_tsc = 0; - - vmx_arm_hv_timer(vmx, delta_tsc); - return; - } - - if (vmx->loaded_vmcs->hv_timer_armed) - vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - vmx->loaded_vmcs->hv_timer_armed = false; -} - -static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long cr3, cr4, evmcs_rsp; - - /* Record the guest's net vcpu time for enforced NMI injections. */ - if (unlikely(!enable_vnmi && - vmx->loaded_vmcs->soft_vnmi_blocked)) - vmx->loaded_vmcs->entry_time = ktime_get(); - - /* Don't enter VMX if guest state is invalid, let the exit handler - start emulation until we arrive back to a valid state */ - if (vmx->emulation_required) - return; - - if (vmx->ple_window_dirty) { - vmx->ple_window_dirty = false; - vmcs_write32(PLE_WINDOW, vmx->ple_window); - } - - if (vmx->nested.need_vmcs12_sync) { - /* - * hv_evmcs may end up being not mapped after migration (when - * L2 was running), map it here to make sure vmcs12 changes are - * properly reflected. 
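/*
 * Illustrative sketch, not part of the original patch: the VMX preemption
 * timer counts down at the TSC rate divided by 2^X, with X reported in
 * IA32_VMX_MISC[4:0], so vmx_update_hv_timer() above turns a TSC delta
 * into timer ticks with a single right shift.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t tsc_delta_to_timer_ticks(uint64_t deadline_tsc, uint64_t now_tsc,
					 unsigned int rate_shift)
{
	if (deadline_tsc <= now_tsc)
		return 0;              /* deadline already passed: request an immediate exit */
	return (uint32_t)((deadline_tsc - now_tsc) >> rate_shift);
}

int main(void)
{
	assert(tsc_delta_to_timer_ticks(1000, 360, 5) == 20);   /* 640 cycles / 32 */
	assert(tsc_delta_to_timer_ticks(100, 200, 5) == 0);
	return 0;
}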
- */ - if (vmx->nested.enlightened_vmcs_enabled && - !vmx->nested.hv_evmcs) - nested_vmx_handle_enlightened_vmptrld(vcpu, false); - - if (vmx->nested.hv_evmcs) { - copy_vmcs12_to_enlightened(vmx); - /* All fields are clean */ - vmx->nested.hv_evmcs->hv_clean_fields |= - HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - } else { - copy_vmcs12_to_shadow(vmx); - } - vmx->nested.need_vmcs12_sync = false; - } - - if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) - vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); - if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) - vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); - - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { - vmcs_writel(HOST_CR3, cr3); - vmx->loaded_vmcs->host_state.cr3 = cr3; - } - - cr4 = cr4_read_shadow(); - if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { - vmcs_writel(HOST_CR4, cr4); - vmx->loaded_vmcs->host_state.cr4 = cr4; - } - - /* When single-stepping over STI and MOV SS, we must clear the - * corresponding interruptibility bits in the guest state. Otherwise - * vmentry fails as it then expects bit 14 (BS) in pending debug - * exceptions being set, but that's not correct for the guest debugging - * case. */ - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) - vmx_set_interrupt_shadow(vcpu, 0); - - if (static_cpu_has(X86_FEATURE_PKU) && - kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && - vcpu->arch.pkru != vmx->host_pkru) - __write_pkru(vcpu->arch.pkru); - - atomic_switch_perf_msrs(vmx); - - vmx_update_hv_timer(vcpu); - - /* - * If this vCPU has touched SPEC_CTRL, restore the guest's value if - * it's non-zero. Since vmentry is serialising on affected CPUs, there - * is no need to worry about the conditional branch over the wrmsr - * being speculatively taken. - */ - x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); - - vmx->__launched = vmx->loaded_vmcs->launched; - - evmcs_rsp = static_branch_unlikely(&enable_evmcs) ? - (unsigned long)&current_evmcs->host_rsp : 0; - - if (static_branch_unlikely(&vmx_l1d_should_flush)) - vmx_l1d_flush(vcpu); - - asm( - /* Store host registers */ - "push %%" _ASM_DX "; push %%" _ASM_BP ";" - "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ - "push %%" _ASM_CX " \n\t" - "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" - "je 1f \n\t" - "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" - /* Avoid VMWRITE when Enlightened VMCS is in use */ - "test %%" _ASM_SI ", %%" _ASM_SI " \n\t" - "jz 2f \n\t" - "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t" - "jmp 1f \n\t" - "2: \n\t" - __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" - "1: \n\t" - /* Reload cr2 if changed */ - "mov %c[cr2](%0), %%" _ASM_AX " \n\t" - "mov %%cr2, %%" _ASM_DX " \n\t" - "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" - "je 3f \n\t" - "mov %%" _ASM_AX", %%cr2 \n\t" - "3: \n\t" - /* Check if vmlaunch or vmresume is needed */ - "cmpl $0, %c[launched](%0) \n\t" - /* Load guest registers. Don't clobber flags.
*/ - "mov %c[rax](%0), %%" _ASM_AX " \n\t" - "mov %c[rbx](%0), %%" _ASM_BX " \n\t" - "mov %c[rdx](%0), %%" _ASM_DX " \n\t" - "mov %c[rsi](%0), %%" _ASM_SI " \n\t" - "mov %c[rdi](%0), %%" _ASM_DI " \n\t" - "mov %c[rbp](%0), %%" _ASM_BP " \n\t" -#ifdef CONFIG_X86_64 - "mov %c[r8](%0), %%r8 \n\t" - "mov %c[r9](%0), %%r9 \n\t" - "mov %c[r10](%0), %%r10 \n\t" - "mov %c[r11](%0), %%r11 \n\t" - "mov %c[r12](%0), %%r12 \n\t" - "mov %c[r13](%0), %%r13 \n\t" - "mov %c[r14](%0), %%r14 \n\t" - "mov %c[r15](%0), %%r15 \n\t" -#endif - "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ - - /* Enter guest mode */ - "jne 1f \n\t" - __ex("vmlaunch") "\n\t" - "jmp 2f \n\t" - "1: " __ex("vmresume") "\n\t" - "2: " - /* Save guest registers, load host registers, keep flags */ - "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" - "pop %0 \n\t" - "setbe %c[fail](%0)\n\t" - "mov %%" _ASM_AX ", %c[rax](%0) \n\t" - "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" - __ASM_SIZE(pop) " %c[rcx](%0) \n\t" - "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" - "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" - "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" - "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" -#ifdef CONFIG_X86_64 - "mov %%r8, %c[r8](%0) \n\t" - "mov %%r9, %c[r9](%0) \n\t" - "mov %%r10, %c[r10](%0) \n\t" - "mov %%r11, %c[r11](%0) \n\t" - "mov %%r12, %c[r12](%0) \n\t" - "mov %%r13, %c[r13](%0) \n\t" - "mov %%r14, %c[r14](%0) \n\t" - "mov %%r15, %c[r15](%0) \n\t" - /* - * Clear host registers marked as clobbered to prevent - * speculative use. - */ - "xor %%r8d, %%r8d \n\t" - "xor %%r9d, %%r9d \n\t" - "xor %%r10d, %%r10d \n\t" - "xor %%r11d, %%r11d \n\t" - "xor %%r12d, %%r12d \n\t" - "xor %%r13d, %%r13d \n\t" - "xor %%r14d, %%r14d \n\t" - "xor %%r15d, %%r15d \n\t" -#endif - "mov %%cr2, %%" _ASM_AX " \n\t" - "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" - - "xor %%eax, %%eax \n\t" - "xor %%ebx, %%ebx \n\t" - "xor %%esi, %%esi \n\t" - "xor %%edi, %%edi \n\t" - "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" - ".pushsection .rodata \n\t" - ".global vmx_return \n\t" - "vmx_return: " _ASM_PTR " 2b \n\t" - ".popsection" - : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), - [launched]"i"(offsetof(struct vcpu_vmx, __launched)), - [fail]"i"(offsetof(struct vcpu_vmx, fail)), - [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), - [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), - [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), - [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), - [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), - [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), - [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), - [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), -#ifdef CONFIG_X86_64 - [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), - [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), - [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), - [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), - [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), - [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), - [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), - [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), -#endif - [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), - [wordsize]"i"(sizeof(ulong)) - : "cc", "memory" -#ifdef CONFIG_X86_64 - , "rax", "rbx", "rdi" - , "r8", "r9", "r10", "r11", "r12", "r13", 
"r14", "r15" -#else - , "eax", "ebx", "edi" -#endif - ); - - /* - * We do not use IBRS in the kernel. If this vCPU has used the - * SPEC_CTRL MSR it may have left it on; save the value and - * turn it off. This is much more efficient than blindly adding - * it to the atomic save/restore list. Especially as the former - * (Saving guest MSRs on vmexit) doesn't even exist in KVM. - * - * For non-nested case: - * If the L01 MSR bitmap does not intercept the MSR, then we need to - * save it. - * - * For nested case: - * If the L02 MSR bitmap does not intercept the MSR, then we need to - * save it. - */ - if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) - vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - - x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); - - /* Eliminate branch target predictions from guest mode */ - vmexit_fill_RSB(); - - /* All fields are clean at this point */ - if (static_branch_unlikely(&enable_evmcs)) - current_evmcs->hv_clean_fields |= - HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - - /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ - if (vmx->host_debugctlmsr) - update_debugctlmsr(vmx->host_debugctlmsr); - -#ifndef CONFIG_X86_64 - /* - * The sysexit path does not restore ds/es, so we must set them to - * a reasonable value ourselves. - * - * We can't defer this to vmx_prepare_switch_to_host() since that - * function may be executed in interrupt context, which saves and - * restore segments around it, nullifying its effect. - */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); -#endif - - vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) - | (1 << VCPU_EXREG_RFLAGS) - | (1 << VCPU_EXREG_PDPTR) - | (1 << VCPU_EXREG_SEGMENTS) - | (1 << VCPU_EXREG_CR3)); - vcpu->arch.regs_dirty = 0; - - /* - * eager fpu is enabled if PKEY is supported and CR4 is switched - * back on host, so it is safe to read guest PKRU from current - * XSAVE. - */ - if (static_cpu_has(X86_FEATURE_PKU) && - kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { - vcpu->arch.pkru = __read_pkru(); - if (vcpu->arch.pkru != vmx->host_pkru) - __write_pkru(vmx->host_pkru); - } - - vmx->nested.nested_run_pending = 0; - vmx->idt_vectoring_info = 0; - - vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); - if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) - return; - - vmx->loaded_vmcs->launched = 1; - vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - - vmx_complete_atomic_exit(vmx); - vmx_recover_nmi_blocking(vmx); - vmx_complete_interrupts(vmx); -} -STACK_FRAME_NON_STANDARD(vmx_vcpu_run); - -static struct kvm *vmx_vm_alloc(void) -{ - struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx)); - return &kvm_vmx->kvm; -} - -static void vmx_vm_free(struct kvm *kvm) -{ - vfree(to_kvm_vmx(kvm)); -} - -static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int cpu; - - if (vmx->loaded_vmcs == vmcs) - return; - - cpu = get_cpu(); - vmx_vcpu_put(vcpu); - vmx->loaded_vmcs = vmcs; - vmx_vcpu_load(vcpu, cpu); - put_cpu(); - - vm_entry_controls_reset_shadow(vmx); - vm_exit_controls_reset_shadow(vmx); - vmx_segment_cache_clear(vmx); -} - -/* - * Ensure that the current vmcs of the logical processor is the - * vmcs01 of the vcpu before calling free_nested(). 
- */ -static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) -{ - vcpu_load(vcpu); - vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01); - free_nested(vcpu); - vcpu_put(vcpu); -} - -static void vmx_free_vcpu(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (enable_pml) - vmx_destroy_pml_buffer(vmx); - free_vpid(vmx->vpid); - leave_guest_mode(vcpu); - vmx_free_vcpu_nested(vcpu); - free_loaded_vmcs(vmx->loaded_vmcs); - kfree(vmx->guest_msrs); - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vmx); -} - -static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) -{ - int err; - struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - unsigned long *msr_bitmap; - int cpu; - - if (!vmx) - return ERR_PTR(-ENOMEM); - - vmx->vpid = allocate_vpid(); - - err = kvm_vcpu_init(&vmx->vcpu, kvm, id); - if (err) - goto free_vcpu; - - err = -ENOMEM; - - /* - * If PML is turned on, failure on enabling PML just results in failure - * of creating the vcpu, therefore we can simplify PML logic (by - * avoiding dealing with cases, such as enabling PML partially on vcpus - * for the guest, etc. - */ - if (enable_pml) { - vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!vmx->pml_pg) - goto uninit_vcpu; - } - - vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); - BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) - > PAGE_SIZE); - - if (!vmx->guest_msrs) - goto free_pml; - - err = alloc_loaded_vmcs(&vmx->vmcs01); - if (err < 0) - goto free_msrs; - - msr_bitmap = vmx->vmcs01.msr_bitmap; - vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); - vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); - vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); - vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); - vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); - vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); - vmx->msr_bitmap_mode = 0; - - vmx->loaded_vmcs = &vmx->vmcs01; - cpu = get_cpu(); - vmx_vcpu_load(&vmx->vcpu, cpu); - vmx->vcpu.cpu = cpu; - vmx_vcpu_setup(vmx); - vmx_vcpu_put(&vmx->vcpu); - put_cpu(); - if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { - err = alloc_apic_access_page(kvm); - if (err) - goto free_vmcs; - } - - if (enable_ept && !enable_unrestricted_guest) { - err = init_rmode_identity_map(kvm); - if (err) - goto free_vmcs; - } - - if (nested) - nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, - kvm_vcpu_apicv_active(&vmx->vcpu)); - - vmx->nested.posted_intr_nv = -1; - vmx->nested.current_vmptr = -1ull; - - vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; - - /* - * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR - * or POSTED_INTR_WAKEUP_VECTOR. - */ - vmx->pi_desc.nv = POSTED_INTR_VECTOR; - vmx->pi_desc.sn = 1; - - return &vmx->vcpu; - -free_vmcs: - free_loaded_vmcs(vmx->loaded_vmcs); -free_msrs: - kfree(vmx->guest_msrs); -free_pml: - vmx_destroy_pml_buffer(vmx); -uninit_vcpu: - kvm_vcpu_uninit(&vmx->vcpu); -free_vcpu: - free_vpid(vmx->vpid); - kmem_cache_free(kvm_vcpu_cache, vmx); - return ERR_PTR(err); -} - -#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" -#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. 
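/*
 * Illustrative sketch, not part of the original patch: vmx_create_vcpu()
 * above uses the kernel's "goto unwind" error-handling idiom - each failure
 * label releases exactly what was acquired before the failing step, in
 * reverse order. Plain calloc/malloc/free stand in for the kernel allocators.
 */
#include <stdlib.h>

struct vcpu_like { void *msrs; void *vmcs; };

static struct vcpu_like *vcpu_like_create(void)
{
	struct vcpu_like *v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;

	v->msrs = malloc(4096);
	if (!v->msrs)
		goto free_vcpu;

	v->vmcs = malloc(4096);
	if (!v->vmcs)
		goto free_msrs;

	return v;

free_msrs:
	free(v->msrs);
free_vcpu:
	free(v);
	return NULL;
}

int main(void)
{
	struct vcpu_like *v = vcpu_like_create();
	if (v) {
		free(v->vmcs);
		free(v->msrs);
		free(v);
	}
	return 0;
}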
See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" - -static int vmx_vm_init(struct kvm *kvm) -{ - spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); - - if (!ple_gap) - kvm->arch.pause_in_guest = true; - - if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { - switch (l1tf_mitigation) { - case L1TF_MITIGATION_OFF: - case L1TF_MITIGATION_FLUSH_NOWARN: - /* 'I explicitly don't care' is set */ - break; - case L1TF_MITIGATION_FLUSH: - case L1TF_MITIGATION_FLUSH_NOSMT: - case L1TF_MITIGATION_FULL: - /* - * Warn upon starting the first VM in a potentially - * insecure environment. - */ - if (cpu_smt_control == CPU_SMT_ENABLED) - pr_warn_once(L1TF_MSG_SMT); - if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) - pr_warn_once(L1TF_MSG_L1D); - break; - case L1TF_MITIGATION_FULL_FORCE: - /* Flush is enforced */ - break; - } - } - return 0; -} - -static void __init vmx_check_processor_compat(void *rtn) -{ - struct vmcs_config vmcs_conf; - - *(int *)rtn = 0; - if (setup_vmcs_config(&vmcs_conf) < 0) - *(int *)rtn = -EIO; - nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv); - if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { - printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", - smp_processor_id()); - *(int *)rtn = -EIO; - } -} - -static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - u8 cache; - u64 ipat = 0; - - /* For VT-d and EPT combination - * 1. MMIO: always map as UC - * 2. EPT with VT-d: - * a. VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. - * b. VT-d with snooping control feature: snooping control feature of - * VT-d engine can guarantee the cache correctness. Just set it - * to WB to keep consistent with host. So the same as item 3. - * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep - * consistent with host MTRR - */ - if (is_mmio) { - cache = MTRR_TYPE_UNCACHABLE; - goto exit; - } - - if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { - ipat = VMX_EPT_IPAT_BIT; - cache = MTRR_TYPE_WRBACK; - goto exit; - } - - if (kvm_read_cr0(vcpu) & X86_CR0_CD) { - ipat = VMX_EPT_IPAT_BIT; - if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) - cache = MTRR_TYPE_WRBACK; - else - cache = MTRR_TYPE_UNCACHABLE; - goto exit; - } - - cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - -exit: - return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; -} - -static int vmx_get_lpage_level(void) -{ - if (enable_ept && !cpu_has_vmx_ept_1g_page()) - return PT_DIRECTORY_LEVEL; - else - /* For shadow and EPT supported 1GB page */ - return PT_PDPE_LEVEL; -} - -static void vmcs_set_secondary_exec_control(u32 new_ctl) -{ - /* - * These bits in the secondary execution controls field - * are dynamic, the others are mostly based on the hypervisor - * architecture and the guest's CPUID. Do not touch the - * dynamic bits. - */ - u32 mask = - SECONDARY_EXEC_SHADOW_VMCS | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_DESC; - - u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); - - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, - (new_ctl & ~mask) | (cur_ctl & mask)); -} - -/* - * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits - * (indicating "allowed-1") if they are supported in the guest's CPUID. 
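/*
 * Illustrative sketch, not part of the original patch: vmx_get_mt_mask()
 * above composes the EPT leaf memory-type bits, where the type lives in
 * bits 5:3 of the EPTE and "ignore PAT" is bit 6; the MTRR type encodings
 * (UC = 0, WB = 6) are architectural.
 */
#include <assert.h>
#include <stdint.h>

#define EPT_MT_SHIFT 3
#define EPT_IPAT_BIT (1ull << 6)
#define MTRR_UC      0u
#define MTRR_WB      6u

static uint64_t ept_memtype(uint8_t type, int ignore_pat)
{
	return ((uint64_t)type << EPT_MT_SHIFT) | (ignore_pat ? EPT_IPAT_BIT : 0);
}

int main(void)
{
	assert(ept_memtype(MTRR_UC, 0) == 0x00);   /* MMIO: UC, guest PAT still honored */
	assert(ept_memtype(MTRR_WB, 1) == 0x70);   /* no noncoherent DMA: WB, PAT ignored */
	return 0;
}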
- */ -static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_cpuid_entry2 *entry; - - vmx->nested.msrs.cr0_fixed1 = 0xffffffff; - vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; - -#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ - if (entry && (entry->_reg & (_cpuid_mask))) \ - vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ -} while (0) - - entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); - cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); - cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); - cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); - cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); - cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); - cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); - cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); - cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); - cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); - cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); - cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); - cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); - cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); - cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); - - entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); - cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); - cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); - cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); - cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); - cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); - -#undef cr4_fixed1_update -} - -static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (kvm_mpx_supported()) { - bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); - - if (mpx_enabled) { - vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; - vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; - } else { - vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; - vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; - } - } -} - -static void vmx_cpuid_update(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (cpu_has_secondary_exec_ctrls()) { - vmx_compute_secondary_exec_control(vmx); - vmcs_set_secondary_exec_control(vmx->secondary_exec_control); - } - - if (nested_vmx_allowed(vcpu)) - to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= - FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; - else - to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= - ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; - - if (nested_vmx_allowed(vcpu)) { - nested_vmx_cr_fixed1_bits_update(vcpu); - nested_vmx_entry_exit_ctls_update(vcpu); - } -} - -static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) -{ - if (func == 1 && nested) - entry->ecx |= bit(X86_FEATURE_VMX); -} - -static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, - struct x86_exception *fault) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason; - unsigned long exit_qualification = vcpu->arch.exit_qualification; - - if (vmx->nested.pml_full) { - exit_reason = EXIT_REASON_PML_FULL; - vmx->nested.pml_full = false; - exit_qualification &= INTR_INFO_UNBLOCK_NMI; - } else if (fault->error_code & PFERR_RSVD_MASK) - exit_reason = EXIT_REASON_EPT_MISCONFIG; - else - exit_reason = 
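/*
 * Illustrative sketch, not part of the original patch: the cr4_fixed1_update()
 * macro above ORs a CR4 bit into the "allowed-1" mask only when the matching
 * guest CPUID bit is set. The same conditional-accumulate pattern, with the
 * SMEP/SMAP positions believed to match the architectural definitions:
 */
#include <assert.h>
#include <stdint.h>

#define FEAT_SMEP (1u << 7)     /* CPUID.7.0:EBX bit 7 */
#define FEAT_SMAP (1u << 20)    /* CPUID.7.0:EBX bit 20 */
#define CR4_SMEP  (1ull << 20)
#define CR4_SMAP  (1ull << 21)

static uint64_t cr4_fixed1(uint32_t cpuid7_ebx)
{
	uint64_t fixed1 = 0;

	if (cpuid7_ebx & FEAT_SMEP)
		fixed1 |= CR4_SMEP;
	if (cpuid7_ebx & FEAT_SMAP)
		fixed1 |= CR4_SMAP;
	return fixed1;
}

int main(void)
{
	assert(cr4_fixed1(FEAT_SMEP) == CR4_SMEP);
	assert(cr4_fixed1(FEAT_SMEP | FEAT_SMAP) == (CR4_SMEP | CR4_SMAP));
	return 0;
}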
EXIT_REASON_EPT_VIOLATION; - - nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification); - vmcs12->guest_physical_address = fault->address; -} - -static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) -{ - return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; -} - -/* Callbacks for nested_ept_init_mmu_context: */ - -static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) -{ - /* return the page table to be shadowed - in our case, EPT12 */ - return get_vmcs12(vcpu)->ept_pointer; -} - -static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) -{ - WARN_ON(mmu_is_nested(vcpu)); - - vcpu->arch.mmu = &vcpu->arch.guest_mmu; - kvm_init_shadow_ept_mmu(vcpu, - to_vmx(vcpu)->nested.msrs.ept_caps & - VMX_EPT_EXECUTE_ONLY_BIT, - nested_ept_ad_enabled(vcpu), - nested_ept_get_cr3(vcpu)); - vcpu->arch.mmu->set_cr3 = vmx_set_cr3; - vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; - vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; - vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; - - vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; -} - -static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) -{ - vcpu->arch.mmu = &vcpu->arch.root_mmu; - vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; -} - -static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, - u16 error_code) -{ - bool inequality, bit; - - bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; - inequality = - (error_code & vmcs12->page_fault_error_code_mask) != - vmcs12->page_fault_error_code_match; - return inequality ^ bit; -} - -static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, - struct x86_exception *fault) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - WARN_ON(!is_guest_mode(vcpu)); - - if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && - !to_vmx(vcpu)->nested.nested_run_pending) { - vmcs12->vm_exit_intr_error_code = fault->error_code; - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, - PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | - INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, - fault->address); - } else { - kvm_inject_page_fault(vcpu, fault); - } -} - -static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12); - -static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct page *page; - u64 hpa; - - if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - /* - * Translate L1 physical address to host physical - * address for vmcs02. Keep the page pinned, so this - * physical address remains valid. We keep a reference - * to it so we can release it later. - */ - if (vmx->nested.apic_access_page) { /* shouldn't happen */ - kvm_release_page_dirty(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = NULL; - } - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); - /* - * If translation failed, no matter: This feature asks - * to exit when accessing the given address, and if it - * can never be accessed, this feature won't do - * anything anyway. 
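/*
 * Illustrative sketch, not part of the original patch:
 * nested_vmx_is_page_fault_vmexit() above encodes the SDM rule that with
 * EXCEPTION_BITMAP.PF set a #PF exits when (error_code & MASK) == MATCH,
 * and with it clear a #PF exits when the comparison fails - hence the XOR.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool pf_causes_vmexit(uint32_t ec, uint32_t mask, uint32_t match, bool eb_pf)
{
	bool inequality = (ec & mask) != match;
	return inequality ^ eb_pf;
}

int main(void)
{
	/* EB.PF set with MASK = MATCH = 0: every error code matches, every #PF exits. */
	assert(pf_causes_vmexit(0x0, 0, 0, true));
	assert(pf_causes_vmexit(0x7, 0, 0, true));
	/* EB.PF set with only write faults (bit 1) selected. */
	assert(pf_causes_vmexit(0x2, 0x2, 0x2, true));
	assert(!pf_causes_vmexit(0x0, 0x2, 0x2, true));
	return 0;
}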
- */ - if (!is_error_page(page)) { - vmx->nested.apic_access_page = page; - hpa = page_to_phys(vmx->nested.apic_access_page); - vmcs_write64(APIC_ACCESS_ADDR, hpa); - } else { - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); - } - } - - if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { - if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ - kvm_release_page_dirty(vmx->nested.virtual_apic_page); - vmx->nested.virtual_apic_page = NULL; - } - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); - - /* - * If translation failed, VM entry will fail because - * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. - * Failing the vm entry is _not_ what the processor - * does but it's basically the only possibility we - * have. We could still enter the guest if CR8 load - * exits are enabled, CR8 store exits are enabled, and - * virtualize APIC access is disabled; in this case - * the processor would never use the TPR shadow and we - * could simply clear the bit from the execution - * control. But such a configuration is useless, so - * let's keep the code simple. - */ - if (!is_error_page(page)) { - vmx->nested.virtual_apic_page = page; - hpa = page_to_phys(vmx->nested.virtual_apic_page); - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); - } - } - - if (nested_cpu_has_posted_intr(vmcs12)) { - if (vmx->nested.pi_desc_page) { /* shouldn't happen */ - kunmap(vmx->nested.pi_desc_page); - kvm_release_page_dirty(vmx->nested.pi_desc_page); - vmx->nested.pi_desc_page = NULL; - } - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); - if (is_error_page(page)) - return; - vmx->nested.pi_desc_page = page; - vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); - vmx->nested.pi_desc = - (struct pi_desc *)((void *)vmx->nested.pi_desc + - (unsigned long)(vmcs12->posted_intr_desc_addr & - (PAGE_SIZE - 1))); - vmcs_write64(POSTED_INTR_DESC_ADDR, - page_to_phys(vmx->nested.pi_desc_page) + - (unsigned long)(vmcs12->posted_intr_desc_addr & - (PAGE_SIZE - 1))); - } - if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) - vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_USE_MSR_BITMAPS); - else - vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, - CPU_BASED_USE_MSR_BITMAPS); -} - -static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) -{ - u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * A timer value of zero is architecturally guaranteed to cause - * a VMExit prior to executing any instructions in the guest. 
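/*
 * Illustrative sketch, not part of the original patch:
 * nested_get_vmcs12_pages() above pins the guest page backing the
 * posted-interrupt descriptor and rebuilds its host-physical address as
 * "page base + offset within the page", carrying over the low
 * PAGE_SIZE - 1 bits of the guest-physical address.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

static uint64_t hpa_from_page(uint64_t page_base_hpa, uint64_t gpa)
{
	return page_base_hpa + (gpa & (PAGE_SIZE - 1));
}

int main(void)
{
	/* A descriptor at guest-physical 0x1234f40, backed by host page 0xabcd000. */
	assert(hpa_from_page(0xabcd000u, 0x1234f40u) == 0xabcdf40u);
	return 0;
}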
- */ - if (preemption_timeout == 0) { - vmx_preemption_timer_fn(&vmx->nested.preemption_timer); - return; - } - - if (vcpu->arch.virtual_tsc_khz == 0) - return; - - preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; - preemption_timeout *= 1000000; - do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); - hrtimer_start(&vmx->nested.preemption_timer, - ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); -} - -static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return 0; - - if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) || - !page_address_valid(vcpu, vmcs12->io_bitmap_b)) - return -EINVAL; - - return 0; -} - -static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) - return 0; - - if (!page_address_valid(vcpu, vmcs12->msr_bitmap)) - return -EINVAL; - - return 0; -} - -static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) - return 0; - - if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) - return -EINVAL; - - return 0; -} - -/* - * Merge L0's and L1's MSR bitmap, return false to indicate that - * we do not use the hardware. - */ -static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - int msr; - struct page *page; - unsigned long *msr_bitmap_l1; - unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; - /* - * pred_cmd & spec_ctrl are trying to verify two things: - * - * 1. L0 gave a permission to L1 to actually passthrough the MSR. This - * ensures that we do not accidentally generate an L02 MSR bitmap - * from the L12 MSR bitmap that is too permissive. - * 2. That L1 or L2s have actually used the MSR. This avoids - * unnecessarily merging of the bitmap if the MSR is unused. This - * works properly because we only update the L01 MSR bitmap lazily. - * So even if L0 should pass L1 these MSRs, the L01 bitmap is only - * updated to reflect this when L1 (or its L2s) actually write to - * the MSR. - */ - bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); - bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); - - /* Nothing to do if the MSR bitmap is not in use. */ - if (!cpu_has_vmx_msr_bitmap() || - !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) - return false; - - if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && - !pred_cmd && !spec_ctrl) - return false; - - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); - if (is_error_page(page)) - return false; - - msr_bitmap_l1 = (unsigned long *)kmap(page); - if (nested_cpu_has_apic_reg_virt(vmcs12)) { - /* - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it - * just lets the processor take the value from the virtual-APIC page; - * take those 256 bits directly from the L1 bitmap. 
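/*
 * Illustrative sketch, not part of the original patch:
 * vmx_start_preemption_timer() above converts the vmcs12 timer value to
 * nanoseconds as ns = (value << 5) * 1000000 / tsc_khz. The emulated rate
 * shift of 5 turns timer ticks into TSC cycles, and the rest is a
 * cycles-to-ns conversion at the guest's virtual TSC frequency (plain
 * division stands in for the kernel's do_div()).
 */
#include <assert.h>
#include <stdint.h>

#define EMULATED_PREEMPTION_TIMER_RATE 5

static uint64_t preemption_timer_ns(uint64_t value, uint64_t tsc_khz)
{
	uint64_t cycles = value << EMULATED_PREEMPTION_TIMER_RATE;
	return cycles * 1000000u / tsc_khz;
}

int main(void)
{
	/* 31250 ticks at a 1 GHz virtual TSC: 31250 * 32 cycles = 1 ms. */
	assert(preemption_timer_ns(31250, 1000000) == 1000000);
	return 0;
}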
- */ - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = msr_bitmap_l1[word]; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } else { - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = ~0; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } - - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_TASKPRI), - MSR_TYPE_W); - - if (nested_cpu_has_vid(vmcs12)) { - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_EOI), - MSR_TYPE_W); - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_SELF_IPI), - MSR_TYPE_W); - } - - if (spec_ctrl) - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - MSR_IA32_SPEC_CTRL, - MSR_TYPE_R | MSR_TYPE_W); - - if (pred_cmd) - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - MSR_IA32_PRED_CMD, - MSR_TYPE_W); - - kunmap(page); - kvm_release_page_clean(page); - - return true; -} - -static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - struct vmcs12 *shadow; - struct page *page; - - if (!nested_cpu_has_shadow_vmcs(vmcs12) || - vmcs12->vmcs_link_pointer == -1ull) - return; - - shadow = get_shadow_vmcs12(vcpu); - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); - - memcpy(shadow, kmap(page), VMCS12_SIZE); - - kunmap(page); - kvm_release_page_clean(page); -} - -static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (!nested_cpu_has_shadow_vmcs(vmcs12) || - vmcs12->vmcs_link_pointer == -1ull) - return; - - kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, - get_shadow_vmcs12(vcpu), VMCS12_SIZE); -} - -static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && - !page_address_valid(vcpu, vmcs12->apic_access_addr)) - return -EINVAL; - else - return 0; -} - -static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && - !nested_cpu_has_apic_reg_virt(vmcs12) && - !nested_cpu_has_vid(vmcs12) && - !nested_cpu_has_posted_intr(vmcs12)) - return 0; - - /* - * If virtualize x2apic mode is enabled, - * virtualize apic access must be disabled. - */ - if (nested_cpu_has_virt_x2apic_mode(vmcs12) && - nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) - return -EINVAL; - - /* - * If virtual interrupt delivery is enabled, - * we must exit on external interrupts. - */ - if (nested_cpu_has_vid(vmcs12) && - !nested_exit_on_intr(vcpu)) - return -EINVAL; - - /* - * bits 15:8 should be zero in posted_intr_nv, - * the descriptor address has been already checked - * in nested_get_vmcs12_pages. - * - * bits 5:0 of posted_intr_desc_addr should be zero. - */ - if (nested_cpu_has_posted_intr(vmcs12) && - (!nested_cpu_has_vid(vmcs12) || - !nested_exit_intr_ack_set(vcpu) || - (vmcs12->posted_intr_nv & 0xff00) || - (vmcs12->posted_intr_desc_addr & 0x3f) || - (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) - return -EINVAL; - - /* tpr shadow is needed by all apicv features. 
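/*
 * Illustrative sketch, not part of the original patch: the x2APIC loop above
 * walks MSRs 0x800-0x8ff one long at a time. In the 4K VMX MSR bitmap the
 * read intercepts for low MSRs start at byte 0 and the write intercepts at
 * byte 0x800, so the write word for an MSR lives 0x800/sizeof(long) longs
 * past its read word.
 */
#include <assert.h>
#include <stddef.h>

#define BITS_PER_LONG (8 * sizeof(long))

int main(void)
{
	unsigned int msr = 0x800;                   /* start of the x2APIC MSR range */
	size_t read_word  = msr / BITS_PER_LONG;
	size_t write_word = read_word + 0x800 / sizeof(long);

	assert(read_word * sizeof(long) == 0x100);  /* byte offset in the read-low area */
	assert(write_word * sizeof(long) == 0x900); /* same MSR in the write-low area */
	return 0;
}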
*/ - if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) - return -EINVAL; - - return 0; -} - -static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, - unsigned long count_field, - unsigned long addr_field) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - int maxphyaddr; - u64 count, addr; - - if (vmcs12_read_any(vmcs12, count_field, &count) || - vmcs12_read_any(vmcs12, addr_field, &addr)) { - WARN_ON(1); - return -EINVAL; - } - if (count == 0) - return 0; - maxphyaddr = cpuid_maxphyaddr(vcpu); - if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || - (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { - pr_debug_ratelimited( - "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)", - addr_field, maxphyaddr, count, addr); - return -EINVAL; - } - return 0; -} - -static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (vmcs12->vm_exit_msr_load_count == 0 && - vmcs12->vm_exit_msr_store_count == 0 && - vmcs12->vm_entry_msr_load_count == 0) - return 0; /* Fast path */ - if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, - VM_EXIT_MSR_LOAD_ADDR) || - nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, - VM_EXIT_MSR_STORE_ADDR) || - nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, - VM_ENTRY_MSR_LOAD_ADDR)) - return -EINVAL; - return 0; -} - -static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has_pml(vmcs12)) - return 0; - - if (!nested_cpu_has_ept(vmcs12) || - !page_address_valid(vcpu, vmcs12->pml_address)) - return -EINVAL; - - return 0; -} - -static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has_shadow_vmcs(vmcs12)) - return 0; - - if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) || - !page_address_valid(vcpu, vmcs12->vmwrite_bitmap)) - return -EINVAL; - - return 0; -} - -static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, - struct vmx_msr_entry *e) -{ - /* x2APIC MSR accesses are not allowed */ - if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) - return -EINVAL; - if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ - e->index == MSR_IA32_UCODE_REV) - return -EINVAL; - if (e->reserved != 0) - return -EINVAL; - return 0; -} - -static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, - struct vmx_msr_entry *e) -{ - if (e->index == MSR_FS_BASE || - e->index == MSR_GS_BASE || - e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ - nested_vmx_msr_check_common(vcpu, e)) - return -EINVAL; - return 0; -} - -static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, - struct vmx_msr_entry *e) -{ - if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ - nested_vmx_msr_check_common(vcpu, e)) - return -EINVAL; - return 0; -} - -/* - * Load guest's/host's msr at nested entry/exit. - * return 0 for success, entry index for failure. 
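/*
 * Illustrative sketch, not part of the original patch:
 * nested_vmx_check_msr_switch() above requires the MSR switch area to be
 * 16-byte aligned and entirely below the guest's physical-address width;
 * each vmx_msr_entry is 16 bytes (index, reserved, value), so the last
 * byte sits at addr + count * 16 - 1.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool msr_area_valid(uint64_t addr, uint64_t count, int maxphyaddr)
{
	if (count == 0)
		return true;                          /* nothing to check, as in the kernel */
	if (addr & 15)
		return false;                         /* not 16-byte aligned */
	if (addr >> maxphyaddr)
		return false;                         /* base above the address width */
	return ((addr + count * 16 - 1) >> maxphyaddr) == 0;
}

int main(void)
{
	assert(msr_area_valid(0x1000, 4, 36));
	assert(!msr_area_valid(0x1008, 4, 36));            /* misaligned */
	assert(!msr_area_valid((1ull << 36) - 16, 2, 36)); /* runs past 2^36 */
	return 0;
}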
- */ -static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) -{ - u32 i; - struct vmx_msr_entry e; - struct msr_data msr; - - msr.host_initiated = false; - for (i = 0; i < count; i++) { - if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), - &e, sizeof(e))) { - pr_debug_ratelimited( - "%s cannot read MSR entry (%u, 0x%08llx)\n", - __func__, i, gpa + i * sizeof(e)); - goto fail; - } - if (nested_vmx_load_msr_check(vcpu, &e)) { - pr_debug_ratelimited( - "%s check failed (%u, 0x%x, 0x%x)\n", - __func__, i, e.index, e.reserved); - goto fail; - } - msr.index = e.index; - msr.data = e.value; - if (kvm_set_msr(vcpu, &msr)) { - pr_debug_ratelimited( - "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", - __func__, i, e.index, e.value); - goto fail; - } - } - return 0; -fail: - return i + 1; -} - -static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) -{ - u32 i; - struct vmx_msr_entry e; - - for (i = 0; i < count; i++) { - struct msr_data msr_info; - if (kvm_vcpu_read_guest(vcpu, - gpa + i * sizeof(e), - &e, 2 * sizeof(u32))) { - pr_debug_ratelimited( - "%s cannot read MSR entry (%u, 0x%08llx)\n", - __func__, i, gpa + i * sizeof(e)); - return -EINVAL; - } - if (nested_vmx_store_msr_check(vcpu, &e)) { - pr_debug_ratelimited( - "%s check failed (%u, 0x%x, 0x%x)\n", - __func__, i, e.index, e.reserved); - return -EINVAL; - } - msr_info.host_initiated = false; - msr_info.index = e.index; - if (kvm_get_msr(vcpu, &msr_info)) { - pr_debug_ratelimited( - "%s cannot read MSR (%u, 0x%x)\n", - __func__, i, e.index); - return -EINVAL; - } - if (kvm_vcpu_write_guest(vcpu, - gpa + i * sizeof(e) + - offsetof(struct vmx_msr_entry, value), - &msr_info.data, sizeof(msr_info.data))) { - pr_debug_ratelimited( - "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", - __func__, i, e.index, msr_info.data); - return -EINVAL; - } - } - return 0; -} - -static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - unsigned long invalid_mask; - - invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); - return (val & invalid_mask) == 0; -} - -/* - * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are - * emulating VM entry into a guest with EPT enabled. - * Returns 0 on success, 1 on failure. Invalid state exit qualification code - * is assigned to entry_failure_code on failure. - */ -static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, - u32 *entry_failure_code) -{ - if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { - if (!nested_cr3_valid(vcpu, cr3)) { - *entry_failure_code = ENTRY_FAIL_DEFAULT; - return 1; - } - - /* - * If PAE paging and EPT are both on, CR3 is not used by the CPU and - * must not be dereferenced. - */ - if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && - !nested_ept) { - if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { - *entry_failure_code = ENTRY_FAIL_PDPTE; - return 1; - } - } - } - - if (!nested_ept) - kvm_mmu_new_cr3(vcpu, cr3, false); - - vcpu->arch.cr3 = cr3; - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); - - kvm_init_mmu(vcpu, false); - - return 0; -} - -/* - * Returns if KVM is able to config CPU to tag TLB entries - * populated by L2 differently than TLB entries populated - * by L1. - * - * If L1 uses EPT, then TLB entries are tagged with different EPTP. - * - * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged - * with different VPID (L1 entries are tagged with vmx->vpid - * while L2 entries are tagged with vmx->nested.vpid02). 
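/*
 * Illustrative sketch, not part of the original patch: nested_cr3_valid()
 * above rejects any CR3 with bits set at or above the guest's MAXPHYADDR
 * by building the mask of illegal bits as ~0ULL << maxphyaddr.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool cr3_valid(uint64_t cr3, int maxphyaddr)
{
	uint64_t invalid_mask = ~0ull << maxphyaddr;
	return (cr3 & invalid_mask) == 0;
}

int main(void)
{
	assert(cr3_valid(0x12345000, 36));
	assert(!cr3_valid(1ull << 40, 36));
	return 0;
}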
- */ -static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - return nested_cpu_has_ept(vmcs12) || - (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); -} - -static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) -{ - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) - return vmcs12->guest_ia32_efer; - else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) - return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); - else - return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); -} - -static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) -{ - /* - * If vmcs02 hasn't been initialized, set the constant vmcs02 state - * according to L0's settings (vmcs12 is irrelevant here). Host - * fields that come from L0 and are not constant, e.g. HOST_CR3, - * will be set as needed prior to VMLAUNCH/VMRESUME. - */ - if (vmx->nested.vmcs02_initialized) - return; - vmx->nested.vmcs02_initialized = true; - - /* - * We don't care what the EPTP value is we just need to guarantee - * it's valid so we don't get a false positive when doing early - * consistency checks. - */ - if (enable_ept && nested_early_check) - vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); - - /* All VMFUNCs are currently emulated through L0 vmexits. */ - if (cpu_has_vmx_vmfunc()) - vmcs_write64(VM_FUNCTION_CONTROL, 0); - - if (cpu_has_vmx_posted_intr()) - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); - - if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); - - if (enable_pml) - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); - - /* - * Set the MSR load/store lists to match L0's settings. Only the - * addresses are constant (for vmcs02), the counts can change based - * on L2's behavior, e.g. switching to/from long mode. - */ - vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); - - vmx_set_constant_host_state(vmx); -} - -static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, - struct vmcs12 *vmcs12) -{ - prepare_vmcs02_constant_state(vmx); - - vmcs_write64(VMCS_LINK_POINTER, -1ull); - - if (enable_vpid) { - if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); - else - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); - } -} - -static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) -{ - u32 exec_control, vmcs12_exec_ctrl; - u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); - - if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) - prepare_vmcs02_early_full(vmx, vmcs12); - - /* - * HOST_RSP is normally set correctly in vmx_vcpu_run() just before - * entry, but only if the current (host) sp changed from the value - * we wrote last (vmx->host_rsp). This cache is no longer relevant - * if we switch vmcs, and rather than hold a separate cache per vmcs, - * here we just force the write to happen on entry. host_rsp will - * also be written unconditionally by nested_vmx_check_vmentry_hw() - * if we are doing early consistency checks via hardware. - */ - vmx->host_rsp = 0; - - /* - * PIN CONTROLS - */ - exec_control = vmcs12->pin_based_vm_exec_control; - - /* Preemption timer setting is computed directly in vmx_vcpu_run. 
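/*
 * Illustrative sketch, not part of the original patch: nested_vmx_calc_efer()
 * above takes L2's EFER from the vmcs12 field when VM_ENTRY_LOAD_IA32_EFER
 * applies, and otherwise reuses L1's EFER with LMA/LME forced to match the
 * VM_ENTRY_IA32E_MODE control. The condition is collapsed to one bool here;
 * LME = bit 8 and LMA = bit 10 are the architectural positions.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define EFER_LME (1ull << 8)
#define EFER_LMA (1ull << 10)

static uint64_t calc_nested_efer(bool load_efer, uint64_t guest_efer,
				 bool ia32e_mode, uint64_t l1_efer)
{
	if (load_efer)
		return guest_efer;
	if (ia32e_mode)
		return l1_efer | (EFER_LMA | EFER_LME);
	return l1_efer & ~(EFER_LMA | EFER_LME);
}

int main(void)
{
	assert(calc_nested_efer(true, 0x500, false, 0) == 0x500);
	assert(calc_nested_efer(false, 0, true, 0) == (EFER_LMA | EFER_LME));
	return 0;
}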
*/ - exec_control |= vmcs_config.pin_based_exec_ctrl; - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; - vmx->loaded_vmcs->hv_timer_armed = false; - - /* Posted interrupts setting is only taken from vmcs12. */ - if (nested_cpu_has_posted_intr(vmcs12)) { - vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; - vmx->nested.pi_pending = false; - } else { - exec_control &= ~PIN_BASED_POSTED_INTR; - } - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); - - /* - * EXEC CONTROLS - */ - exec_control = vmx_exec_control(vmx); /* L0's desires */ - exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; - exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; - exec_control &= ~CPU_BASED_TPR_SHADOW; - exec_control |= vmcs12->cpu_based_vm_exec_control; - - /* - * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if - * nested_get_vmcs12_pages can't fix it up, the illegal value - * will result in a VM entry failure. - */ - if (exec_control & CPU_BASED_TPR_SHADOW) { - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); - vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); - } else { -#ifdef CONFIG_X86_64 - exec_control |= CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING; -#endif - } - - /* - * A vmexit (to either L1 hypervisor or L0 userspace) is always needed - * for I/O port accesses. - */ - exec_control &= ~CPU_BASED_USE_IO_BITMAPS; - exec_control |= CPU_BASED_UNCOND_IO_EXITING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); - - /* - * SECONDARY EXEC CONTROLS - */ - if (cpu_has_secondary_exec_ctrls()) { - exec_control = vmx->secondary_exec_control; - - /* Take the following fields only from vmcs12 */ - exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_ENABLE_INVPCID | - SECONDARY_EXEC_RDTSCP | - SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_ENABLE_VMFUNC); - if (nested_cpu_has(vmcs12, - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { - vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & - ~SECONDARY_EXEC_ENABLE_PML; - exec_control |= vmcs12_exec_ctrl; - } - - /* VMCS shadowing for L2 is emulated for now */ - exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; - - if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) - vmcs_write16(GUEST_INTR_STATUS, - vmcs12->guest_intr_status); - - /* - * Write an illegal value to APIC_ACCESS_ADDR. Later, - * nested_get_vmcs12_pages will either fix it up or - * remove the VM execution control. - */ - if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) - vmcs_write64(APIC_ACCESS_ADDR, -1ull); - - if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) - vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); - - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); - } - - /* - * ENTRY CONTROLS - * - * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE - * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate - * on the related bits (if supported by the CPU) in the hope that - * we can avoid VMWrites during vmx_set_efer(). - */ - exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) & - ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; - if (cpu_has_load_ia32_efer) { - if (guest_efer & EFER_LMA) - exec_control |= VM_ENTRY_IA32E_MODE; - if (guest_efer != host_efer) - exec_control |= VM_ENTRY_LOAD_IA32_EFER; - } - vm_entry_controls_init(vmx, exec_control); - - /* - * EXIT CONTROLS - * - * L2->L1 exit controls are emulated - the hardware exit is to L0 so - * we should use its exit controls. 
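/*
 * Illustrative sketch, not part of the original patch, and a simplification
 * of the merging above: vmcs02 controls start from L0's own settings, strip
 * the bits that must be taken only from vmcs12, and then OR in L1's
 * requested bits - so a bit is set when either L0 or L1 needs it, except
 * for the "vmcs12-only" bits, which track L1 alone.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t merge_controls(uint32_t l0, uint32_t l1, uint32_t vmcs12_only)
{
	return (l0 & ~vmcs12_only) | l1;
}

int main(void)
{
	uint32_t l0 = 0x0f, l1 = 0x30, only12 = 0x0c;

	/* Bits 2-3 come from L1 alone; the rest is the union of L0 and L1. */
	assert(merge_controls(l0, l1, only12) == 0x33);
	return 0;
}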
Note that VM_EXIT_LOAD_IA32_EFER - * bits may be modified by vmx_set_efer() in prepare_vmcs02(). - */ - exec_control = vmcs_config.vmexit_ctrl; - if (cpu_has_load_ia32_efer && guest_efer != host_efer) - exec_control |= VM_EXIT_LOAD_IA32_EFER; - vm_exit_controls_init(vmx, exec_control); - - /* - * Conceptually we want to copy the PML address and index from - * vmcs01 here, and then back to vmcs01 on nested vmexit. But, - * since we always flush the log on each vmexit and never change - * the PML address (once set), this happens to be equivalent to - * simply resetting the index in vmcs02. - */ - if (enable_pml) - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); - - /* - * Interrupt/Exception Fields - */ - if (vmx->nested.nested_run_pending) { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - vmcs12->vm_entry_intr_info_field); - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, - vmcs12->vm_entry_exception_error_code); - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, - vmcs12->vm_entry_instruction_len); - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, - vmcs12->guest_interruptibility_info); - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); - } else { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); - } -} - -static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) -{ - struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; - - if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { - vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); - vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); - vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); - vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); - vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); - vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); - vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); - vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); - vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); - vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); - vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); - vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); - vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); - vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); - vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); - vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); - vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); - vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); - vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); - vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); - vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); - vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); - vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); - vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); - vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); - vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); - vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); - vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); - vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); - } - - if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & - 
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, - vmcs12->guest_pending_dbg_exceptions); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); - - /* - * L1 may access the L2's PDPTR, so save them to construct - * vmcs12 - */ - if (enable_ept) { - vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); - vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); - vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); - vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); - } - } - - if (nested_cpu_has_xsaves(vmcs12)) - vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); - - /* - * Whether page-faults are trapped is determined by a combination of - * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. - * If enable_ept, L0 doesn't care about page faults and we should - * set all of these to L1's desires. However, if !enable_ept, L0 does - * care about (at least some) page faults, and because it is not easy - * (if at all possible?) to merge L0 and L1's desires, we simply ask - * to exit on each and every L2 page fault. This is done by setting - * MASK=MATCH=0 and (see below) EB.PF=1. - * Note that below we don't need special code to set EB.PF beyond the - * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, - * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when - * !enable_ept, EB.PF is 1, so the "or" will always be 1. - */ - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, - enable_ept ? vmcs12->page_fault_error_code_mask : 0); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, - enable_ept ? vmcs12->page_fault_error_code_match : 0); - - if (cpu_has_vmx_apicv()) { - vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); - vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); - vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); - vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); - } - - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); - - set_cr4_guest_host_mask(vmx); - - if (kvm_mpx_supported()) { - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) - vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); - else - vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); - } -} - -/* - * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested - * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it - * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 - * guest in a way that will both be appropriate to L1's requests, and our - * needs. In addition to modifying the active vmcs (which is vmcs02), this - * function also has additional necessary side-effects, like setting various - * vcpu->arch fields. - * Returns 0 on success, 1 on failure. Invalid state exit qualification code - * is assigned to entry_failure_code on failure. - */ -static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - u32 *entry_failure_code) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; - - if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { - prepare_vmcs02_full(vmx, vmcs12); - vmx->nested.dirty_vmcs12 = false; - } - - /* - * First, the fields that are shadowed. This must be kept in sync - * with vmx_shadow_fields.h. 
- */ - if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & - HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { - vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); - vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); - } - - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { - kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); - } else { - kvm_set_dr(vcpu, 7, vcpu->arch.dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); - } - vmx_set_rflags(vcpu, vmcs12->guest_rflags); - - vmx->nested.preemption_timer_expired = false; - if (nested_cpu_has_preemption_timer(vmcs12)) - vmx_start_preemption_timer(vcpu); - - /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the - * bitwise-or of what L1 wants to trap for L2, and what we want to - * trap. Note that CR0.TS also needs updating - we do this later. - */ - update_exception_bitmap(vcpu); - vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; - vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); - - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { - vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); - vcpu->arch.pat = vmcs12->guest_ia32_pat; - } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { - vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); - } - - vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); - - if (kvm_has_tsc_control) - decache_tsc_multiplier(vmx); - - if (enable_vpid) { - /* - * There is no direct mapping between vpid02 and vpid12, the - * vpid02 is per-vCPU for L0 and reused while the value of - * vpid12 is changed w/ one invvpid during nested vmentry. - * The vpid12 is allocated by L1 for L2, so it will not - * influence the global bitmap (for vpid01 and vpid02 allocation) - * even if L1 spawns a lot of nested vCPUs. - */ - if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { - if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { - vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); - } - } else { - /* - * If L1 uses EPT, then L0 needs to execute INVEPT on - * EPTP02 instead of EPTP01. Therefore, delay TLB - * flush until vmcs02->eptp is fully updated by - * KVM_REQ_LOAD_CR3. Note that this assumes - * KVM_REQ_TLB_FLUSH is evaluated after - * KVM_REQ_LOAD_CR3 in vcpu_enter_guest(). - */ - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - } - - if (nested_cpu_has_ept(vmcs12)) - nested_ept_init_mmu_context(vcpu); - else if (nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) - vmx_flush_tlb(vcpu, true); - - /* - * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those - * bits which we consider mandatory enabled. - * The CR0_READ_SHADOW is what L2 should have expected to read given - * the specifications by L1; it's not enough to take - * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we - * have more bits than L1 expected.
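- * (nested_read_cr0() builds exactly that value from vmcs12: guest_cr0 supplies the bits L1 lets L2 own, and cr0_read_shadow supplies the bits L1 intercepts.)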
- */ - vmx_set_cr0(vcpu, vmcs12->guest_cr0); - vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); - - vmx_set_cr4(vcpu, vmcs12->guest_cr4); - vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); - - vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); - /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ - vmx_set_efer(vcpu, vcpu->arch.efer); - - /* - * Guest state is invalid and unrestricted guest is disabled, - * which means L1 attempted VMEntry to L2 with invalid state. - * Fail the VMEntry. - */ - if (vmx->emulation_required) { - *entry_failure_code = ENTRY_FAIL_DEFAULT; - return 1; - } - - /* Shadow page tables on either EPT or shadow page tables. */ - if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), - entry_failure_code)) - return 1; - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; - - kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); - kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); - return 0; -} - -static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) -{ - if (!nested_cpu_has_nmi_exiting(vmcs12) && - nested_cpu_has_virtual_nmis(vmcs12)) - return -EINVAL; - - if (!nested_cpu_has_virtual_nmis(vmcs12) && - nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)) - return -EINVAL; - - return 0; -} - -static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - bool ia32e; - - if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && - vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_pml_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, - vmx->nested.msrs.procbased_ctls_low, - vmx->nested.msrs.procbased_ctls_high) || - (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && - !vmx_control_verify(vmcs12->secondary_vm_exec_control, - vmx->nested.msrs.secondary_ctls_low, - vmx->nested.msrs.secondary_ctls_high)) || - !vmx_control_verify(vmcs12->pin_based_vm_exec_control, - vmx->nested.msrs.pinbased_ctls_low, - vmx->nested.msrs.pinbased_ctls_high) || - !vmx_control_verify(vmcs12->vm_exit_controls, - vmx->nested.msrs.exit_ctls_low, - vmx->nested.msrs.exit_ctls_high) || - !vmx_control_verify(vmcs12->vm_entry_controls, - vmx->nested.msrs.entry_ctls_low, - vmx->nested.msrs.entry_ctls_high)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_vmx_check_nmi_controls(vmcs12)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_cpu_has_vmfunc(vmcs12)) { - if (vmcs12->vm_function_control & - 
~vmx->nested.msrs.vmfunc_controls) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (nested_cpu_has_eptp_switching(vmcs12)) { - if (!nested_cpu_has_ept(vmcs12) || - !page_address_valid(vcpu, vmcs12->eptp_list_address)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - } - } - - if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || - !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || - !nested_cr3_valid(vcpu, vmcs12->host_cr3)) - return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; - - /* - * If the load IA32_EFER VM-exit control is 1, bits reserved in the - * IA32_EFER MSR must be 0 in the field for that register. In addition, - * the values of the LMA and LME bits in the field must each be that of - * the host address-space size VM-exit control. - */ - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { - ia32e = (vmcs12->vm_exit_controls & - VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; - if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) - return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; - } - - /* - * From the Intel SDM, volume 3: - * Fields relevant to VM-entry event injection must be set properly. - * These fields are the VM-entry interruption-information field, the - * VM-entry exception error code, and the VM-entry instruction length. - */ - if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { - u32 intr_info = vmcs12->vm_entry_intr_info_field; - u8 vector = intr_info & INTR_INFO_VECTOR_MASK; - u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; - bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; - bool should_have_error_code; - bool urg = nested_cpu_has2(vmcs12, - SECONDARY_EXEC_UNRESTRICTED_GUEST); - bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; - - /* VM-entry interruption-info field: interruption type */ - if (intr_type == INTR_TYPE_RESERVED || - (intr_type == INTR_TYPE_OTHER_EVENT && - !nested_cpu_supports_monitor_trap_flag(vcpu))) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - /* VM-entry interruption-info field: vector */ - if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || - (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || - (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - /* VM-entry interruption-info field: deliver error code */ - should_have_error_code = - intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && - x86_exception_has_error_code(vector); - if (has_error_code != should_have_error_code) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - /* VM-entry exception error code */ - if (has_error_code && - vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - /* VM-entry interruption-info field: reserved bits */ - if (intr_info & INTR_INFO_RESVD_BITS_MASK) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - /* VM-entry instruction length */ - switch (intr_type) { - case INTR_TYPE_SOFT_EXCEPTION: - case INTR_TYPE_SOFT_INTR: - case INTR_TYPE_PRIV_SW_EXCEPTION: - if ((vmcs12->vm_entry_instruction_len > 15) || - (vmcs12->vm_entry_instruction_len == 0 && - !nested_cpu_has_zero_length_injection(vcpu))) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - } - } - - if (nested_cpu_has_ept(vmcs12) && - !valid_ept_address(vcpu, vmcs12->ept_pointer)) - return VMXERR_ENTRY_INVALID_CONTROL_FIELD; - - return 0; -} - -static int 
nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - int r; - struct page *page; - struct vmcs12 *shadow; - - if (vmcs12->vmcs_link_pointer == -1ull) - return 0; - - if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)) - return -EINVAL; - - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); - if (is_error_page(page)) - return -EINVAL; - - r = 0; - shadow = kmap(page); - if (shadow->hdr.revision_id != VMCS12_REVISION || - shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)) - r = -EINVAL; - kunmap(page); - kvm_release_page_clean(page); - return r; -} - -static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - u32 *exit_qual) -{ - bool ia32e; - - *exit_qual = ENTRY_FAIL_DEFAULT; - - if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || - !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) - return 1; - - if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { - *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; - return 1; - } - - /* - * If the load IA32_EFER VM-entry control is 1, the following checks - * are performed on the field for the IA32_EFER MSR: - * - Bits reserved in the IA32_EFER MSR must be 0. - * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of - * the IA-32e mode guest VM-entry control. It must also be identical - * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to - * CR0.PG) is 1. - */ - if (to_vmx(vcpu)->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { - ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; - if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || - ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || - ((vmcs12->guest_cr0 & X86_CR0_PG) && - ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) - return 1; - } - - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && - (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || - (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) - return 1; - - return 0; -} - -static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long cr3, cr4; - - if (!nested_early_check) - return 0; - - if (vmx->msr_autoload.host.nr) - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); - if (vmx->msr_autoload.guest.nr) - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); - - preempt_disable(); - - vmx_prepare_switch_to_guest(vcpu); - - /* - * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, - * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to - * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. - * there is no need to preserve other bits or save/restore the field.
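- * (Bit 1 of RFLAGS is a reserved always-1 bit, so entering with it clear is architecturally guaranteed to fail the VM-entry consistency checks before any L2 code runs.)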
- */ - vmcs_writel(GUEST_RFLAGS, 0); - - vmcs_writel(HOST_RIP, vmx_early_consistency_check_return); - - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { - vmcs_writel(HOST_CR3, cr3); - vmx->loaded_vmcs->host_state.cr3 = cr3; - } - - cr4 = cr4_read_shadow(); - if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { - vmcs_writel(HOST_CR4, cr4); - vmx->loaded_vmcs->host_state.cr4 = cr4; - } - - vmx->__launched = vmx->loaded_vmcs->launched; - - asm( - /* Set HOST_RSP */ - __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" - "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t" - - /* Check if vmlaunch or vmresume is needed */ - "cmpl $0, %c[launched](%0)\n\t" - "je 1f\n\t" - __ex("vmresume") "\n\t" - "jmp 2f\n\t" - "1: " __ex("vmlaunch") "\n\t" - "jmp 2f\n\t" - "2: " - - /* Set vmx->fail accordingly */ - "setbe %c[fail](%0)\n\t" - - ".pushsection .rodata\n\t" - ".global vmx_early_consistency_check_return\n\t" - "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t" - ".popsection" - : - : "c"(vmx), "d"((unsigned long)HOST_RSP), - [launched]"i"(offsetof(struct vcpu_vmx, __launched)), - [fail]"i"(offsetof(struct vcpu_vmx, fail)), - [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)) - : "rax", "cc", "memory" - ); - - vmcs_writel(HOST_RIP, vmx_return); - - preempt_enable(); - - if (vmx->msr_autoload.host.nr) - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); - if (vmx->msr_autoload.guest.nr) - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); - - if (vmx->fail) { - WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != - VMXERR_ENTRY_INVALID_CONTROL_FIELD); - vmx->fail = 0; - return 1; - } - - /* - * VMExit clears RFLAGS.IF and DR7, even on a consistency check. - */ - local_irq_enable(); - if (hw_breakpoint_active()) - set_debugreg(__this_cpu_read(cpu_dr7), 7); - - /* - * A non-failing VMEntry means we somehow entered guest mode with - * an illegal RIP, and that's just the tip of the iceberg. There - * is no telling what memory has been modified or what state has - * been exposed to unknown code. Hitting this all but guarantees - * a (very critical) hardware issue. - */ - WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & - VMX_EXIT_REASONS_FAILED_VMENTRY)); - - return 0; -} -STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw); - -static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12); - -/* - * If from_vmentry is false, this is being called from state restore (either RSM - * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. -+ * -+ * Returns: -+ * 0 - success, i.e.
proceed with actual VMEnter -+ * 1 - consistency check VMExit -+ * -1 - consistency check VMFail - */ -static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, - bool from_vmentry) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - bool evaluate_pending_interrupts; - u32 exit_reason = EXIT_REASON_INVALID_STATE; - u32 exit_qual; - - evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & - (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING); - if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) - evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - - if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) - vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - if (kvm_mpx_supported() && - !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) - vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); - - vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); - - prepare_vmcs02_early(vmx, vmcs12); - - if (from_vmentry) { - nested_get_vmcs12_pages(vcpu); - - if (nested_vmx_check_vmentry_hw(vcpu)) { - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - return -1; - } - - if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) - goto vmentry_fail_vmexit; - } - - enter_guest_mode(vcpu); - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) - vcpu->arch.tsc_offset += vmcs12->tsc_offset; - - if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) - goto vmentry_fail_vmexit_guest_mode; - - if (from_vmentry) { - exit_reason = EXIT_REASON_MSR_LOAD_FAIL; - exit_qual = nested_vmx_load_msr(vcpu, - vmcs12->vm_entry_msr_load_addr, - vmcs12->vm_entry_msr_load_count); - if (exit_qual) - goto vmentry_fail_vmexit_guest_mode; - } else { - /* - * The MMU is not initialized to point at the right entities yet and - * "get pages" would need to read data from the guest (i.e. we will - * need to perform gpa to hpa translation). Request a call - * to nested_get_vmcs12_pages before the next VM-entry. The MSRs - * have already been set at vmentry time and should not be reset. - */ - kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); - } - - /* - * If L1 had a pending IRQ/NMI until it executed - * VMLAUNCH/VMRESUME which wasn't delivered because it was - * disallowed (e.g. interrupts disabled), L0 needs to - * evaluate if this pending event should cause an exit from L2 - * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't - * intercept EXTERNAL_INTERRUPT). - * - * Usually this would be handled by the processor noticing an - * IRQ/NMI window request, or checking RVI during evaluation of - * pending virtual interrupts. However, this setting was done - * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 - * to perform pending event evaluation by requesting a KVM_REQ_EVENT. - */ - if (unlikely(evaluate_pending_interrupts)) - kvm_make_request(KVM_REQ_EVENT, vcpu); - - /* - * Note no nested_vmx_succeed or nested_vmx_fail here. At this point - * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet - * returned as far as L1 is concerned. It will only return (and set - * the success flag) when L2 exits (see nested_vmx_vmexit()). - */ - return 0; - - /* - * A failed consistency check that leads to a VMExit during L1's - * VMEnter to L2 is a variation of a normal VMexit, as explained in - * 26.7 "VM-entry failures during or after loading guest state".
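- * (L1 sees a completed VMLAUNCH/VMRESUME whose exit reason has VMX_EXIT_REASONS_FAILED_VMENTRY set, e.g. EXIT_REASON_INVALID_STATE, rather than a VMFail of the instruction itself.)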
- */ -vmentry_fail_vmexit_guest_mode: - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) - vcpu->arch.tsc_offset -= vmcs12->tsc_offset; - leave_guest_mode(vcpu); - -vmentry_fail_vmexit: - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - - if (!from_vmentry) - return 1; - - load_vmcs12_host_state(vcpu, vmcs12); - vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; - vmcs12->exit_qualification = exit_qual; - if (enable_shadow_vmcs || vmx->nested.hv_evmcs) - vmx->nested.need_vmcs12_sync = true; - return 1; -} - -/* - * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 - * for running an L2 nested guest. - */ -static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) -{ - struct vmcs12 *vmcs12; - struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); - int ret; - - if (!nested_vmx_check_permission(vcpu)) - return 1; - - if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) - return 1; - - if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) - return nested_vmx_failInvalid(vcpu); - - vmcs12 = get_vmcs12(vcpu); - - /* - * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact - * that there *is* a valid VMCS pointer, RFLAGS.CF is set - * rather than RFLAGS.ZF, and no error number is stored to the - * VM-instruction error field. - */ - if (vmcs12->hdr.shadow_vmcs) - return nested_vmx_failInvalid(vcpu); - - if (vmx->nested.hv_evmcs) { - copy_enlightened_to_vmcs12(vmx); - /* Enlightened VMCS doesn't have launch state */ - vmcs12->launch_state = !launch; - } else if (enable_shadow_vmcs) { - copy_shadow_to_vmcs12(vmx); - } - - /* - * The nested entry process starts with enforcing various prerequisites - * on vmcs12 as required by the Intel SDM, and acting appropriately when - * they fail: as the SDM explains, some conditions should cause the - * instruction to fail, while others will cause the instruction to seem - * to succeed, but return an EXIT_REASON_INVALID_STATE. - * To speed up the normal (success) code path, we should avoid checking - * for misconfigurations which will anyway be caught by the processor - * when using the merged vmcs02. - */ - if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) - return nested_vmx_failValid(vcpu, - VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); - - if (vmcs12->launch_state == launch) - return nested_vmx_failValid(vcpu, - launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS - : VMXERR_VMRESUME_NONLAUNCHED_VMCS); - - ret = check_vmentry_prereqs(vcpu, vmcs12); - if (ret) - return nested_vmx_failValid(vcpu, ret); - - /* - * We're finally done with prerequisite checking, and can start with - * the nested entry. - */ - vmx->nested.nested_run_pending = 1; - ret = nested_vmx_enter_non_root_mode(vcpu, true); - vmx->nested.nested_run_pending = !ret; - if (ret > 0) - return 1; - else if (ret) - return nested_vmx_failValid(vcpu, - VMXERR_ENTRY_INVALID_CONTROL_FIELD); - - /* Hide L1D cache contents from the nested guest. */ - vmx->vcpu.arch.l1tf_flush_l1d = true; - - /* - * Must happen outside of nested_vmx_enter_non_root_mode() as it will - * also be used as part of restoring nVMX state for - * snapshot restore (migration). - * - * In this flow, it is assumed that vmcs12 cache was - * transferred as part of captured nVMX state and should - * therefore not be read from guest memory (which may not - * exist on the destination host yet).
- */ - nested_cache_shadow_vmcs12(vcpu, vmcs12); - - /* - * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken - * by event injection, halt vcpu. - */ - if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && - !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) { - vmx->nested.nested_run_pending = 0; - return kvm_vcpu_halt(vcpu); - } - return 1; -} - -/* - * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date - * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). - * This function returns the new value we should put in vmcs12.guest_cr0. - * It's not enough to just return the vmcs02 GUEST_CR0. Rather, - * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now - * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 - * didn't trap the bit, because if L1 did, so would L0). - * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have - * been modified by L2, and L1 knows it. So just leave the old value of - * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 - * isn't relevant, because if L0 traps this bit it can set it to anything. - * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have - * changed these bits, and therefore they need to be updated, but L0 - * didn't necessarily allow them to be changed in GUEST_CR0 - and rather - * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. - */ -static inline unsigned long -vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) -{ - return - /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | - /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | - /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | - vcpu->arch.cr0_guest_owned_bits)); -} - -static inline unsigned long -vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) -{ - return - /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | - /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | - /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | - vcpu->arch.cr4_guest_owned_bits)); -} - -static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - u32 idt_vectoring; - unsigned int nr; - - if (vcpu->arch.exception.injected) { - nr = vcpu->arch.exception.nr; - idt_vectoring = nr | VECTORING_INFO_VALID_MASK; - - if (kvm_exception_is_soft(nr)) { - vmcs12->vm_exit_instruction_len = - vcpu->arch.event_exit_inst_len; - idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; - } else - idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; - - if (vcpu->arch.exception.has_error_code) { - idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; - vmcs12->idt_vectoring_error_code = - vcpu->arch.exception.error_code; - } - - vmcs12->idt_vectoring_info_field = idt_vectoring; - } else if (vcpu->arch.nmi_injected) { - vmcs12->idt_vectoring_info_field = - INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; - } else if (vcpu->arch.interrupt.injected) { - nr = vcpu->arch.interrupt.nr; - idt_vectoring = nr | VECTORING_INFO_VALID_MASK; - - if (vcpu->arch.interrupt.soft) { - idt_vectoring |= INTR_TYPE_SOFT_INTR; - vmcs12->vm_entry_instruction_len = - vcpu->arch.event_exit_inst_len; - } else - idt_vectoring |= INTR_TYPE_EXT_INTR; - - vmcs12->idt_vectoring_info_field = idt_vectoring; - } -} - -static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long exit_qual; - bool
block_nested_events = - vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); - - if (vcpu->arch.exception.pending && - nested_vmx_check_exception(vcpu, &exit_qual)) { - if (block_nested_events) - return -EBUSY; - nested_vmx_inject_exception_vmexit(vcpu, exit_qual); - return 0; - } - - if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && - vmx->nested.preemption_timer_expired) { - if (block_nested_events) - return -EBUSY; - nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); - return 0; - } - - if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { - if (block_nested_events) - return -EBUSY; - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, - NMI_VECTOR | INTR_TYPE_NMI_INTR | - INTR_INFO_VALID_MASK, 0); - /* - * The NMI-triggered VM exit counts as injection: - * clear this one and block further NMIs. - */ - vcpu->arch.nmi_pending = 0; - vmx_set_nmi_mask(vcpu, true); - return 0; - } - - if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && - nested_exit_on_intr(vcpu)) { - if (block_nested_events) - return -EBUSY; - nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); - return 0; - } - - vmx_complete_nested_posted_interrupt(vcpu); - return 0; -} - -static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) -{ - to_vmx(vcpu)->req_immediate_exit = true; -} - -static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) -{ - ktime_t remaining = - hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); - u64 value; - - if (ktime_to_ns(remaining) <= 0) - return 0; - - value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; - do_div(value, 1000000); - return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; -} - -/* - * Update the guest state fields of vmcs12 to reflect changes that - * occurred while L2 was running. (The "IA-32e mode guest" bit of the - * VM-entry controls is also updated, since this is really a guest - * state bit.) 
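- * (The preemption timer value saved below is recomputed from the remaining hrtimer time: ns * virtual_tsc_khz / 10^6 gives TSC cycles, which are then shifted right by VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE.)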
- */ -static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) -{ - vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); - vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); - - vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); - vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); - vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); - - vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); - vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); - vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); - vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); - vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); - vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); - vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); - vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); - vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); - vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); - vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); - vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); - vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); - vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); - vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); - vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); - vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); - vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); - vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); - vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); - vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); - vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); - vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); - vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); - vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); - vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); - vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); - vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); - vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); - vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); - vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); - vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); - vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); - vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); - vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); - vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); - - vmcs12->guest_interruptibility_info = - vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); - vmcs12->guest_pending_dbg_exceptions = - vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); - if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) - vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; - else - vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; - - if (nested_cpu_has_preemption_timer(vmcs12)) { - if (vmcs12->vm_exit_controls & - VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) - vmcs12->vmx_preemption_timer_value = - vmx_get_preemption_timer_value(vcpu); - hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); - } - - /* - * In some cases (usually, nested EPT), L2 is allowed to change its - * own CR3 without exiting. If it has changed it, we must keep it. - * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined - * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. - * - * Additionally, restore L2's PDPTR to vmcs12. 
- */ - if (enable_ept) { - vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); - vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); - vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); - vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); - vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); - } - - vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); - - if (nested_cpu_has_vid(vmcs12)) - vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); - - vmcs12->vm_entry_controls = - (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | - (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); - - if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { - kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); - vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - } - - /* TODO: These cannot have changed unless we have MSR bitmaps and - * the relevant bit asks not to trap the change */ - if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) - vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); - if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) - vmcs12->guest_ia32_efer = vcpu->arch.efer; - vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); - vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); - vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); - if (kvm_mpx_supported()) - vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); -} - -/* - * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits - * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), - * and this function updates it to reflect the changes to the guest state while - * L2 was running (and perhaps made some exits which were handled directly by L0 - * without going back to L1), and to reflect the exit reason. - * Note that we do not have to copy all VMCS fields here, just those that - * could have been changed by the L2 guest or the exit - i.e., the guest-state and - * exit-information fields only. Other fields are modified by L1 with VMWRITE, - * which already writes to vmcs12 directly. - */ -static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - u32 exit_reason, u32 exit_intr_info, - unsigned long exit_qualification) -{ - /* update guest state fields: */ - sync_vmcs12(vcpu, vmcs12); - - /* update exit information fields: */ - - vmcs12->vm_exit_reason = exit_reason; - vmcs12->exit_qualification = exit_qualification; - vmcs12->vm_exit_intr_info = exit_intr_info; - - vmcs12->idt_vectoring_info_field = 0; - vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - - if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { - vmcs12->launch_state = 1; - - /* vm_entry_intr_info_field is cleared on exit. Emulate this - * instead of reading the real value. */ - vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; - - /* - * Transfer the event that L0 or L1 may have wanted to inject into - * L2 to IDT_VECTORING_INFO_FIELD. - */ - vmcs12_save_pending_event(vcpu, vmcs12); - } - - /* - * Drop what we picked up for L2 via vmx_complete_interrupts. It is - * preserved above and would only end up incorrectly in L1. - */ - vcpu->arch.nmi_injected = false; - kvm_clear_exception_queue(vcpu); - kvm_clear_interrupt_queue(vcpu); -} - -/* - * A part of what we need to do when the nested L2 guest exits and we want to - * run its L1 parent, is to reset L1's guest state to the host state specified - * in vmcs12.
- * This function is to be called not only on normal nested exit, but also on - * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry - * Failures During or After Loading Guest State"). - * This function should be called when the active VMCS is L1's (vmcs01). - */ -static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - struct kvm_segment seg; - u32 entry_failure_code; - - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) - vcpu->arch.efer = vmcs12->host_ia32_efer; - else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) - vcpu->arch.efer |= (EFER_LMA | EFER_LME); - else - vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); - vmx_set_efer(vcpu, vcpu->arch.efer); - - kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); - kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); - vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); - vmx_set_interrupt_shadow(vcpu, 0); - - /* - * Note that calling vmx_set_cr0 is important, even if cr0 hasn't - * actually changed, because vmx_set_cr0 refers to efer set above. - * - * CR0_GUEST_HOST_MASK is already set in the original vmcs01 - * (KVM doesn't change it); - */ - vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; - vmx_set_cr0(vcpu, vmcs12->host_cr0); - - /* Same as above - no reason to call set_cr4_guest_host_mask(). */ - vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); - vmx_set_cr4(vcpu, vmcs12->host_cr4); - - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. - */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; - - /* - * If vmcs01 doesn't use VPID, CPU flushes TLB on every - * VMEntry/VMExit. Thus, no need to flush TLB. - * - * If vmcs12 doesn't use VPID, L1 expects TLB to be - * flushed on every VMEntry/VMExit. - * - * Otherwise, we can preserve TLB entries as long as we are - * able to tag L1 TLB entries differently than L2 TLB entries. - * - * If vmcs12 uses EPT, we need to execute this flush on EPTP01 - * and therefore we request the TLB flush to happen only after VMCS EPTP - * has been set by KVM_REQ_LOAD_CR3. - */ - if (enable_vpid && - (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); - vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); - vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); - vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); - vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); - - /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ - if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) - vmcs_write64(GUEST_BNDCFGS, 0); - - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { - vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); - vcpu->arch.pat = vmcs12->host_ia32_pat; - } - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) - vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, - vmcs12->host_ia32_perf_global_ctrl); - - /* Set L1 segment info according to Intel SDM - 27.5.2 Loading Host Segment and Descriptor-Table Registers */ - seg = (struct kvm_segment) { - .base = 0, - .limit = 0xFFFFFFFF, - .selector = vmcs12->host_cs_selector, - .type = 11, - .present = 1, - .s = 1, - .g = 1 - }; - if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) - seg.l = 1; - else - seg.db = 1; - vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); - seg = (struct kvm_segment) { - .base = 0, - .limit = 0xFFFFFFFF, - .type = 3, - .present = 1, - .s = 1, - .db = 1, - .g = 1 - }; - seg.selector = vmcs12->host_ds_selector; - vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); - seg.selector = vmcs12->host_es_selector; - vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); - seg.selector = vmcs12->host_ss_selector; - vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); - seg.selector = vmcs12->host_fs_selector; - seg.base = vmcs12->host_fs_base; - vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); - seg.selector = vmcs12->host_gs_selector; - seg.base = vmcs12->host_gs_base; - vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); - seg = (struct kvm_segment) { - .base = vmcs12->host_tr_base, - .limit = 0x67, - .selector = vmcs12->host_tr_selector, - .type = 11, - .present = 1 - }; - vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); - - kvm_set_dr(vcpu, 7, 0x400); - vmcs_write64(GUEST_IA32_DEBUGCTL, 0); - - if (cpu_has_vmx_msr_bitmap()) - vmx_update_msr_bitmap(vcpu); - - if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, - vmcs12->vm_exit_msr_load_count)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); -} - -static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) -{ - struct shared_msr_entry *efer_msr; - unsigned int i; - - if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) - return vmcs_read64(GUEST_IA32_EFER); - - if (cpu_has_load_ia32_efer) - return host_efer; - - for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { - if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) - return vmx->msr_autoload.guest.val[i].value; - } - - efer_msr = find_msr_entry(vmx, MSR_EFER); - if (efer_msr) - return efer_msr->data; - - return host_efer; -} - -static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmx_msr_entry g, h; - struct msr_data msr; - gpa_t gpa; - u32 i, j; - - vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); - - if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { - /* - * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set - * as vmcs01.GUEST_DR7 contains a userspace defined value - * and vcpu->arch.dr7 is not squirreled away before the - * nested VMENTER (not worth adding a variable in nested_vmx). - */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) - kvm_set_dr(vcpu, 7, DR7_FIXED_1); - else - WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); - } - - /* - * Note that calling vmx_set_{efer,cr0,cr4} is important as they - * handle a variety of side effects to KVM's software model. 
- */ - vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); - - vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; - vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); - - vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); - vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); - - nested_ept_uninit_mmu_context(vcpu); - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); - - /* - * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs - * from vmcs01 (if necessary). The PDPTRs are not loaded on - * VMFail, like everything else we just need to ensure our - * software model is up-to-date. - */ - ept_save_pdptrs(vcpu); - - kvm_mmu_reset_context(vcpu); - - if (cpu_has_vmx_msr_bitmap()) - vmx_update_msr_bitmap(vcpu); - - /* - * This nasty bit of open coding is a compromise between blindly - * loading L1's MSRs using the exit load lists (incorrect emulation - * of VMFail), leaving the nested VM's MSRs in the software model - * (incorrect behavior) and snapshotting the modified MSRs (too - * expensive since the lists are unbound by hardware). For each - * MSR that was (prematurely) loaded from the nested VMEntry load - * list, reload it from the exit load list if it exists and differs - * from the guest value. The intent is to stuff host state as - * silently as possible, not to fully process the exit load list. - */ - msr.host_initiated = false; - for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { - gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); - if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { - pr_debug_ratelimited( - "%s read MSR index failed (%u, 0x%08llx)\n", - __func__, i, gpa); - goto vmabort; - } - - for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { - gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); - if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { - pr_debug_ratelimited( - "%s read MSR failed (%u, 0x%08llx)\n", - __func__, j, gpa); - goto vmabort; - } - if (h.index != g.index) - continue; - if (h.value == g.value) - break; - - if (nested_vmx_load_msr_check(vcpu, &h)) { - pr_debug_ratelimited( - "%s check failed (%u, 0x%x, 0x%x)\n", - __func__, j, h.index, h.reserved); - goto vmabort; - } - - msr.index = h.index; - msr.data = h.value; - if (kvm_set_msr(vcpu, &msr)) { - pr_debug_ratelimited( - "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", - __func__, j, h.index, h.value); - goto vmabort; - } - } - } - - return; - -vmabort: - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); -} - -/* - * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 - * and modify vmcs12 to make it see what it would expect to see there if - * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) - */ -static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, - u32 exit_intr_info, - unsigned long exit_qualification) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - /* trying to cancel vmlaunch/vmresume is a bug */ - WARN_ON_ONCE(vmx->nested.nested_run_pending); - - leave_guest_mode(vcpu); - - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) - vcpu->arch.tsc_offset -= vmcs12->tsc_offset; - - if (likely(!vmx->fail)) { - if (exit_reason == -1) - sync_vmcs12(vcpu, vmcs12); - else - prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, - exit_qualification); - - /* - * Must happen outside of sync_vmcs12() as it will - * also be used to capture vmcs12 cache as part of - * capturing nVMX state for snapshot (migration). 
- * - * Otherwise, this flush will dirty guest memory at a - * point it is already assumed by user-space to be - * immutable. - */ - nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); - - if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, - vmcs12->vm_exit_msr_store_count)) - nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); - } else { - /* - * The only expected VM-instruction error is "VM entry with - * invalid control field(s)." Anything else indicates a - * problem with L0. And we should never get here with a - * VMFail of any type if early consistency checks are enabled. - */ - WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != - VMXERR_ENTRY_INVALID_CONTROL_FIELD); - WARN_ON_ONCE(nested_early_check); - } - - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - - /* Update any VMCS fields that might have changed while L2 ran */ - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); - vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); - - if (kvm_has_tsc_control) - decache_tsc_multiplier(vmx); - - if (vmx->nested.change_vmcs01_virtual_apic_mode) { - vmx->nested.change_vmcs01_virtual_apic_mode = false; - vmx_set_virtual_apic_mode(vcpu); - } else if (!nested_cpu_has_ept(vmcs12) && - nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb(vcpu, true); - } - - /* This is needed for same reason as it was needed in prepare_vmcs02 */ - vmx->host_rsp = 0; - - /* Unpin physical memory we referred to in vmcs02 */ - if (vmx->nested.apic_access_page) { - kvm_release_page_dirty(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = NULL; - } - if (vmx->nested.virtual_apic_page) { - kvm_release_page_dirty(vmx->nested.virtual_apic_page); - vmx->nested.virtual_apic_page = NULL; - } - if (vmx->nested.pi_desc_page) { - kunmap(vmx->nested.pi_desc_page); - kvm_release_page_dirty(vmx->nested.pi_desc_page); - vmx->nested.pi_desc_page = NULL; - vmx->nested.pi_desc = NULL; - } - - /* - * We are now running in L2, mmu_notifier will force to reload the - * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. - */ - kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - - if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) - vmx->nested.need_vmcs12_sync = true; - - /* in case we halted in L2 */ - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - - if (likely(!vmx->fail)) { - /* - * TODO: SDM says that with acknowledge interrupt on - * exit, bit 31 of the VM-exit interrupt information - * (valid interrupt) is always set to 1 on - * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't - * need kvm_cpu_has_interrupt(). See the commit - * message for details. 
- */ - if (nested_exit_intr_ack_set(vcpu) && - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - kvm_cpu_has_interrupt(vcpu)) { - int irq = kvm_cpu_get_interrupt(vcpu); - WARN_ON(irq < 0); - vmcs12->vm_exit_intr_info = irq | - INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; - } - - if (exit_reason != -1) - trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, - vmcs12->exit_qualification, - vmcs12->idt_vectoring_info_field, - vmcs12->vm_exit_intr_info, - vmcs12->vm_exit_intr_error_code, - KVM_ISA_VMX); - - load_vmcs12_host_state(vcpu, vmcs12); - - return; - } - - /* - * After an early L2 VM-entry failure, we're now back - * in L1 which thinks it just finished a VMLAUNCH or - * VMRESUME instruction, so we need to set the failure - * flag and the VM-instruction error field of the VMCS - * accordingly, and skip the emulated instruction. - */ - (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - - /* - * Restore L1's host state to KVM's software model. We're here - * because a consistency check was caught by hardware, which - * means some amount of guest state has been propagated to KVM's - * model and needs to be unwound to the host's state. - */ - nested_vmx_restore_host_state(vcpu); - - vmx->fail = 0; -} - -/* - * Forcibly leave nested mode in order to be able to reset the VCPU later on. - */ -static void vmx_leave_nested(struct kvm_vcpu *vcpu) -{ - if (is_guest_mode(vcpu)) { - to_vmx(vcpu)->nested.nested_run_pending = 0; - nested_vmx_vmexit(vcpu, -1, 0, 0); - } - free_nested(vcpu); -} - -static int vmx_check_intercept(struct kvm_vcpu *vcpu, - struct x86_instruction_info *info, - enum x86_intercept_stage stage) -{ - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; - - /* - * RDPID causes #UD if disabled through secondary execution controls. - * Because it is marked as EmulateOnUD, we need to intercept it here. - */ - if (info->intercept == x86_intercept_rdtscp && - !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { - ctxt->exception.vector = UD_VECTOR; - ctxt->exception.error_code_valid = false; - return X86EMUL_PROPAGATE_FAULT; - } - - /* TODO: check more intercepts... 
*/ - return X86EMUL_CONTINUE; -} - -#ifdef CONFIG_X86_64 -/* (a << shift) / divisor; return 1 on overflow, otherwise 0 */ -static inline int u64_shl_div_u64(u64 a, unsigned int shift, - u64 divisor, u64 *result) -{ - u64 low = a << shift, high = a >> (64 - shift); - - /* To avoid overflow on divq */ - if (high >= divisor) - return 1; - - /* Low holds the result, high holds the remainder, which is discarded */ - asm("divq %2\n\t" : "=a" (low), "=d" (high) : - "rm" (divisor), "0" (low), "1" (high)); - *result = low; - - return 0; -} - -static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) -{ - struct vcpu_vmx *vmx; - u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; - - if (kvm_mwait_in_guest(vcpu->kvm)) - return -EOPNOTSUPP; - - vmx = to_vmx(vcpu); - tscl = rdtsc(); - guest_tscl = kvm_read_l1_tsc(vcpu, tscl); - delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; - lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns); - - if (delta_tsc > lapic_timer_advance_cycles) - delta_tsc -= lapic_timer_advance_cycles; - else - delta_tsc = 0; - - /* Convert to host delta tsc if tsc scaling is enabled */ - if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && - u64_shl_div_u64(delta_tsc, - kvm_tsc_scaling_ratio_frac_bits, - vcpu->arch.tsc_scaling_ratio, - &delta_tsc)) - return -ERANGE; - - /* - * If the delta tsc can't fit in 32 bits after the multiplier shift, - * we can't use the preemption timer. - * It's possible that it fits on later vmentries, but checking - * on every vmentry is costly so we just use an hrtimer. - */ - if (delta_tsc >> (cpu_preemption_timer_multi + 32)) - return -ERANGE; - - vmx->hv_deadline_tsc = tscl + delta_tsc; - return delta_tsc == 0; -} - -static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) -{ - to_vmx(vcpu)->hv_deadline_tsc = -1; -} -#endif - -static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) -{ - if (!kvm_pause_in_guest(vcpu->kvm)) - shrink_ple_window(vcpu); -} - -static void vmx_slot_enable_log_dirty(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - kvm_mmu_slot_leaf_clear_dirty(kvm, slot); - kvm_mmu_slot_largepage_remove_write_access(kvm, slot); -} - -static void vmx_slot_disable_log_dirty(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - kvm_mmu_slot_set_dirty(kvm, slot); -} - -static void vmx_flush_log_dirty(struct kvm *kvm) -{ - kvm_flush_pml_buffers(kvm); -} - -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) -{ - struct vmcs12 *vmcs12; - struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa; - struct page *page = NULL; - u64 *pml_address; - - if (is_guest_mode(vcpu)) { - WARN_ON_ONCE(vmx->nested.pml_full); - - /* - * Check if PML is enabled for the nested guest. - * Whether eptp bit 6 is set is already checked - * as part of A/D emulation.
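- * (EPTP bit 6 is the accessed/dirty enable bit; PML logging is driven by dirty-flag updates, so the A/D emulation path is where that check naturally lives.)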
- */ - vmcs12 = get_vmcs12(vcpu); - if (!nested_cpu_has_pml(vmcs12)) - return 0; - - if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { - vmx->nested.pml_full = true; - return 1; - } - - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; - - page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); - if (is_error_page(page)) - return 0; - - pml_address = kmap(page); - pml_address[vmcs12->guest_pml_index--] = gpa; - kunmap(page); - kvm_release_page_clean(page); - } - - return 0; -} - -static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *memslot, - gfn_t offset, unsigned long mask) -{ - kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); -} - -static void __pi_post_block(struct kvm_vcpu *vcpu) -{ - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - struct pi_desc old, new; - unsigned int dest; - - do { - old.control = new.control = pi_desc->control; - WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, - "Wakeup handler not enabled while the VCPU is blocked\n"); - - dest = cpu_physical_id(vcpu->cpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - - /* set 'NV' to 'notification vector' */ - new.nv = POSTED_INTR_VECTOR; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); - - if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - vcpu->pre_pcpu = -1; - } -} - -/* - * This routine does the following things for a vCPU which is going - * to be blocked if VT-d PI is enabled. - * - Store the vCPU to the wakeup list, so when interrupts happen - * we can find the right vCPU to wake up. - * - Change the Posted-interrupt descriptor as below: - * 'NDST' <-- vcpu->pre_pcpu - * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR - * - If 'ON' is set during this process, which means at least one - * interrupt is posted for this vCPU, we cannot block it; in - * this case, return 1; otherwise, return 0. - * - */ -static int pi_pre_block(struct kvm_vcpu *vcpu) -{ - unsigned int dest; - struct pi_desc old, new; - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) - return 0; - - WARN_ON(irqs_disabled()); - local_irq_disable(); - if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { - vcpu->pre_pcpu = vcpu->cpu; - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, - vcpu->pre_pcpu)); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - } - - do { - old.control = new.control = pi_desc->control; - - WARN((pi_desc->sn == 1), - "Warning: SN field of posted-interrupts " - "is set before blocking\n"); - - /* - * Since the vCPU can be preempted during this process, - * vcpu->cpu could be different from pre_pcpu, so we - * need to set pre_pcpu as the destination of the wakeup - * notification event; then we can find the right vCPU - * to wake up in the wakeup handler if interrupts happen - * when the vCPU is in a blocked state. - */ - dest = cpu_physical_id(vcpu->pre_pcpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - - /* set 'NV' to 'wakeup vector' */ - new.nv = POSTED_INTR_WAKEUP_VECTOR; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); - - /* We should not block the vCPU if an interrupt is posted for it.
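- * (pi_test_on() reads the ON bit of the descriptor; if it got set while we were switching to the wakeup vector, __pi_post_block() undoes the setup, pre_pcpu goes back to -1 and this function returns 1.)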
*/ - if (pi_test_on(pi_desc) == 1) - __pi_post_block(vcpu); - - local_irq_enable(); - return (vcpu->pre_pcpu == -1); -} - -static int vmx_pre_block(struct kvm_vcpu *vcpu) -{ - if (pi_pre_block(vcpu)) - return 1; - - if (kvm_lapic_hv_timer_in_use(vcpu)) - kvm_lapic_switch_to_sw_timer(vcpu); - - return 0; -} - -static void pi_post_block(struct kvm_vcpu *vcpu) -{ - if (vcpu->pre_pcpu == -1) - return; - - WARN_ON(irqs_disabled()); - local_irq_disable(); - __pi_post_block(vcpu); - local_irq_enable(); -} - -static void vmx_post_block(struct kvm_vcpu *vcpu) -{ - if (kvm_x86_ops->set_hv_timer) - kvm_lapic_switch_to_hv_timer(vcpu); - - pi_post_block(vcpu); -} - -/* - * vmx_update_pi_irte - set IRTE for Posted-Interrupts - * - * @kvm: kvm - * @host_irq: host irq of the interrupt - * @guest_irq: gsi of the interrupt - * @set: set or unset PI - * returns 0 on success, < 0 on failure - */ -static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) -{ - struct kvm_kernel_irq_routing_entry *e; - struct kvm_irq_routing_table *irq_rt; - struct kvm_lapic_irq irq; - struct kvm_vcpu *vcpu; - struct vcpu_data vcpu_info; - int idx, ret = 0; - - if (!kvm_arch_has_assigned_device(kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(kvm->vcpus[0])) - return 0; - - idx = srcu_read_lock(&kvm->irq_srcu); - irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - if (guest_irq >= irq_rt->nr_rt_entries || - hlist_empty(&irq_rt->map[guest_irq])) { - pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", - guest_irq, irq_rt->nr_rt_entries); - goto out; - } - - hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { - if (e->type != KVM_IRQ_ROUTING_MSI) - continue; - /* - * VT-d PI cannot support posting multicast/broadcast - * interrupts to a vCPU, so we still use interrupt remapping - * for this kind of interrupt. - * - * For lowest-priority interrupts, we only support - * those with a single CPU as the destination, e.g. the user - * configures the interrupts via /proc/irq or uses - * irqbalance to make the interrupts single-CPU. - * - * We will support full lowest-priority interrupts later. - */ - - kvm_set_msi_irq(kvm, e, &irq); - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { - /* - * Make sure the IRTE is in remapped mode if - * we don't handle it in posted mode.
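- * (Passing a NULL vcpu_info to irq_set_vcpu_affinity() below drops the vCPU association, which the IOMMU driver turns back into a remapped-mode IRTE.)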
- */ - ret = irq_set_vcpu_affinity(host_irq, NULL); - if (ret < 0) { - printk(KERN_INFO - "failed to back to remapped mode, irq: %u\n", - host_irq); - goto out; - } - - continue; - } - - vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); - vcpu_info.vector = irq.vector; - - trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, - vcpu_info.vector, vcpu_info.pi_desc_addr, set); - - if (set) - ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); - else - ret = irq_set_vcpu_affinity(host_irq, NULL); - - if (ret < 0) { - printk(KERN_INFO "%s: failed to update PI IRTE\n", - __func__); - goto out; - } - } - - ret = 0; -out: - srcu_read_unlock(&kvm->irq_srcu, idx); - return ret; -} - -static void vmx_setup_mce(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.mcg_cap & MCG_LMCE_P) - to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= - FEATURE_CONTROL_LMCE; - else - to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= - ~FEATURE_CONTROL_LMCE; -} - -static int vmx_smi_allowed(struct kvm_vcpu *vcpu) -{ - /* we need a nested vmexit to enter SMM, postpone if run is pending */ - if (to_vmx(vcpu)->nested.nested_run_pending) - return 0; - return 1; -} - -static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - vmx->nested.smm.guest_mode = is_guest_mode(vcpu); - if (vmx->nested.smm.guest_mode) - nested_vmx_vmexit(vcpu, -1, 0, 0); - - vmx->nested.smm.vmxon = vmx->nested.vmxon; - vmx->nested.vmxon = false; - vmx_clear_hlt(vcpu); - return 0; -} - -static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int ret; - - if (vmx->nested.smm.vmxon) { - vmx->nested.vmxon = true; - vmx->nested.smm.vmxon = false; - } - - if (vmx->nested.smm.guest_mode) { - vcpu->arch.hflags &= ~HF_SMM_MASK; - ret = nested_vmx_enter_non_root_mode(vcpu, false); - vcpu->arch.hflags |= HF_SMM_MASK; - if (ret) - return ret; - - vmx->nested.smm.guest_mode = false; - } - return 0; -} - -static int enable_smi_window(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * In case we do two consecutive get/set_nested_state()s while L2 was - * running hv_evmcs may end up not being mapped (we map it from - * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always - * have vmcs12 if it is true. 
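On the userspace side, this sizing logic pairs with the usual KVM_GET_NESTED_STATE call pattern: pass a buffer, tell KVM its size, and expect E2BIG when the vmcs12 pages do not fit. A minimal sketch, assuming the 4 KiB VMCS12_SIZE used by this code (header plus room for a vmcs12 and a shadow vmcs12):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* fetch nested state from a vCPU fd; returns NULL on error */
static struct kvm_nested_state *fetch_nested_state(int vcpu_fd)
{
        size_t sz = sizeof(struct kvm_nested_state) + 2 * 4096;
        struct kvm_nested_state *st = calloc(1, sz);

        if (!st)
                return NULL;
        st->size = sz;          /* tells KVM how big the buffer is */
        if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, st) < 0) {
                free(st);       /* E2BIG here would mean: grow and retry */
                return NULL;
        }
        return st;
}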
- */ - return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull || - vmx->nested.hv_evmcs; -} - -static int vmx_get_nested_state(struct kvm_vcpu *vcpu, - struct kvm_nested_state __user *user_kvm_nested_state, - u32 user_data_size) -{ - struct vcpu_vmx *vmx; - struct vmcs12 *vmcs12; - struct kvm_nested_state kvm_state = { - .flags = 0, - .format = 0, - .size = sizeof(kvm_state), - .vmx.vmxon_pa = -1ull, - .vmx.vmcs_pa = -1ull, - }; - - if (!vcpu) - return kvm_state.size + 2 * VMCS12_SIZE; - - vmx = to_vmx(vcpu); - vmcs12 = get_vmcs12(vcpu); - - if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) - kvm_state.flags |= KVM_STATE_NESTED_EVMCS; - - if (nested_vmx_allowed(vcpu) && - (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { - kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; - kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; - - if (vmx_has_valid_vmcs12(vcpu)) { - kvm_state.size += VMCS12_SIZE; - - if (is_guest_mode(vcpu) && - nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) - kvm_state.size += VMCS12_SIZE; - } - - if (vmx->nested.smm.vmxon) - kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; - - if (vmx->nested.smm.guest_mode) - kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; - - if (is_guest_mode(vcpu)) { - kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; - - if (vmx->nested.nested_run_pending) - kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; - } - } - - if (user_data_size < kvm_state.size) - goto out; - - if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) - return -EFAULT; - - if (!vmx_has_valid_vmcs12(vcpu)) - goto out; - - /* - * When running L2, the authoritative vmcs12 state is in the - * vmcs02. When running L1, the authoritative vmcs12 state is - * in the shadow or enlightened vmcs linked to vmcs01, unless - * need_vmcs12_sync is set, in which case, the authoritative - * vmcs12 state is in the vmcs12 already. - */ - if (is_guest_mode(vcpu)) { - sync_vmcs12(vcpu, vmcs12); - } else if (!vmx->nested.need_vmcs12_sync) { - if (vmx->nested.hv_evmcs) - copy_enlightened_to_vmcs12(vmx); - else if (enable_shadow_vmcs) - copy_shadow_to_vmcs12(vmx); - } - - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) - return -EFAULT; - - if (nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) { - if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, - get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) - return -EFAULT; - } - -out: - return kvm_state.size; -} - -static int vmx_set_nested_state(struct kvm_vcpu *vcpu, - struct kvm_nested_state __user *user_kvm_nested_state, - struct kvm_nested_state *kvm_state) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmcs12 *vmcs12; - u32 exit_qual; - int ret; - - if (kvm_state->format != 0) - return -EINVAL; - - if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) - nested_enable_evmcs(vcpu, NULL); - - if (!nested_vmx_allowed(vcpu)) - return kvm_state->vmx.vmxon_pa == -1ull ? 
0 : -EINVAL; - - if (kvm_state->vmx.vmxon_pa == -1ull) { - if (kvm_state->vmx.smm.flags) - return -EINVAL; - - if (kvm_state->vmx.vmcs_pa != -1ull) - return -EINVAL; - - vmx_leave_nested(vcpu); - return 0; - } - - if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) - return -EINVAL; - - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && - (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) - return -EINVAL; - - if (kvm_state->vmx.smm.flags & - ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) - return -EINVAL; - - /* - * SMM temporarily disables VMX, so we cannot be in guest mode, - * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags - * must be zero. - */ - if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) - return -EINVAL; - - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && - !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) - return -EINVAL; - - vmx_leave_nested(vcpu); - if (kvm_state->vmx.vmxon_pa == -1ull) - return 0; - - vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa; - ret = enter_vmx_operation(vcpu); - if (ret) - return ret; - - /* Empty 'VMXON' state is permitted */ - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) - return 0; - - if (kvm_state->vmx.vmcs_pa != -1ull) { - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; - - set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); - } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { - /* - * Sync eVMCS upon entry as we may not have - * HV_X64_MSR_VP_ASSIST_PAGE set up yet. - */ - vmx->nested.need_vmcs12_sync = true; - } else { - return -EINVAL; - } - - if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { - vmx->nested.smm.vmxon = true; - vmx->nested.vmxon = false; - - if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) - vmx->nested.smm.guest_mode = true; - } - - vmcs12 = get_vmcs12(vcpu); - if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12))) - return -EFAULT; - - if (vmcs12->hdr.revision_id != VMCS12_REVISION) - return -EINVAL; - - if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) - return 0; - - vmx->nested.nested_run_pending = - !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); - - if (nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) { - struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); - if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) - return -EINVAL; - - if (copy_from_user(shadow_vmcs12, - user_kvm_nested_state->data + VMCS12_SIZE, - sizeof(*vmcs12))) - return -EFAULT; - - if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || - !shadow_vmcs12->hdr.shadow_vmcs) - return -EINVAL; - } - - if (check_vmentry_prereqs(vcpu, vmcs12) || - check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) - return -EINVAL; - - vmx->nested.dirty_vmcs12 = true; - ret = nested_vmx_enter_non_root_mode(vcpu, false); - if (ret) - return -EINVAL; - - return 0; -} - -static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { - .cpu_has_kvm_support = cpu_has_kvm_support, - .disabled_by_bios = vmx_disabled_by_bios, - .hardware_setup = hardware_setup, - .hardware_unsetup = hardware_unsetup, - .check_processor_compatibility = vmx_check_processor_compat, - .hardware_enable = hardware_enable, - .hardware_disable = hardware_disable, - .cpu_has_accelerated_tpr = report_flexpriority, - .has_emulated_msr = vmx_has_emulated_msr, - - .vm_init = vmx_vm_init, - .vm_alloc = vmx_vm_alloc, - .vm_free = 
vmx_vm_free, - - .vcpu_create = vmx_create_vcpu, - .vcpu_free = vmx_free_vcpu, - .vcpu_reset = vmx_vcpu_reset, - - .prepare_guest_switch = vmx_prepare_switch_to_guest, - .vcpu_load = vmx_vcpu_load, - .vcpu_put = vmx_vcpu_put, - - .update_bp_intercept = update_exception_bitmap, - .get_msr_feature = vmx_get_msr_feature, - .get_msr = vmx_get_msr, - .set_msr = vmx_set_msr, - .get_segment_base = vmx_get_segment_base, - .get_segment = vmx_get_segment, - .set_segment = vmx_set_segment, - .get_cpl = vmx_get_cpl, - .get_cs_db_l_bits = vmx_get_cs_db_l_bits, - .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, - .decache_cr3 = vmx_decache_cr3, - .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, - .set_cr0 = vmx_set_cr0, - .set_cr3 = vmx_set_cr3, - .set_cr4 = vmx_set_cr4, - .set_efer = vmx_set_efer, - .get_idt = vmx_get_idt, - .set_idt = vmx_set_idt, - .get_gdt = vmx_get_gdt, - .set_gdt = vmx_set_gdt, - .get_dr6 = vmx_get_dr6, - .set_dr6 = vmx_set_dr6, - .set_dr7 = vmx_set_dr7, - .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, - .cache_reg = vmx_cache_reg, - .get_rflags = vmx_get_rflags, - .set_rflags = vmx_set_rflags, - - .tlb_flush = vmx_flush_tlb, - .tlb_flush_gva = vmx_flush_tlb_gva, - - .run = vmx_vcpu_run, - .handle_exit = vmx_handle_exit, - .skip_emulated_instruction = skip_emulated_instruction, - .set_interrupt_shadow = vmx_set_interrupt_shadow, - .get_interrupt_shadow = vmx_get_interrupt_shadow, - .patch_hypercall = vmx_patch_hypercall, - .set_irq = vmx_inject_irq, - .set_nmi = vmx_inject_nmi, - .queue_exception = vmx_queue_exception, - .cancel_injection = vmx_cancel_injection, - .interrupt_allowed = vmx_interrupt_allowed, - .nmi_allowed = vmx_nmi_allowed, - .get_nmi_mask = vmx_get_nmi_mask, - .set_nmi_mask = vmx_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, - .set_virtual_apic_mode = vmx_set_virtual_apic_mode, - .set_apic_access_page_addr = vmx_set_apic_access_page_addr, - .get_enable_apicv = vmx_get_enable_apicv, - .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, - .load_eoi_exitmap = vmx_load_eoi_exitmap, - .apicv_post_state_restore = vmx_apicv_post_state_restore, - .hwapic_irr_update = vmx_hwapic_irr_update, - .hwapic_isr_update = vmx_hwapic_isr_update, - .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, - .sync_pir_to_irr = vmx_sync_pir_to_irr, - .deliver_posted_interrupt = vmx_deliver_posted_interrupt, - - .set_tss_addr = vmx_set_tss_addr, - .set_identity_map_addr = vmx_set_identity_map_addr, - .get_tdp_level = get_ept_level, - .get_mt_mask = vmx_get_mt_mask, - - .get_exit_info = vmx_get_exit_info, - - .get_lpage_level = vmx_get_lpage_level, - - .cpuid_update = vmx_cpuid_update, - - .rdtscp_supported = vmx_rdtscp_supported, - .invpcid_supported = vmx_invpcid_supported, - - .set_supported_cpuid = vmx_set_supported_cpuid, - - .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, - - .read_l1_tsc_offset = vmx_read_l1_tsc_offset, - .write_l1_tsc_offset = vmx_write_l1_tsc_offset, - - .set_tdp_cr3 = vmx_set_cr3, - - .check_intercept = vmx_check_intercept, - .handle_external_intr = vmx_handle_external_intr, - .mpx_supported = vmx_mpx_supported, - .xsaves_supported = vmx_xsaves_supported, - .umip_emulated = vmx_umip_emulated, - - .check_nested_events = vmx_check_nested_events, - .request_immediate_exit = vmx_request_immediate_exit, - - .sched_in = vmx_sched_in, - - .slot_enable_log_dirty = vmx_slot_enable_log_dirty, - .slot_disable_log_dirty = vmx_slot_disable_log_dirty, - 
.flush_log_dirty = vmx_flush_log_dirty, - .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, - .write_log_dirty = vmx_write_pml_buffer, - - .pre_block = vmx_pre_block, - .post_block = vmx_post_block, - - .pmu_ops = &intel_pmu_ops, - - .update_pi_irte = vmx_update_pi_irte, - -#ifdef CONFIG_X86_64 - .set_hv_timer = vmx_set_hv_timer, - .cancel_hv_timer = vmx_cancel_hv_timer, -#endif - - .setup_mce = vmx_setup_mce, - - .get_nested_state = vmx_get_nested_state, - .set_nested_state = vmx_set_nested_state, - .get_vmcs12_pages = nested_get_vmcs12_pages, - - .smi_allowed = vmx_smi_allowed, - .pre_enter_smm = vmx_pre_enter_smm, - .pre_leave_smm = vmx_pre_leave_smm, - .enable_smi_window = enable_smi_window, - - .nested_enable_evmcs = nested_enable_evmcs, -}; - -static void vmx_cleanup_l1d_flush(void) -{ - if (vmx_l1d_flush_pages) { - free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); - vmx_l1d_flush_pages = NULL; - } - /* Restore state so sysfs ignores VMX */ - l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; -} - -static void vmx_exit(void) -{ -#ifdef CONFIG_KEXEC_CORE - RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); - synchronize_rcu(); -#endif - - kvm_exit(); - -#if IS_ENABLED(CONFIG_HYPERV) - if (static_branch_unlikely(&enable_evmcs)) { - int cpu; - struct hv_vp_assist_page *vp_ap; - /* - * Reset everything to support using non-enlightened VMCS - * access later (e.g. when we reload the module with - * enlightened_vmcs=0) - */ - for_each_online_cpu(cpu) { - vp_ap = hv_get_vp_assist_page(cpu); - - if (!vp_ap) - continue; - - vp_ap->current_nested_vmcs = 0; - vp_ap->enlighten_vmentry = 0; - } - - static_branch_disable(&enable_evmcs); - } -#endif - vmx_cleanup_l1d_flush(); -} -module_exit(vmx_exit); - -static int __init vmx_init(void) -{ - int r; - -#if IS_ENABLED(CONFIG_HYPERV) - /* - * Enlightened VMCS usage should be recommended and the host needs - * to support eVMCS v1 or above. We can also disable eVMCS support - * with module parameter. - */ - if (enlightened_vmcs && - ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && - (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= - KVM_EVMCS_VERSION) { - int cpu; - - /* Check that we have assist pages on all online CPUs */ - for_each_online_cpu(cpu) { - if (!hv_get_vp_assist_page(cpu)) { - enlightened_vmcs = false; - break; - } - } - - if (enlightened_vmcs) { - pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); - static_branch_enable(&enable_evmcs); - } - } else { - enlightened_vmcs = false; - } -#endif - - r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), - __alignof__(struct vcpu_vmx), THIS_MODULE); - if (r) - return r; - - /* - * Must be called after kvm_init() so enable_ept is properly set - * up. Hand the parameter mitigation value in which was stored in - * the pre module init parser. If no parameter was given, it will - * contain 'auto' which will be turned into the default 'cond' - * mitigation mode. 
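For reference, the parameter in question is kvm-intel.vmentry_l1d_flush, which per the kernel's L1TF admin documentation accepts always, cond and never; when it is absent, the stored 'auto' is resolved to cond at this point, after kvm_init() has fixed up enable_ept.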
- */ - if (boot_cpu_has(X86_BUG_L1TF)) { - r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); - if (r) { - vmx_exit(); - return r; - } - } - -#ifdef CONFIG_KEXEC_CORE - rcu_assign_pointer(crash_vmclear_loaded_vmcss, - crash_vmclear_local_loaded_vmcss); -#endif - vmx_check_vmcs12_offsets(); - - return 0; -} -module_init(vmx_init); diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h new file mode 100644 index 000000000000..854e144131c6 --- /dev/null +++ b/arch/x86/kvm/vmx/capabilities.h @@ -0,0 +1,343 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_CAPS_H +#define __KVM_X86_VMX_CAPS_H + +#include "lapic.h" + +extern bool __read_mostly enable_vpid; +extern bool __read_mostly flexpriority_enabled; +extern bool __read_mostly enable_ept; +extern bool __read_mostly enable_unrestricted_guest; +extern bool __read_mostly enable_ept_ad_bits; +extern bool __read_mostly enable_pml; +extern int __read_mostly pt_mode; + +#define PT_MODE_SYSTEM 0 +#define PT_MODE_HOST_GUEST 1 + +struct nested_vmx_msrs { + /* + * We only store the "true" versions of the VMX capability MSRs. We + * generate the "non-true" versions by setting the must-be-1 bits + * according to the SDM. + */ + u32 procbased_ctls_low; + u32 procbased_ctls_high; + u32 secondary_ctls_low; + u32 secondary_ctls_high; + u32 pinbased_ctls_low; + u32 pinbased_ctls_high; + u32 exit_ctls_low; + u32 exit_ctls_high; + u32 entry_ctls_low; + u32 entry_ctls_high; + u32 misc_low; + u32 misc_high; + u32 ept_caps; + u32 vpid_caps; + u64 basic; + u64 cr0_fixed0; + u64 cr0_fixed1; + u64 cr4_fixed0; + u64 cr4_fixed1; + u64 vmcs_enum; + u64 vmfunc_controls; +}; + +struct vmcs_config { + int size; + int order; + u32 basic_cap; + u32 revision_id; + u32 pin_based_exec_ctrl; + u32 cpu_based_exec_ctrl; + u32 cpu_based_2nd_exec_ctrl; + u32 vmexit_ctrl; + u32 vmentry_ctrl; + struct nested_vmx_msrs nested; +}; +extern struct vmcs_config vmcs_config; + +struct vmx_capability { + u32 ept; + u32 vpid; +}; +extern struct vmx_capability vmx_capability; + +static inline bool cpu_has_vmx_basic_inout(void) +{ + return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT); +} + +static inline bool cpu_has_virtual_nmis(void) +{ + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; +} + +static inline bool cpu_has_vmx_preemption_timer(void) +{ + return vmcs_config.pin_based_exec_ctrl & + PIN_BASED_VMX_PREEMPTION_TIMER; +} + +static inline bool cpu_has_vmx_posted_intr(void) +{ + return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && + vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; +} + +static inline bool cpu_has_load_ia32_efer(void) +{ + return (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_EFER) && + (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_EFER); +} + +static inline bool cpu_has_load_perf_global_ctrl(void) +{ + return (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && + (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); +} + +static inline bool vmx_mpx_supported(void) +{ + return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && + (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); +} + +static inline bool cpu_has_vmx_tpr_shadow(void) +{ + return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; +} + +static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) +{ + return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); +} + +static inline bool cpu_has_vmx_msr_bitmap(void) +{ + return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; +} + +static inline bool 
cpu_has_secondary_exec_ctrls(void) +{ + return vmcs_config.cpu_based_exec_ctrl & + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; +} + +static inline bool cpu_has_vmx_virtualize_apic_accesses(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; +} + +static inline bool cpu_has_vmx_ept(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_EPT; +} + +static inline bool vmx_umip_emulated(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_DESC; +} + +static inline bool cpu_has_vmx_rdtscp(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_RDTSCP; +} + +static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; +} + +static inline bool cpu_has_vmx_vpid(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_VPID; +} + +static inline bool cpu_has_vmx_wbinvd_exit(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_WBINVD_EXITING; +} + +static inline bool cpu_has_vmx_unrestricted_guest(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_UNRESTRICTED_GUEST; +} + +static inline bool cpu_has_vmx_apic_register_virt(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_APIC_REGISTER_VIRT; +} + +static inline bool cpu_has_vmx_virtual_intr_delivery(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; +} + +static inline bool cpu_has_vmx_ple(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_PAUSE_LOOP_EXITING; +} + +static inline bool vmx_rdrand_supported(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_RDRAND_EXITING; +} + +static inline bool cpu_has_vmx_invpcid(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_INVPCID; +} + +static inline bool cpu_has_vmx_vmfunc(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_VMFUNC; +} + +static inline bool cpu_has_vmx_shadow_vmcs(void) +{ + u64 vmx_msr; + + /* check if the cpu supports writing r/o exit information fields */ + rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); + if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) + return false; + + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_SHADOW_VMCS; +} + +static inline bool cpu_has_vmx_encls_vmexit(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENCLS_EXITING; +} + +static inline bool vmx_rdseed_supported(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_RDSEED_EXITING; +} + +static inline bool cpu_has_vmx_pml(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML; +} + +static inline bool vmx_xsaves_supported(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_XSAVES; +} + +static inline bool cpu_has_vmx_tsc_scaling(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_TSC_SCALING; +} + +static inline bool cpu_has_vmx_apicv(void) +{ + return cpu_has_vmx_apic_register_virt() && + cpu_has_vmx_virtual_intr_delivery() && + cpu_has_vmx_posted_intr(); +} + +static inline bool cpu_has_vmx_flexpriority(void) +{ + return cpu_has_vmx_tpr_shadow() && + cpu_has_vmx_virtualize_apic_accesses(); +} + +static inline bool cpu_has_vmx_ept_execute_only(void) +{ + return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; +} + +static inline bool cpu_has_vmx_ept_4levels(void) +{ + 
return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; +} + +static inline bool cpu_has_vmx_ept_5levels(void) +{ + return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT; +} + +static inline bool cpu_has_vmx_ept_mt_wb(void) +{ + return vmx_capability.ept & VMX_EPTP_WB_BIT; +} + +static inline bool cpu_has_vmx_ept_2m_page(void) +{ + return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; +} + +static inline bool cpu_has_vmx_ept_1g_page(void) +{ + return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; +} + +static inline bool cpu_has_vmx_ept_ad_bits(void) +{ + return vmx_capability.ept & VMX_EPT_AD_BIT; +} + +static inline bool cpu_has_vmx_invept_context(void) +{ + return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; +} + +static inline bool cpu_has_vmx_invept_global(void) +{ + return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; +} + +static inline bool cpu_has_vmx_invvpid(void) +{ + return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; +} + +static inline bool cpu_has_vmx_invvpid_individual_addr(void) +{ + return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT; +} + +static inline bool cpu_has_vmx_invvpid_single(void) +{ + return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; +} + +static inline bool cpu_has_vmx_invvpid_global(void) +{ + return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; +} + +static inline bool cpu_has_vmx_intel_pt(void) +{ + u64 vmx_msr; + + rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); + return (vmx_msr & MSR_IA32_VMX_MISC_INTEL_PT) && + (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) && + (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_IA32_RTIT_CTL) && + (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL); +} + +#endif /* __KVM_X86_VMX_CAPS_H */ diff --git a/arch/x86/kvm/vmx_evmcs.h b/arch/x86/kvm/vmx/evmcs.c index 210a884090ad..95bc2247478d 100644 --- a/arch/x86/kvm/vmx_evmcs.h +++ b/arch/x86/kvm/vmx/evmcs.c @@ -1,20 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __KVM_X86_VMX_EVMCS_H -#define __KVM_X86_VMX_EVMCS_H +// SPDX-License-Identifier: GPL-2.0 -#include <asm/hyperv-tlfs.h> +#include <linux/errno.h> +#include <linux/smp.h> + +#include "evmcs.h" +#include "vmcs.h" +#include "vmx.h" + +DEFINE_STATIC_KEY_FALSE(enable_evmcs); + +#if IS_ENABLED(CONFIG_HYPERV) #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x) #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \ {EVMCS1_OFFSET(name), clean_field} -struct evmcs_field { - u16 offset; - u16 clean_field; -}; - -static const struct evmcs_field vmcs_field_to_evmcs_1[] = { +const struct evmcs_field vmcs_field_to_evmcs_1[] = { /* 64 bit rw */ EVMCS1_FIELD(GUEST_RIP, guest_rip, HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE), @@ -298,27 +300,53 @@ static const struct evmcs_field vmcs_field_to_evmcs_1[] = { EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id, HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT), }; +const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1); -static __always_inline int get_evmcs_offset(unsigned long field, - u16 *clean_field) +void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) { - unsigned int index = ROL16(field, 6); - const struct evmcs_field *evmcs_field; + vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - if (unlikely(index >= ARRAY_SIZE(vmcs_field_to_evmcs_1))) { - WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n", - field); - return 
-ENOENT; - } + vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; - evmcs_field = &vmcs_field_to_evmcs_1[index]; +} +#endif - if (clean_field) - *clean_field = evmcs_field->clean_field; +uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + /* + * vmcs_version represents the range of supported Enlightened VMCS + * versions: lower 8 bits is the minimal version, higher 8 bits is the + * maximum supported version. KVM supports versions from 1 to + * KVM_EVMCS_VERSION. + */ + if (vmx->nested.enlightened_vmcs_enabled) + return (KVM_EVMCS_VERSION << 8) | 1; - return evmcs_field->offset; + return 0; } -#undef ROL16 +int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (vmcs_version) + *vmcs_version = nested_get_evmcs_version(vcpu); + + /* We don't support disabling the feature for simplicity. */ + if (vmx->nested.enlightened_vmcs_enabled) + return 0; -#endif /* __KVM_X86_VMX_EVMCS_H */ + vmx->nested.enlightened_vmcs_enabled = true; + + vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; + vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; + vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC; + + return 0; +} diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h new file mode 100644 index 000000000000..e0fcef85b332 --- /dev/null +++ b/arch/x86/kvm/vmx/evmcs.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_EVMCS_H +#define __KVM_X86_VMX_EVMCS_H + +#include <linux/jump_label.h> + +#include <asm/hyperv-tlfs.h> +#include <asm/mshyperv.h> +#include <asm/vmx.h> + +#include "capabilities.h" +#include "vmcs.h" + +struct vmcs_config; + +DECLARE_STATIC_KEY_FALSE(enable_evmcs); + +#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs)) + +#define KVM_EVMCS_VERSION 1 + +/* + * Enlightened VMCSv1 doesn't support these: + * + * POSTED_INTR_NV = 0x00000002, + * GUEST_INTR_STATUS = 0x00000810, + * APIC_ACCESS_ADDR = 0x00002014, + * POSTED_INTR_DESC_ADDR = 0x00002016, + * EOI_EXIT_BITMAP0 = 0x0000201c, + * EOI_EXIT_BITMAP1 = 0x0000201e, + * EOI_EXIT_BITMAP2 = 0x00002020, + * EOI_EXIT_BITMAP3 = 0x00002022, + * GUEST_PML_INDEX = 0x00000812, + * PML_ADDRESS = 0x0000200e, + * VM_FUNCTION_CONTROL = 0x00002018, + * EPTP_LIST_ADDRESS = 0x00002024, + * VMREAD_BITMAP = 0x00002026, + * VMWRITE_BITMAP = 0x00002028, + * + * TSC_MULTIPLIER = 0x00002032, + * PLE_GAP = 0x00004020, + * PLE_WINDOW = 0x00004022, + * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, + * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, + * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, + * + * Currently unsupported in KVM: + * GUEST_IA32_RTIT_CTL = 0x00002814, + */ +#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \ + PIN_BASED_VMX_PREEMPTION_TIMER) +#define EVMCS1_UNSUPPORTED_2NDEXEC \ + (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \ + SECONDARY_EXEC_APIC_REGISTER_VIRT | \ + SECONDARY_EXEC_ENABLE_PML | \ + SECONDARY_EXEC_ENABLE_VMFUNC | \ + SECONDARY_EXEC_SHADOW_VMCS | \ + SECONDARY_EXEC_TSC_SCALING | \ + SECONDARY_EXEC_PAUSE_LOOP_EXITING) +#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL 
(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) + +#if IS_ENABLED(CONFIG_HYPERV) + +struct evmcs_field { + u16 offset; + u16 clean_field; +}; + +extern const struct evmcs_field vmcs_field_to_evmcs_1[]; +extern const unsigned int nr_evmcs_1_fields; + +#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) + +static __always_inline int get_evmcs_offset(unsigned long field, + u16 *clean_field) +{ + unsigned int index = ROL16(field, 6); + const struct evmcs_field *evmcs_field; + + if (unlikely(index >= nr_evmcs_1_fields)) { + WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n", + field); + return -ENOENT; + } + + evmcs_field = &vmcs_field_to_evmcs_1[index]; + + if (clean_field) + *clean_field = evmcs_field->clean_field; + + return evmcs_field->offset; +} + +#undef ROL16 + +static inline void evmcs_write64(unsigned long field, u64 value) +{ + u16 clean_field; + int offset = get_evmcs_offset(field, &clean_field); + + if (offset < 0) + return; + + *(u64 *)((char *)current_evmcs + offset) = value; + + current_evmcs->hv_clean_fields &= ~clean_field; +} + +static inline void evmcs_write32(unsigned long field, u32 value) +{ + u16 clean_field; + int offset = get_evmcs_offset(field, &clean_field); + + if (offset < 0) + return; + + *(u32 *)((char *)current_evmcs + offset) = value; + current_evmcs->hv_clean_fields &= ~clean_field; +} + +static inline void evmcs_write16(unsigned long field, u16 value) +{ + u16 clean_field; + int offset = get_evmcs_offset(field, &clean_field); + + if (offset < 0) + return; + + *(u16 *)((char *)current_evmcs + offset) = value; + current_evmcs->hv_clean_fields &= ~clean_field; +} + +static inline u64 evmcs_read64(unsigned long field) +{ + int offset = get_evmcs_offset(field, NULL); + + if (offset < 0) + return 0; + + return *(u64 *)((char *)current_evmcs + offset); +} + +static inline u32 evmcs_read32(unsigned long field) +{ + int offset = get_evmcs_offset(field, NULL); + + if (offset < 0) + return 0; + + return *(u32 *)((char *)current_evmcs + offset); +} + +static inline u16 evmcs_read16(unsigned long field) +{ + int offset = get_evmcs_offset(field, NULL); + + if (offset < 0) + return 0; + + return *(u16 *)((char *)current_evmcs + offset); +} + +static inline void evmcs_touch_msr_bitmap(void) +{ + if (unlikely(!current_evmcs)) + return; + + if (current_evmcs->hv_enlightenments_control.msr_bitmap) + current_evmcs->hv_clean_fields &= + ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP; +} + +static inline void evmcs_load(u64 phys_addr) +{ + struct hv_vp_assist_page *vp_ap = + hv_get_vp_assist_page(smp_processor_id()); + + vp_ap->current_nested_vmcs = phys_addr; + vp_ap->enlighten_vmentry = 1; +} + +void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf); +#else /* !IS_ENABLED(CONFIG_HYPERV) */ +static inline void evmcs_write64(unsigned long field, u64 value) {} +static inline void evmcs_write32(unsigned long field, u32 value) {} +static inline void evmcs_write16(unsigned long field, u16 value) {} +static inline u64 evmcs_read64(unsigned long field) { return 0; } +static inline u32 evmcs_read32(unsigned long field) { return 0; } +static inline u16 evmcs_read16(unsigned long field) { return 0; } +static inline void evmcs_load(u64 phys_addr) {} +static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} +static inline void evmcs_touch_msr_bitmap(void) {} +#endif /* IS_ENABLED(CONFIG_HYPERV) */ + +uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu); +int 
nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version); + +#endif /* __KVM_X86_VMX_EVMCS_H */ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c new file mode 100644 index 000000000000..3170e291215d --- /dev/null +++ b/arch/x86/kvm/vmx/nested.c @@ -0,0 +1,5721 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/frame.h> +#include <linux/percpu.h> + +#include <asm/debugreg.h> +#include <asm/mmu_context.h> + +#include "cpuid.h" +#include "hyperv.h" +#include "mmu.h" +#include "nested.h" +#include "trace.h" +#include "x86.h" + +static bool __read_mostly enable_shadow_vmcs = 1; +module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); + +static bool __read_mostly nested_early_check = 0; +module_param(nested_early_check, bool, S_IRUGO); + +/* + * Hyper-V requires all of these, so mark them as supported even though + * they are just treated the same as all-context. + */ +#define VMX_VPID_EXTENT_SUPPORTED_MASK \ + (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ + VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ + VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ + VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) + +#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 + +enum { + VMX_VMREAD_BITMAP, + VMX_VMWRITE_BITMAP, + VMX_BITMAP_NR +}; +static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; + +#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) +#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) + +static u16 shadow_read_only_fields[] = { +#define SHADOW_FIELD_RO(x) x, +#include "vmcs_shadow_fields.h" +}; +static int max_shadow_read_only_fields = + ARRAY_SIZE(shadow_read_only_fields); + +static u16 shadow_read_write_fields[] = { +#define SHADOW_FIELD_RW(x) x, +#include "vmcs_shadow_fields.h" +}; +static int max_shadow_read_write_fields = + ARRAY_SIZE(shadow_read_write_fields); + +void init_vmcs_shadow_fields(void) +{ + int i, j; + + memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); + memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); + + for (i = j = 0; i < max_shadow_read_only_fields; i++) { + u16 field = shadow_read_only_fields[i]; + + if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && + (i + 1 == max_shadow_read_only_fields || + shadow_read_only_fields[i + 1] != field + 1)) + pr_err("Missing field from shadow_read_only_field %x\n", + field + 1); + + clear_bit(field, vmx_vmread_bitmap); +#ifdef CONFIG_X86_64 + if (field & 1) + continue; +#endif + if (j < i) + shadow_read_only_fields[j] = field; + j++; + } + max_shadow_read_only_fields = j; + + for (i = j = 0; i < max_shadow_read_write_fields; i++) { + u16 field = shadow_read_write_fields[i]; + + if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && + (i + 1 == max_shadow_read_write_fields || + shadow_read_write_fields[i + 1] != field + 1)) + pr_err("Missing field from shadow_read_write_field %x\n", + field + 1); + + /* + * PML and the preemption timer can be emulated, but the + * processor cannot vmwrite to fields that don't exist + * on bare metal. 
+ */ + switch (field) { + case GUEST_PML_INDEX: + if (!cpu_has_vmx_pml()) + continue; + break; + case VMX_PREEMPTION_TIMER_VALUE: + if (!cpu_has_vmx_preemption_timer()) + continue; + break; + case GUEST_INTR_STATUS: + if (!cpu_has_vmx_apicv()) + continue; + break; + default: + break; + } + + clear_bit(field, vmx_vmwrite_bitmap); + clear_bit(field, vmx_vmread_bitmap); +#ifdef CONFIG_X86_64 + if (field & 1) + continue; +#endif + if (j < i) + shadow_read_write_fields[j] = field; + j++; + } + max_shadow_read_write_fields = j; +} + +/* + * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), + * set the success or error code of an emulated VMX instruction (as specified + * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated + * instruction. + */ +static int nested_vmx_succeed(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); + return kvm_skip_emulated_instruction(vcpu); +} + +static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_CF); + return kvm_skip_emulated_instruction(vcpu); +} + +static int nested_vmx_failValid(struct kvm_vcpu *vcpu, + u32 vm_instruction_error) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * failValid writes the error number to the current VMCS, which + * can't be done if there isn't a current VMCS. + */ + if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) + return nested_vmx_failInvalid(vcpu); + + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_ZF); + get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; + /* + * We don't need to force a shadow sync because + * VM_INSTRUCTION_ERROR is not shadowed. + */ + return kvm_skip_emulated_instruction(vcpu); +} + +static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) +{ + /* TODO: don't just reset the guest here; handle the abort more gracefully. */ + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); +} + +static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) +{ + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); + vmcs_write64(VMCS_LINK_POINTER, -1ull); +} + +static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx->nested.hv_evmcs) + return; + + kunmap(vmx->nested.hv_evmcs_page); + kvm_release_page_dirty(vmx->nested.hv_evmcs_page); + vmx->nested.hv_evmcs_vmptr = -1ull; + vmx->nested.hv_evmcs_page = NULL; + vmx->nested.hv_evmcs = NULL; +} + +/* + * Free whatever needs to be freed from vmx->nested when L1 goes down, or + * just stops using VMX. 
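The three nested_vmx_succeed()/failValid()/failInvalid() helpers above implement the SDM's "Conventions" for VMX instructions: success clears all six status flags, VMfailInvalid sets only CF (there is no current VMCS to hold an error number), and VMfailValid sets only ZF and stores the error in VM_INSTRUCTION_ERROR. A self-contained sketch of decoding that from a guest's RFLAGS:

#include <stdint.h>

#define X86_EFLAGS_CF 0x0001u
#define X86_EFLAGS_ZF 0x0040u

enum vmx_outcome { VMsucceed, VMfailInvalid, VMfailValid };

static enum vmx_outcome vmx_outcome(uint64_t rflags)
{
        if (rflags & X86_EFLAGS_CF)
                return VMfailInvalid;   /* no current VMCS for an error code */
        if (rflags & X86_EFLAGS_ZF)
                return VMfailValid;     /* error number in VM_INSTRUCTION_ERROR */
        return VMsucceed;
}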
+ */ +static void free_nested(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) + return; + + vmx->nested.vmxon = false; + vmx->nested.smm.vmxon = false; + free_vpid(vmx->nested.vpid02); + vmx->nested.posted_intr_nv = -1; + vmx->nested.current_vmptr = -1ull; + if (enable_shadow_vmcs) { + vmx_disable_shadow_vmcs(vmx); + vmcs_clear(vmx->vmcs01.shadow_vmcs); + free_vmcs(vmx->vmcs01.shadow_vmcs); + vmx->vmcs01.shadow_vmcs = NULL; + } + kfree(vmx->nested.cached_vmcs12); + kfree(vmx->nested.cached_shadow_vmcs12); + /* Unpin physical memory we referred to in the vmcs02 */ + if (vmx->nested.apic_access_page) { + kvm_release_page_dirty(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = NULL; + } + if (vmx->nested.virtual_apic_page) { + kvm_release_page_dirty(vmx->nested.virtual_apic_page); + vmx->nested.virtual_apic_page = NULL; + } + if (vmx->nested.pi_desc_page) { + kunmap(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); + vmx->nested.pi_desc_page = NULL; + vmx->nested.pi_desc = NULL; + } + + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + + nested_release_evmcs(vcpu); + + free_loaded_vmcs(&vmx->nested.vmcs02); +} + +static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int cpu; + + if (vmx->loaded_vmcs == vmcs) + return; + + cpu = get_cpu(); + vmx_vcpu_put(vcpu); + vmx->loaded_vmcs = vmcs; + vmx_vcpu_load(vcpu, cpu); + put_cpu(); + + vm_entry_controls_reset_shadow(vmx); + vm_exit_controls_reset_shadow(vmx); + vmx_segment_cache_clear(vmx); +} + +/* + * Ensure that the current vmcs of the logical processor is the + * vmcs01 of the vcpu before calling free_nested(). 
+ */ +void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) +{ + vcpu_load(vcpu); + vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01); + free_nested(vcpu); + vcpu_put(vcpu); +} + +static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, + struct x86_exception *fault) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 exit_reason; + unsigned long exit_qualification = vcpu->arch.exit_qualification; + + if (vmx->nested.pml_full) { + exit_reason = EXIT_REASON_PML_FULL; + vmx->nested.pml_full = false; + exit_qualification &= INTR_INFO_UNBLOCK_NMI; + } else if (fault->error_code & PFERR_RSVD_MASK) + exit_reason = EXIT_REASON_EPT_MISCONFIG; + else + exit_reason = EXIT_REASON_EPT_VIOLATION; + + nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification); + vmcs12->guest_physical_address = fault->address; +} + +static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) +{ + WARN_ON(mmu_is_nested(vcpu)); + + vcpu->arch.mmu = &vcpu->arch.guest_mmu; + kvm_init_shadow_ept_mmu(vcpu, + to_vmx(vcpu)->nested.msrs.ept_caps & + VMX_EPT_EXECUTE_ONLY_BIT, + nested_ept_ad_enabled(vcpu), + nested_ept_get_cr3(vcpu)); + vcpu->arch.mmu->set_cr3 = vmx_set_cr3; + vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; + vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; + vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; + + vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; +} + +static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu = &vcpu->arch.root_mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; +} + +static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, + u16 error_code) +{ + bool inequality, bit; + + bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; + inequality = + (error_code & vmcs12->page_fault_error_code_mask) != + vmcs12->page_fault_error_code_match; + return inequality ^ bit; +} + + +/* + * KVM wants to inject the page faults it received into the guest. This + * function checks whether, in a nested guest, they need to be injected into + * L1 or L2. + */ +static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned int nr = vcpu->arch.exception.nr; + bool has_payload = vcpu->arch.exception.has_payload; + unsigned long payload = vcpu->arch.exception.payload; + + if (nr == PF_VECTOR) { + if (vcpu->arch.exception.nested_apf) { + *exit_qual = vcpu->arch.apf.nested_apf_token; + return 1; + } + if (nested_vmx_is_page_fault_vmexit(vmcs12, + vcpu->arch.exception.error_code)) { + *exit_qual = has_payload ? 
payload : vcpu->arch.cr2; + return 1; + } + } else if (vmcs12->exception_bitmap & (1u << nr)) { + if (nr == DB_VECTOR) { + if (!has_payload) { + payload = vcpu->arch.dr6; + payload &= ~(DR6_FIXED_1 | DR6_BT); + payload ^= DR6_RTM; + } + *exit_qual = payload; + } else + *exit_qual = 0; + return 1; + } + + return 0; +} + + +static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, + struct x86_exception *fault) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + WARN_ON(!is_guest_mode(vcpu)); + + if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && + !to_vmx(vcpu)->nested.nested_run_pending) { + vmcs12->vm_exit_intr_error_code = fault->error_code; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, + PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | + INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, + fault->address); + } else { + kvm_inject_page_fault(vcpu, fault); + } +} + +static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); +} + +static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + return 0; + + if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) || + !page_address_valid(vcpu, vmcs12->io_bitmap_b)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) + return 0; + + if (!page_address_valid(vcpu, vmcs12->msr_bitmap)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) + return 0; + + if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) + return -EINVAL; + + return 0; +} + +/* + * Check if MSR is intercepted for L01 MSR bitmap. + */ +static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + +/* + * If a msr is allowed by L0, we should check whether it is allowed by L1. + * The corresponding bit will be cleared unless both of L0 and L1 allow it. + */ +static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, + unsigned long *msr_bitmap_nested, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
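The layout being described is a single 4 KiB bitmap page split into four 1 KiB quarters: read-low at 0x000, read-high at 0x400, write-low at 0x800 and write-high at 0xc00, where the low quarters cover MSRs 0x00000000-0x00001fff and the high quarters cover 0xc0000000-0xc0001fff. A minimal sketch of the lookup, matching the offsets used just below (a set bit means "intercept"):

#include <stdbool.h>
#include <stdint.h>

static bool msr_intercepted(const uint8_t *bitmap, uint32_t msr, bool write)
{
        uint32_t base = write ? 0x800 : 0x000;  /* write-low vs read-low */

        if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
                base += 0x400;                  /* the matching high quarter */
                msr &= 0x1fff;
        } else if (msr > 0x1fff) {
                return true;                    /* outside both ranges: always intercept */
        }
        return bitmap[base + msr / 8] & (1u << (msr % 8));
}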
+ */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R && + !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) + /* read-low */ + __clear_bit(msr, msr_bitmap_nested + 0x000 / f); + + if (type & MSR_TYPE_W && + !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) + /* write-low */ + __clear_bit(msr, msr_bitmap_nested + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R && + !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) + /* read-high */ + __clear_bit(msr, msr_bitmap_nested + 0x400 / f); + + if (type & MSR_TYPE_W && + !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) + /* write-high */ + __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); + + } +} + +/* + * Merge L0's and L1's MSR bitmaps; return false to indicate that + * we do not use the hardware. + */ +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + int msr; + struct page *page; + unsigned long *msr_bitmap_l1; + unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; + /* + * pred_cmd & spec_ctrl are trying to verify two things: + * + * 1. L0 gave permission to L1 to actually pass through the MSR. This + * ensures that we do not accidentally generate an L02 MSR bitmap + * from the L12 MSR bitmap that is too permissive. + * 2. That L1 or its L2s have actually used the MSR. This avoids + * unnecessary merging of the bitmap if the MSR is unused. This + * works properly because we only update the L01 MSR bitmap lazily. + * So even if L0 should pass these MSRs through to L1, the L01 bitmap + * is only updated to reflect this when L1 (or its L2s) actually + * writes to the MSR. + */ + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); + + /* Nothing to do if the MSR bitmap is not in use. */ + if (!cpu_has_vmx_msr_bitmap() || + !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) + return false; + + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !pred_cmd && !spec_ctrl) + return false; + + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); + if (is_error_page(page)) + return false; + + msr_bitmap_l1 = (unsigned long *)kmap(page); + if (nested_cpu_has_apic_reg_virt(vmcs12)) { + /* + * L0 need not intercept reads for MSRs between 0x800 and 0x8ff; it + * just lets the processor take the value from the virtual-APIC page, + * so take those 256 bits directly from the L1 bitmap. 
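The 0x800-0x8ff range is the x2APIC MSR window: each APIC register maps to MSR 0x800 + (xAPIC MMIO offset >> 4), which is what the X2APIC_MSR() calls below rely on. For instance:

#include <stdint.h>

/* x2APIC register-to-MSR mapping; offset is the xAPIC MMIO offset */
static inline uint32_t x2apic_msr(uint32_t offset)
{
        return 0x800 + (offset >> 4);
}
/* x2apic_msr(0x80) == 0x808, the TPR, i.e. X2APIC_MSR(APIC_TASKPRI) */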
+ */ + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap_l0[word] = msr_bitmap_l1[word]; + msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; + } + } else { + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap_l0[word] = ~0; + msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; + } + } + + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_TASKPRI), + MSR_TYPE_W); + + if (nested_cpu_has_vid(vmcs12)) { + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_EOI), + MSR_TYPE_W); + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_SELF_IPI), + MSR_TYPE_W); + } + + if (spec_ctrl) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_R | MSR_TYPE_W); + + if (pred_cmd) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_PRED_CMD, + MSR_TYPE_W); + + kunmap(page); + kvm_release_page_clean(page); + + return true; +} + +static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct vmcs12 *shadow; + struct page *page; + + if (!nested_cpu_has_shadow_vmcs(vmcs12) || + vmcs12->vmcs_link_pointer == -1ull) + return; + + shadow = get_shadow_vmcs12(vcpu); + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); + + memcpy(shadow, kmap(page), VMCS12_SIZE); + + kunmap(page); + kvm_release_page_clean(page); +} + +static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!nested_cpu_has_shadow_vmcs(vmcs12) || + vmcs12->vmcs_link_pointer == -1ull) + return; + + kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, + get_shadow_vmcs12(vcpu), VMCS12_SIZE); +} + +/* + * In nested virtualization, check if L1 has set + * VM_EXIT_ACK_INTR_ON_EXIT. + */ +static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) +{ + return get_vmcs12(vcpu)->vm_exit_controls & + VM_EXIT_ACK_INTR_ON_EXIT; +} + +static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) +{ + return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu)); +} + +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !page_address_valid(vcpu, vmcs12->apic_access_addr)) + return -EINVAL; + else + return 0; +} + +static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !nested_cpu_has_apic_reg_virt(vmcs12) && + !nested_cpu_has_vid(vmcs12) && + !nested_cpu_has_posted_intr(vmcs12)) + return 0; + + /* + * If virtualize x2apic mode is enabled, + * virtualize apic access must be disabled. + */ + if (nested_cpu_has_virt_x2apic_mode(vmcs12) && + nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) + return -EINVAL; + + /* + * If virtual interrupt delivery is enabled, + * we must exit on external interrupts. + */ + if (nested_cpu_has_vid(vmcs12) && + !nested_exit_on_intr(vcpu)) + return -EINVAL; + + /* + * Bits 15:8 must be zero in posted_intr_nv; the descriptor address + * has already been checked in nested_get_vmcs12_pages. + * + * Bits 5:0 of posted_intr_desc_addr must be zero. 
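The address checks that follow encode two facts: the posted-interrupt descriptor is 64 bytes, so bits 5:0 of its address must be clear, and like any guest physical address it must fit under the vCPU's maxphyaddr. As a small sketch:

#include <stdbool.h>
#include <stdint.h>

/* 64-byte aligned and representable under maxphyaddr */
static bool pi_desc_addr_valid(uint64_t addr, unsigned int maxphyaddr)
{
        return !(addr & 0x3f) && !(addr >> maxphyaddr);
}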
+ */ + if (nested_cpu_has_posted_intr(vmcs12) && + (!nested_cpu_has_vid(vmcs12) || + !nested_exit_intr_ack_set(vcpu) || + (vmcs12->posted_intr_nv & 0xff00) || + (vmcs12->posted_intr_desc_addr & 0x3f) || + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) + return -EINVAL; + + /* TPR shadow is needed by all APICv features. */ + if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, + u32 count, u64 addr) +{ + int maxphyaddr; + + if (count == 0) + return 0; + maxphyaddr = cpuid_maxphyaddr(vcpu); + if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || + (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count, + vmcs12->vm_exit_msr_load_addr) || + nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count, + vmcs12->vm_exit_msr_store_addr)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count, + vmcs12->vm_entry_msr_load_addr)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has_pml(vmcs12)) + return 0; + + if (!nested_cpu_has_ept(vmcs12) || + !page_address_valid(vcpu, vmcs12->pml_address)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && + !nested_cpu_has_ept(vmcs12)) + return -EINVAL; + return 0; +} + +static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && + !nested_cpu_has_ept(vmcs12)) + return -EINVAL; + return 0; +} + +static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has_shadow_vmcs(vmcs12)) + return 0; + + if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) || + !page_address_valid(vcpu, vmcs12->vmwrite_bitmap)) + return -EINVAL; + + return 0; +} + +static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, + struct vmx_msr_entry *e) +{ + /* x2APIC MSR accesses are not allowed */ + if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) + return -EINVAL; + if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ + e->index == MSR_IA32_UCODE_REV) + return -EINVAL; + if (e->reserved != 0) + return -EINVAL; + return 0; +} + +static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, + struct vmx_msr_entry *e) +{ + if (e->index == MSR_FS_BASE || + e->index == MSR_GS_BASE || + e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ + nested_vmx_msr_check_common(vcpu, e)) + return -EINVAL; + return 0; +} + +static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, + struct vmx_msr_entry *e) +{ + if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ + nested_vmx_msr_check_common(vcpu, e)) + return -EINVAL; + return 0; +} + +/* + * Load guest's/host's MSRs at nested entry/exit. + * Return 0 for success, the 1-based index of the failing entry for failure. 
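The MSR-load/store areas walked by the two functions below are arrays of 16-byte entries laid out by the SDM, which is also why nested_vmx_check_msr_switch() above insists on 16-byte alignment. Sketched for reference:

#include <stdint.h>

/* one VM-entry/VM-exit MSR-load/store area entry (16 bytes) */
struct vmx_msr_entry_sketch {
        uint32_t index;         /* MSR number */
        uint32_t reserved;      /* must be zero; checked before use */
        uint64_t value;         /* value to load, or slot to store into */
};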
+ */ +static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) +{ + u32 i; + struct vmx_msr_entry e; + struct msr_data msr; + + msr.host_initiated = false; + for (i = 0; i < count; i++) { + if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), + &e, sizeof(e))) { + pr_debug_ratelimited( + "%s cannot read MSR entry (%u, 0x%08llx)\n", + __func__, i, gpa + i * sizeof(e)); + goto fail; + } + if (nested_vmx_load_msr_check(vcpu, &e)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, i, e.index, e.reserved); + goto fail; + } + msr.index = e.index; + msr.data = e.value; + if (kvm_set_msr(vcpu, &msr)) { + pr_debug_ratelimited( + "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", + __func__, i, e.index, e.value); + goto fail; + } + } + return 0; +fail: + return i + 1; +} + +static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) +{ + u32 i; + struct vmx_msr_entry e; + + for (i = 0; i < count; i++) { + struct msr_data msr_info; + if (kvm_vcpu_read_guest(vcpu, + gpa + i * sizeof(e), + &e, 2 * sizeof(u32))) { + pr_debug_ratelimited( + "%s cannot read MSR entry (%u, 0x%08llx)\n", + __func__, i, gpa + i * sizeof(e)); + return -EINVAL; + } + if (nested_vmx_store_msr_check(vcpu, &e)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, i, e.index, e.reserved); + return -EINVAL; + } + msr_info.host_initiated = false; + msr_info.index = e.index; + if (kvm_get_msr(vcpu, &msr_info)) { + pr_debug_ratelimited( + "%s cannot read MSR (%u, 0x%x)\n", + __func__, i, e.index); + return -EINVAL; + } + if (kvm_vcpu_write_guest(vcpu, + gpa + i * sizeof(e) + + offsetof(struct vmx_msr_entry, value), + &msr_info.data, sizeof(msr_info.data))) { + pr_debug_ratelimited( + "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", + __func__, i, e.index, msr_info.data); + return -EINVAL; + } + } + return 0; +} + +static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) +{ + unsigned long invalid_mask; + + invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); + return (val & invalid_mask) == 0; +} + +/* + * Load the guest's/host's CR3 at nested entry/exit. nested_ept is true if we + * are emulating VM entry into a guest with EPT enabled. + * Returns 0 on success, 1 on failure; on failure, the invalid-state exit + * qualification code is assigned to entry_failure_code. + */ +static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, + u32 *entry_failure_code) +{ + if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { + if (!nested_cr3_valid(vcpu, cr3)) { + *entry_failure_code = ENTRY_FAIL_DEFAULT; + return 1; + } + + /* + * If PAE paging and EPT are both on, CR3 is not used by the CPU and + * must not be dereferenced. + */ + if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && + !nested_ept) { + if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { + *entry_failure_code = ENTRY_FAIL_PDPTE; + return 1; + } + } + } + + if (!nested_ept) + kvm_mmu_new_cr3(vcpu, cr3, false); + + vcpu->arch.cr3 = cr3; + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + + kvm_init_mmu(vcpu, false); + + return 0; +} + +/* + * Returns true if KVM is able to configure the CPU to tag TLB entries + * populated by L2 differently from TLB entries populated + * by L1. + * + * If L1 uses EPT, TLB entries are tagged with a different EPTP. + * + * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged + * with a different VPID (L1 entries are tagged with vmx->vpid + * while L2 entries are tagged with vmx->nested.vpid02). 
+ */ +static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + return nested_cpu_has_ept(vmcs12) || + (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); +} + +static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid; +} + + +static inline bool vmx_control_verify(u32 control, u32 low, u32 high) +{ + return fixed_bits_valid(control, low, high); +} + +static inline u64 vmx_control_msr(u32 low, u32 high) +{ + return low | ((u64)high << 32); +} + +static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) +{ + superset &= mask; + subset &= mask; + + return (superset | subset) == superset; +} + +static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) +{ + const u64 feature_and_reserved = + /* feature (except bit 48; see below) */ + BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | + /* reserved */ + BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); + u64 vmx_basic = vmx->nested.msrs.basic; + + if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) + return -EINVAL; + + /* + * KVM does not emulate a version of VMX that constrains physical + * addresses of VMX structures (e.g. VMCS) to 32-bits. + */ + if (data & BIT_ULL(48)) + return -EINVAL; + + if (vmx_basic_vmcs_revision_id(vmx_basic) != + vmx_basic_vmcs_revision_id(data)) + return -EINVAL; + + if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) + return -EINVAL; + + vmx->nested.msrs.basic = data; + return 0; +} + +static int +vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +{ + u64 supported; + u32 *lowp, *highp; + + switch (msr_index) { + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: + lowp = &vmx->nested.msrs.pinbased_ctls_low; + highp = &vmx->nested.msrs.pinbased_ctls_high; + break; + case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: + lowp = &vmx->nested.msrs.procbased_ctls_low; + highp = &vmx->nested.msrs.procbased_ctls_high; + break; + case MSR_IA32_VMX_TRUE_EXIT_CTLS: + lowp = &vmx->nested.msrs.exit_ctls_low; + highp = &vmx->nested.msrs.exit_ctls_high; + break; + case MSR_IA32_VMX_TRUE_ENTRY_CTLS: + lowp = &vmx->nested.msrs.entry_ctls_low; + highp = &vmx->nested.msrs.entry_ctls_high; + break; + case MSR_IA32_VMX_PROCBASED_CTLS2: + lowp = &vmx->nested.msrs.secondary_ctls_low; + highp = &vmx->nested.msrs.secondary_ctls_high; + break; + default: + BUG(); + } + + supported = vmx_control_msr(*lowp, *highp); + + /* Check must-be-1 bits are still 1. */ + if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) + return -EINVAL; + + /* Check must-be-0 bits are still 0. 
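An equivalent formulation of the is_bitwise_subset() restore rule used above, as a self-contained sketch: userspace may only tighten capabilities, never extend them. Names are illustrative.

#include <stdbool.h>
#include <stdint.h>

/*
 * Within 'mask', every bit set in 'subset' must also be set in
 * 'superset'. For a control MSR restore: must-be-1 bits (31:0) may not
 * be cleared, may-be-1 bits (63:32) may not be added.
 */
static bool bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
	return ((superset | subset) & mask) == (superset & mask);
}

static bool control_msr_restore_ok(uint64_t supported, uint64_t data)
{
	return bitwise_subset(data, supported, 0x00000000ffffffffull) &&
	       bitwise_subset(supported, data, 0xffffffff00000000ull);
}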
*/ + if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) + return -EINVAL; + + *lowp = data; + *highp = data >> 32; + return 0; +} + +static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) +{ + const u64 feature_and_reserved_bits = + /* feature */ + BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | + BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | + /* reserved */ + GENMASK_ULL(13, 9) | BIT_ULL(31); + u64 vmx_misc; + + vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, + vmx->nested.msrs.misc_high); + + if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) + return -EINVAL; + + if ((vmx->nested.msrs.pinbased_ctls_high & + PIN_BASED_VMX_PREEMPTION_TIMER) && + vmx_misc_preemption_timer_rate(data) != + vmx_misc_preemption_timer_rate(vmx_misc)) + return -EINVAL; + + if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) + return -EINVAL; + + if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) + return -EINVAL; + + if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) + return -EINVAL; + + vmx->nested.msrs.misc_low = data; + vmx->nested.msrs.misc_high = data >> 32; + + /* + * If L1 has read-only VM-exit information fields, use the + * less permissive vmx_vmwrite_bitmap to specify write + * permissions for the shadow VMCS. + */ + if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) + vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); + + return 0; +} + +static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) +{ + u64 vmx_ept_vpid_cap; + + vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, + vmx->nested.msrs.vpid_caps); + + /* Every bit is either reserved or a feature bit. */ + if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) + return -EINVAL; + + vmx->nested.msrs.ept_caps = data; + vmx->nested.msrs.vpid_caps = data >> 32; + return 0; +} + +static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +{ + u64 *msr; + + switch (msr_index) { + case MSR_IA32_VMX_CR0_FIXED0: + msr = &vmx->nested.msrs.cr0_fixed0; + break; + case MSR_IA32_VMX_CR4_FIXED0: + msr = &vmx->nested.msrs.cr4_fixed0; + break; + default: + BUG(); + } + + /* + * 1 bits (which indicates bits which "must-be-1" during VMX operation) + * must be 1 in the restored value. + */ + if (!is_bitwise_subset(data, *msr, -1ULL)) + return -EINVAL; + + *msr = data; + return 0; +} + +/* + * Called when userspace is restoring VMX MSRs. + * + * Returns 0 on success, non-0 otherwise. + */ +int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * Don't allow changes to the VMX capability MSRs while the vCPU + * is in VMX operation. + */ + if (vmx->nested.vmxon) + return -EBUSY; + + switch (msr_index) { + case MSR_IA32_VMX_BASIC: + return vmx_restore_vmx_basic(vmx, data); + case MSR_IA32_VMX_PINBASED_CTLS: + case MSR_IA32_VMX_PROCBASED_CTLS: + case MSR_IA32_VMX_EXIT_CTLS: + case MSR_IA32_VMX_ENTRY_CTLS: + /* + * The "non-true" VMX capability MSRs are generated from the + * "true" MSRs, so we do not support restoring them directly. + * + * If userspace wants to emulate VMX_BASIC[55]=0, userspace + * should restore the "true" MSRs with the must-be-1 bits + * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND + * DEFAULT SETTINGS". 
+ */ + return -EINVAL; + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: + case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: + case MSR_IA32_VMX_TRUE_EXIT_CTLS: + case MSR_IA32_VMX_TRUE_ENTRY_CTLS: + case MSR_IA32_VMX_PROCBASED_CTLS2: + return vmx_restore_control_msr(vmx, msr_index, data); + case MSR_IA32_VMX_MISC: + return vmx_restore_vmx_misc(vmx, data); + case MSR_IA32_VMX_CR0_FIXED0: + case MSR_IA32_VMX_CR4_FIXED0: + return vmx_restore_fixed0_msr(vmx, msr_index, data); + case MSR_IA32_VMX_CR0_FIXED1: + case MSR_IA32_VMX_CR4_FIXED1: + /* + * These MSRs are generated based on the vCPU's CPUID, so we + * do not support restoring them directly. + */ + return -EINVAL; + case MSR_IA32_VMX_EPT_VPID_CAP: + return vmx_restore_vmx_ept_vpid_cap(vmx, data); + case MSR_IA32_VMX_VMCS_ENUM: + vmx->nested.msrs.vmcs_enum = data; + return 0; + default: + /* + * The rest of the VMX capability MSRs do not support restore. + */ + return -EINVAL; + } +} + +/* Returns 0 on success, non-0 otherwise. */ +int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) +{ + switch (msr_index) { + case MSR_IA32_VMX_BASIC: + *pdata = msrs->basic; + break; + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: + case MSR_IA32_VMX_PINBASED_CTLS: + *pdata = vmx_control_msr( + msrs->pinbased_ctls_low, + msrs->pinbased_ctls_high); + if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) + *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; + break; + case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: + case MSR_IA32_VMX_PROCBASED_CTLS: + *pdata = vmx_control_msr( + msrs->procbased_ctls_low, + msrs->procbased_ctls_high); + if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) + *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; + break; + case MSR_IA32_VMX_TRUE_EXIT_CTLS: + case MSR_IA32_VMX_EXIT_CTLS: + *pdata = vmx_control_msr( + msrs->exit_ctls_low, + msrs->exit_ctls_high); + if (msr_index == MSR_IA32_VMX_EXIT_CTLS) + *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; + break; + case MSR_IA32_VMX_TRUE_ENTRY_CTLS: + case MSR_IA32_VMX_ENTRY_CTLS: + *pdata = vmx_control_msr( + msrs->entry_ctls_low, + msrs->entry_ctls_high); + if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) + *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; + break; + case MSR_IA32_VMX_MISC: + *pdata = vmx_control_msr( + msrs->misc_low, + msrs->misc_high); + break; + case MSR_IA32_VMX_CR0_FIXED0: + *pdata = msrs->cr0_fixed0; + break; + case MSR_IA32_VMX_CR0_FIXED1: + *pdata = msrs->cr0_fixed1; + break; + case MSR_IA32_VMX_CR4_FIXED0: + *pdata = msrs->cr4_fixed0; + break; + case MSR_IA32_VMX_CR4_FIXED1: + *pdata = msrs->cr4_fixed1; + break; + case MSR_IA32_VMX_VMCS_ENUM: + *pdata = msrs->vmcs_enum; + break; + case MSR_IA32_VMX_PROCBASED_CTLS2: + *pdata = vmx_control_msr( + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + break; + case MSR_IA32_VMX_EPT_VPID_CAP: + *pdata = msrs->ept_caps | + ((u64)msrs->vpid_caps << 32); + break; + case MSR_IA32_VMX_VMFUNC: + *pdata = msrs->vmfunc_controls; + break; + default: + return 1; + } + + return 0; +} + +/* + * Copy the writable VMCS shadow fields back to the VMCS12, in case + * they have been modified by the L1 guest. Note that the "read-only" + * VM-exit information fields are actually writable if the vCPU is + * configured to support "VMWRITE to any supported field in the VMCS." 
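A sketch of how vmx_get_vmx_msr() above derives the "non-true" capability MSRs from the "true" variants by forcing the default1 class of controls on. The mask value below is an illustrative stand-in, not the architectural PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR constant.

#include <stdint.h>

#define ALWAYSON_WITHOUT_TRUE_MSR_SKETCH 0x16ull	/* hypothetical default1 bits */

static uint64_t non_true_ctls(uint32_t true_low, uint32_t true_high)
{
	uint64_t msr = true_low | ((uint64_t)true_high << 32);

	/* default1 controls read as must-be-1 in the non-true MSR */
	return msr | ALWAYSON_WITHOUT_TRUE_MSR_SKETCH;
}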
+ */
+static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ const u16 *fields[] = {
+ shadow_read_write_fields,
+ shadow_read_only_fields
+ };
+ const int max_fields[] = {
+ max_shadow_read_write_fields,
+ max_shadow_read_only_fields
+ };
+ int i, q;
+ unsigned long field;
+ u64 field_value;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
+ preempt_disable();
+
+ vmcs_load(shadow_vmcs);
+
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
+ for (i = 0; i < max_fields[q]; i++) {
+ field = fields[q][i];
+ field_value = __vmcs_readl(field);
+ vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
+ }
+ /*
+ * Skip the VM-exit information fields if they are read-only.
+ */
+ if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
+ break;
+ }
+
+ vmcs_clear(shadow_vmcs);
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+
+ preempt_enable();
+}
+
+static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
+{
+ const u16 *fields[] = {
+ shadow_read_write_fields,
+ shadow_read_only_fields
+ };
+ const int max_fields[] = {
+ max_shadow_read_write_fields,
+ max_shadow_read_only_fields
+ };
+ int i, q;
+ unsigned long field;
+ u64 field_value = 0;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
+ vmcs_load(shadow_vmcs);
+
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
+ for (i = 0; i < max_fields[q]; i++) {
+ field = fields[q][i];
+ vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
+ __vmcs_writel(field, field_value);
+ }
+ }
+
+ vmcs_clear(shadow_vmcs);
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+}
+
+static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
+ vmcs12->tpr_threshold = evmcs->tpr_threshold;
+ vmcs12->guest_rip = evmcs->guest_rip;
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
+ vmcs12->guest_rsp = evmcs->guest_rsp;
+ vmcs12->guest_rflags = evmcs->guest_rflags;
+ vmcs12->guest_interruptibility_info =
+ evmcs->guest_interruptibility_info;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->cpu_based_vm_exec_control =
+ evmcs->cpu_based_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
+ vmcs12->exception_bitmap = evmcs->exception_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
+ vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
+ vmcs12->vm_entry_intr_info_field =
+ evmcs->vm_entry_intr_info_field;
+ vmcs12->vm_entry_exception_error_code =
+ evmcs->vm_entry_exception_error_code;
+ vmcs12->vm_entry_instruction_len =
+ evmcs->vm_entry_instruction_len;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
+ vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
+ vmcs12->host_cr0 = evmcs->host_cr0;
+ vmcs12->host_cr3 = evmcs->host_cr3;
+ vmcs12->host_cr4 = evmcs->host_cr4;
+ vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
+ vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
+ vmcs12->host_rip = evmcs->host_rip;
+ vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
+ vmcs12->host_es_selector = evmcs->host_es_selector;
+ vmcs12->host_cs_selector = evmcs->host_cs_selector;
+ vmcs12->host_ss_selector = evmcs->host_ss_selector;
+ vmcs12->host_ds_selector = evmcs->host_ds_selector;
+ vmcs12->host_fs_selector = evmcs->host_fs_selector;
+ vmcs12->host_gs_selector = evmcs->host_gs_selector;
+ vmcs12->host_tr_selector = evmcs->host_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
+ vmcs12->pin_based_vm_exec_control =
+ evmcs->pin_based_vm_exec_control;
+ vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
+ vmcs12->secondary_vm_exec_control =
+ evmcs->secondary_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
+ vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
+ vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
+ vmcs12->msr_bitmap = evmcs->msr_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
+ vmcs12->guest_es_base = evmcs->guest_es_base;
+ vmcs12->guest_cs_base = evmcs->guest_cs_base;
+ vmcs12->guest_ss_base = evmcs->guest_ss_base;
+ vmcs12->guest_ds_base = evmcs->guest_ds_base;
+ vmcs12->guest_fs_base = evmcs->guest_fs_base;
+ vmcs12->guest_gs_base = evmcs->guest_gs_base;
+ vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
+ vmcs12->guest_tr_base = evmcs->guest_tr_base;
+ vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
+ vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
+ vmcs12->guest_es_limit = evmcs->guest_es_limit;
+ vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
+ vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
+ vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
+ vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
+ vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
+ vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
+ vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
+ vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
+ vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
+ vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
+ vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
+ vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
+ vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
+ vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
+ vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
+ vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
+ vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
+ vmcs12->guest_es_selector = evmcs->guest_es_selector;
+ vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
+ vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
+ vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
+ vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
+ vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
+ vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
+ vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
+ vmcs12->tsc_offset = evmcs->tsc_offset;
+ vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
+ vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
+ vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
+ vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
+ vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
+ vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
+ vmcs12->guest_cr0 = evmcs->guest_cr0;
+ vmcs12->guest_cr3 = evmcs->guest_cr3;
+
vmcs12->guest_cr4 = evmcs->guest_cr4; + vmcs12->guest_dr7 = evmcs->guest_dr7; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { + vmcs12->host_fs_base = evmcs->host_fs_base; + vmcs12->host_gs_base = evmcs->host_gs_base; + vmcs12->host_tr_base = evmcs->host_tr_base; + vmcs12->host_gdtr_base = evmcs->host_gdtr_base; + vmcs12->host_idtr_base = evmcs->host_idtr_base; + vmcs12->host_rsp = evmcs->host_rsp; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { + vmcs12->ept_pointer = evmcs->ept_pointer; + vmcs12->virtual_processor_id = evmcs->virtual_processor_id; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { + vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; + vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; + vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; + vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; + vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; + vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; + vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; + vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; + vmcs12->guest_pending_dbg_exceptions = + evmcs->guest_pending_dbg_exceptions; + vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; + vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; + vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; + vmcs12->guest_activity_state = evmcs->guest_activity_state; + vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; + } + + /* + * Not used? + * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; + * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; + * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; + * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; + * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; + * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; + * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; + * vmcs12->page_fault_error_code_mask = + * evmcs->page_fault_error_code_mask; + * vmcs12->page_fault_error_code_match = + * evmcs->page_fault_error_code_match; + * vmcs12->cr3_target_count = evmcs->cr3_target_count; + * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; + * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; + * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; + */ + + /* + * Read only fields: + * vmcs12->guest_physical_address = evmcs->guest_physical_address; + * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; + * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; + * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; + * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; + * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; + * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; + * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; + * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; + * vmcs12->exit_qualification = evmcs->exit_qualification; + * vmcs12->guest_linear_address = evmcs->guest_linear_address; + * + * Not present in struct vmcs12: + * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; + * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; + * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; + * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; + */ + + return 0; +} + +static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 
+{ + struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + /* + * Should not be changed by KVM: + * + * evmcs->host_es_selector = vmcs12->host_es_selector; + * evmcs->host_cs_selector = vmcs12->host_cs_selector; + * evmcs->host_ss_selector = vmcs12->host_ss_selector; + * evmcs->host_ds_selector = vmcs12->host_ds_selector; + * evmcs->host_fs_selector = vmcs12->host_fs_selector; + * evmcs->host_gs_selector = vmcs12->host_gs_selector; + * evmcs->host_tr_selector = vmcs12->host_tr_selector; + * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; + * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; + * evmcs->host_cr0 = vmcs12->host_cr0; + * evmcs->host_cr3 = vmcs12->host_cr3; + * evmcs->host_cr4 = vmcs12->host_cr4; + * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; + * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; + * evmcs->host_rip = vmcs12->host_rip; + * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; + * evmcs->host_fs_base = vmcs12->host_fs_base; + * evmcs->host_gs_base = vmcs12->host_gs_base; + * evmcs->host_tr_base = vmcs12->host_tr_base; + * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; + * evmcs->host_idtr_base = vmcs12->host_idtr_base; + * evmcs->host_rsp = vmcs12->host_rsp; + * sync_vmcs12() doesn't read these: + * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; + * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; + * evmcs->msr_bitmap = vmcs12->msr_bitmap; + * evmcs->ept_pointer = vmcs12->ept_pointer; + * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; + * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; + * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; + * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; + * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; + * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; + * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; + * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; + * evmcs->tpr_threshold = vmcs12->tpr_threshold; + * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; + * evmcs->exception_bitmap = vmcs12->exception_bitmap; + * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; + * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; + * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; + * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; + * evmcs->page_fault_error_code_mask = + * vmcs12->page_fault_error_code_mask; + * evmcs->page_fault_error_code_match = + * vmcs12->page_fault_error_code_match; + * evmcs->cr3_target_count = vmcs12->cr3_target_count; + * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; + * evmcs->tsc_offset = vmcs12->tsc_offset; + * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; + * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; + * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; + * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; + * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; + * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; + * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; + * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; + * + * Not present in struct vmcs12: + * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; + * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; + * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; + * 
evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; + */ + + evmcs->guest_es_selector = vmcs12->guest_es_selector; + evmcs->guest_cs_selector = vmcs12->guest_cs_selector; + evmcs->guest_ss_selector = vmcs12->guest_ss_selector; + evmcs->guest_ds_selector = vmcs12->guest_ds_selector; + evmcs->guest_fs_selector = vmcs12->guest_fs_selector; + evmcs->guest_gs_selector = vmcs12->guest_gs_selector; + evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; + evmcs->guest_tr_selector = vmcs12->guest_tr_selector; + + evmcs->guest_es_limit = vmcs12->guest_es_limit; + evmcs->guest_cs_limit = vmcs12->guest_cs_limit; + evmcs->guest_ss_limit = vmcs12->guest_ss_limit; + evmcs->guest_ds_limit = vmcs12->guest_ds_limit; + evmcs->guest_fs_limit = vmcs12->guest_fs_limit; + evmcs->guest_gs_limit = vmcs12->guest_gs_limit; + evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; + evmcs->guest_tr_limit = vmcs12->guest_tr_limit; + evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; + evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; + + evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; + evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; + evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; + evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; + evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; + evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; + evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; + evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; + + evmcs->guest_es_base = vmcs12->guest_es_base; + evmcs->guest_cs_base = vmcs12->guest_cs_base; + evmcs->guest_ss_base = vmcs12->guest_ss_base; + evmcs->guest_ds_base = vmcs12->guest_ds_base; + evmcs->guest_fs_base = vmcs12->guest_fs_base; + evmcs->guest_gs_base = vmcs12->guest_gs_base; + evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; + evmcs->guest_tr_base = vmcs12->guest_tr_base; + evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; + evmcs->guest_idtr_base = vmcs12->guest_idtr_base; + + evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; + evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; + + evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; + evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; + evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; + evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; + + evmcs->guest_pending_dbg_exceptions = + vmcs12->guest_pending_dbg_exceptions; + evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; + evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; + + evmcs->guest_activity_state = vmcs12->guest_activity_state; + evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; + + evmcs->guest_cr0 = vmcs12->guest_cr0; + evmcs->guest_cr3 = vmcs12->guest_cr3; + evmcs->guest_cr4 = vmcs12->guest_cr4; + evmcs->guest_dr7 = vmcs12->guest_dr7; + + evmcs->guest_physical_address = vmcs12->guest_physical_address; + + evmcs->vm_instruction_error = vmcs12->vm_instruction_error; + evmcs->vm_exit_reason = vmcs12->vm_exit_reason; + evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; + evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; + evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; + evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; + evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; + evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; + + evmcs->exit_qualification = vmcs12->exit_qualification; + + evmcs->guest_linear_address = vmcs12->guest_linear_address; + evmcs->guest_rsp = vmcs12->guest_rsp; + evmcs->guest_rflags = vmcs12->guest_rflags; + + 
evmcs->guest_interruptibility_info =
+ vmcs12->guest_interruptibility_info;
+ evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
+ evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
+ evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
+ evmcs->vm_entry_exception_error_code =
+ vmcs12->vm_entry_exception_error_code;
+ evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
+
+ evmcs->guest_rip = vmcs12->guest_rip;
+
+ evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
+
+ return 0;
+}
+
+/*
+ * This is an equivalent of the nested hypervisor executing the vmptrld
+ * instruction.
+ */
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
+ bool from_launch)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_vp_assist_page assist_page;
+
+ if (likely(!vmx->nested.enlightened_vmcs_enabled))
+ return 1;
+
+ if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+ return 1;
+
+ if (unlikely(!assist_page.enlighten_vmentry))
+ return 1;
+
+ if (unlikely(assist_page.current_nested_vmcs !=
+ vmx->nested.hv_evmcs_vmptr)) {
+
+ if (!vmx->nested.hv_evmcs)
+ vmx->nested.current_vmptr = -1ull;
+
+ nested_release_evmcs(vcpu);
+
+ vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
+ vcpu, assist_page.current_nested_vmcs);
+
+ if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ return 0;
+
+ vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+
+ /*
+ * Currently, KVM only supports eVMCS version 1
+ * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
+ * this value in the first u32 field of the eVMCS, which
+ * specifies the eVMCS VersionNumber.
+ *
+ * The guest learns the eVMCS versions supported by the host by
+ * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
+ * is expected to set this CPUID leaf according to the value
+ * returned in vmcs_version from nested_enable_evmcs().
+ *
+ * However, it turns out that Microsoft Hyper-V fails to comply
+ * with its own invented interface: when Hyper-V uses eVMCS, it
+ * just sets the first u32 field of the eVMCS to the revision_id
+ * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
+ * number, which should be one of the supported versions
+ * specified in CPUID.0x4000000A.EAX[0:15].
+ *
+ * To work around this Hyper-V bug, accept here either a
+ * supported eVMCS version or the VMCS12 revision_id as valid
+ * values for the first u32 field of the eVMCS.
+ */
+ if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
+ (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
+ nested_release_evmcs(vcpu);
+ return 0;
+ }
+
+ vmx->nested.dirty_vmcs12 = true;
+ /*
+ * As we keep L2 state for one guest only, the 'hv_clean_fields'
+ * mask can't be used when we switch between guests. Reset it
+ * here for simplicity.
+ */
+ vmx->nested.hv_evmcs->hv_clean_fields &=
+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
+
+ /*
+ * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
+ * reloaded from guest's memory (read only fields, fields not
+ * present in struct hv_enlightened_vmcs, ...). Make sure there
+ * are no leftovers.
+ */
+ if (from_launch) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ memset(vmcs12, 0, sizeof(*vmcs12));
+ vmcs12->hdr.revision_id = VMCS12_REVISION;
+ }
+
+ }
+ return 1;
+}
+
+void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * hv_evmcs may end up being not mapped after migration (when
+ * L2 was running), map it here to make sure vmcs12 changes are
+ * properly reflected.
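The revision-id tolerance described in the comment above, reduced to a standalone predicate. The constants mirror KVM's KVM_EVMCS_VERSION and VMCS12_REVISION; the helper itself is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define EVMCS_VERSION_SKETCH   1u
#define VMCS12_REVISION_SKETCH 0x11e57ed0u

/* Accept a supported eVMCS version, or the VMCS12 revision id that
 * Hyper-V (incorrectly) writes into the version field. */
static bool evmcs_revision_ok(uint32_t revision_id)
{
	return revision_id == EVMCS_VERSION_SKETCH ||
	       revision_id == VMCS12_REVISION_SKETCH;
}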
+ */ + if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) + nested_vmx_handle_enlightened_vmptrld(vcpu, false); + + if (vmx->nested.hv_evmcs) { + copy_vmcs12_to_enlightened(vmx); + /* All fields are clean */ + vmx->nested.hv_evmcs->hv_clean_fields |= + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + } else { + copy_vmcs12_to_shadow(vmx); + } + + vmx->nested.need_vmcs12_sync = false; +} + +static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) +{ + struct vcpu_vmx *vmx = + container_of(timer, struct vcpu_vmx, nested.preemption_timer); + + vmx->nested.preemption_timer_expired = true; + kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); + kvm_vcpu_kick(&vmx->vcpu); + + return HRTIMER_NORESTART; +} + +static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) +{ + u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * A timer value of zero is architecturally guaranteed to cause + * a VMExit prior to executing any instructions in the guest. + */ + if (preemption_timeout == 0) { + vmx_preemption_timer_fn(&vmx->nested.preemption_timer); + return; + } + + if (vcpu->arch.virtual_tsc_khz == 0) + return; + + preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; + preemption_timeout *= 1000000; + do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); + hrtimer_start(&vmx->nested.preemption_timer, + ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); +} + +static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) + return vmcs12->guest_ia32_efer; + else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) + return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); + else + return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); +} + +static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) +{ + /* + * If vmcs02 hasn't been initialized, set the constant vmcs02 state + * according to L0's settings (vmcs12 is irrelevant here). Host + * fields that come from L0 and are not constant, e.g. HOST_CR3, + * will be set as needed prior to VMLAUNCH/VMRESUME. + */ + if (vmx->nested.vmcs02_initialized) + return; + vmx->nested.vmcs02_initialized = true; + + /* + * We don't care what the EPTP value is we just need to guarantee + * it's valid so we don't get a false positive when doing early + * consistency checks. + */ + if (enable_ept && nested_early_check) + vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); + + /* All VMFUNCs are currently emulated through L0 vmexits. */ + if (cpu_has_vmx_vmfunc()) + vmcs_write64(VM_FUNCTION_CONTROL, 0); + + if (cpu_has_vmx_posted_intr()) + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); + + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + + if (enable_pml) + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); + + /* + * Set the MSR load/store lists to match L0's settings. Only the + * addresses are constant (for vmcs02), the counts can change based + * on L2's behavior, e.g. switching to/from long mode. 
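The scaling vmx_start_preemption_timer() above applies can be sketched on its own: the vmcs12 timer value counts units of 2^rate TSC ticks, converted to nanoseconds for the hrtimer. The rate value below is the one KVM advertises in MSR_IA32_VMX_MISC, stated here as an assumption; the caller must ensure tsc_khz is non-zero (KVM bails out earlier in that case).

#include <stdint.h>

#define EMULATED_PREEMPTION_TIMER_RATE 5	/* 2^5 TSC ticks per unit */

static uint64_t preemption_units_to_ns(uint64_t units, uint32_t tsc_khz)
{
	uint64_t ticks = units << EMULATED_PREEMPTION_TIMER_RATE;

	/* ticks / (tsc_khz * 1000 / 1e9) == ticks * 1e6 / tsc_khz */
	return ticks * 1000000ull / tsc_khz;
}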
+ */ + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); + + vmx_set_constant_host_state(vmx); +} + +static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, + struct vmcs12 *vmcs12) +{ + prepare_vmcs02_constant_state(vmx); + + vmcs_write64(VMCS_LINK_POINTER, -1ull); + + if (enable_vpid) { + if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); + else + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); + } +} + +static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + u32 exec_control, vmcs12_exec_ctrl; + u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); + + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) + prepare_vmcs02_early_full(vmx, vmcs12); + + /* + * HOST_RSP is normally set correctly in vmx_vcpu_run() just before + * entry, but only if the current (host) sp changed from the value + * we wrote last (vmx->host_rsp). This cache is no longer relevant + * if we switch vmcs, and rather than hold a separate cache per vmcs, + * here we just force the write to happen on entry. host_rsp will + * also be written unconditionally by nested_vmx_check_vmentry_hw() + * if we are doing early consistency checks via hardware. + */ + vmx->host_rsp = 0; + + /* + * PIN CONTROLS + */ + exec_control = vmcs12->pin_based_vm_exec_control; + + /* Preemption timer setting is computed directly in vmx_vcpu_run. */ + exec_control |= vmcs_config.pin_based_exec_ctrl; + exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + vmx->loaded_vmcs->hv_timer_armed = false; + + /* Posted interrupts setting is only taken from vmcs12. */ + if (nested_cpu_has_posted_intr(vmcs12)) { + vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; + vmx->nested.pi_pending = false; + } else { + exec_control &= ~PIN_BASED_POSTED_INTR; + } + vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); + + /* + * EXEC CONTROLS + */ + exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; + exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; + exec_control &= ~CPU_BASED_TPR_SHADOW; + exec_control |= vmcs12->cpu_based_vm_exec_control; + + /* + * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if + * nested_get_vmcs12_pages can't fix it up, the illegal value + * will result in a VM entry failure. + */ + if (exec_control & CPU_BASED_TPR_SHADOW) { + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); + vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + } else { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif + } + + /* + * A vmexit (to either L1 hypervisor or L0 userspace) is always needed + * for I/O port accesses. 
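A reduced sketch of the pin-based control merge performed above: L1's requests are OR'ed with L0's required bits, the preemption timer (emulated via hrtimer) is stripped, and posted interrupts are kept only if vmcs12 enables them. Bit positions follow the SDM; the helper name is illustrative.

#include <stdint.h>

#define PIN_PREEMPTION_TIMER (1u << 6)	/* activate VMX-preemption timer */
#define PIN_POSTED_INTR      (1u << 7)	/* process posted interrupts */

static uint32_t merge_pin_controls(uint32_t l1_want, uint32_t l0_need,
				   int l1_posted_intr)
{
	uint32_t ctl = (l1_want | l0_need) & ~PIN_PREEMPTION_TIMER;

	if (!l1_posted_intr)
		ctl &= ~PIN_POSTED_INTR;
	return ctl;
}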
+ */ + exec_control &= ~CPU_BASED_USE_IO_BITMAPS; + exec_control |= CPU_BASED_UNCOND_IO_EXITING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + + /* + * SECONDARY EXEC CONTROLS + */ + if (cpu_has_secondary_exec_ctrls()) { + exec_control = vmx->secondary_exec_control; + + /* Take the following fields only from vmcs12 */ + exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_ENABLE_INVPCID | + SECONDARY_EXEC_RDTSCP | + SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_ENABLE_VMFUNC); + if (nested_cpu_has(vmcs12, + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { + vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & + ~SECONDARY_EXEC_ENABLE_PML; + exec_control |= vmcs12_exec_ctrl; + } + + /* VMCS shadowing for L2 is emulated for now */ + exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; + + if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) + vmcs_write16(GUEST_INTR_STATUS, + vmcs12->guest_intr_status); + + /* + * Write an illegal value to APIC_ACCESS_ADDR. Later, + * nested_get_vmcs12_pages will either fix it up or + * remove the VM execution control. + */ + if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) + vmcs_write64(APIC_ACCESS_ADDR, -1ull); + + if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) + vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); + + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); + } + + /* + * ENTRY CONTROLS + * + * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE + * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate + * on the related bits (if supported by the CPU) in the hope that + * we can avoid VMWrites during vmx_set_efer(). + */ + exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & + ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; + if (cpu_has_load_ia32_efer()) { + if (guest_efer & EFER_LMA) + exec_control |= VM_ENTRY_IA32E_MODE; + if (guest_efer != host_efer) + exec_control |= VM_ENTRY_LOAD_IA32_EFER; + } + vm_entry_controls_init(vmx, exec_control); + + /* + * EXIT CONTROLS + * + * L2->L1 exit controls are emulated - the hardware exit is to L0 so + * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER + * bits may be modified by vmx_set_efer() in prepare_vmcs02(). + */ + exec_control = vmx_vmexit_ctrl(); + if (cpu_has_load_ia32_efer() && guest_efer != host_efer) + exec_control |= VM_EXIT_LOAD_IA32_EFER; + vm_exit_controls_init(vmx, exec_control); + + /* + * Conceptually we want to copy the PML address and index from + * vmcs01 here, and then back to vmcs01 on nested vmexit. But, + * since we always flush the log on each vmexit and never change + * the PML address (once set), this happens to be equivalent to + * simply resetting the index in vmcs02. 
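The EFER speculation above, in isolation: mirror the guest's LMA into the IA-32e-mode entry control and request an EFER load only when guest and host EFER differ. Bit positions follow the SDM and the x86 EFER layout; the helper is illustrative.

#include <stdint.h>

#define ENTRY_IA32E_MODE     (1u << 9)
#define ENTRY_LOAD_IA32_EFER (1u << 15)
#define EFER_LMA_BIT         (1ull << 10)

static uint32_t entry_ctls_for_efer(uint32_t ctls, uint64_t guest_efer,
				    uint64_t host_efer)
{
	ctls &= ~(ENTRY_IA32E_MODE | ENTRY_LOAD_IA32_EFER);
	if (guest_efer & EFER_LMA_BIT)
		ctls |= ENTRY_IA32E_MODE;	/* LMA drives IA-32e mode */
	if (guest_efer != host_efer)
		ctls |= ENTRY_LOAD_IA32_EFER;	/* VMWrite only when needed */
	return ctls;
}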
+ */ + if (enable_pml) + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + + /* + * Interrupt/Exception Fields + */ + if (vmx->nested.nested_run_pending) { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + vmcs12->vm_entry_intr_info_field); + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, + vmcs12->vm_entry_exception_error_code); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmcs12->vm_entry_instruction_len); + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, + vmcs12->guest_interruptibility_info); + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); + } else { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); + } +} + +static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); + vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); + vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); + vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); + vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); + vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); + vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); + vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); + vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); + vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); + vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); + vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); + vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); + vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); + vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); + vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); + vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); + vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + } + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + /* + * L1 may access the L2's PDPTR, so save them to construct + * vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, 
vmcs12->guest_pdptr3); + } + } + + if (nested_cpu_has_xsaves(vmcs12)) + vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); + + /* + * Whether page-faults are trapped is determined by a combination of + * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. + * If enable_ept, L0 doesn't care about page faults and we should + * set all of these to L1's desires. However, if !enable_ept, L0 does + * care about (at least some) page faults, and because it is not easy + * (if at all possible?) to merge L0 and L1's desires, we simply ask + * to exit on each and every L2 page fault. This is done by setting + * MASK=MATCH=0 and (see below) EB.PF=1. + * Note that below we don't need special code to set EB.PF beyond the + * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, + * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when + * !enable_ept, EB.PF is 1, so the "or" will always be 1. + */ + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, + enable_ept ? vmcs12->page_fault_error_code_mask : 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, + enable_ept ? vmcs12->page_fault_error_code_match : 0); + + if (cpu_has_vmx_apicv()) { + vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); + vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); + vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); + vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); + } + + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + set_cr4_guest_host_mask(vmx); + + if (kvm_mpx_supported()) { + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); + else + vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); + } +} + +/* + * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested + * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it + * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 + * guest in a way that will both be appropriate to L1's requests, and our + * needs. In addition to modifying the active vmcs (which is vmcs02), this + * function also has additional necessary side-effects, like setting various + * vcpu->arch fields. + * Returns 0 on success, 1 on failure. Invalid state exit qualification code + * is assigned to entry_failure_code on failure. + */ +static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + u32 *entry_failure_code) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; + + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { + prepare_vmcs02_full(vmx, vmcs12); + vmx->nested.dirty_vmcs12 = false; + } + + /* + * First, the fields that are shadowed. This must be kept in sync + * with vmcs_shadow_fields.h. 
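The PFEC_MASK/PFEC_MATCH rule discussed above can be stated as a one-line predicate (assuming EB.PF = 1): with MASK = MATCH = 0 every page fault matches, which is how the !enable_ept case forces an exit on each and every L2 page fault.

#include <stdbool.h>
#include <stdint.h>

/* True means the page fault causes a VM exit, given EB.PF = 1. */
static bool pf_causes_exit(uint32_t error_code, uint32_t pfec_mask,
			   uint32_t pfec_match)
{
	return (error_code & pfec_mask) == pfec_match;
}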
+ */
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ }
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
+ kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ } else {
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ vmx->nested.preemption_timer_expired = false;
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ vmx_start_preemption_timer(vcpu);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
+ * bitwise-or of what L1 wants to trap for L2, and what we want to
+ * trap. Note that CR0.TS also needs updating - we do this later.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
+ vcpu->arch.pat = vmcs12->guest_ia32_pat;
+ } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+ }
+
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+ if (enable_vpid) {
+ /*
+ * There is no direct mapping between vpid02 and vpid12, the
+ * vpid02 is per-vCPU for L0 and reused while the value of
+ * vpid12 is changed w/ one invvpid during nested vmentry.
+ * The vpid12 is allocated by L1 for L2, so it will not
+ * influence the global bitmap (for vpid01 and vpid02
+ * allocation) even if we spawn a lot of nested vCPUs.
+ */
+ if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
+ if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+ __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
+ }
+ } else {
+ /*
+ * If L1 uses EPT, then L0 needs to execute INVEPT on
+ * EPTP02 instead of EPTP01. Therefore, delay TLB
+ * flush until vmcs02->eptp is fully updated by
+ * KVM_REQ_LOAD_CR3. Note that this assumes
+ * KVM_REQ_TLB_FLUSH is evaluated after
+ * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+ }
+
+ if (nested_cpu_has_ept(vmcs12))
+ nested_ept_init_mmu_context(vcpu);
+ else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ vmx_flush_tlb(vcpu, true);
+
+ /*
+ * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
+ * bits which we consider mandatory enabled.
+ * The CR0_READ_SHADOW is what L2 should have expected to read given
+ * the specifications by L1; it's not enough to take
+ * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may
+ * have more bits than L1 expected.
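The composition behind the CR0_READ_SHADOW write just below, as a standalone helper matching what nested_read_cr0() computes for vmcs02: bits owned by L1 (set in the guest/host mask) read from L1's shadow, all other bits read the real guest value.

#include <stdint.h>

static uint64_t compose_read_shadow(uint64_t guest_val, uint64_t l1_shadow,
				    uint64_t guest_host_mask)
{
	return (guest_val & ~guest_host_mask) | (l1_shadow & guest_host_mask);
}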
+ */ + vmx_set_cr0(vcpu, vmcs12->guest_cr0); + vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); + + vmx_set_cr4(vcpu, vmcs12->guest_cr4); + vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); + + vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); + /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ + vmx_set_efer(vcpu, vcpu->arch.efer); + + /* + * Guest state is invalid and unrestricted guest is disabled, + * which means L1 attempted VMEntry to L2 with invalid state. + * Fail the VMEntry. + */ + if (vmx->emulation_required) { + *entry_failure_code = ENTRY_FAIL_DEFAULT; + return 1; + } + + /* Shadow page tables on either EPT or shadow page tables. */ + if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), + entry_failure_code)) + return 1; + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; + + kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); + kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); + return 0; +} + +static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has_nmi_exiting(vmcs12) && + nested_cpu_has_virtual_nmis(vmcs12)) + return -EINVAL; + + if (!nested_cpu_has_virtual_nmis(vmcs12) && + nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)) + return -EINVAL; + + return 0; +} + +static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int maxphyaddr = cpuid_maxphyaddr(vcpu); + + /* Check for memory type validity */ + switch (address & VMX_EPTP_MT_MASK) { + case VMX_EPTP_MT_UC: + if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)) + return false; + break; + case VMX_EPTP_MT_WB: + if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)) + return false; + break; + default: + return false; + } + + /* only 4 levels page-walk length are valid */ + if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) + return false; + + /* Reserved bits should not be set */ + if (address >> maxphyaddr || ((address >> 7) & 0x1f)) + return false; + + /* AD, if set, should be supported */ + if (address & VMX_EPTP_AD_ENABLE_BIT) { + if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)) + return false; + } + + return true; +} + +/* + * Checks related to VM-Execution Control Fields + */ +static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control, + vmx->nested.msrs.pinbased_ctls_low, + vmx->nested.msrs.pinbased_ctls_high) || + !vmx_control_verify(vmcs12->cpu_based_vm_exec_control, + vmx->nested.msrs.procbased_ctls_low, + vmx->nested.msrs.procbased_ctls_high)) + return -EINVAL; + + if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && + !vmx_control_verify(vmcs12->secondary_vm_exec_control, + vmx->nested.msrs.secondary_ctls_low, + vmx->nested.msrs.secondary_ctls_high)) + return -EINVAL; + + if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) || + nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || + nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || + nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || + nested_vmx_check_apic_access_controls(vcpu, vmcs12) || + nested_vmx_check_apicv_controls(vcpu, vmcs12) || + nested_vmx_check_nmi_controls(vmcs12) || + nested_vmx_check_pml_controls(vcpu, vmcs12) || + nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || + nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || + 
nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || + (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) + return -EINVAL; + + if (nested_cpu_has_ept(vmcs12) && + !valid_ept_address(vcpu, vmcs12->ept_pointer)) + return -EINVAL; + + if (nested_cpu_has_vmfunc(vmcs12)) { + if (vmcs12->vm_function_control & + ~vmx->nested.msrs.vmfunc_controls) + return -EINVAL; + + if (nested_cpu_has_eptp_switching(vmcs12)) { + if (!nested_cpu_has_ept(vmcs12) || + !page_address_valid(vcpu, vmcs12->eptp_list_address)) + return -EINVAL; + } + } + + return 0; +} + +/* + * Checks related to VM-Exit Control Fields + */ +static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx_control_verify(vmcs12->vm_exit_controls, + vmx->nested.msrs.exit_ctls_low, + vmx->nested.msrs.exit_ctls_high) || + nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)) + return -EINVAL; + + return 0; +} + +/* + * Checks related to VM-Entry Control Fields + */ +static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx_control_verify(vmcs12->vm_entry_controls, + vmx->nested.msrs.entry_ctls_low, + vmx->nested.msrs.entry_ctls_high)) + return -EINVAL; + + /* + * From the Intel SDM, volume 3: + * Fields relevant to VM-entry event injection must be set properly. + * These fields are the VM-entry interruption-information field, the + * VM-entry exception error code, and the VM-entry instruction length. + */ + if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { + u32 intr_info = vmcs12->vm_entry_intr_info_field; + u8 vector = intr_info & INTR_INFO_VECTOR_MASK; + u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; + bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; + bool should_have_error_code; + bool urg = nested_cpu_has2(vmcs12, + SECONDARY_EXEC_UNRESTRICTED_GUEST); + bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; + + /* VM-entry interruption-info field: interruption type */ + if (intr_type == INTR_TYPE_RESERVED || + (intr_type == INTR_TYPE_OTHER_EVENT && + !nested_cpu_supports_monitor_trap_flag(vcpu))) + return -EINVAL; + + /* VM-entry interruption-info field: vector */ + if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || + (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || + (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) + return -EINVAL; + + /* VM-entry interruption-info field: deliver error code */ + should_have_error_code = + intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && + x86_exception_has_error_code(vector); + if (has_error_code != should_have_error_code) + return -EINVAL; + + /* VM-entry exception error code */ + if (has_error_code && + vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) + return -EINVAL; + + /* VM-entry interruption-info field: reserved bits */ + if (intr_info & INTR_INFO_RESVD_BITS_MASK) + return -EINVAL; + + /* VM-entry instruction length */ + switch (intr_type) { + case INTR_TYPE_SOFT_EXCEPTION: + case INTR_TYPE_SOFT_INTR: + case INTR_TYPE_PRIV_SW_EXCEPTION: + if ((vmcs12->vm_entry_instruction_len > 15) || + (vmcs12->vm_entry_instruction_len == 0 && + !nested_cpu_has_zero_length_injection(vcpu))) + return -EINVAL; + } + } + + if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) + return -EINVAL; + + return 0; +} + +/* + * Checks related to Host Control Registers and MSRs + */ +static int nested_check_host_control_regs(struct kvm_vcpu *vcpu, + 
struct vmcs12 *vmcs12) +{ + bool ia32e; + + if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || + !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || + !nested_cr3_valid(vcpu, vmcs12->host_cr3)) + return -EINVAL; + /* + * If the load IA32_EFER VM-exit control is 1, bits reserved in the + * IA32_EFER MSR must be 0 in the field for that register. In addition, + * the values of the LMA and LME bits in the field must each be that of + * the host address-space size VM-exit control. + */ + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { + ia32e = (vmcs12->vm_exit_controls & + VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; + if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) + return -EINVAL; + } + + return 0; +} + +/* + * Checks related to Guest Non-register State + */ +static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) +{ + if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && + vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) + return -EINVAL; + + return 0; +} + +static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_check_vm_execution_controls(vcpu, vmcs12) || + nested_check_vm_exit_controls(vcpu, vmcs12) || + nested_check_vm_entry_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + if (nested_check_host_control_regs(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; + + if (nested_check_guest_non_reg_state(vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + return 0; +} + +static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + int r; + struct page *page; + struct vmcs12 *shadow; + + if (vmcs12->vmcs_link_pointer == -1ull) + return 0; + + if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)) + return -EINVAL; + + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); + if (is_error_page(page)) + return -EINVAL; + + r = 0; + shadow = kmap(page); + if (shadow->hdr.revision_id != VMCS12_REVISION || + shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)) + r = -EINVAL; + kunmap(page); + kvm_release_page_clean(page); + return r; +} + +static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12, + u32 *exit_qual) +{ + bool ia32e; + + *exit_qual = ENTRY_FAIL_DEFAULT; + + if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || + !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) + return 1; + + if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { + *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; + return 1; + } + + /* + * If the load IA32_EFER VM-entry control is 1, the following checks + * are performed on the field for the IA32_EFER MSR: + * - Bits reserved in the IA32_EFER MSR must be 0. + * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of + * the IA-32e mode guest VM-exit control. It must also be identical + * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to + * CR0.PG) is 1. 
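The LMA/LME consistency rule applied above to the host EFER field (and below to the guest EFER field) can be factored as a small predicate; bit positions are the standard EFER layout, and the helper is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define EFER_LME_BIT (1ull << 8)
#define EFER_LMA_BIT (1ull << 10)

/*
 * When the corresponding "load IA32_EFER" control is set, LMA must
 * equal the address-space-size control; LME must match too (for the
 * guest, only while CR0.PG = 1, expressed via check_lme).
 */
static bool efer_consistent(uint64_t efer, bool ia32e, bool check_lme)
{
	if (ia32e != !!(efer & EFER_LMA_BIT))
		return false;
	return !check_lme || ia32e == !!(efer & EFER_LME_BIT);
}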
+ */ + if (to_vmx(vcpu)->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { + ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; + if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || + ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || + ((vmcs12->guest_cr0 & X86_CR0_PG) && + ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) + return 1; + } + + if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && + (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || + (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) + return 1; + + return 0; +} + +static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long cr3, cr4; + + if (!nested_early_check) + return 0; + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); + + preempt_disable(); + + vmx_prepare_switch_to_guest(vcpu); + + /* + * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, + * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to + * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. + * there is no need to preserve other bits or save/restore the field. + */ + vmcs_writel(GUEST_RFLAGS, 0); + + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { + vmcs_writel(HOST_CR3, cr3); + vmx->loaded_vmcs->host_state.cr3 = cr3; + } + + cr4 = cr4_read_shadow(); + if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + + vmx->__launched = vmx->loaded_vmcs->launched; + + asm( + /* Set HOST_RSP */ + "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" + "mov %%" _ASM_SP ", %c[host_rsp](%1)\n\t" + "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ + + /* Check if vmlaunch or vmresume is needed */ + "cmpl $0, %c[launched](%% " _ASM_CX")\n\t" + + "call vmx_vmenter\n\t" + + /* Set vmx->fail accordingly */ + "setbe %c[fail](%% " _ASM_CX")\n\t" + : ASM_CALL_CONSTRAINT + : "c"(vmx), "d"((unsigned long)HOST_RSP), + [launched]"i"(offsetof(struct vcpu_vmx, __launched)), + [fail]"i"(offsetof(struct vcpu_vmx, fail)), + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), + [wordsize]"i"(sizeof(ulong)) + : "rax", "cc", "memory" + ); + + preempt_enable(); + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + if (vmx->fail) { + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + vmx->fail = 0; + return 1; + } + + /* + * VMExit clears RFLAGS.IF and DR7, even on a consistency check. + */ + local_irq_enable(); + if (hw_breakpoint_active()) + set_debugreg(__this_cpu_read(cpu_dr7), 7); + + /* + * A non-failing VMEntry means we somehow entered guest mode with + * an illegal RIP, and that's just the tip of the iceberg. There + * is no telling what memory has been modified or what state has + * been exposed to unknown code. Hitting this all but guarantees + * a (very critical) hardware issue.
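+ * + * Bit 31 of the exit reason (VMX_EXIT_REASONS_FAILED_VMENTRY) is set by + * hardware on any VM-exit caused by a failed VMEnter; the WARN below + * fires only if that bit is clear, i.e. if the VMEnter went through.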
+ */ + WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & + VMX_EXIT_REASONS_FAILED_VMENTRY)); + + return 0; +} +STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw); + + +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12); + +static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct page *page; + u64 hpa; + + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + /* + * Translate L1 physical address to host physical + * address for vmcs02. Keep the page pinned, so this + * physical address remains valid. We keep a reference + * to it so we can release it later. + */ + if (vmx->nested.apic_access_page) { /* shouldn't happen */ + kvm_release_page_dirty(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = NULL; + } + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); + /* + * If translation failed, no matter: This feature asks + * to exit when accessing the given address, and if it + * can never be accessed, this feature won't do + * anything anyway. + */ + if (!is_error_page(page)) { + vmx->nested.apic_access_page = page; + hpa = page_to_phys(vmx->nested.apic_access_page); + vmcs_write64(APIC_ACCESS_ADDR, hpa); + } else { + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); + } + } + + if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { + if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ + kvm_release_page_dirty(vmx->nested.virtual_apic_page); + vmx->nested.virtual_apic_page = NULL; + } + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); + + /* + * If translation failed, VM entry will fail because + * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. + * Failing the vm entry is _not_ what the processor + * does but it's basically the only possibility we + * have. We could still enter the guest if CR8 load + * exits are enabled, CR8 store exits are enabled, and + * virtualize APIC access is disabled; in this case + * the processor would never use the TPR shadow and we + * could simply clear the bit from the execution + * control. But such a configuration is useless, so + * let's keep the code simple. 
+ */ + if (!is_error_page(page)) { + vmx->nested.virtual_apic_page = page; + hpa = page_to_phys(vmx->nested.virtual_apic_page); + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); + } + } + + if (nested_cpu_has_posted_intr(vmcs12)) { + if (vmx->nested.pi_desc_page) { /* shouldn't happen */ + kunmap(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); + vmx->nested.pi_desc_page = NULL; + vmx->nested.pi_desc = NULL; + vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull); + } + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); + if (is_error_page(page)) + return; + vmx->nested.pi_desc_page = page; + vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); + vmx->nested.pi_desc = + (struct pi_desc *)((void *)vmx->nested.pi_desc + + (unsigned long)(vmcs12->posted_intr_desc_addr & + (PAGE_SIZE - 1))); + vmcs_write64(POSTED_INTR_DESC_ADDR, + page_to_phys(vmx->nested.pi_desc_page) + + (unsigned long)(vmcs12->posted_intr_desc_addr & + (PAGE_SIZE - 1))); + } + if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); + else + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); +} + +/* + * Intel's VMX Instruction Reference specifies a common set of prerequisites + * for running VMX instructions (except VMXON, whose prerequisites are + * slightly different). It also specifies what exception to inject otherwise. + * Note that many of these exceptions have priority over VM exits, so they + * don't have to be checked again here. + */ +static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) +{ + if (!to_vmx(vcpu)->nested.vmxon) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + if (vmx_get_cpl(vcpu)) { + kvm_inject_gp(vcpu, 0); + return 0; + } + + return 1; +} + +static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) +{ + u8 rvi = vmx_get_rvi(); + u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); + + return ((rvi & 0xf0) > (vppr & 0xf0)); +} + +static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12); + +/* + * If from_vmentry is false, this is being called from state restore (either RSM + * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. + * + * Returns: + * 0 - success, i.e.
proceed with actual VMEnter + * 1 - consistency check VMExit + * -1 - consistency check VMFail + */ +int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + bool evaluate_pending_interrupts; + u32 exit_reason = EXIT_REASON_INVALID_STATE; + u32 exit_qual; + + evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & + (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING); + if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) + evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); + + if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) + vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); + if (kvm_mpx_supported() && + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); + + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); + + prepare_vmcs02_early(vmx, vmcs12); + + if (from_vmentry) { + nested_get_vmcs12_pages(vcpu); + + if (nested_vmx_check_vmentry_hw(vcpu)) { + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + return -1; + } + + if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit; + } + + enter_guest_mode(vcpu); + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset += vmcs12->tsc_offset; + + if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit_guest_mode; + + if (from_vmentry) { + exit_reason = EXIT_REASON_MSR_LOAD_FAIL; + exit_qual = nested_vmx_load_msr(vcpu, + vmcs12->vm_entry_msr_load_addr, + vmcs12->vm_entry_msr_load_count); + if (exit_qual) + goto vmentry_fail_vmexit_guest_mode; + } else { + /* + * The MMU is not initialized to point at the right entities yet and + * "get pages" would need to read data from the guest (i.e. we will + * need to perform gpa to hpa translation). Request a call + * to nested_get_vmcs12_pages before the next VM-entry. The MSRs + * have already been set at vmentry time and should not be reset. + */ + kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); + } + + /* + * If L1 had a pending IRQ/NMI until it executed + * VMLAUNCH/VMRESUME which wasn't delivered because it was + * disallowed (e.g. interrupts disabled), L0 needs to + * evaluate if this pending event should cause an exit from L2 + * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't + * intercept EXTERNAL_INTERRUPT). + * + * Usually this would be handled by the processor noticing an + * IRQ/NMI window request, or checking RVI during evaluation of + * pending virtual interrupts. However, this setting was done + * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 + * to perform pending event evaluation by requesting a KVM_REQ_EVENT. + */ + if (unlikely(evaluate_pending_interrupts)) + kvm_make_request(KVM_REQ_EVENT, vcpu); + + /* + * Note no nested_vmx_succeed or nested_vmx_fail here. At this point + * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet + * returned as far as L1 is concerned. It will only return (and set + * the success flag) when L2 exits (see nested_vmx_vmexit()). + */ + return 0; + + /* + * A failed consistency check that leads to a VMExit during L1's + * VMEnter to L2 is a variation of a normal VMExit, as explained in + * 26.7 "VM-entry failures during or after loading guest state".
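+ * + * The vmentry_fail_vmexit_guest_mode label below additionally unwinds + * the TSC offset and the guest-mode flag that were already set up for L2.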
+ */ +vmentry_fail_vmexit_guest_mode: + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + leave_guest_mode(vcpu); + +vmentry_fail_vmexit: + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + + if (!from_vmentry) + return 1; + + load_vmcs12_host_state(vcpu, vmcs12); + vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->exit_qualification = exit_qual; + if (enable_shadow_vmcs || vmx->nested.hv_evmcs) + vmx->nested.need_vmcs12_sync = true; + return 1; +} + +/* + * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 + * for running an L2 nested guest. + */ +static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) +{ + struct vmcs12 *vmcs12; + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); + int ret; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) + return 1; + + if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); + + vmcs12 = get_vmcs12(vcpu); + + /* + * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact + * that there *is* a valid VMCS pointer, RFLAGS.CF is set + * rather than RFLAGS.ZF, and no error number is stored to the + * VM-instruction error field. + */ + if (vmcs12->hdr.shadow_vmcs) + return nested_vmx_failInvalid(vcpu); + + if (vmx->nested.hv_evmcs) { + copy_enlightened_to_vmcs12(vmx); + /* Enlightened VMCS doesn't have launch state */ + vmcs12->launch_state = !launch; + } else if (enable_shadow_vmcs) { + copy_shadow_to_vmcs12(vmx); + } + + /* + * The nested entry process starts with enforcing various prerequisites + * on vmcs12 as required by the Intel SDM, and acts appropriately when + * they fail: As the SDM explains, some conditions should cause the + * instruction to fail, while others will cause the instruction to seem + * to succeed, but return an EXIT_REASON_INVALID_STATE. + * To speed up the normal (success) code path, we should avoid checking + * for misconfigurations which will anyway be caught by the processor + * when using the merged vmcs02. + */ + if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); + + if (vmcs12->launch_state == launch) + return nested_vmx_failValid(vcpu, + launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS + : VMXERR_VMRESUME_NONLAUNCHED_VMCS); + + ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12); + if (ret) + return nested_vmx_failValid(vcpu, ret); + + /* + * We're finally done with prerequisite checking, and can start with + * the nested entry. + */ + vmx->nested.nested_run_pending = 1; + ret = nested_vmx_enter_non_root_mode(vcpu, true); + vmx->nested.nested_run_pending = !ret; + if (ret > 0) + return 1; + else if (ret) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + + /* Hide L1D cache contents from the nested guest. */ + vmx->vcpu.arch.l1tf_flush_l1d = true; + + /* + * Must happen outside of nested_vmx_enter_non_root_mode() as it will + * also be used as part of restoring nVMX state for + * snapshot restore (migration). + * + * In this flow, it is assumed that vmcs12 cache was + * transferred as part of captured nVMX state and should + * therefore not be read from guest memory (which may not + * exist on destination host yet).
+ */ + nested_cache_shadow_vmcs12(vcpu, vmcs12); + + /* + * If we're entering a halted L2 vcpu and the L2 vcpu won't be + * awakened by event injection or by an NMI-window VM-exit or + * by an interrupt-window VM-exit, halt the vcpu. + */ + if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && + !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) && + !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) && + (vmcs12->guest_rflags & X86_EFLAGS_IF))) { + vmx->nested.nested_run_pending = 0; + return kvm_vcpu_halt(vcpu); + } + return 1; +} + +/* + * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date + * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). + * This function returns the new value we should put in vmcs12.guest_cr0. + * It's not enough to just return the vmcs02 GUEST_CR0. Rather, + * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now + * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 + * didn't trap the bit, because if L1 did, so would L0). + * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have + * been modified by L2, and L1 knows it. So just leave the old value of + * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 + * isn't relevant, because if L0 traps this bit it can set it to anything. + * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have + * changed these bits, and therefore they need to be updated, but L0 + * didn't necessarily allow them to be changed in GUEST_CR0 - and rather + * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. + */ +static inline unsigned long +vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + return + /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | + /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | + /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | + vcpu->arch.cr0_guest_owned_bits)); +} + +static inline unsigned long +vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + return + /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | + /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | + /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | + vcpu->arch.cr4_guest_owned_bits)); +} + +static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 idt_vectoring; + unsigned int nr; + + if (vcpu->arch.exception.injected) { + nr = vcpu->arch.exception.nr; + idt_vectoring = nr | VECTORING_INFO_VALID_MASK; + + if (kvm_exception_is_soft(nr)) { + vmcs12->vm_exit_instruction_len = + vcpu->arch.event_exit_inst_len; + idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; + } else + idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; + + if (vcpu->arch.exception.has_error_code) { + idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; + vmcs12->idt_vectoring_error_code = + vcpu->arch.exception.error_code; + } + + vmcs12->idt_vectoring_info_field = idt_vectoring; + } else if (vcpu->arch.nmi_injected) { + vmcs12->idt_vectoring_info_field = + INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; + } else if (vcpu->arch.interrupt.injected) { + nr = vcpu->arch.interrupt.nr; + idt_vectoring = nr | VECTORING_INFO_VALID_MASK; + + if (vcpu->arch.interrupt.soft) { + idt_vectoring |= INTR_TYPE_SOFT_INTR; + vmcs12->vm_entry_instruction_len = + vcpu->arch.event_exit_inst_len; + }
else + idt_vectoring |= INTR_TYPE_EXT_INTR; + + vmcs12->idt_vectoring_info_field = idt_vectoring; + } +} + + +static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + gfn_t gfn; + + /* + * Don't need to mark the APIC access page dirty; it is never + * written to by the CPU during APIC virtualization. + */ + + if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { + gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + } + + if (nested_cpu_has_posted_intr(vmcs12)) { + gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + } +} + +static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int max_irr; + void *vapic_page; + u16 status; + + if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) + return; + + vmx->nested.pi_pending = false; + if (!pi_test_and_clear_on(vmx->nested.pi_desc)) + return; + + max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); + if (max_irr != 256) { + vapic_page = kmap(vmx->nested.virtual_apic_page); + __kvm_apic_update_irr(vmx->nested.pi_desc->pir, + vapic_page, &max_irr); + kunmap(vmx->nested.virtual_apic_page); + + status = vmcs_read16(GUEST_INTR_STATUS); + if ((u8)max_irr > ((u8)status & 0xff)) { + status &= ~0xff; + status |= (u8)max_irr; + vmcs_write16(GUEST_INTR_STATUS, status); + } + } + + nested_mark_vmcs12_pages_dirty(vcpu); +} + +static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, + unsigned long exit_qual) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned int nr = vcpu->arch.exception.nr; + u32 intr_info = nr | INTR_INFO_VALID_MASK; + + if (vcpu->arch.exception.has_error_code) { + vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; + intr_info |= INTR_INFO_DELIVER_CODE_MASK; + } + + if (kvm_exception_is_soft(nr)) + intr_info |= INTR_TYPE_SOFT_EXCEPTION; + else + intr_info |= INTR_TYPE_HARD_EXCEPTION; + + if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && + vmx_get_nmi_mask(vcpu)) + intr_info |= INTR_INFO_UNBLOCK_NMI; + + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); +} + +static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qual; + bool block_nested_events = + vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); + + if (vcpu->arch.exception.pending && + nested_vmx_check_exception(vcpu, &exit_qual)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_inject_exception_vmexit(vcpu, exit_qual); + return 0; + } + + if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && + vmx->nested.preemption_timer_expired) { + if (block_nested_events) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); + return 0; + } + + if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, + NMI_VECTOR | INTR_TYPE_NMI_INTR | + INTR_INFO_VALID_MASK, 0); + /* + * The NMI-triggered VM exit counts as injection: + * clear this one and block further NMIs. 
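+ * (vmx_set_nmi_mask() below emulates the blocked-by-NMI state that + * hardware would establish after delivering an NMI.)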
+ */ + vcpu->arch.nmi_pending = 0; + vmx_set_nmi_mask(vcpu, true); + return 0; + } + + if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && + nested_exit_on_intr(vcpu)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); + return 0; + } + + vmx_complete_nested_posted_interrupt(vcpu); + return 0; +} + +static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) +{ + ktime_t remaining = + hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); + u64 value; + + if (ktime_to_ns(remaining) <= 0) + return 0; + + value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; + do_div(value, 1000000); + return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; +} + +/* + * Update the guest state fields of vmcs12 to reflect changes that + * occurred while L2 was running. (The "IA-32e mode guest" bit of the + * VM-entry controls is also updated, since this is really a guest + * state bit.) + */ +static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); + vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); + + vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); + vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); + vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); + + vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); + vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); + vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); + vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); + vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); + vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); + vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); + vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); + vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); + vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); + vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); + vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); + vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); + vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); + vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); + vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); + vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); + vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); + vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); + vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); + vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); + vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); + vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); + vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); + vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); + vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); + vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); + vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); + vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); + vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); + vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); + vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); + vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); + vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); + vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); + vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); + + vmcs12->guest_interruptibility_info = + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); + 
vmcs12->guest_pending_dbg_exceptions = + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) + vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; + else + vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; + + if (nested_cpu_has_preemption_timer(vmcs12)) { + if (vmcs12->vm_exit_controls & + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) + vmcs12->vmx_preemption_timer_value = + vmx_get_preemption_timer_value(vcpu); + hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); + } + + /* + * In some cases (usually, nested EPT), L2 is allowed to change its + * own CR3 without exiting. If it has changed it, we must keep it. + * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined + * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. + * + * Additionally, restore L2's PDPTR to vmcs12. + */ + if (enable_ept) { + vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); + vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); + vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); + vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); + vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); + } + + vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); + + if (nested_cpu_has_vid(vmcs12)) + vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); + + vmcs12->vm_entry_controls = + (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); + + if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { + kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); + vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); + } + + /* TODO: These cannot have changed unless we have MSR bitmaps and + * the relevant bit asks not to trap the change */ + if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) + vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); + if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) + vmcs12->guest_ia32_efer = vcpu->arch.efer; + vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); + vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); + vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); + if (kvm_mpx_supported()) + vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); +} + +/* + * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits + * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), + * and this function updates it to reflect the changes to the guest state while + * L2 was running (and perhaps made some exits which were handled directly by L0 + * without going back to L1), and to reflect the exit reason. + * Note that we do not have to copy all VMCS fields here, just those that + * could have been changed by the L2 guest or the exit - i.e., the guest-state + * and exit-information fields only. Other fields are modified by L1 with + * VMWRITE, which already writes to vmcs12 directly.
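+ * + * The guest-state sync is factored out into sync_vmcs12() above, which is + * also used on its own when capturing vmcs12 state for migration.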
+ */ +static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + u32 exit_reason, u32 exit_intr_info, + unsigned long exit_qualification) +{ + /* update guest state fields: */ + sync_vmcs12(vcpu, vmcs12); + + /* update exit information fields: */ + + vmcs12->vm_exit_reason = exit_reason; + vmcs12->exit_qualification = exit_qualification; + vmcs12->vm_exit_intr_info = exit_intr_info; + + vmcs12->idt_vectoring_info_field = 0; + vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + + if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { + vmcs12->launch_state = 1; + + /* vm_entry_intr_info_field is cleared on exit. Emulate this + * instead of reading the real value. */ + vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; + + /* + * Transfer the event that L0 or L1 may have wanted to inject + * into L2 to IDT_VECTORING_INFO_FIELD. + */ + vmcs12_save_pending_event(vcpu, vmcs12); + + /* + * According to the spec, there's no need to store the guest's + * MSRs if the exit is due to a VM-entry failure that occurs + * during or after loading the guest state. Since this exit + * does not fall in that category, we need to save the MSRs. + */ + if (nested_vmx_store_msr(vcpu, + vmcs12->vm_exit_msr_store_addr, + vmcs12->vm_exit_msr_store_count)) + nested_vmx_abort(vcpu, + VMX_ABORT_SAVE_GUEST_MSR_FAIL); + } + + /* + * Drop what we picked up for L2 via vmx_complete_interrupts. It is + * preserved above and would only end up incorrectly in L1. + */ + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); +} + +/* + * A part of what we need to do when the nested L2 guest exits and we want to + * run its L1 parent, is to reset L1's guest state to the host state specified + * in vmcs12. + * This function is to be called not only on normal nested exit, but also on + * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry + * Failures During or After Loading Guest State"). + * This function should be called when the active VMCS is L1's (vmcs01). + */ +static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + struct kvm_segment seg; + u32 entry_failure_code; + + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) + vcpu->arch.efer = vmcs12->host_ia32_efer; + else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) + vcpu->arch.efer |= (EFER_LMA | EFER_LME); + else + vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); + vmx_set_efer(vcpu, vcpu->arch.efer); + + kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); + kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); + vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); + vmx_set_interrupt_shadow(vcpu, 0); + + /* + * Note that calling vmx_set_cr0 is important, even if cr0 hasn't + * actually changed, because vmx_set_cr0 refers to efer set above. + * + * CR0_GUEST_HOST_MASK is already set in the original vmcs01 + * (KVM doesn't change it); + */ + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmx_set_cr0(vcpu, vmcs12->host_cr0); + + /* Same as above - no reason to call set_cr4_guest_host_mask(). */ + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + vmx_set_cr4(vcpu, vmcs12->host_cr4); + + nested_ept_uninit_mmu_context(vcpu); + + /* + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed.
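+ * If it does fail there is no graceful recovery, hence the VMX abort + * (VMX_ABORT_LOAD_HOST_PDPTE_FAIL) below.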
+ */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; + + /* + * If vmcs01 doesn't use VPID, CPU flushes TLB on every + * VMEntry/VMExit. Thus, no need to flush TLB. + * + * If vmcs12 doesn't use VPID, L1 expects TLB to be + * flushed on every VMEntry/VMExit. + * + * Otherwise, we can preserve TLB entries as long as we are + * able to tag L1 TLB entries differently than L2 TLB entries. + * + * If vmcs12 uses EPT, we need to execute this flush on EPTP01 + * and therefore we request the TLB flush to happen only after VMCS EPTP + * has been set by KVM_REQ_LOAD_CR3. + */ + if (enable_vpid && + (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); + + /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ + if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) + vmcs_write64(GUEST_BNDCFGS, 0); + + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { + vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); + vcpu->arch.pat = vmcs12->host_ia32_pat; + } + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) + vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, + vmcs12->host_ia32_perf_global_ctrl); + + /* Set L1 segment info according to Intel SDM + 27.5.2 Loading Host Segment and Descriptor-Table Registers */ + seg = (struct kvm_segment) { + .base = 0, + .limit = 0xFFFFFFFF, + .selector = vmcs12->host_cs_selector, + .type = 11, + .present = 1, + .s = 1, + .g = 1 + }; + if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) + seg.l = 1; + else + seg.db = 1; + vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); + seg = (struct kvm_segment) { + .base = 0, + .limit = 0xFFFFFFFF, + .type = 3, + .present = 1, + .s = 1, + .db = 1, + .g = 1 + }; + seg.selector = vmcs12->host_ds_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); + seg.selector = vmcs12->host_es_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); + seg.selector = vmcs12->host_ss_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); + seg.selector = vmcs12->host_fs_selector; + seg.base = vmcs12->host_fs_base; + vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); + seg.selector = vmcs12->host_gs_selector; + seg.base = vmcs12->host_gs_base; + vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); + seg = (struct kvm_segment) { + .base = vmcs12->host_tr_base, + .limit = 0x67, + .selector = vmcs12->host_tr_selector, + .type = 11, + .present = 1 + }; + vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); + + kvm_set_dr(vcpu, 7, 0x400); + vmcs_write64(GUEST_IA32_DEBUGCTL, 0); + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); + + if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, + vmcs12->vm_exit_msr_load_count)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); +} + +static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) +{ + struct shared_msr_entry *efer_msr; + unsigned int i; + + if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) + return vmcs_read64(GUEST_IA32_EFER); + + 
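/* + * If the CPU supports the load-EFER control but it was left clear, + * vmcs01's guest EFER simply matched the host's; otherwise it must be + * recovered from the MSR autoload list or the shared-MSR array, falling + * back to host_efer. + */ +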
if (cpu_has_load_ia32_efer()) + return host_efer; + + for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { + if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) + return vmx->msr_autoload.guest.val[i].value; + } + + efer_msr = find_msr_entry(vmx, MSR_EFER); + if (efer_msr) + return efer_msr->data; + + return host_efer; +} + +static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmx_msr_entry g, h; + struct msr_data msr; + gpa_t gpa; + u32 i, j; + + vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { + /* + * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set + * as vmcs01.GUEST_DR7 contains a userspace defined value + * and vcpu->arch.dr7 is not squirreled away before the + * nested VMENTER (not worth adding a variable in nested_vmx). + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + kvm_set_dr(vcpu, 7, DR7_FIXED_1); + else + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); + } + + /* + * Note that calling vmx_set_{efer,cr0,cr4} is important as they + * handle a variety of side effects to KVM's software model. + */ + vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); + + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); + + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); + + nested_ept_uninit_mmu_context(vcpu); + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + + /* + * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs + * from vmcs01 (if necessary). The PDPTRs are not loaded on + * VMFail; like everything else, we just need to ensure our + * software model is up-to-date. + */ + ept_save_pdptrs(vcpu); + + kvm_mmu_reset_context(vcpu); + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); + + /* + * This nasty bit of open coding is a compromise between blindly + * loading L1's MSRs using the exit load lists (incorrect emulation + * of VMFail), leaving the nested VM's MSRs in the software model + * (incorrect behavior), and snapshotting the modified MSRs (too + * expensive since the lists are unbounded by hardware). For each + * MSR that was (prematurely) loaded from the nested VMEntry load + * list, reload it from the exit load list if it exists and differs + * from the guest value. The intent is to stuff host state as + * silently as possible, not to fully process the exit load list.
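+ * + * The nested walk below is O(entry-list length * exit-list length); + * that is tolerable only because both lists are expected to be short.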
+ */ + msr.host_initiated = false; + for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { + gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); + if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { + pr_debug_ratelimited( + "%s read MSR index failed (%u, 0x%08llx)\n", + __func__, i, gpa); + goto vmabort; + } + + for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { + gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); + if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { + pr_debug_ratelimited( + "%s read MSR failed (%u, 0x%08llx)\n", + __func__, j, gpa); + goto vmabort; + } + if (h.index != g.index) + continue; + if (h.value == g.value) + break; + + if (nested_vmx_load_msr_check(vcpu, &h)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, j, h.index, h.reserved); + goto vmabort; + } + + msr.index = h.index; + msr.data = h.value; + if (kvm_set_msr(vcpu, &msr)) { + pr_debug_ratelimited( + "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", + __func__, j, h.index, h.value); + goto vmabort; + } + } + } + + return; + +vmabort: + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); +} + +/* + * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 + * and modify vmcs12 to make it see what it would expect to see there if + * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) + */ +void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + u32 exit_intr_info, unsigned long exit_qualification) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + /* trying to cancel vmlaunch/vmresume is a bug */ + WARN_ON_ONCE(vmx->nested.nested_run_pending); + + leave_guest_mode(vcpu); + + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + + if (likely(!vmx->fail)) { + if (exit_reason == -1) + sync_vmcs12(vcpu, vmcs12); + else + prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, + exit_qualification); + + /* + * Must happen outside of sync_vmcs12() as it will + * also be used to capture vmcs12 cache as part of + * capturing nVMX state for snapshot (migration). + * + * Otherwise, this flush will dirty guest memory at a + * point it is already assumed by user-space to be + * immutable. + */ + nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); + } else { + /* + * The only expected VM-instruction error is "VM entry with + * invalid control field(s)." Anything else indicates a + * problem with L0. And we should never get here with a + * VMFail of any type if early consistency checks are enabled. 
+ */ + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + WARN_ON_ONCE(nested_early_check); + } + + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + + /* Update any VMCS fields that might have changed while L2 ran */ + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); + + if (kvm_has_tsc_control) + decache_tsc_multiplier(vmx); + + if (vmx->nested.change_vmcs01_virtual_apic_mode) { + vmx->nested.change_vmcs01_virtual_apic_mode = false; + vmx_set_virtual_apic_mode(vcpu); + } else if (!nested_cpu_has_ept(vmcs12) && + nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + vmx_flush_tlb(vcpu, true); + } + + /* This is needed for the same reason it was needed in prepare_vmcs02 */ + vmx->host_rsp = 0; + + /* Unpin physical memory we referred to in vmcs02 */ + if (vmx->nested.apic_access_page) { + kvm_release_page_dirty(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = NULL; + } + if (vmx->nested.virtual_apic_page) { + kvm_release_page_dirty(vmx->nested.virtual_apic_page); + vmx->nested.virtual_apic_page = NULL; + } + if (vmx->nested.pi_desc_page) { + kunmap(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); + vmx->nested.pi_desc_page = NULL; + vmx->nested.pi_desc = NULL; + } + + /* + * We are now running in L2; the mmu_notifier will force a reload of + * the page's hpa for the L2 vmcs. It needs to be reloaded for L1 + * before entering L1. + */ + kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); + + if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) + vmx->nested.need_vmcs12_sync = true; + + /* in case we halted in L2 */ + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + + if (likely(!vmx->fail)) { + /* + * TODO: SDM says that with acknowledge interrupt on + * exit, bit 31 of the VM-exit interrupt information + * (valid interrupt) is always set to 1 on + * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't + * need kvm_cpu_has_interrupt(). See the commit + * message for details. + */ + if (nested_exit_intr_ack_set(vcpu) && + exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + kvm_cpu_has_interrupt(vcpu)) { + int irq = kvm_cpu_get_interrupt(vcpu); + WARN_ON(irq < 0); + vmcs12->vm_exit_intr_info = irq | + INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; + } + + if (exit_reason != -1) + trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, + vmcs12->exit_qualification, + vmcs12->idt_vectoring_info_field, + vmcs12->vm_exit_intr_info, + vmcs12->vm_exit_intr_error_code, + KVM_ISA_VMX); + + load_vmcs12_host_state(vcpu, vmcs12); + + return; + } + + /* + * After an early L2 VM-entry failure, we're now back + * in L1 which thinks it just finished a VMLAUNCH or + * VMRESUME instruction, so we need to set the failure + * flag and the VM-instruction error field of the VMCS + * accordingly, and skip the emulated instruction. + */ + (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + + /* + * Restore L1's host state to KVM's software model. We're here + * because a consistency check was caught by hardware, which + * means some amount of guest state has been propagated to KVM's + * model and needs to be unwound to the host's state. + */ + nested_vmx_restore_host_state(vcpu); + + vmx->fail = 0; +} + +/* + * Decode the memory-address operand of a vmx instruction, as recorded on an + * exit caused by such an instruction (run by a guest hypervisor). + * On success, returns 0.
When the operand is invalid, returns 1 and throws + * #UD or #GP. + */ +int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, + u32 vmx_instruction_info, bool wr, gva_t *ret) +{ + gva_t off; + bool exn; + struct kvm_segment s; + + /* + * According to Vol. 3B, "Information for VM Exits Due to Instruction + * Execution", on an exit, vmx_instruction_info holds most of the + * addressing components of the operand. Only the displacement part + * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). + * For how an actual address is calculated from all these components, + * refer to Vol. 1, "Operand Addressing". + */ + int scaling = vmx_instruction_info & 3; + int addr_size = (vmx_instruction_info >> 7) & 7; + bool is_reg = vmx_instruction_info & (1u << 10); + int seg_reg = (vmx_instruction_info >> 15) & 7; + int index_reg = (vmx_instruction_info >> 18) & 0xf; + bool index_is_valid = !(vmx_instruction_info & (1u << 22)); + int base_reg = (vmx_instruction_info >> 23) & 0xf; + bool base_is_valid = !(vmx_instruction_info & (1u << 27)); + + if (is_reg) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + /* Addr = segment_base + offset */ + /* offset = base + [index * scale] + displacement */ + off = exit_qualification; /* holds the displacement */ + if (base_is_valid) + off += kvm_register_read(vcpu, base_reg); + if (index_is_valid) + off += kvm_register_read(vcpu, index_reg)<<scaling; + vmx_get_segment(vcpu, &s, seg_reg); + *ret = s.base + off; + + if (addr_size == 1) /* 32 bit */ + *ret &= 0xffffffff; + + /* Checks for #GP/#SS exceptions. */ + exn = false; + if (is_long_mode(vcpu)) { + /* Long mode: #GP(0)/#SS(0) if the memory address is in a + * non-canonical form. This is the only check on the memory + * destination for long mode! + */ + exn = is_noncanonical_address(*ret, vcpu); + } else if (is_protmode(vcpu)) { + /* Protected mode: apply checks for segment validity in the + * following order: + * - segment type check (#GP(0) may be thrown) + * - usability check (#GP(0)/#SS(0)) + * - limit check (#GP(0)/#SS(0)) + */ + if (wr) + /* #GP(0) if the destination operand is located in a + * read-only data segment or any code segment. + */ + exn = ((s.type & 0xa) == 0 || (s.type & 8)); + else + /* #GP(0) if the source operand is located in an + * execute-only code segment + */ + exn = ((s.type & 0xa) == 8); + if (exn) { + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. + */ + exn = (s.unusable != 0); + /* Protected mode: #GP(0)/#SS(0) if the memory + * operand is outside the segment limit. + */ + exn = exn || (off + sizeof(u64) > s.limit); + } + if (exn) { + kvm_queue_exception_e(vcpu, + seg_reg == VCPU_SREG_SS ? + SS_VECTOR : GP_VECTOR, + 0); + return 1; + } + + return 0; +} + +static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) +{ + gva_t gva; + struct x86_exception e; + + if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), + vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) + return 1; + + if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + + return 0; +} + +/* + * Allocate a shadow VMCS and associate it with the currently loaded + * VMCS, unless such a shadow VMCS already exists. The newly allocated + * VMCS is also VMCLEARed, so that it is ready for use. 
+ */ +static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; + + /* + * We should allocate a shadow vmcs for vmcs01 only when L1 + * executes VMXON and free it when L1 executes VMXOFF. + * As it is invalid to execute VMXON twice, we shouldn't reach + * here when vmcs01 already has an allocated shadow vmcs. + */ + WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); + + if (!loaded_vmcs->shadow_vmcs) { + loaded_vmcs->shadow_vmcs = alloc_vmcs(true); + if (loaded_vmcs->shadow_vmcs) + vmcs_clear(loaded_vmcs->shadow_vmcs); + } + return loaded_vmcs->shadow_vmcs; +} + +static int enter_vmx_operation(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int r; + + r = alloc_loaded_vmcs(&vmx->nested.vmcs02); + if (r < 0) + goto out_vmcs02; + + vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + if (!vmx->nested.cached_vmcs12) + goto out_cached_vmcs12; + + vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + if (!vmx->nested.cached_shadow_vmcs12) + goto out_cached_shadow_vmcs12; + + if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) + goto out_shadow_vmcs; + + hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + + vmx->nested.vpid02 = allocate_vpid(); + + vmx->nested.vmcs02_initialized = false; + vmx->nested.vmxon = true; + + if (pt_mode == PT_MODE_HOST_GUEST) { + vmx->pt_desc.guest.ctl = 0; + pt_update_intercept_for_msr(vmx); + } + + return 0; + +out_shadow_vmcs: + kfree(vmx->nested.cached_shadow_vmcs12); + +out_cached_shadow_vmcs12: + kfree(vmx->nested.cached_vmcs12); + +out_cached_vmcs12: + free_loaded_vmcs(&vmx->nested.vmcs02); + +out_vmcs02: + return -ENOMEM; +} + +/* + * Emulate the VMXON instruction. + * Currently, we just remember that VMX is active, and do not save or even + * inspect the argument to VMXON (the so-called "VMXON pointer") because we + * do not currently need to store anything in that guest-allocated memory + * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their + * argument is different from the VMXON pointer (which the spec says they do). + */ +static int handle_vmon(struct kvm_vcpu *vcpu) +{ + int ret; + gpa_t vmptr; + struct page *page; + struct vcpu_vmx *vmx = to_vmx(vcpu); + const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED + | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + + /* + * The Intel VMX Instruction Reference lists a bunch of bits that are + * prerequisite to running VMXON, most notably cr4.VMXE must be set to + * 1 (see vmx_set_cr4() for when we allow the guest to set this). + * Otherwise, we should fail with #UD. But most faulting conditions + * have already been checked by hardware, prior to the VM-exit for + * VMXON. We do test guest cr4.VMXE because processor CR4 always has + * that bit set to 1 in non-root mode. + */ + if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + /* CPL=0 must be checked manually.
*/ + if (vmx_get_cpl(vcpu)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + if (vmx->nested.vmxon) + return nested_vmx_failValid(vcpu, + VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + + if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) + != VMXON_NEEDED_FEATURES) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + if (nested_vmx_get_vmptr(vcpu, &vmptr)) + return 1; + + /* + * SDM 3: 24.11.5 + * The first 4 bytes of VMXON region contain the supported + * VMCS revision identifier. + * + * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case, + * which replaces physical address width with 32. + */ + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failInvalid(vcpu); + + page = kvm_vcpu_gpa_to_page(vcpu, vmptr); + if (is_error_page(page)) + return nested_vmx_failInvalid(vcpu); + + if (*(u32 *)kmap(page) != VMCS12_REVISION) { + kunmap(page); + kvm_release_page_clean(page); + return nested_vmx_failInvalid(vcpu); + } + kunmap(page); + kvm_release_page_clean(page); + + vmx->nested.vmxon_ptr = vmptr; + ret = enter_vmx_operation(vcpu); + if (ret) + return ret; + + return nested_vmx_succeed(vcpu); +} + +static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (vmx->nested.current_vmptr == -1ull) + return; + + if (enable_shadow_vmcs) { + /* copy to memory all shadowed fields in case + they were modified */ + copy_shadow_to_vmcs12(vmx); + vmx->nested.need_vmcs12_sync = false; + vmx_disable_shadow_vmcs(vmx); + } + vmx->nested.posted_intr_nv = -1; + + /* Flush VMCS12 to guest memory */ + kvm_vcpu_write_guest_page(vcpu, + vmx->nested.current_vmptr >> PAGE_SHIFT, + vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); + + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + + vmx->nested.current_vmptr = -1ull; +} + +/* Emulate the VMXOFF instruction */ +static int handle_vmoff(struct kvm_vcpu *vcpu) +{ + if (!nested_vmx_check_permission(vcpu)) + return 1; + free_nested(vcpu); + return nested_vmx_succeed(vcpu); +} + +/* Emulate the VMCLEAR instruction */ +static int handle_vmclear(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 zero = 0; + gpa_t vmptr; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (nested_vmx_get_vmptr(vcpu, &vmptr)) + return 1; + + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_INVALID_ADDRESS); + + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_VMXON_POINTER); + + if (vmx->nested.hv_evmcs_page) { + if (vmptr == vmx->nested.hv_evmcs_vmptr) + nested_release_evmcs(vcpu); + } else { + if (vmptr == vmx->nested.current_vmptr) + nested_release_vmcs12(vcpu); + + kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, + launch_state), + &zero, sizeof(zero)); + } + + return nested_vmx_succeed(vcpu); +} + +static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); + +/* Emulate the VMLAUNCH instruction */ +static int handle_vmlaunch(struct kvm_vcpu *vcpu) +{ + return nested_vmx_run(vcpu, true); +} + +/* Emulate the VMRESUME instruction */ +static int handle_vmresume(struct kvm_vcpu *vcpu) +{ + return nested_vmx_run(vcpu, false); +} + +static int handle_vmread(struct kvm_vcpu *vcpu) +{ + unsigned long field; + u64 field_value; + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gva_t gva = 0; + struct vmcs12 *vmcs12; + + if (!nested_vmx_check_permission(vcpu))
+ return 1; + + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); + + if (!is_guest_mode(vcpu)) + vmcs12 = get_vmcs12(vcpu); + else { + /* + * When vmcs->vmcs_link_pointer is -1ull, any VMREAD + * to shadowed-field sets the ALU flags for VMfailInvalid. + */ + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); + vmcs12 = get_shadow_vmcs12(vcpu); + } + + /* Decode instruction info and find the field to read */ + field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + /* Read the field, zero-extended to a u64 field_value */ + if (vmcs12_read_any(vmcs12, field, &field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); + + /* + * Now copy part of this value to register or memory, as requested. + * Note that the number of bits actually copied is 32 or 64 depending + * on the guest's mode (32 or 64 bit), not on the given field's length. + */ + if (vmx_instruction_info & (1u << 10)) { + kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), + field_value); + } else { + if (get_vmx_mem_address(vcpu, exit_qualification, + vmx_instruction_info, true, &gva)) + return 1; + /* _system ok, nested_vmx_check_permission has verified cpl=0 */ + kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), NULL); + } + + return nested_vmx_succeed(vcpu); +} + + +static int handle_vmwrite(struct kvm_vcpu *vcpu) +{ + unsigned long field; + gva_t gva; + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + + /* The value to write might be 32 or 64 bits, depending on L1's long + * mode, and eventually we need to write that into a field of several + * possible lengths. The code below first zero-extends the value to 64 + * bit (field_value), and then copies only the appropriate number of + * bits into the vmcs12 field. + */ + u64 field_value = 0; + struct x86_exception e; + struct vmcs12 *vmcs12; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); + + if (vmx_instruction_info & (1u << 10)) + field_value = kvm_register_readl(vcpu, + (((vmx_instruction_info) >> 3) & 0xf)); + else { + if (get_vmx_mem_address(vcpu, exit_qualification, + vmx_instruction_info, false, &gva)) + return 1; + if (kvm_read_guest_virt(vcpu, gva, &field_value, + (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + } + + + field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + /* + * If the vCPU supports "VMWRITE to any supported field in the + * VMCS," then the "read-only" fields are actually read/write. + */ + if (vmcs_field_readonly(field) && + !nested_cpu_has_vmwrite_any_field(vcpu)) + return nested_vmx_failValid(vcpu, + VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); + + if (!is_guest_mode(vcpu)) + vmcs12 = get_vmcs12(vcpu); + else { + /* + * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE + * to shadowed-field sets the ALU flags for VMfailInvalid. 
+ */ + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); + vmcs12 = get_shadow_vmcs12(vcpu); + } + + if (vmcs12_write_any(vmcs12, field, field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); + + /* + * Do not track vmcs12 dirty-state if in guest-mode + * as we actually dirty shadow vmcs12 instead of vmcs12. + */ + if (!is_guest_mode(vcpu)) { + switch (field) { +#define SHADOW_FIELD_RW(x) case x: +#include "vmcs_shadow_fields.h" + /* + * The fields that can be updated by L1 without a vmexit are + * always updated in the vmcs02, the others go down the slow + * path of prepare_vmcs02. + */ + break; + default: + vmx->nested.dirty_vmcs12 = true; + break; + } + } + + return nested_vmx_succeed(vcpu); +} + +static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) +{ + vmx->nested.current_vmptr = vmptr; + if (enable_shadow_vmcs) { + vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_SHADOW_VMCS); + vmcs_write64(VMCS_LINK_POINTER, + __pa(vmx->vmcs01.shadow_vmcs)); + vmx->nested.need_vmcs12_sync = true; + } + vmx->nested.dirty_vmcs12 = true; +} + +/* Emulate the VMPTRLD instruction */ +static int handle_vmptrld(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + gpa_t vmptr; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (nested_vmx_get_vmptr(vcpu, &vmptr)) + return 1; + + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INVALID_ADDRESS); + + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_VMXON_POINTER); + + /* Forbid normal VMPTRLD if Enlightened version was used */ + if (vmx->nested.hv_evmcs) + return 1; + + if (vmx->nested.current_vmptr != vmptr) { + struct vmcs12 *new_vmcs12; + struct page *page; + + page = kvm_vcpu_gpa_to_page(vcpu, vmptr); + if (is_error_page(page)) { + /* + * Reads from an unbacked page return all 1s, + * which means that the 32 bits located at the + * given physical address won't match the required + * VMCS12_REVISION identifier. + */ + nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); + return kvm_skip_emulated_instruction(vcpu); + } + new_vmcs12 = kmap(page); + if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || + (new_vmcs12->hdr.shadow_vmcs && + !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { + kunmap(page); + kvm_release_page_clean(page); + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); + } + + nested_release_vmcs12(vcpu); + + /* + * Load VMCS12 from guest memory since it is not already + * cached. 
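+		 * KVM keeps this L0-side copy so that later VMREAD/VMWRITE
+		 * emulation can be satisfied without touching guest memory.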
+		 */
+		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
+		kunmap(page);
+		kvm_release_page_clean(page);
+
+		set_current_vmptr(vmx, vmptr);
+	}
+
+	return nested_vmx_succeed(vcpu);
+}
+
+/* Emulate the VMPTRST instruction */
+static int handle_vmptrst(struct kvm_vcpu *vcpu)
+{
+	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
+	struct x86_exception e;
+	gva_t gva;
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+		return 1;
+
+	if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
+		return 1;
+	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+					sizeof(gpa_t), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+	return nested_vmx_succeed(vcpu);
+}
+
+/* Emulate the INVEPT instruction */
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 vmx_instruction_info, types;
+	unsigned long type;
+	gva_t gva;
+	struct x86_exception e;
+	struct {
+		u64 eptp, gpa;
+	} operand;
+
+	if (!(vmx->nested.msrs.secondary_ctls_high &
+	      SECONDARY_EXEC_ENABLE_EPT) ||
+	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+
+	if (type >= 32 || !(types & (1 << type)))
+		return nested_vmx_failValid(vcpu,
+				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
+	/* According to the Intel VMX instruction reference, the memory
+	 * operand is read even if it isn't needed (e.g., for type==global)
+	 */
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+			vmx_instruction_info, false, &gva))
+		return 1;
+	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+
+	switch (type) {
+	case VMX_EPT_EXTENT_GLOBAL:
+	/*
+	 * TODO: track mappings and invalidate
+	 * single context requests appropriately
+	 */
+	case VMX_EPT_EXTENT_CONTEXT:
+		kvm_mmu_sync_roots(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	return nested_vmx_succeed(vcpu);
+}
+
+static int handle_invvpid(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 vmx_instruction_info;
+	unsigned long type, types;
+	gva_t gva;
+	struct x86_exception e;
+	struct {
+		u64 vpid;
+		u64 gla;
+	} operand;
+	u16 vpid02;
+
+	if (!(vmx->nested.msrs.secondary_ctls_high &
+	      SECONDARY_EXEC_ENABLE_VPID) ||
+	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+	types = (vmx->nested.msrs.vpid_caps &
+			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
+
+	if (type >= 32 || !(types & (1 << type)))
+		return nested_vmx_failValid(vcpu,
+			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
+	/* According to the Intel VMX instruction reference, the memory
+	 * operand is read even if it isn't needed (e.g., for type==global)
+	 */
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+
vmx_instruction_info, false, &gva)) + return 1; + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + if (operand.vpid >> 16) + return nested_vmx_failValid(vcpu, + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + + vpid02 = nested_get_vpid02(vcpu); + switch (type) { + case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: + if (!operand.vpid || + is_noncanonical_address(operand.gla, vcpu)) + return nested_vmx_failValid(vcpu, + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + if (cpu_has_vmx_invvpid_individual_addr()) { + __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, + vpid02, operand.gla); + } else + __vmx_flush_tlb(vcpu, vpid02, false); + break; + case VMX_VPID_EXTENT_SINGLE_CONTEXT: + case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: + if (!operand.vpid) + return nested_vmx_failValid(vcpu, + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + __vmx_flush_tlb(vcpu, vpid02, false); + break; + case VMX_VPID_EXTENT_ALL_CONTEXT: + __vmx_flush_tlb(vcpu, vpid02, false); + break; + default: + WARN_ON_ONCE(1); + return kvm_skip_emulated_instruction(vcpu); + } + + return nested_vmx_succeed(vcpu); +} + +static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; + u64 address; + bool accessed_dirty; + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + + if (!nested_cpu_has_eptp_switching(vmcs12) || + !nested_cpu_has_ept(vmcs12)) + return 1; + + if (index >= VMFUNC_EPTP_ENTRIES) + return 1; + + + if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, + &address, index * 8, 8)) + return 1; + + accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); + + /* + * If the (L2) guest does a vmfunc to the currently + * active ept pointer, we don't have to do anything else + */ + if (vmcs12->ept_pointer != address) { + if (!valid_ept_address(vcpu, address)) + return 1; + + kvm_mmu_unload(vcpu); + mmu->ept_ad = accessed_dirty; + mmu->mmu_role.base.ad_disabled = !accessed_dirty; + vmcs12->ept_pointer = address; + /* + * TODO: Check what's the correct approach in case + * mmu reload fails. Currently, we just let the next + * reload potentially fail + */ + kvm_mmu_reload(vcpu); + } + + return 0; +} + +static int handle_vmfunc(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12; + u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; + + /* + * VMFUNC is only supported for nested guests, but we always enable the + * secondary control for simplicity; for non-nested mode, fake that we + * didn't by injecting #UD. 
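+	 * An L1 that executes VMFUNC while not running a nested guest
+	 * therefore sees #UD, just as on hardware where the "enable VM
+	 * functions" control is not set for it.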
+	 */
+	if (!is_guest_mode(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	vmcs12 = get_vmcs12(vcpu);
+	if ((vmcs12->vm_function_control & (1 << function)) == 0)
+		goto fail;
+
+	switch (function) {
+	case 0:
+		if (nested_vmx_eptp_switching(vcpu, vmcs12))
+			goto fail;
+		break;
+	default:
+		goto fail;
+	}
+	return kvm_skip_emulated_instruction(vcpu);
+
+fail:
+	nested_vmx_vmexit(vcpu, vmx->exit_reason,
+			  vmcs_read32(VM_EXIT_INTR_INFO),
+			  vmcs_readl(EXIT_QUALIFICATION));
+	return 1;
+}
+
+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+				       struct vmcs12 *vmcs12)
+{
+	unsigned long exit_qualification;
+	gpa_t bitmap, last_bitmap;
+	unsigned int port;
+	int size;
+	u8 b;
+
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+	port = exit_qualification >> 16;
+	size = (exit_qualification & 7) + 1;
+
+	last_bitmap = (gpa_t)-1;
+	b = -1;
+
+	while (size > 0) {
+		if (port < 0x8000)
+			bitmap = vmcs12->io_bitmap_a;
+		else if (port < 0x10000)
+			bitmap = vmcs12->io_bitmap_b;
+		else
+			return true;
+		bitmap += (port & 0x7fff) / 8;
+
+		if (last_bitmap != bitmap)
+			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
+				return true;
+		if (b & (1 << (port & 7)))
+			return true;
+
+		port++;
+		size--;
+		last_bitmap = bitmap;
+	}
+
+	return false;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check whether L1 expressed
+ * disinterest in the current event (read or write a specific MSR) by using an
+ * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+					struct vmcs12 *vmcs12, u32 exit_reason)
+{
+	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+	gpa_t bitmap;
+
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+		return true;
+
+	/*
+	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
+	 * for the four combinations of read/write and low/high MSR numbers.
+	 * First we need to figure out which of the four to use:
+	 */
+	bitmap = vmcs12->msr_bitmap;
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		bitmap += 2048;
+	if (msr_index >= 0xc0000000) {
+		msr_index -= 0xc0000000;
+		bitmap += 1024;
+	}
+
+	/* Then read the msr_index'th bit from this bitmap: */
+	if (msr_index < 1024*8) {
+		unsigned char b;
+		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
+			return true;
+		return 1 & (b >> (msr_index & 7));
+	} else
+		return true; /* let L1 handle the wrong parameter */
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
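+ * For example, if L1 owns CR0.TS (cr0_guest_host_mask has TS set) and
+ * L2 writes a TS value that differs from cr0_read_shadow, the exit
+ * must be reflected to L1.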
+ */ +static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + int cr = exit_qualification & 15; + int reg; + unsigned long val; + + switch ((exit_qualification >> 4) & 3) { + case 0: /* mov to cr */ + reg = (exit_qualification >> 8) & 15; + val = kvm_register_readl(vcpu, reg); + switch (cr) { + case 0: + if (vmcs12->cr0_guest_host_mask & + (val ^ vmcs12->cr0_read_shadow)) + return true; + break; + case 3: + if ((vmcs12->cr3_target_count >= 1 && + vmcs12->cr3_target_value0 == val) || + (vmcs12->cr3_target_count >= 2 && + vmcs12->cr3_target_value1 == val) || + (vmcs12->cr3_target_count >= 3 && + vmcs12->cr3_target_value2 == val) || + (vmcs12->cr3_target_count >= 4 && + vmcs12->cr3_target_value3 == val)) + return false; + if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) + return true; + break; + case 4: + if (vmcs12->cr4_guest_host_mask & + (vmcs12->cr4_read_shadow ^ val)) + return true; + break; + case 8: + if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) + return true; + break; + } + break; + case 2: /* clts */ + if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && + (vmcs12->cr0_read_shadow & X86_CR0_TS)) + return true; + break; + case 1: /* mov from cr */ + switch (cr) { + case 3: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR3_STORE_EXITING) + return true; + break; + case 8: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR8_STORE_EXITING) + return true; + break; + } + break; + case 3: /* lmsw */ + /* + * lmsw can change bits 1..3 of cr0, and only set bit 0 of + * cr0. Other attempted changes are ignored, with no exit. + */ + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; + if (vmcs12->cr0_guest_host_mask & 0xe & + (val ^ vmcs12->cr0_read_shadow)) + return true; + if ((vmcs12->cr0_guest_host_mask & 0x1) && + !(vmcs12->cr0_read_shadow & 0x1) && + (val & 0x1)) + return true; + break; + } + return false; +} + +static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12, gpa_t bitmap) +{ + u32 vmx_instruction_info; + unsigned long field; + u8 b; + + if (!nested_cpu_has_shadow_vmcs(vmcs12)) + return true; + + /* Decode instruction info and find the field to access */ + vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + + /* Out-of-range fields always cause a VM exit from L2 to L1 */ + if (field >> 15) + return true; + + if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) + return true; + + return 1 & (b >> (field & 7)); +} + +/* + * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we + * should handle it ourselves in L0 (and then continue L2). Only call this + * when in is_guest_mode (L2). + */ +bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) +{ + u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + if (vmx->nested.nested_run_pending) + return false; + + if (unlikely(vmx->fail)) { + pr_info_ratelimited("%s failed vm entry %x\n", __func__, + vmcs_read32(VM_INSTRUCTION_ERROR)); + return true; + } + + /* + * The host physical addresses of some pages of guest memory + * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC + * Page). The CPU may write to these pages via their host + * physical address while L2 is running, bypassing any + * address-translation-based dirty tracking (e.g. 
EPT write + * protection). + * + * Mark them dirty on every exit from L2 to prevent them from + * getting out of sync with dirty tracking. + */ + nested_mark_vmcs12_pages_dirty(vcpu); + + trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, + vmcs_readl(EXIT_QUALIFICATION), + vmx->idt_vectoring_info, + intr_info, + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + KVM_ISA_VMX); + + switch (exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (is_nmi(intr_info)) + return false; + else if (is_page_fault(intr_info)) + return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; + else if (is_debug(intr_info) && + vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return false; + else if (is_breakpoint(intr_info) && + vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + return false; + return vmcs12->exception_bitmap & + (1u << (intr_info & INTR_INFO_VECTOR_MASK)); + case EXIT_REASON_EXTERNAL_INTERRUPT: + return false; + case EXIT_REASON_TRIPLE_FAULT: + return true; + case EXIT_REASON_PENDING_INTERRUPT: + return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); + case EXIT_REASON_NMI_WINDOW: + return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); + case EXIT_REASON_TASK_SWITCH: + return true; + case EXIT_REASON_CPUID: + return true; + case EXIT_REASON_HLT: + return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); + case EXIT_REASON_INVD: + return true; + case EXIT_REASON_INVLPG: + return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); + case EXIT_REASON_RDPMC: + return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); + case EXIT_REASON_RDRAND: + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); + case EXIT_REASON_RDSEED: + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); + case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: + return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); + case EXIT_REASON_VMREAD: + return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, + vmcs12->vmread_bitmap); + case EXIT_REASON_VMWRITE: + return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, + vmcs12->vmwrite_bitmap); + case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: + case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: + case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: + case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: + case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: + /* + * VMX instructions trap unconditionally. This allows L1 to + * emulate them for its L2 guest, i.e., allows 3-level nesting! 
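+		 * There is no execution control to suppress these exits;
+		 * the SDM defines these instructions to VM exit
+		 * unconditionally from VMX non-root operation (unlike
+		 * VMREAD/VMWRITE, which can be shadowed).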
+		 */
+		return true;
+	case EXIT_REASON_CR_ACCESS:
+		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+	case EXIT_REASON_DR_ACCESS:
+		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
+	case EXIT_REASON_IO_INSTRUCTION:
+		return nested_vmx_exit_handled_io(vcpu, vmcs12);
+	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
+	case EXIT_REASON_MSR_READ:
+	case EXIT_REASON_MSR_WRITE:
+		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+	case EXIT_REASON_INVALID_STATE:
+		return true;
+	case EXIT_REASON_MWAIT_INSTRUCTION:
+		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+	case EXIT_REASON_MONITOR_TRAP_FLAG:
+		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
+	case EXIT_REASON_MONITOR_INSTRUCTION:
+		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+	case EXIT_REASON_PAUSE_INSTRUCTION:
+		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
+			nested_cpu_has2(vmcs12,
+				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
+	case EXIT_REASON_MCE_DURING_VMENTRY:
+		return false;
+	case EXIT_REASON_TPR_BELOW_THRESHOLD:
+		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
+	case EXIT_REASON_APIC_ACCESS:
+	case EXIT_REASON_APIC_WRITE:
+	case EXIT_REASON_EOI_INDUCED:
+		/*
+		 * The controls for "virtualize APIC accesses," "APIC-
+		 * register virtualization," and "virtual-interrupt
+		 * delivery" only come from vmcs12.
+		 */
+		return true;
+	case EXIT_REASON_EPT_VIOLATION:
+		/*
+		 * L0 always deals with the EPT violation. If nested EPT is
+		 * used, and the nested mmu code discovers that the address is
+		 * missing in the guest EPT table (EPT12), the EPT violation
+		 * will be injected with nested_ept_inject_page_fault()
+		 */
+		return false;
+	case EXIT_REASON_EPT_MISCONFIG:
+		/*
+		 * L2 never directly uses L1's EPT, but rather L0's own EPT
+		 * table (shadow on EPT) or a merged EPT table that L0 built
+		 * (EPT on EPT). So any problems with the structure of the
+		 * table are L0's fault.
+		 */
+		return false;
+	case EXIT_REASON_INVPCID:
+		return
+			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
+			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+	case EXIT_REASON_WBINVD:
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
+	case EXIT_REASON_XSETBV:
+		return true;
+	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
+		/*
+		 * This should never happen, since it is not possible to
+		 * set XSS to a non-zero value, neither in L1 nor in L2.
+		 * If it were, XSS would have to be checked against
+		 * the XSS exit bitmap in vmcs12.
+		 */
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+	case EXIT_REASON_PREEMPTION_TIMER:
+		return false;
+	case EXIT_REASON_PML_FULL:
+		/* We emulate PML support to L1. */
+		return false;
+	case EXIT_REASON_VMFUNC:
+		/* VM functions are emulated through L2->L0 vmexits.
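+		 * L0 performs the VM function on L2's behalf; only a
+		 * VMFUNC that fails is turned into a synthesized VM exit
+		 * to L1 (see handle_vmfunc()).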
*/ + return false; + case EXIT_REASON_ENCLS: + /* SGX is never exposed to L1 */ + return false; + default: + return true; + } +} + + +static int vmx_get_nested_state(struct kvm_vcpu *vcpu, + struct kvm_nested_state __user *user_kvm_nested_state, + u32 user_data_size) +{ + struct vcpu_vmx *vmx; + struct vmcs12 *vmcs12; + struct kvm_nested_state kvm_state = { + .flags = 0, + .format = 0, + .size = sizeof(kvm_state), + .vmx.vmxon_pa = -1ull, + .vmx.vmcs_pa = -1ull, + }; + + if (!vcpu) + return kvm_state.size + 2 * VMCS12_SIZE; + + vmx = to_vmx(vcpu); + vmcs12 = get_vmcs12(vcpu); + + if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) + kvm_state.flags |= KVM_STATE_NESTED_EVMCS; + + if (nested_vmx_allowed(vcpu) && + (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { + kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; + kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; + + if (vmx_has_valid_vmcs12(vcpu)) { + kvm_state.size += VMCS12_SIZE; + + if (is_guest_mode(vcpu) && + nested_cpu_has_shadow_vmcs(vmcs12) && + vmcs12->vmcs_link_pointer != -1ull) + kvm_state.size += VMCS12_SIZE; + } + + if (vmx->nested.smm.vmxon) + kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; + + if (vmx->nested.smm.guest_mode) + kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; + + if (is_guest_mode(vcpu)) { + kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; + + if (vmx->nested.nested_run_pending) + kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; + } + } + + if (user_data_size < kvm_state.size) + goto out; + + if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) + return -EFAULT; + + if (!vmx_has_valid_vmcs12(vcpu)) + goto out; + + /* + * When running L2, the authoritative vmcs12 state is in the + * vmcs02. When running L1, the authoritative vmcs12 state is + * in the shadow or enlightened vmcs linked to vmcs01, unless + * need_vmcs12_sync is set, in which case, the authoritative + * vmcs12 state is in the vmcs12 already. + */ + if (is_guest_mode(vcpu)) { + sync_vmcs12(vcpu, vmcs12); + } else if (!vmx->nested.need_vmcs12_sync) { + if (vmx->nested.hv_evmcs) + copy_enlightened_to_vmcs12(vmx); + else if (enable_shadow_vmcs) + copy_shadow_to_vmcs12(vmx); + } + + if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) + return -EFAULT; + + if (nested_cpu_has_shadow_vmcs(vmcs12) && + vmcs12->vmcs_link_pointer != -1ull) { + if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, + get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) + return -EFAULT; + } + +out: + return kvm_state.size; +} + +/* + * Forcibly leave nested mode in order to be able to reset the VCPU later on. + */ +void vmx_leave_nested(struct kvm_vcpu *vcpu) +{ + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.nested_run_pending = 0; + nested_vmx_vmexit(vcpu, -1, 0, 0); + } + free_nested(vcpu); +} + +static int vmx_set_nested_state(struct kvm_vcpu *vcpu, + struct kvm_nested_state __user *user_kvm_nested_state, + struct kvm_nested_state *kvm_state) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12; + u32 exit_qual; + int ret; + + if (kvm_state->format != 0) + return -EINVAL; + + if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) + nested_enable_evmcs(vcpu, NULL); + + if (!nested_vmx_allowed(vcpu)) + return kvm_state->vmx.vmxon_pa == -1ull ? 
0 : -EINVAL; + + if (kvm_state->vmx.vmxon_pa == -1ull) { + if (kvm_state->vmx.smm.flags) + return -EINVAL; + + if (kvm_state->vmx.vmcs_pa != -1ull) + return -EINVAL; + + vmx_leave_nested(vcpu); + return 0; + } + + if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) + return -EINVAL; + + if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && + (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) + return -EINVAL; + + if (kvm_state->vmx.smm.flags & + ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) + return -EINVAL; + + /* + * SMM temporarily disables VMX, so we cannot be in guest mode, + * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags + * must be zero. + */ + if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) + return -EINVAL; + + if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && + !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) + return -EINVAL; + + vmx_leave_nested(vcpu); + if (kvm_state->vmx.vmxon_pa == -1ull) + return 0; + + vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa; + ret = enter_vmx_operation(vcpu); + if (ret) + return ret; + + /* Empty 'VMXON' state is permitted */ + if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) + return 0; + + if (kvm_state->vmx.vmcs_pa != -1ull) { + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; + + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); + } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { + /* + * Sync eVMCS upon entry as we may not have + * HV_X64_MSR_VP_ASSIST_PAGE set up yet. + */ + vmx->nested.need_vmcs12_sync = true; + } else { + return -EINVAL; + } + + if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { + vmx->nested.smm.vmxon = true; + vmx->nested.vmxon = false; + + if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) + vmx->nested.smm.guest_mode = true; + } + + vmcs12 = get_vmcs12(vcpu); + if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12))) + return -EFAULT; + + if (vmcs12->hdr.revision_id != VMCS12_REVISION) + return -EINVAL; + + if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) + return 0; + + vmx->nested.nested_run_pending = + !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); + + if (nested_cpu_has_shadow_vmcs(vmcs12) && + vmcs12->vmcs_link_pointer != -1ull) { + struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); + + if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) + return -EINVAL; + + if (copy_from_user(shadow_vmcs12, + user_kvm_nested_state->data + VMCS12_SIZE, + sizeof(*vmcs12))) + return -EFAULT; + + if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || + !shadow_vmcs12->hdr.shadow_vmcs) + return -EINVAL; + } + + if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) || + nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) + return -EINVAL; + + vmx->nested.dirty_vmcs12 = true; + ret = nested_vmx_enter_non_root_mode(vcpu, false); + if (ret) + return -EINVAL; + + return 0; +} + +void nested_vmx_vcpu_setup(void) +{ + if (enable_shadow_vmcs) { + /* + * At vCPU creation, "VMWRITE to any supported field + * in the VMCS" is supported, so use the more + * permissive vmx_vmread_bitmap to specify both read + * and write permissions for the shadow VMCS. 
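+		 * A clear bit in a VMREAD/VMWRITE bitmap lets the access
+		 * be satisfied from the shadow VMCS without a VM exit; a
+		 * set bit forces the exit.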
+ */ + vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); + vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap)); + } +} + +/* + * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be + * returned for the various VMX controls MSRs when nested VMX is enabled. + * The same values should also be used to verify that vmcs12 control fields are + * valid during nested entry from L1 to L2. + * Each of these control msrs has a low and high 32-bit half: A low bit is on + * if the corresponding bit in the (32-bit) control field *must* be on, and a + * bit in the high half is on if the corresponding bit in the control field + * may be on. See also vmx_control_verify(). + */ +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, + bool apicv) +{ + /* + * Note that as a general rule, the high half of the MSRs (bits in + * the control fields which may be 1) should be initialized by the + * intersection of the underlying hardware's MSR (i.e., features which + * can be supported) and the list of features we want to expose - + * because they are known to be properly supported in our code. + * Also, usually, the low half of the MSRs (bits which must be 1) can + * be set to 0, meaning that L1 may turn off any of these bits. The + * reason is that if one of these bits is necessary, it will appear + * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control + * fields of vmcs01 and vmcs02, will turn these bits off - and + * nested_vmx_exit_reflected() will not pass related exits to L1. + * These rules have exceptions below. + */ + + /* pin-based controls */ + rdmsr(MSR_IA32_VMX_PINBASED_CTLS, + msrs->pinbased_ctls_low, + msrs->pinbased_ctls_high); + msrs->pinbased_ctls_low |= + PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; + msrs->pinbased_ctls_high &= + PIN_BASED_EXT_INTR_MASK | + PIN_BASED_NMI_EXITING | + PIN_BASED_VIRTUAL_NMIS | + (apicv ? PIN_BASED_POSTED_INTR : 0); + msrs->pinbased_ctls_high |= + PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | + PIN_BASED_VMX_PREEMPTION_TIMER; + + /* exit controls */ + rdmsr(MSR_IA32_VMX_EXIT_CTLS, + msrs->exit_ctls_low, + msrs->exit_ctls_high); + msrs->exit_ctls_low = + VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; + + msrs->exit_ctls_high &= +#ifdef CONFIG_X86_64 + VM_EXIT_HOST_ADDR_SPACE_SIZE | +#endif + VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; + msrs->exit_ctls_high |= + VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | + VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; + + /* We support free control of debug control saving. */ + msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; + + /* entry controls */ + rdmsr(MSR_IA32_VMX_ENTRY_CTLS, + msrs->entry_ctls_low, + msrs->entry_ctls_high); + msrs->entry_ctls_low = + VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; + msrs->entry_ctls_high &= +#ifdef CONFIG_X86_64 + VM_ENTRY_IA32E_MODE | +#endif + VM_ENTRY_LOAD_IA32_PAT; + msrs->entry_ctls_high |= + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); + + /* We support free control of debug control loading. 
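+	 * Clearing the bit from the must-be-one mask lets L1 run with
+	 * VM_ENTRY_LOAD_DEBUG_CONTROLS either set or clear.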
*/ + msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; + + /* cpu-based controls */ + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, + msrs->procbased_ctls_low, + msrs->procbased_ctls_high); + msrs->procbased_ctls_low = + CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; + msrs->procbased_ctls_high &= + CPU_BASED_VIRTUAL_INTR_PENDING | + CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | + CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | +#ifdef CONFIG_X86_64 + CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | +#endif + CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | + CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | + CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | + CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | + CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; + /* + * We can allow some features even when not supported by the + * hardware. For example, L1 can specify an MSR bitmap - and we + * can use it to avoid exits to L1 - even when L0 runs L2 + * without MSR bitmaps. + */ + msrs->procbased_ctls_high |= + CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | + CPU_BASED_USE_MSR_BITMAPS; + + /* We support free control of CR3 access interception. */ + msrs->procbased_ctls_low &= + ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); + + /* + * secondary cpu-based controls. Do not include those that + * depend on CPUID bits, they are added later by vmx_cpuid_update. + */ + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + msrs->secondary_ctls_low = 0; + msrs->secondary_ctls_high &= + SECONDARY_EXEC_DESC | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_WBINVD_EXITING; + + /* + * We can emulate "VMCS shadowing," even if the hardware + * doesn't support it. + */ + msrs->secondary_ctls_high |= + SECONDARY_EXEC_SHADOW_VMCS; + + if (enable_ept) { + /* nested EPT: emulate EPT also to L1 */ + msrs->secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_EPT; + msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT | + VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; + if (cpu_has_vmx_ept_execute_only()) + msrs->ept_caps |= + VMX_EPT_EXECUTE_ONLY_BIT; + msrs->ept_caps &= ept_caps; + msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | + VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | + VMX_EPT_1GB_PAGE_BIT; + if (enable_ept_ad_bits) { + msrs->secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_PML; + msrs->ept_caps |= VMX_EPT_AD_BIT; + } + } + + if (cpu_has_vmx_vmfunc()) { + msrs->secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_VMFUNC; + /* + * Advertise EPTP switching unconditionally + * since we emulate it + */ + if (enable_ept) + msrs->vmfunc_controls = + VMX_VMFUNC_EPTP_SWITCHING; + } + + /* + * Old versions of KVM use the single-context version without + * checking for support, so declare that it is supported even + * though it is treated as global context. The alternative is + * not failing the single-context invvpid, and it is worse. 
+ */ + if (enable_vpid) { + msrs->secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_VPID; + msrs->vpid_caps = VMX_VPID_INVVPID_BIT | + VMX_VPID_EXTENT_SUPPORTED_MASK; + } + + if (enable_unrestricted_guest) + msrs->secondary_ctls_high |= + SECONDARY_EXEC_UNRESTRICTED_GUEST; + + if (flexpriority_enabled) + msrs->secondary_ctls_high |= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + + /* miscellaneous data */ + rdmsr(MSR_IA32_VMX_MISC, + msrs->misc_low, + msrs->misc_high); + msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; + msrs->misc_low |= + MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | + VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | + VMX_MISC_ACTIVITY_HLT; + msrs->misc_high = 0; + + /* + * This MSR reports some information about VMX support. We + * should return information about the VMX we emulate for the + * guest, and the VMCS structure we give it - not about the + * VMX support of the underlying hardware. + */ + msrs->basic = + VMCS12_REVISION | + VMX_BASIC_TRUE_CTLS | + ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | + (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); + + if (cpu_has_vmx_basic_inout()) + msrs->basic |= VMX_BASIC_INOUT; + + /* + * These MSRs specify bits which the guest must keep fixed on + * while L1 is in VMXON mode (in L1's root mode, or running an L2). + * We picked the standard core2 setting. + */ +#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) +#define VMXON_CR4_ALWAYSON X86_CR4_VMXE + msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; + msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; + + /* These MSRs specify bits which the guest must keep fixed off. */ + rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); + rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); + + /* highest index: VMX_PREEMPTION_TIMER_VALUE */ + msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; +} + +void nested_vmx_hardware_unsetup(void) +{ + int i; + + if (enable_shadow_vmcs) { + for (i = 0; i < VMX_BITMAP_NR; i++) + free_page((unsigned long)vmx_bitmap[i]); + } +} + +__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) +{ + int i; + + if (!cpu_has_vmx_shadow_vmcs()) + enable_shadow_vmcs = 0; + if (enable_shadow_vmcs) { + for (i = 0; i < VMX_BITMAP_NR; i++) { + vmx_bitmap[i] = (unsigned long *) + __get_free_page(GFP_KERNEL); + if (!vmx_bitmap[i]) { + nested_vmx_hardware_unsetup(); + return -ENOMEM; + } + } + + init_vmcs_shadow_fields(); + } + + exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear, + exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch, + exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld, + exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst, + exit_handlers[EXIT_REASON_VMREAD] = handle_vmread, + exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume, + exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite, + exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff, + exit_handlers[EXIT_REASON_VMON] = handle_vmon, + exit_handlers[EXIT_REASON_INVEPT] = handle_invept, + exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid, + exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc, + + kvm_x86_ops->check_nested_events = vmx_check_nested_events; + kvm_x86_ops->get_nested_state = vmx_get_nested_state; + kvm_x86_ops->set_nested_state = vmx_set_nested_state; + kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages, + kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; + kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; + + return 0; +} diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h new file mode 100644 index 000000000000..e847ff1019a2 --- /dev/null +++ 
b/arch/x86/kvm/vmx/nested.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_NESTED_H +#define __KVM_X86_VMX_NESTED_H + +#include "kvm_cache_regs.h" +#include "vmcs12.h" +#include "vmx.h" + +void vmx_leave_nested(struct kvm_vcpu *vcpu); +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, + bool apicv); +void nested_vmx_hardware_unsetup(void); +__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); +void nested_vmx_vcpu_setup(void); +void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); +int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry); +bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason); +void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + u32 exit_intr_info, unsigned long exit_qualification); +void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu); +int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); +int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata); +int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, + u32 vmx_instruction_info, bool wr, gva_t *ret); + +static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.cached_vmcs12; +} + +static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.cached_shadow_vmcs12; +} + +static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * In case we do two consecutive get/set_nested_state()s while L2 was + * running hv_evmcs may end up not being mapped (we map it from + * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always + * have vmcs12 if it is true. + */ + return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull || + vmx->nested.hv_evmcs; +} + +static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) +{ + /* return the page table to be shadowed - in our case, EPT12 */ + return get_vmcs12(vcpu)->ept_pointer; +} + +static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) +{ + return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; +} + +/* + * Reflect a VM Exit into L1. + */ +static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, + u32 exit_reason) +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + + /* + * At this point, the exit interruption info in exit_intr_info + * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT + * we need to query the in-kernel LAPIC. + */ + WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT); + if ((exit_intr_info & + (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == + (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + vmcs12->vm_exit_intr_error_code = + vmcs_read32(VM_EXIT_INTR_ERROR_CODE); + } + + nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, + vmcs_readl(EXIT_QUALIFICATION)); + return 1; +} + +/* + * Return the cr0 value that a nested guest would read. This is a combination + * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by + * its hypervisor (cr0_read_shadow). 
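+ * Bits that L1 owns (set in cr0_guest_host_mask) are read from the
+ * shadow; all remaining bits come from the live guest_cr0.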
+ */ +static inline unsigned long nested_read_cr0(struct vmcs12 *fields) +{ + return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | + (fields->cr0_read_shadow & fields->cr0_guest_host_mask); +} +static inline unsigned long nested_read_cr4(struct vmcs12 *fields) +{ + return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | + (fields->cr4_read_shadow & fields->cr4_guest_host_mask); +} + +static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) +{ + return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low); +} + +/* + * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE + * to modify any valid field of the VMCS, or are the VM-exit + * information fields read-only? + */ +static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.misc_low & + MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; +} + +static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; +} + +static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & + CPU_BASED_MONITOR_TRAP_FLAG; +} + +static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.secondary_ctls_high & + SECONDARY_EXEC_SHADOW_VMCS; +} + +static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) +{ + return vmcs12->cpu_based_vm_exec_control & bit; +} + +static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) +{ + return (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && + (vmcs12->secondary_vm_exec_control & bit); +} + +static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) +{ + return vmcs12->pin_based_vm_exec_control & + PIN_BASED_VMX_PREEMPTION_TIMER; +} + +static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12) +{ + return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; +} + +static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) +{ + return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; +} + +static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); +} + +static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); +} + +static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML); +} + +static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); +} + +static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); +} + +static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT); +} + +static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); +} + +static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) +{ + return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; +} + +static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC); +} + +static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12) +{ + return 
nested_cpu_has_vmfunc(vmcs12) && + (vmcs12->vm_function_control & + VMX_VMFUNC_EPTP_SWITCHING); +} + +static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS); +} + +static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12) +{ + return vmcs12->vm_exit_controls & + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; +} + +/* + * In nested virtualization, check if L1 asked to exit on external interrupts. + * For most existing hypervisors, this will always return true. + */ +static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu) +{ + return get_vmcs12(vcpu)->pin_based_vm_exec_control & + PIN_BASED_EXT_INTR_MASK; +} + +/* + * if fixed0[i] == 1: val[i] must be 1 + * if fixed1[i] == 0: val[i] must be 0 + */ +static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) +{ + return ((val & fixed1) | fixed0) == val; +} + +static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) +{ + u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; + u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high & + SECONDARY_EXEC_UNRESTRICTED_GUEST && + nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) + fixed0 &= ~(X86_CR0_PE | X86_CR0_PG); + + return fixed_bits_valid(val, fixed0, fixed1); +} + +static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) +{ + u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; + u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; + + return fixed_bits_valid(val, fixed0, fixed1); +} + +static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) +{ + u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; + u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; + + return fixed_bits_valid(val, fixed0, fixed1); +} + +/* No difference in the restrictions on guest and host CR4 in VMX operation. 
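+ * Both are validated against the same IA32_VMX_CR4_FIXED0/FIXED1
+ * bounds, so one helper serves both checks.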
*/ +#define nested_guest_cr4_valid nested_cr4_valid +#define nested_host_cr4_valid nested_cr4_valid + +#endif /* __KVM_X86_VMX_NESTED_H */ diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h new file mode 100644 index 000000000000..b8e50f76fefc --- /dev/null +++ b/arch/x86/kvm/vmx/ops.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_INSN_H +#define __KVM_X86_VMX_INSN_H + +#include <linux/nospec.h> + +#include <asm/kvm_host.h> +#include <asm/vmx.h> + +#include "evmcs.h" +#include "vmcs.h" + +#define __ex(x) __kvm_handle_fault_on_reboot(x) +#define __ex_clear(x, reg) \ + ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg) + +static __always_inline void vmcs_check16(unsigned long field) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, + "16-bit accessor invalid for 64-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, + "16-bit accessor invalid for 64-bit high field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, + "16-bit accessor invalid for 32-bit high field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, + "16-bit accessor invalid for natural width field"); +} + +static __always_inline void vmcs_check32(unsigned long field) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, + "32-bit accessor invalid for 16-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, + "32-bit accessor invalid for natural width field"); +} + +static __always_inline void vmcs_check64(unsigned long field) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, + "64-bit accessor invalid for 16-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, + "64-bit accessor invalid for 64-bit high field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, + "64-bit accessor invalid for 32-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, + "64-bit accessor invalid for natural width field"); +} + +static __always_inline void vmcs_checkl(unsigned long field) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, + "Natural width accessor invalid for 16-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, + "Natural width accessor invalid for 64-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, + "Natural width accessor invalid for 64-bit high field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, + "Natural width accessor invalid for 32-bit field"); +} + +static __always_inline unsigned long __vmcs_readl(unsigned long field) +{ + unsigned long value; + + asm volatile (__ex_clear("vmread %1, %0", "%k0") + : "=r"(value) : "r"(field)); + return value; +} + +static __always_inline u16 vmcs_read16(unsigned long field) +{ + vmcs_check16(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_read16(field); + return __vmcs_readl(field); +} + +static __always_inline u32 vmcs_read32(unsigned long field) +{ + vmcs_check32(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_read32(field); + return __vmcs_readl(field); +} + +static __always_inline u64 vmcs_read64(unsigned long field) +{ + vmcs_check64(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_read64(field); 
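+	/* On 32-bit hosts a 64-bit field needs two VMREADs; see below. */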
+#ifdef CONFIG_X86_64 + return __vmcs_readl(field); +#else + return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); +#endif +} + +static __always_inline unsigned long vmcs_readl(unsigned long field) +{ + vmcs_checkl(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_read64(field); + return __vmcs_readl(field); +} + +static noinline void vmwrite_error(unsigned long field, unsigned long value) +{ + printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", + field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); + dump_stack(); +} + +static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) +{ + bool error; + + asm volatile (__ex("vmwrite %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(field), "rm"(value)); + if (unlikely(error)) + vmwrite_error(field, value); +} + +static __always_inline void vmcs_write16(unsigned long field, u16 value) +{ + vmcs_check16(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write16(field, value); + + __vmcs_writel(field, value); +} + +static __always_inline void vmcs_write32(unsigned long field, u32 value) +{ + vmcs_check32(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write32(field, value); + + __vmcs_writel(field, value); +} + +static __always_inline void vmcs_write64(unsigned long field, u64 value) +{ + vmcs_check64(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write64(field, value); + + __vmcs_writel(field, value); +#ifndef CONFIG_X86_64 + asm volatile (""); + __vmcs_writel(field+1, value >> 32); +#endif +} + +static __always_inline void vmcs_writel(unsigned long field, unsigned long value) +{ + vmcs_checkl(field); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write64(field, value); + + __vmcs_writel(field, value); +} + +static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, + "vmcs_clear_bits does not support 64-bit fields"); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write32(field, evmcs_read32(field) & ~mask); + + __vmcs_writel(field, __vmcs_readl(field) & ~mask); +} + +static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) +{ + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, + "vmcs_set_bits does not support 64-bit fields"); + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_write32(field, evmcs_read32(field) | mask); + + __vmcs_writel(field, __vmcs_readl(field) | mask); +} + +static inline void vmcs_clear(struct vmcs *vmcs) +{ + u64 phys_addr = __pa(vmcs); + bool error; + + asm volatile (__ex("vmclear %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); + if (unlikely(error)) + printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", + vmcs, phys_addr); +} + +static inline void vmcs_load(struct vmcs *vmcs) +{ + u64 phys_addr = __pa(vmcs); + bool error; + + if (static_branch_unlikely(&enable_evmcs)) + return evmcs_load(phys_addr); + + asm volatile (__ex("vmptrld %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); + if (unlikely(error)) + printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", + vmcs, phys_addr); +} + +static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) +{ + struct { + u64 vpid : 16; + u64 rsvd : 48; + u64 gva; + } operand = { vpid, 0, gva }; + bool error; + + asm volatile (__ex("invvpid %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); + BUG_ON(error); +} + +static inline void __invept(unsigned long ext, 
u64 eptp, gpa_t gpa) +{ + struct { + u64 eptp, gpa; + } operand = {eptp, gpa}; + bool error; + + asm volatile (__ex("invept %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); + BUG_ON(error); +} + +static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr) +{ + if (vpid == 0) + return true; + + if (cpu_has_vmx_invvpid_individual_addr()) { + __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr); + return true; + } + + return false; +} + +static inline void vpid_sync_vcpu_single(int vpid) +{ + if (vpid == 0) + return; + + if (cpu_has_vmx_invvpid_single()) + __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0); +} + +static inline void vpid_sync_vcpu_global(void) +{ + if (cpu_has_vmx_invvpid_global()) + __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); +} + +static inline void vpid_sync_context(int vpid) +{ + if (cpu_has_vmx_invvpid_single()) + vpid_sync_vcpu_single(vpid); + else + vpid_sync_vcpu_global(); +} + +static inline void ept_sync_global(void) +{ + __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); +} + +static inline void ept_sync_context(u64 eptp) +{ + if (cpu_has_vmx_invept_context()) + __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); + else + ept_sync_global(); +} + +#endif /* __KVM_X86_VMX_INSN_H */ diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 5ab4a364348e..5ab4a364348e 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h new file mode 100644 index 000000000000..6def3ba88e3b --- /dev/null +++ b/arch/x86/kvm/vmx/vmcs.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_VMCS_H +#define __KVM_X86_VMX_VMCS_H + +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/nospec.h> + +#include <asm/kvm.h> +#include <asm/vmx.h> + +#include "capabilities.h" + +struct vmcs_hdr { + u32 revision_id:31; + u32 shadow_vmcs:1; +}; + +struct vmcs { + struct vmcs_hdr hdr; + u32 abort; + char data[0]; +}; + +DECLARE_PER_CPU(struct vmcs *, current_vmcs); + +/* + * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT + * and whose values change infrequently, but are not constant. I.e. this is + * used as a write-through cache of the corresponding VMCS fields. + */ +struct vmcs_host_state { + unsigned long cr3; /* May not match real cr3 */ + unsigned long cr4; /* May not match real cr4 */ + unsigned long gs_base; + unsigned long fs_base; + + u16 fs_sel, gs_sel, ldt_sel; +#ifdef CONFIG_X86_64 + u16 ds_sel, es_sel; +#endif +}; + +/* + * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also + * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs + * loaded on this CPU (so we can clear them if the CPU goes down). 
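+ * A VMCS must be VMCLEARed on the CPU where it was last loaded, so a
+ * CPU going offline must walk this list and clear each entry.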
+ */ +struct loaded_vmcs { + struct vmcs *vmcs; + struct vmcs *shadow_vmcs; + int cpu; + bool launched; + bool nmi_known_unmasked; + bool hv_timer_armed; + /* Support for vnmi-less CPUs */ + int soft_vnmi_blocked; + ktime_t entry_time; + s64 vnmi_blocked_time; + unsigned long *msr_bitmap; + struct list_head loaded_vmcss_on_cpu_link; + struct vmcs_host_state host_state; +}; + +static inline bool is_exception_n(u32 intr_info, u8 vector) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | + INTR_INFO_VALID_MASK)) == + (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); +} + +static inline bool is_debug(u32 intr_info) +{ + return is_exception_n(intr_info, DB_VECTOR); +} + +static inline bool is_breakpoint(u32 intr_info) +{ + return is_exception_n(intr_info, BP_VECTOR); +} + +static inline bool is_page_fault(u32 intr_info) +{ + return is_exception_n(intr_info, PF_VECTOR); +} + +static inline bool is_invalid_opcode(u32 intr_info) +{ + return is_exception_n(intr_info, UD_VECTOR); +} + +static inline bool is_gp_fault(u32 intr_info) +{ + return is_exception_n(intr_info, GP_VECTOR); +} + +static inline bool is_machine_check(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | + INTR_INFO_VALID_MASK)) == + (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); +} + +/* Undocumented: icebp/int1 */ +static inline bool is_icebp(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); +} + +static inline bool is_nmi(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); +} + +enum vmcs_field_width { + VMCS_FIELD_WIDTH_U16 = 0, + VMCS_FIELD_WIDTH_U64 = 1, + VMCS_FIELD_WIDTH_U32 = 2, + VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3 +}; + +static inline int vmcs_field_width(unsigned long field) +{ + if (0x1 & field) /* the *_HIGH fields are all 32 bit */ + return VMCS_FIELD_WIDTH_U32; + return (field >> 13) & 0x3; +} + +static inline int vmcs_field_readonly(unsigned long field) +{ + return (((field >> 10) & 0x3) == 1); +} + +#endif /* __KVM_X86_VMX_VMCS_H */ diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c new file mode 100644 index 000000000000..53dfb401316d --- /dev/null +++ b/arch/x86/kvm/vmx/vmcs12.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmcs12.h" + +#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) +#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) +#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name) +#define FIELD64(number, name) \ + FIELD(number, name), \ + [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32) + +const unsigned short vmcs_field_to_offset_table[] = { + FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), + FIELD(POSTED_INTR_NV, posted_intr_nv), + FIELD(GUEST_ES_SELECTOR, guest_es_selector), + FIELD(GUEST_CS_SELECTOR, guest_cs_selector), + FIELD(GUEST_SS_SELECTOR, guest_ss_selector), + FIELD(GUEST_DS_SELECTOR, guest_ds_selector), + FIELD(GUEST_FS_SELECTOR, guest_fs_selector), + FIELD(GUEST_GS_SELECTOR, guest_gs_selector), + FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), + FIELD(GUEST_TR_SELECTOR, guest_tr_selector), + FIELD(GUEST_INTR_STATUS, guest_intr_status), + FIELD(GUEST_PML_INDEX, guest_pml_index), + FIELD(HOST_ES_SELECTOR, host_es_selector), + FIELD(HOST_CS_SELECTOR, host_cs_selector), + FIELD(HOST_SS_SELECTOR, host_ss_selector), + 
FIELD(HOST_DS_SELECTOR, host_ds_selector), + FIELD(HOST_FS_SELECTOR, host_fs_selector), + FIELD(HOST_GS_SELECTOR, host_gs_selector), + FIELD(HOST_TR_SELECTOR, host_tr_selector), + FIELD64(IO_BITMAP_A, io_bitmap_a), + FIELD64(IO_BITMAP_B, io_bitmap_b), + FIELD64(MSR_BITMAP, msr_bitmap), + FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), + FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), + FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), + FIELD64(PML_ADDRESS, pml_address), + FIELD64(TSC_OFFSET, tsc_offset), + FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), + FIELD64(APIC_ACCESS_ADDR, apic_access_addr), + FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr), + FIELD64(VM_FUNCTION_CONTROL, vm_function_control), + FIELD64(EPT_POINTER, ept_pointer), + FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0), + FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1), + FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), + FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), + FIELD64(EPTP_LIST_ADDRESS, eptp_list_address), + FIELD64(VMREAD_BITMAP, vmread_bitmap), + FIELD64(VMWRITE_BITMAP, vmwrite_bitmap), + FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), + FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), + FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), + FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), + FIELD64(GUEST_IA32_PAT, guest_ia32_pat), + FIELD64(GUEST_IA32_EFER, guest_ia32_efer), + FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), + FIELD64(GUEST_PDPTR0, guest_pdptr0), + FIELD64(GUEST_PDPTR1, guest_pdptr1), + FIELD64(GUEST_PDPTR2, guest_pdptr2), + FIELD64(GUEST_PDPTR3, guest_pdptr3), + FIELD64(GUEST_BNDCFGS, guest_bndcfgs), + FIELD64(HOST_IA32_PAT, host_ia32_pat), + FIELD64(HOST_IA32_EFER, host_ia32_efer), + FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), + FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), + FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), + FIELD(EXCEPTION_BITMAP, exception_bitmap), + FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), + FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), + FIELD(CR3_TARGET_COUNT, cr3_target_count), + FIELD(VM_EXIT_CONTROLS, vm_exit_controls), + FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), + FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), + FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), + FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), + FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), + FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), + FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), + FIELD(TPR_THRESHOLD, tpr_threshold), + FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), + FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), + FIELD(VM_EXIT_REASON, vm_exit_reason), + FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), + FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), + FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), + FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), + FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), + FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), + FIELD(GUEST_ES_LIMIT, guest_es_limit), + FIELD(GUEST_CS_LIMIT, guest_cs_limit), + FIELD(GUEST_SS_LIMIT, guest_ss_limit), + FIELD(GUEST_DS_LIMIT, guest_ds_limit), + FIELD(GUEST_FS_LIMIT, guest_fs_limit), + FIELD(GUEST_GS_LIMIT, guest_gs_limit), + FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), + FIELD(GUEST_TR_LIMIT, guest_tr_limit), + FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), + 
FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), + FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), + FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), + FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), + FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), + FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), + FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), + FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), + FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), + FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), + FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), + FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), + FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), + FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), + FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), + FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), + FIELD(CR0_READ_SHADOW, cr0_read_shadow), + FIELD(CR4_READ_SHADOW, cr4_read_shadow), + FIELD(CR3_TARGET_VALUE0, cr3_target_value0), + FIELD(CR3_TARGET_VALUE1, cr3_target_value1), + FIELD(CR3_TARGET_VALUE2, cr3_target_value2), + FIELD(CR3_TARGET_VALUE3, cr3_target_value3), + FIELD(EXIT_QUALIFICATION, exit_qualification), + FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), + FIELD(GUEST_CR0, guest_cr0), + FIELD(GUEST_CR3, guest_cr3), + FIELD(GUEST_CR4, guest_cr4), + FIELD(GUEST_ES_BASE, guest_es_base), + FIELD(GUEST_CS_BASE, guest_cs_base), + FIELD(GUEST_SS_BASE, guest_ss_base), + FIELD(GUEST_DS_BASE, guest_ds_base), + FIELD(GUEST_FS_BASE, guest_fs_base), + FIELD(GUEST_GS_BASE, guest_gs_base), + FIELD(GUEST_LDTR_BASE, guest_ldtr_base), + FIELD(GUEST_TR_BASE, guest_tr_base), + FIELD(GUEST_GDTR_BASE, guest_gdtr_base), + FIELD(GUEST_IDTR_BASE, guest_idtr_base), + FIELD(GUEST_DR7, guest_dr7), + FIELD(GUEST_RSP, guest_rsp), + FIELD(GUEST_RIP, guest_rip), + FIELD(GUEST_RFLAGS, guest_rflags), + FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), + FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), + FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), + FIELD(HOST_CR0, host_cr0), + FIELD(HOST_CR3, host_cr3), + FIELD(HOST_CR4, host_cr4), + FIELD(HOST_FS_BASE, host_fs_base), + FIELD(HOST_GS_BASE, host_gs_base), + FIELD(HOST_TR_BASE, host_tr_base), + FIELD(HOST_GDTR_BASE, host_gdtr_base), + FIELD(HOST_IDTR_BASE, host_idtr_base), + FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), + FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), + FIELD(HOST_RSP, host_rsp), + FIELD(HOST_RIP, host_rip), +}; +const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs_field_to_offset_table); diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h new file mode 100644 index 000000000000..3a742428ad17 --- /dev/null +++ b/arch/x86/kvm/vmx/vmcs12.h @@ -0,0 +1,462 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_VMCS12_H +#define __KVM_X86_VMX_VMCS12_H + +#include <linux/build_bug.h> + +#include "vmcs.h" + +/* + * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a + * single nested guest (L2), hence the name vmcs12. Any VMX implementation has + * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is + * stored in guest memory specified by VMPTRLD, but is opaque to the guest, + * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. + * More than one of these structures may exist, if L1 runs multiple L2 guests. + * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the + * underlying hardware which will be used to run L2. 
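It is worth spelling out how the FIELD()/FIELD64() table above is indexed, since the ROL16 trick is easy to misread. A VMCS field encoding keeps its width bits in 14:13 and its type bits in 11:10; rotating the encoding left by 6 pulls those sparse high bits down into the low positions, so the table stays reasonably dense. A worked example (VM_EXIT_REASON's encoding 0x4402 comes from asm/vmx.h; the helper itself is illustrative):

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))

/*
 * ROL16(0x4402, 6):
 *   (u16)(0x4402 << 6)  = 0x0080   (low bits wrap off the top)
 *   (u16)(0x4402 >> 10) = 0x0011   (width/type bits land low)
 *   index = 0x0080 | 0x0011 = 0x0091
 */
static unsigned short vm_exit_reason_slot(void)
{
	return ROL16(0x4402, 6);	/* 0x0091: the FIELD() slot used above */
}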
+ * This structure is packed to ensure that its layout is identical across + * machines (necessary for live migration). + * + * IMPORTANT: Changing the layout of existing fields in this structure + * will break save/restore compatibility with older kvm releases. When + * adding new fields, either use space in the reserved padding* arrays + * or add the new fields to the end of the structure. + */ +typedef u64 natural_width; +struct __packed vmcs12 { + /* According to the Intel spec, a VMCS region must start with the + * following two fields. Then follow implementation-specific data. + */ + struct vmcs_hdr hdr; + u32 abort; + + u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ + u32 padding[7]; /* room for future expansion */ + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 apic_access_addr; + u64 posted_intr_desc_addr; + u64 ept_pointer; + u64 eoi_exit_bitmap0; + u64 eoi_exit_bitmap1; + u64 eoi_exit_bitmap2; + u64 eoi_exit_bitmap3; + u64 xss_exit_bitmap; + u64 guest_physical_address; + u64 vmcs_link_pointer; + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + u64 guest_ia32_perf_global_ctrl; + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + u64 guest_bndcfgs; + u64 host_ia32_pat; + u64 host_ia32_efer; + u64 host_ia32_perf_global_ctrl; + u64 vmread_bitmap; + u64 vmwrite_bitmap; + u64 vm_function_control; + u64 eptp_list_address; + u64 pml_address; + u64 padding64[3]; /* room for future expansion */ + /* + * To allow migration of L1 (complete with its L2 guests) between + * machines of different natural widths (32 or 64 bit), we cannot have + * unsigned long fields with no explicit size. We use u64 (aliased + * natural_width) instead. Luckily, x86 is little-endian. 
+ */ + natural_width cr0_guest_host_mask; + natural_width cr4_guest_host_mask; + natural_width cr0_read_shadow; + natural_width cr4_read_shadow; + natural_width cr3_target_value0; + natural_width cr3_target_value1; + natural_width cr3_target_value2; + natural_width cr3_target_value3; + natural_width exit_qualification; + natural_width guest_linear_address; + natural_width guest_cr0; + natural_width guest_cr3; + natural_width guest_cr4; + natural_width guest_es_base; + natural_width guest_cs_base; + natural_width guest_ss_base; + natural_width guest_ds_base; + natural_width guest_fs_base; + natural_width guest_gs_base; + natural_width guest_ldtr_base; + natural_width guest_tr_base; + natural_width guest_gdtr_base; + natural_width guest_idtr_base; + natural_width guest_dr7; + natural_width guest_rsp; + natural_width guest_rip; + natural_width guest_rflags; + natural_width guest_pending_dbg_exceptions; + natural_width guest_sysenter_esp; + natural_width guest_sysenter_eip; + natural_width host_cr0; + natural_width host_cr3; + natural_width host_cr4; + natural_width host_fs_base; + natural_width host_gs_base; + natural_width host_tr_base; + natural_width host_gdtr_base; + natural_width host_idtr_base; + natural_width host_ia32_sysenter_esp; + natural_width host_ia32_sysenter_eip; + natural_width host_rsp; + natural_width host_rip; + natural_width paddingl[8]; /* room for future expansion */ + u32 pin_based_vm_exec_control; + u32 cpu_based_vm_exec_control; + u32 exception_bitmap; + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + u32 cr3_target_count; + u32 vm_exit_controls; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_controls; + u32 vm_entry_msr_load_count; + u32 vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + u32 secondary_vm_exec_control; + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 vm_exit_intr_info; + u32 vm_exit_intr_error_code; + u32 idt_vectoring_info_field; + u32 idt_vectoring_error_code; + u32 vm_exit_instruction_len; + u32 vmx_instruction_info; + u32 guest_es_limit; + u32 guest_cs_limit; + u32 guest_ss_limit; + u32 guest_ds_limit; + u32 guest_fs_limit; + u32 guest_gs_limit; + u32 guest_ldtr_limit; + u32 guest_tr_limit; + u32 guest_gdtr_limit; + u32 guest_idtr_limit; + u32 guest_es_ar_bytes; + u32 guest_cs_ar_bytes; + u32 guest_ss_ar_bytes; + u32 guest_ds_ar_bytes; + u32 guest_fs_ar_bytes; + u32 guest_gs_ar_bytes; + u32 guest_ldtr_ar_bytes; + u32 guest_tr_ar_bytes; + u32 guest_interruptibility_info; + u32 guest_activity_state; + u32 guest_sysenter_cs; + u32 host_ia32_sysenter_cs; + u32 vmx_preemption_timer_value; + u32 padding32[7]; /* room for future expansion */ + u16 virtual_processor_id; + u16 posted_intr_nv; + u16 guest_es_selector; + u16 guest_cs_selector; + u16 guest_ss_selector; + u16 guest_ds_selector; + u16 guest_fs_selector; + u16 guest_gs_selector; + u16 guest_ldtr_selector; + u16 guest_tr_selector; + u16 guest_intr_status; + u16 host_es_selector; + u16 host_cs_selector; + u16 host_ss_selector; + u16 host_ds_selector; + u16 host_fs_selector; + u16 host_gs_selector; + u16 host_tr_selector; + u16 guest_pml_index; +}; + +/* + * VMCS12_REVISION is an arbitrary id that should be changed if the content or + * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and + * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. 
+ * + * IMPORTANT: Changing this value will break save/restore compatibility with + * older kvm releases. + */ +#define VMCS12_REVISION 0x11e57ed0 + +/* + * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region + * and any VMCS region. Although only sizeof(struct vmcs12) are used by the + * current implementation, 4K are reserved to avoid future complications. + */ +#define VMCS12_SIZE 0x1000 + +/* + * VMCS12_MAX_FIELD_INDEX is the highest index value used in any + * supported VMCS12 field encoding. + */ +#define VMCS12_MAX_FIELD_INDEX 0x17 + +/* + * For save/restore compatibility, the vmcs12 field offsets must not change. + */ +#define CHECK_OFFSET(field, loc) \ + BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \ + "Offset of " #field " in struct vmcs12 has changed.") + +static inline void vmx_check_vmcs12_offsets(void) +{ + CHECK_OFFSET(hdr, 0); + CHECK_OFFSET(abort, 4); + CHECK_OFFSET(launch_state, 8); + CHECK_OFFSET(io_bitmap_a, 40); + CHECK_OFFSET(io_bitmap_b, 48); + CHECK_OFFSET(msr_bitmap, 56); + CHECK_OFFSET(vm_exit_msr_store_addr, 64); + CHECK_OFFSET(vm_exit_msr_load_addr, 72); + CHECK_OFFSET(vm_entry_msr_load_addr, 80); + CHECK_OFFSET(tsc_offset, 88); + CHECK_OFFSET(virtual_apic_page_addr, 96); + CHECK_OFFSET(apic_access_addr, 104); + CHECK_OFFSET(posted_intr_desc_addr, 112); + CHECK_OFFSET(ept_pointer, 120); + CHECK_OFFSET(eoi_exit_bitmap0, 128); + CHECK_OFFSET(eoi_exit_bitmap1, 136); + CHECK_OFFSET(eoi_exit_bitmap2, 144); + CHECK_OFFSET(eoi_exit_bitmap3, 152); + CHECK_OFFSET(xss_exit_bitmap, 160); + CHECK_OFFSET(guest_physical_address, 168); + CHECK_OFFSET(vmcs_link_pointer, 176); + CHECK_OFFSET(guest_ia32_debugctl, 184); + CHECK_OFFSET(guest_ia32_pat, 192); + CHECK_OFFSET(guest_ia32_efer, 200); + CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208); + CHECK_OFFSET(guest_pdptr0, 216); + CHECK_OFFSET(guest_pdptr1, 224); + CHECK_OFFSET(guest_pdptr2, 232); + CHECK_OFFSET(guest_pdptr3, 240); + CHECK_OFFSET(guest_bndcfgs, 248); + CHECK_OFFSET(host_ia32_pat, 256); + CHECK_OFFSET(host_ia32_efer, 264); + CHECK_OFFSET(host_ia32_perf_global_ctrl, 272); + CHECK_OFFSET(vmread_bitmap, 280); + CHECK_OFFSET(vmwrite_bitmap, 288); + CHECK_OFFSET(vm_function_control, 296); + CHECK_OFFSET(eptp_list_address, 304); + CHECK_OFFSET(pml_address, 312); + CHECK_OFFSET(cr0_guest_host_mask, 344); + CHECK_OFFSET(cr4_guest_host_mask, 352); + CHECK_OFFSET(cr0_read_shadow, 360); + CHECK_OFFSET(cr4_read_shadow, 368); + CHECK_OFFSET(cr3_target_value0, 376); + CHECK_OFFSET(cr3_target_value1, 384); + CHECK_OFFSET(cr3_target_value2, 392); + CHECK_OFFSET(cr3_target_value3, 400); + CHECK_OFFSET(exit_qualification, 408); + CHECK_OFFSET(guest_linear_address, 416); + CHECK_OFFSET(guest_cr0, 424); + CHECK_OFFSET(guest_cr3, 432); + CHECK_OFFSET(guest_cr4, 440); + CHECK_OFFSET(guest_es_base, 448); + CHECK_OFFSET(guest_cs_base, 456); + CHECK_OFFSET(guest_ss_base, 464); + CHECK_OFFSET(guest_ds_base, 472); + CHECK_OFFSET(guest_fs_base, 480); + CHECK_OFFSET(guest_gs_base, 488); + CHECK_OFFSET(guest_ldtr_base, 496); + CHECK_OFFSET(guest_tr_base, 504); + CHECK_OFFSET(guest_gdtr_base, 512); + CHECK_OFFSET(guest_idtr_base, 520); + CHECK_OFFSET(guest_dr7, 528); + CHECK_OFFSET(guest_rsp, 536); + CHECK_OFFSET(guest_rip, 544); + CHECK_OFFSET(guest_rflags, 552); + CHECK_OFFSET(guest_pending_dbg_exceptions, 560); + CHECK_OFFSET(guest_sysenter_esp, 568); + CHECK_OFFSET(guest_sysenter_eip, 576); + CHECK_OFFSET(host_cr0, 584); + CHECK_OFFSET(host_cr3, 592); + CHECK_OFFSET(host_cr4, 600); + 
CHECK_OFFSET(host_fs_base, 608); + CHECK_OFFSET(host_gs_base, 616); + CHECK_OFFSET(host_tr_base, 624); + CHECK_OFFSET(host_gdtr_base, 632); + CHECK_OFFSET(host_idtr_base, 640); + CHECK_OFFSET(host_ia32_sysenter_esp, 648); + CHECK_OFFSET(host_ia32_sysenter_eip, 656); + CHECK_OFFSET(host_rsp, 664); + CHECK_OFFSET(host_rip, 672); + CHECK_OFFSET(pin_based_vm_exec_control, 744); + CHECK_OFFSET(cpu_based_vm_exec_control, 748); + CHECK_OFFSET(exception_bitmap, 752); + CHECK_OFFSET(page_fault_error_code_mask, 756); + CHECK_OFFSET(page_fault_error_code_match, 760); + CHECK_OFFSET(cr3_target_count, 764); + CHECK_OFFSET(vm_exit_controls, 768); + CHECK_OFFSET(vm_exit_msr_store_count, 772); + CHECK_OFFSET(vm_exit_msr_load_count, 776); + CHECK_OFFSET(vm_entry_controls, 780); + CHECK_OFFSET(vm_entry_msr_load_count, 784); + CHECK_OFFSET(vm_entry_intr_info_field, 788); + CHECK_OFFSET(vm_entry_exception_error_code, 792); + CHECK_OFFSET(vm_entry_instruction_len, 796); + CHECK_OFFSET(tpr_threshold, 800); + CHECK_OFFSET(secondary_vm_exec_control, 804); + CHECK_OFFSET(vm_instruction_error, 808); + CHECK_OFFSET(vm_exit_reason, 812); + CHECK_OFFSET(vm_exit_intr_info, 816); + CHECK_OFFSET(vm_exit_intr_error_code, 820); + CHECK_OFFSET(idt_vectoring_info_field, 824); + CHECK_OFFSET(idt_vectoring_error_code, 828); + CHECK_OFFSET(vm_exit_instruction_len, 832); + CHECK_OFFSET(vmx_instruction_info, 836); + CHECK_OFFSET(guest_es_limit, 840); + CHECK_OFFSET(guest_cs_limit, 844); + CHECK_OFFSET(guest_ss_limit, 848); + CHECK_OFFSET(guest_ds_limit, 852); + CHECK_OFFSET(guest_fs_limit, 856); + CHECK_OFFSET(guest_gs_limit, 860); + CHECK_OFFSET(guest_ldtr_limit, 864); + CHECK_OFFSET(guest_tr_limit, 868); + CHECK_OFFSET(guest_gdtr_limit, 872); + CHECK_OFFSET(guest_idtr_limit, 876); + CHECK_OFFSET(guest_es_ar_bytes, 880); + CHECK_OFFSET(guest_cs_ar_bytes, 884); + CHECK_OFFSET(guest_ss_ar_bytes, 888); + CHECK_OFFSET(guest_ds_ar_bytes, 892); + CHECK_OFFSET(guest_fs_ar_bytes, 896); + CHECK_OFFSET(guest_gs_ar_bytes, 900); + CHECK_OFFSET(guest_ldtr_ar_bytes, 904); + CHECK_OFFSET(guest_tr_ar_bytes, 908); + CHECK_OFFSET(guest_interruptibility_info, 912); + CHECK_OFFSET(guest_activity_state, 916); + CHECK_OFFSET(guest_sysenter_cs, 920); + CHECK_OFFSET(host_ia32_sysenter_cs, 924); + CHECK_OFFSET(vmx_preemption_timer_value, 928); + CHECK_OFFSET(virtual_processor_id, 960); + CHECK_OFFSET(posted_intr_nv, 962); + CHECK_OFFSET(guest_es_selector, 964); + CHECK_OFFSET(guest_cs_selector, 966); + CHECK_OFFSET(guest_ss_selector, 968); + CHECK_OFFSET(guest_ds_selector, 970); + CHECK_OFFSET(guest_fs_selector, 972); + CHECK_OFFSET(guest_gs_selector, 974); + CHECK_OFFSET(guest_ldtr_selector, 976); + CHECK_OFFSET(guest_tr_selector, 978); + CHECK_OFFSET(guest_intr_status, 980); + CHECK_OFFSET(host_es_selector, 982); + CHECK_OFFSET(host_cs_selector, 984); + CHECK_OFFSET(host_ss_selector, 986); + CHECK_OFFSET(host_ds_selector, 988); + CHECK_OFFSET(host_fs_selector, 990); + CHECK_OFFSET(host_gs_selector, 992); + CHECK_OFFSET(host_tr_selector, 994); + CHECK_OFFSET(guest_pml_index, 996); +} + +extern const unsigned short vmcs_field_to_offset_table[]; +extern const unsigned int nr_vmcs12_fields; + +#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) + +static inline short vmcs_field_to_offset(unsigned long field) +{ + unsigned short offset; + unsigned int index; + + if (field >> 15) + return -ENOENT; + + index = ROL16(field, 6); + if (index >= nr_vmcs12_fields) + return -ENOENT; + + index = array_index_nospec(index, 
nr_vmcs12_fields); + offset = vmcs_field_to_offset_table[index]; + if (offset == 0) + return -ENOENT; + return offset; +} + +#undef ROL16 + +/* + * Read a vmcs12 field. Since these can have varying lengths and we return + * one type, we chose the biggest type (u64) and zero-extend the return value + * to that size. Note that the caller, handle_vmread, might need to use only + * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of + * 64-bit fields are to be returned). + */ +static inline int vmcs12_read_any(struct vmcs12 *vmcs12, + unsigned long field, u64 *ret) +{ + short offset = vmcs_field_to_offset(field); + char *p; + + if (offset < 0) + return offset; + + p = (char *)vmcs12 + offset; + + switch (vmcs_field_width(field)) { + case VMCS_FIELD_WIDTH_NATURAL_WIDTH: + *ret = *((natural_width *)p); + return 0; + case VMCS_FIELD_WIDTH_U16: + *ret = *((u16 *)p); + return 0; + case VMCS_FIELD_WIDTH_U32: + *ret = *((u32 *)p); + return 0; + case VMCS_FIELD_WIDTH_U64: + *ret = *((u64 *)p); + return 0; + default: + WARN_ON(1); + return -ENOENT; + } +} + +static inline int vmcs12_write_any(struct vmcs12 *vmcs12, + unsigned long field, u64 field_value){ + short offset = vmcs_field_to_offset(field); + char *p = (char *)vmcs12 + offset; + + if (offset < 0) + return offset; + + switch (vmcs_field_width(field)) { + case VMCS_FIELD_WIDTH_U16: + *(u16 *)p = field_value; + return 0; + case VMCS_FIELD_WIDTH_U32: + *(u32 *)p = field_value; + return 0; + case VMCS_FIELD_WIDTH_U64: + *(u64 *)p = field_value; + return 0; + case VMCS_FIELD_WIDTH_NATURAL_WIDTH: + *(natural_width *)p = field_value; + return 0; + default: + WARN_ON(1); + return -ENOENT; + } + +} + +#endif /* __KVM_X86_VMX_VMCS12_H */ diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h index 132432f375c2..132432f375c2 100644 --- a/arch/x86/kvm/vmx_shadow_fields.h +++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S new file mode 100644 index 000000000000..bcef2c7e9bc4 --- /dev/null +++ b/arch/x86/kvm/vmx/vmenter.S @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <asm/asm.h> + + .text + +/** + * vmx_vmenter - VM-Enter the current loaded VMCS + * + * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME + * + * Returns: + * %RFLAGS.CF is set on VM-Fail Invalid + * %RFLAGS.ZF is set on VM-Fail Valid + * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit + * + * Note that VMRESUME/VMLAUNCH fall-through and return directly if + * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump + * to vmx_vmexit. + */ +ENTRY(vmx_vmenter) + /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ + je 2f + +1: vmresume + ret + +2: vmlaunch + ret + +3: cmpb $0, kvm_rebooting + jne 4f + call kvm_spurious_fault +4: ret + + .pushsection .fixup, "ax" +5: jmp 3b + .popsection + + _ASM_EXTABLE(1b, 5b) + _ASM_EXTABLE(2b, 5b) + +ENDPROC(vmx_vmenter) + +/** + * vmx_vmexit - Handle a VMX VM-Exit + * + * Returns: + * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit + * + * This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump + * here after hardware loads the host's state, i.e. this is the destination + * referred to by VMCS.HOST_RIP. 
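Backing up briefly to vmcs12.h: the vmcs12_read_any()/vmcs12_write_any() helpers defined above are the access path that nested VMREAD/VMWRITE emulation is built on. A minimal sketch of a consumer (the function name emulate_vmread_field() is illustrative only):

/* Sketch: resolve a guest VMREAD against the cached vmcs12. */
static int emulate_vmread_field(struct vmcs12 *vmcs12, unsigned long field,
				u64 *value)
{
	int err = vmcs12_read_any(vmcs12, field, value);

	if (err < 0)
		return err;	/* unknown encoding: signal VMfailValid */

	/* a 32-bit guest consumes only the low 32 bits of *value */
	return 0;
}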
+ */ +ENTRY(vmx_vmexit) + ret +ENDPROC(vmx_vmexit) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c new file mode 100644 index 000000000000..4d39f731bc33 --- /dev/null +++ b/arch/x86/kvm/vmx/vmx.c @@ -0,0 +1,7935 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables machines with Intel VT-x extensions to run virtual + * machines without emulation or binary translation. + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * + * Authors: + * Avi Kivity <avi@qumranet.com> + * Yaniv Kamay <yaniv@qumranet.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include <linux/frame.h> +#include <linux/highmem.h> +#include <linux/hrtimer.h> +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mod_devicetable.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/tboot.h> +#include <linux/trace_events.h> + +#include <asm/apic.h> +#include <asm/asm.h> +#include <asm/cpu.h> +#include <asm/debugreg.h> +#include <asm/desc.h> +#include <asm/fpu/internal.h> +#include <asm/io.h> +#include <asm/irq_remapping.h> +#include <asm/kexec.h> +#include <asm/perf_event.h> +#include <asm/mce.h> +#include <asm/mmu_context.h> +#include <asm/mshyperv.h> +#include <asm/spec-ctrl.h> +#include <asm/virtext.h> +#include <asm/vmx.h> + +#include "capabilities.h" +#include "cpuid.h" +#include "evmcs.h" +#include "irq.h" +#include "kvm_cache_regs.h" +#include "lapic.h" +#include "mmu.h" +#include "nested.h" +#include "ops.h" +#include "pmu.h" +#include "trace.h" +#include "vmcs.h" +#include "vmcs12.h" +#include "vmx.h" +#include "x86.h" + +MODULE_AUTHOR("Qumranet"); +MODULE_LICENSE("GPL"); + +static const struct x86_cpu_id vmx_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_VMX), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); + +bool __read_mostly enable_vpid = 1; +module_param_named(vpid, enable_vpid, bool, 0444); + +static bool __read_mostly enable_vnmi = 1; +module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); + +bool __read_mostly flexpriority_enabled = 1; +module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); + +bool __read_mostly enable_ept = 1; +module_param_named(ept, enable_ept, bool, S_IRUGO); + +bool __read_mostly enable_unrestricted_guest = 1; +module_param_named(unrestricted_guest, + enable_unrestricted_guest, bool, S_IRUGO); + +bool __read_mostly enable_ept_ad_bits = 1; +module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); + +static bool __read_mostly emulate_invalid_guest_state = true; +module_param(emulate_invalid_guest_state, bool, S_IRUGO); + +static bool __read_mostly fasteoi = 1; +module_param(fasteoi, bool, S_IRUGO); + +static bool __read_mostly enable_apicv = 1; +module_param(enable_apicv, bool, S_IRUGO); + +/* + * If nested=1, nested virtualization is supported, i.e., guests may use + * VMX and be a hypervisor for its own guests. If nested=0, guests may not + * use VMX instructions. 
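For orientation: these are the standard kvm-intel module parameters, so nested VMX, enabled by default in this series, can still be turned off at load time with something like modprobe kvm-intel nested=0. Note that most of them are registered read-only (0444/S_IRUGO) and therefore cannot be changed once the module is loaded.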
+ */ +static bool __read_mostly nested = 1; +module_param(nested, bool, S_IRUGO); + +static u64 __read_mostly host_xss; + +bool __read_mostly enable_pml = 1; +module_param_named(pml, enable_pml, bool, S_IRUGO); + +#define MSR_BITMAP_MODE_X2APIC 1 +#define MSR_BITMAP_MODE_X2APIC_APICV 2 + +#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL + +/* Guest_tsc -> host_tsc conversion requires 64-bit division. */ +static int __read_mostly cpu_preemption_timer_multi; +static bool __read_mostly enable_preemption_timer = 1; +#ifdef CONFIG_X86_64 +module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); +#endif + +#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) +#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE +#define KVM_VM_CR0_ALWAYS_ON \ + (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ + X86_CR0_WP | X86_CR0_PG | X86_CR0_PE) +#define KVM_CR4_GUEST_OWNED_BITS \ + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) + +#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE +#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) +#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) + +#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) + +#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \ + RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \ + RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \ + RTIT_STATUS_BYTECNT)) + +#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \ + (~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f) + +/* + * These 2 parameters are used to config the controls for Pause-Loop Exiting: + * ple_gap: upper bound on the amount of time between two successive + * executions of PAUSE in a loop. Also indicate if ple enabled. + * According to test, this time is usually smaller than 128 cycles. + * ple_window: upper bound on the amount of time a guest is allowed to execute + * in a PAUSE loop. Tests indicate that most spinlocks are held for + * less than 2^12 cycles + * Time is measured based on a counter that runs at the same rate as the TSC, + * refer SDM volume 3b section 21.6.13 & 22.1.3. + */ +static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; +module_param(ple_gap, uint, 0444); + +static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; +module_param(ple_window, uint, 0444); + +/* Default doubles per-vcpu window every exit. */ +static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW; +module_param(ple_window_grow, uint, 0444); + +/* Default resets per-vcpu window every exit to ple_window. */ +static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; +module_param(ple_window_shrink, uint, 0444); + +/* Default is to compute the maximum so we can never overflow. 
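The grow/shrink parameters above only make sense together with the resize policy applied on PAUSE exits, which lives elsewhere in KVM. A rough sketch of that policy as the defaults describe it (doubling, capped at ple_window_max; this helper illustrates the parameters' meaning and is not code from this patch):

/* Sketch: per-vCPU PLE window resizing as the defaults imply. */
static unsigned int grow_ple_window(unsigned int old)
{
	/* "Default doubles per-vcpu window every exit" */
	unsigned int new = old * ple_window_grow;

	return min(new, ple_window_max);
}

static unsigned int shrink_ple_window(void)
{
	/* "Default resets per-vcpu window every exit to ple_window" */
	return ple_window;
}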
*/ +static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; +module_param(ple_window_max, uint, 0444); + +/* Default is SYSTEM mode, 1 for host-guest mode */ +int __read_mostly pt_mode = PT_MODE_SYSTEM; +module_param(pt_mode, int, S_IRUGO); + +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); +static DEFINE_MUTEX(vmx_l1d_flush_mutex); + +/* Storage for pre module init parameter parsing */ +static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; + +static const struct { + const char *option; + bool for_parse; +} vmentry_l1d_param[] = { + [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, + [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, + [VMENTER_L1D_FLUSH_COND] = {"cond", true}, + [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, + [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, +}; + +#define L1D_CACHE_ORDER 4 +static void *vmx_l1d_flush_pages; + +static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) +{ + struct page *page; + unsigned int i; + + if (!enable_ept) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; + return 0; + } + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { + u64 msr; + + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; + return 0; + } + } + + /* If set to auto use the default l1tf mitigation method */ + if (l1tf == VMENTER_L1D_FLUSH_AUTO) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + l1tf = VMENTER_L1D_FLUSH_NEVER; + break; + case L1TF_MITIGATION_FLUSH_NOWARN: + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + l1tf = VMENTER_L1D_FLUSH_COND; + break; + case L1TF_MITIGATION_FULL: + case L1TF_MITIGATION_FULL_FORCE: + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + break; + } + } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + } + + if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && + !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { + page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); + if (!page) + return -ENOMEM; + vmx_l1d_flush_pages = page_address(page); + + /* + * Initialize each page with a different pattern in + * order to protect against KSM in the nested + * virtualization case. + */ + for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { + memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, + PAGE_SIZE); + } + } + + l1tf_vmx_mitigation = l1tf; + + if (l1tf != VMENTER_L1D_FLUSH_NEVER) + static_branch_enable(&vmx_l1d_should_flush); + else + static_branch_disable(&vmx_l1d_should_flush); + + if (l1tf == VMENTER_L1D_FLUSH_COND) + static_branch_enable(&vmx_l1d_flush_cond); + else + static_branch_disable(&vmx_l1d_flush_cond); + return 0; +} + +static int vmentry_l1d_flush_parse(const char *s) +{ + unsigned int i; + + if (s) { + for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { + if (vmentry_l1d_param[i].for_parse && + sysfs_streq(s, vmentry_l1d_param[i].option)) + return i; + } + } + return -EINVAL; +} + +static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) +{ + int l1tf, ret; + + l1tf = vmentry_l1d_flush_parse(s); + if (l1tf < 0) + return l1tf; + + if (!boot_cpu_has(X86_BUG_L1TF)) + return 0; + + /* + * Has vmx_init() run already? If not then this is the pre init + * parameter parsing. 
In that case just store the value and let + * vmx_init() do the proper setup after enable_ept has been + * established. + */ + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { + vmentry_l1d_flush_param = l1tf; + return 0; + } + + mutex_lock(&vmx_l1d_flush_mutex); + ret = vmx_setup_l1d_flush(l1tf); + mutex_unlock(&vmx_l1d_flush_mutex); + return ret; +} + +static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) +{ + if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) + return sprintf(s, "???\n"); + + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); +} + +static const struct kernel_param_ops vmentry_l1d_flush_ops = { + .set = vmentry_l1d_flush_set, + .get = vmentry_l1d_flush_get, +}; +module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); + +static bool guest_state_valid(struct kvm_vcpu *vcpu); +static u32 vmx_segment_access_rights(struct kvm_segment *var); +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type); + +void vmx_vmexit(void); + +static DEFINE_PER_CPU(struct vmcs *, vmxarea); +DEFINE_PER_CPU(struct vmcs *, current_vmcs); +/* + * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed + * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. + */ +static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); + +/* + * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we + * can find which vCPU should be woken up. + */ +static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); +static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); + +static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); +static DEFINE_SPINLOCK(vmx_vpid_lock); + +struct vmcs_config vmcs_config; +struct vmx_capability vmx_capability; + +#define VMX_SEGMENT_FIELD(seg) \ + [VCPU_SREG_##seg] = { \ + .selector = GUEST_##seg##_SELECTOR, \ + .base = GUEST_##seg##_BASE, \ + .limit = GUEST_##seg##_LIMIT, \ + .ar_bytes = GUEST_##seg##_AR_BYTES, \ + } + +static const struct kvm_vmx_segment_field { + unsigned selector; + unsigned base; + unsigned limit; + unsigned ar_bytes; +} kvm_vmx_segment_fields[] = { + VMX_SEGMENT_FIELD(CS), + VMX_SEGMENT_FIELD(DS), + VMX_SEGMENT_FIELD(ES), + VMX_SEGMENT_FIELD(FS), + VMX_SEGMENT_FIELD(GS), + VMX_SEGMENT_FIELD(SS), + VMX_SEGMENT_FIELD(TR), + VMX_SEGMENT_FIELD(LDTR), +}; + +u64 host_efer; + +/* + * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm + * will emulate SYSCALL in legacy mode if the vendor string in guest + * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To + * support this emulation, IA32_STAR must always be included in + * vmx_msr_index[], even in i386 builds. + */ +const u32 vmx_msr_index[] = { +#ifdef CONFIG_X86_64 + MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, +#endif + MSR_EFER, MSR_TSC_AUX, MSR_STAR, +}; + +#if IS_ENABLED(CONFIG_HYPERV) +static bool __read_mostly enlightened_vmcs = true; +module_param(enlightened_vmcs, bool, 0444); + +/* check_ept_pointer() should be under protection of ept_pointer_lock.
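Note that registering vmentry_l1d_flush through module_param_cb() with mode 0644 makes the L1TF mitigation switchable at runtime: on a live system the parameter appears as /sys/module/kvm_intel/parameters/vmentry_l1d_flush and accepts the same "auto"/"never"/"cond"/"always" strings parsed above, with vmx_l1d_flush_mutex serializing reconfiguration against concurrent writers.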
*/ +static void check_ept_pointer_match(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + u64 tmp_eptp = INVALID_PAGE; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!VALID_PAGE(tmp_eptp)) { + tmp_eptp = to_vmx(vcpu)->ept_pointer; + } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { + to_kvm_vmx(kvm)->ept_pointers_match + = EPT_POINTERS_MISMATCH; + return; + } + } + + to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; +} + +int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, + void *data) +{ + struct kvm_tlb_range *range = data; + + return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, + range->pages); +} + +static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, + struct kvm_vcpu *vcpu, struct kvm_tlb_range *range) +{ + u64 ept_pointer = to_vmx(vcpu)->ept_pointer; + + /* + * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address + * of the base of EPT PML4 table, strip off EPT configuration + * information. + */ + if (range) + return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK, + kvm_fill_hv_flush_list_func, (void *)range); + else + return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK); +} + +static int hv_remote_flush_tlb_with_range(struct kvm *kvm, + struct kvm_tlb_range *range) +{ + struct kvm_vcpu *vcpu; + int ret = -ENOTSUPP, i; + + spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); + + if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) + check_ept_pointer_match(kvm); + + if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { + kvm_for_each_vcpu(i, vcpu, kvm) { + /* If ept_pointer is invalid pointer, bypass flush request. */ + if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) + ret |= __hv_remote_flush_tlb_with_range( + kvm, vcpu, range); + } + } else { + ret = __hv_remote_flush_tlb_with_range(kvm, + kvm_get_vcpu(kvm, 0), range); + } + + spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); + return ret; +} +static int hv_remote_flush_tlb(struct kvm *kvm) +{ + return hv_remote_flush_tlb_with_range(kvm, NULL); +} + +#endif /* IS_ENABLED(CONFIG_HYPERV) */ + +/* + * Comment's format: document - errata name - stepping - processor name. 
+ * Refer from + * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp + */ +static u32 vmx_preemption_cpu_tfms[] = { +/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ +0x000206E6, +/* 323056.pdf - AAX65 - C2 - Xeon L3406 */ +/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ +/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ +0x00020652, +/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ +0x00020655, +/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ +/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ +/* + * 320767.pdf - AAP86 - B1 - + * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile + */ +0x000106E5, +/* 321333.pdf - AAM126 - C0 - Xeon 3500 */ +0x000106A0, +/* 321333.pdf - AAM126 - C1 - Xeon 3500 */ +0x000106A1, +/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ +0x000106A4, + /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ + /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ + /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ +0x000106A5, + /* Xeon E3-1220 V2 */ +0x000306A8, +}; + +static inline bool cpu_has_broken_vmx_preemption_timer(void) +{ + u32 eax = cpuid_eax(0x00000001), i; + + /* Clear the reserved bits */ + eax &= ~(0x3U << 14 | 0xfU << 28); + for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) + if (eax == vmx_preemption_cpu_tfms[i]) + return true; + + return false; +} + +static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) +{ + return flexpriority_enabled && lapic_in_kernel(vcpu); +} + +static inline bool report_flexpriority(void) +{ + return flexpriority_enabled; +} + +static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) +{ + int i; + + for (i = 0; i < vmx->nmsrs; ++i) + if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) + return i; + return -1; +} + +struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) +{ + int i; + + i = __find_msr_index(vmx, msr); + if (i >= 0) + return &vmx->guest_msrs[i]; + return NULL; +} + +void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) +{ + vmcs_clear(loaded_vmcs->vmcs); + if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) + vmcs_clear(loaded_vmcs->shadow_vmcs); + loaded_vmcs->cpu = -1; + loaded_vmcs->launched = 0; +} + +#ifdef CONFIG_KEXEC_CORE +/* + * This bitmap is used to indicate whether the vmclear + * operation is enabled on all cpus. All disabled by + * default. 
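For readers decoding the errata table: the values are raw CPUID.1.EAX signatures, which is why cpu_has_broken_vmx_preemption_timer() above masks off the reserved bits (15:14 and 31:28) before comparing. A sketch of how such a signature decomposes, following the SDM layout (the helper is illustrative):

/* Sketch: split a CPUID.1.EAX signature into family/model/stepping. */
static void decode_signature(u32 eax)
{
	u8 stepping = eax & 0xf;
	u8 family = (eax >> 8) & 0xf;
	u8 model = (eax >> 4) & 0xf;

	if (family == 0x6 || family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;	/* extended model */

	/* e.g. 0x000106A5 -> family 6, model 0x1a (Nehalem), stepping 5 */
	pr_info("family %u, model %#x, stepping %u\n", family, model, stepping);
}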
*/ +static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; + +static inline void crash_enable_local_vmclear(int cpu) +{ + cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); +} + +static inline void crash_disable_local_vmclear(int cpu) +{ + cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); +} + +static inline int crash_local_vmclear_enabled(int cpu) +{ + return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); +} + +static void crash_vmclear_local_loaded_vmcss(void) +{ + int cpu = raw_smp_processor_id(); + struct loaded_vmcs *v; + + if (!crash_local_vmclear_enabled(cpu)) + return; + + list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), + loaded_vmcss_on_cpu_link) + vmcs_clear(v->vmcs); +} +#else +static inline void crash_enable_local_vmclear(int cpu) { } +static inline void crash_disable_local_vmclear(int cpu) { } +#endif /* CONFIG_KEXEC_CORE */ + +static void __loaded_vmcs_clear(void *arg) +{ + struct loaded_vmcs *loaded_vmcs = arg; + int cpu = raw_smp_processor_id(); + + if (loaded_vmcs->cpu != cpu) + return; /* vcpu migration can race with cpu offline */ + if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) + per_cpu(current_vmcs, cpu) = NULL; + crash_disable_local_vmclear(cpu); + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); + + /* + * Ensure that the update to loaded_vmcs->loaded_vmcss_on_cpu_link + * happens before setting loaded_vmcs->cpu to -1, which is done in + * loaded_vmcs_init. Otherwise, another CPU could see cpu == -1 first + * and then add the VMCS to the per-CPU list before it is deleted. + */ + smp_wmb(); + + loaded_vmcs_init(loaded_vmcs); + crash_enable_local_vmclear(cpu); +} + +void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) +{ + int cpu = loaded_vmcs->cpu; + + if (cpu != -1) + smp_call_function_single(cpu, + __loaded_vmcs_clear, loaded_vmcs, 1); +} + +static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, + unsigned field) +{ + bool ret; + u32 mask = 1 << (seg * SEG_FIELD_NR + field); + + if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { + vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); + vmx->segment_cache.bitmask = 0; + } + ret = vmx->segment_cache.bitmask & mask; + vmx->segment_cache.bitmask |= mask; + return ret; +} + +static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) +{ + u16 *p = &vmx->segment_cache.seg[seg].selector; + + if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) + *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); + return *p; +} + +static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) +{ + ulong *p = &vmx->segment_cache.seg[seg].base; + + if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) + *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); + return *p; +} + +static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) +{ + u32 *p = &vmx->segment_cache.seg[seg].limit; + + if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) + *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); + return *p; +} + +static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) +{ + u32 *p = &vmx->segment_cache.seg[seg].ar; + + if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) + *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); + return *p; +} + +void update_exception_bitmap(struct kvm_vcpu *vcpu) +{ + u32 eb; + + eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | + (1u << DB_VECTOR) | (1u << AC_VECTOR); + /* + * Guest access to VMware backdoor ports could legitimately + * trigger #GP because
of TSS I/O permission bitmap. + * We intercept those #GP and allow access to them anyway + * as VMware does. + */ + if (enable_vmware_backdoor) + eb |= (1u << GP_VECTOR); + if ((vcpu->guest_debug & + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) + eb |= 1u << BP_VECTOR; + if (to_vmx(vcpu)->rmode.vm86_active) + eb = ~0; + if (enable_ept) + eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ + + /* When we are running a nested L2 guest and L1 specified for it a + * certain exception bitmap, we must trap the same exceptions and pass + * them to L1. When running L2, we will only handle the exceptions + * specified above if L1 did not want them. + */ + if (is_guest_mode(vcpu)) + eb |= get_vmcs12(vcpu)->exception_bitmap; + + vmcs_write32(EXCEPTION_BITMAP, eb); +} + +/* + * Check if MSR is intercepted for currently loaded MSR bitmap. + */ +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, + unsigned long entry, unsigned long exit) +{ + vm_entry_controls_clearbit(vmx, entry); + vm_exit_controls_clearbit(vmx, exit); +} + +static int find_msr(struct vmx_msrs *m, unsigned int msr) +{ + unsigned int i; + + for (i = 0; i < m->nr; ++i) { + if (m->val[i].index == msr) + return i; + } + return -ENOENT; +} + +static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) +{ + int i; + struct msr_autoload *m = &vmx->msr_autoload; + + switch (msr) { + case MSR_EFER: + if (cpu_has_load_ia32_efer()) { + clear_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_EFER, + VM_EXIT_LOAD_IA32_EFER); + return; + } + break; + case MSR_CORE_PERF_GLOBAL_CTRL: + if (cpu_has_load_perf_global_ctrl()) { + clear_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, + VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); + return; + } + break; + } + i = find_msr(&m->guest, msr); + if (i < 0) + goto skip_guest; + --m->guest.nr; + m->guest.val[i] = m->guest.val[m->guest.nr]; + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); + +skip_guest: + i = find_msr(&m->host, msr); + if (i < 0) + return; + + --m->host.nr; + m->host.val[i] = m->host.val[m->host.nr]; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); +} + +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, + unsigned long entry, unsigned long exit, + unsigned long guest_val_vmcs, unsigned long host_val_vmcs, + u64 guest_val, u64 host_val) +{ + vmcs_write64(guest_val_vmcs, guest_val); + if (host_val_vmcs != HOST_IA32_EFER) + vmcs_write64(host_val_vmcs, host_val); + vm_entry_controls_setbit(vmx, entry); + vm_exit_controls_setbit(vmx, exit); +} + +static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + u64 guest_val, u64 host_val, bool entry_only) +{ + int i, j = 0; + struct msr_autoload *m = &vmx->msr_autoload; + + switch (msr) { + case MSR_EFER: + if (cpu_has_load_ia32_efer()) { + add_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_EFER, + VM_EXIT_LOAD_IA32_EFER, + GUEST_IA32_EFER, + HOST_IA32_EFER, + guest_val, host_val); + return; + } + break; + case MSR_CORE_PERF_GLOBAL_CTRL: + if 
(cpu_has_load_perf_global_ctrl()) { + add_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, + VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, + GUEST_IA32_PERF_GLOBAL_CTRL, + HOST_IA32_PERF_GLOBAL_CTRL, + guest_val, host_val); + return; + } + break; + case MSR_IA32_PEBS_ENABLE: + /* PEBS needs a quiescent period after being disabled (to write + * a record). Disabling PEBS through VMX MSR swapping doesn't + * provide that period, so a CPU could write host's record into + * guest's memory. + */ + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + i = find_msr(&m->guest, msr); + if (!entry_only) + j = find_msr(&m->host, msr); + + if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { + printk_once(KERN_WARNING "Not enough msr switch entries. " + "Can't add msr %x\n", msr); + return; + } + if (i < 0) { + i = m->guest.nr++; + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); + } + m->guest.val[i].index = msr; + m->guest.val[i].value = guest_val; + + if (entry_only) + return; + + if (j < 0) { + j = m->host.nr++; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); + } + m->host.val[j].index = msr; + m->host.val[j].value = host_val; +} + +static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) +{ + u64 guest_efer = vmx->vcpu.arch.efer; + u64 ignore_bits = 0; + + if (!enable_ept) { + /* + * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing + * host CPUID is more efficient than testing guest CPUID + * or CR4. Host SMEP is anyway a requirement for guest SMEP. + */ + if (boot_cpu_has(X86_FEATURE_SMEP)) + guest_efer |= EFER_NX; + else if (!(guest_efer & EFER_NX)) + ignore_bits |= EFER_NX; + } + + /* + * LMA and LME handled by hardware; SCE meaningless outside long mode. + */ + ignore_bits |= EFER_SCE; +#ifdef CONFIG_X86_64 + ignore_bits |= EFER_LMA | EFER_LME; + /* SCE is meaningful only in long mode on Intel */ + if (guest_efer & EFER_LMA) + ignore_bits &= ~(u64)EFER_SCE; +#endif + + /* + * On EPT, we can't emulate NX, so we must switch EFER atomically. + * On CPUs that support "load IA32_EFER", always switch EFER + * atomically, since it's faster than switching it manually. + */ + if (cpu_has_load_ia32_efer() || + (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { + if (!(guest_efer & EFER_LMA)) + guest_efer &= ~EFER_LME; + if (guest_efer != host_efer) + add_atomic_switch_msr(vmx, MSR_EFER, + guest_efer, host_efer, false); + else + clear_atomic_switch_msr(vmx, MSR_EFER); + return false; + } else { + clear_atomic_switch_msr(vmx, MSR_EFER); + + guest_efer &= ~ignore_bits; + guest_efer |= host_efer & ignore_bits; + + vmx->guest_msrs[efer_offset].data = guest_efer; + vmx->guest_msrs[efer_offset].mask = ~ignore_bits; + + return true; + } +} + +#ifdef CONFIG_X86_32 +/* + * On 32-bit kernels, VM exits still load the FS and GS bases from the + * VMCS rather than the segment table. KVM uses this helper to figure + * out the current bases to poke them into the VMCS before entry. 
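Before segment_base() below: the only non-obvious step in it is get_desc_base(), which reassembles the base address that a GDT/LDT descriptor scatters across three fields. The equivalent arithmetic, for reference (field names follow arch/x86's desc_struct; treat this as a sketch):

/* Sketch: descriptor base = bits 15:0, 23:16 and 31:24 stitched together. */
static unsigned long sketch_desc_base(const struct desc_struct *desc)
{
	return desc->base0 |
	       ((unsigned long)desc->base1 << 16) |
	       ((unsigned long)desc->base2 << 24);
}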
+ */ +static unsigned long segment_base(u16 selector) +{ + struct desc_struct *table; + unsigned long v; + + if (!(selector & ~SEGMENT_RPL_MASK)) + return 0; + + table = get_current_gdt_ro(); + + if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { + u16 ldt_selector = kvm_read_ldt(); + + if (!(ldt_selector & ~SEGMENT_RPL_MASK)) + return 0; + + table = (struct desc_struct *)segment_base(ldt_selector); + } + v = get_desc_base(&table[selector >> 3]); + return v; +} +#endif + +static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) +{ + u32 i; + + wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); + wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); + wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); + wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); + for (i = 0; i < addr_range; i++) { + wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); + wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); + } +} + +static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) +{ + u32 i; + + rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); + rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); + rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); + rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); + for (i = 0; i < addr_range; i++) { + rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); + rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); + } +} + +static void pt_guest_enter(struct vcpu_vmx *vmx) +{ + if (pt_mode == PT_MODE_SYSTEM) + return; + + /* + * GUEST_IA32_RTIT_CTL is already set in the VMCS. + * Save host state before VM entry. + */ + rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); + if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { + wrmsrl(MSR_IA32_RTIT_CTL, 0); + pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); + pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); + } +} + +static void pt_guest_exit(struct vcpu_vmx *vmx) +{ + if (pt_mode == PT_MODE_SYSTEM) + return; + + if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { + pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); + pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); + } + + /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */ + wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); +} + +void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs_host_state *host_state; +#ifdef CONFIG_X86_64 + int cpu = raw_smp_processor_id(); +#endif + unsigned long fs_base, gs_base; + u16 fs_sel, gs_sel; + int i; + + vmx->req_immediate_exit = false; + + /* + * Note that guest MSRs to be saved/restored can also be changed + * when guest state is loaded. This happens when guest transitions + * to/from long-mode by setting MSR_EFER.LMA. + */ + if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { + vmx->guest_msrs_dirty = false; + for (i = 0; i < vmx->save_nmsrs; ++i) + kvm_set_shared_msr(vmx->guest_msrs[i].index, + vmx->guest_msrs[i].data, + vmx->guest_msrs[i].mask); + + } + + if (vmx->loaded_cpu_state) + return; + + vmx->loaded_cpu_state = vmx->loaded_vmcs; + host_state = &vmx->loaded_cpu_state->host_state; + + /* + * Set host fs and gs selectors. Unfortunately, 22.2.3 does not + * allow segment selectors with cpl > 0 or ti == 1. 
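Concretely, for the rule just cited: a selector such as 0x33 carries RPL 3 in its low two bits, so (sel & 7) != 0. The code that follows therefore writes 0 into HOST_FS_SELECTOR/HOST_GS_SELECTOR for any selector with a non-zero RPL or the TI bit set, and relies on reloading the real selector by hand after the VM exit.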
+ */ + host_state->ldt_sel = kvm_read_ldt(); + +#ifdef CONFIG_X86_64 + savesegment(ds, host_state->ds_sel); + savesegment(es, host_state->es_sel); + + gs_base = cpu_kernelmode_gs_base(cpu); + if (likely(is_64bit_mm(current->mm))) { + save_fsgs_for_kvm(); + fs_sel = current->thread.fsindex; + gs_sel = current->thread.gsindex; + fs_base = current->thread.fsbase; + vmx->msr_host_kernel_gs_base = current->thread.gsbase; + } else { + savesegment(fs, fs_sel); + savesegment(gs, gs_sel); + fs_base = read_msr(MSR_FS_BASE); + vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); + } + + wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); +#else + savesegment(fs, fs_sel); + savesegment(gs, gs_sel); + fs_base = segment_base(fs_sel); + gs_base = segment_base(gs_sel); +#endif + + if (unlikely(fs_sel != host_state->fs_sel)) { + if (!(fs_sel & 7)) + vmcs_write16(HOST_FS_SELECTOR, fs_sel); + else + vmcs_write16(HOST_FS_SELECTOR, 0); + host_state->fs_sel = fs_sel; + } + if (unlikely(gs_sel != host_state->gs_sel)) { + if (!(gs_sel & 7)) + vmcs_write16(HOST_GS_SELECTOR, gs_sel); + else + vmcs_write16(HOST_GS_SELECTOR, 0); + host_state->gs_sel = gs_sel; + } + if (unlikely(fs_base != host_state->fs_base)) { + vmcs_writel(HOST_FS_BASE, fs_base); + host_state->fs_base = fs_base; + } + if (unlikely(gs_base != host_state->gs_base)) { + vmcs_writel(HOST_GS_BASE, gs_base); + host_state->gs_base = gs_base; + } +} + +static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) +{ + struct vmcs_host_state *host_state; + + if (!vmx->loaded_cpu_state) + return; + + WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs); + host_state = &vmx->loaded_cpu_state->host_state; + + ++vmx->vcpu.stat.host_state_reload; + vmx->loaded_cpu_state = NULL; + +#ifdef CONFIG_X86_64 + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); +#endif + if (host_state->ldt_sel || (host_state->gs_sel & 7)) { + kvm_load_ldt(host_state->ldt_sel); +#ifdef CONFIG_X86_64 + load_gs_index(host_state->gs_sel); +#else + loadsegment(gs, host_state->gs_sel); +#endif + } + if (host_state->fs_sel & 7) + loadsegment(fs, host_state->fs_sel); +#ifdef CONFIG_X86_64 + if (unlikely(host_state->ds_sel | host_state->es_sel)) { + loadsegment(ds, host_state->ds_sel); + loadsegment(es, host_state->es_sel); + } +#endif + invalidate_tss_limit(); +#ifdef CONFIG_X86_64 + wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); +#endif + load_fixmap_gdt(raw_smp_processor_id()); +} + +#ifdef CONFIG_X86_64 +static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) +{ + preempt_disable(); + if (vmx->loaded_cpu_state) + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); + preempt_enable(); + return vmx->msr_guest_kernel_gs_base; +} + +static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) +{ + preempt_disable(); + if (vmx->loaded_cpu_state) + wrmsrl(MSR_KERNEL_GS_BASE, data); + preempt_enable(); + vmx->msr_guest_kernel_gs_base = data; +} +#endif + +static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) +{ + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + struct pi_desc old, new; + unsigned int dest; + + /* + * In case of hot-plug or hot-unplug, we may have to undo + * vmx_vcpu_pi_put even if there is no assigned device. And we + * always keep PI.NDST up to date for simplicity: it makes the + * code easier, and CPU migration is not a fast path. 
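One piece of context for the cmpxchg loop that follows: PI.NDST is the notification destination in the posted-interrupt descriptor, and its format depends on the APIC mode. With x2APIC enabled the full 32-bit APIC ID is stored as-is, while in xAPIC mode the 8-bit ID must sit in bits 15:8, which is exactly what the (dest << 8) & 0xFF00 below computes; SN is the suppress-notification bit, cleared once the vCPU is settled on its new CPU.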
+ */ + if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) + return; + + /* + * First handle the simple case where no cmpxchg is necessary; just + * allow posting non-urgent interrupts. + * + * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change + * PI.NDST: pi_post_block will do it for us and the wakeup_handler + * expects the VCPU to be on the blocked_vcpu_list that matches + * PI.NDST. + */ + if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || + vcpu->cpu == cpu) { + pi_clear_sn(pi_desc); + return; + } + + /* The full case. */ + do { + old.control = new.control = pi_desc->control; + + dest = cpu_physical_id(cpu); + + if (x2apic_enabled()) + new.ndst = dest; + else + new.ndst = (dest << 8) & 0xFF00; + + new.sn = 0; + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); +} + +/* + * Switches to the specified vcpu, until a matching vcpu_put(), but assumes + * the vcpu mutex is already taken. + */ +void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + bool already_loaded = vmx->loaded_vmcs->cpu == cpu; + + if (!already_loaded) { + loaded_vmcs_clear(vmx->loaded_vmcs); + local_irq_disable(); + crash_disable_local_vmclear(cpu); + + /* + * The read of loaded_vmcs->cpu must be ordered before fetching + * loaded_vmcs->loaded_vmcss_on_cpu_link. + * See the comments in __loaded_vmcs_clear(). + */ + smp_rmb(); + + list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, + &per_cpu(loaded_vmcss_on_cpu, cpu)); + crash_enable_local_vmclear(cpu); + local_irq_enable(); + } + + if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { + per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; + vmcs_load(vmx->loaded_vmcs->vmcs); + indirect_branch_prediction_barrier(); + } + + if (!already_loaded) { + void *gdt = get_current_gdt_ro(); + unsigned long sysenter_esp; + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + + /* + * Linux uses per-cpu TSS and GDT, so set these when switching + * processors. See 22.2.4. + */ + vmcs_writel(HOST_TR_BASE, + (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); + vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ + + /* + * A VM exit changes the host TR limit to 0x67. This is okay, + * since 0x67 covers everything except the IO bitmap, and we + * have code to handle the IO bitmap + * being lost after a VM exit.
+ */ + BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67); + + rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); + vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ + + vmx->loaded_vmcs->cpu = cpu; + } + + /* Set up the TSC multiplier */ + if (kvm_has_tsc_control && + vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) + decache_tsc_multiplier(vmx); + + vmx_vcpu_pi_load(vcpu, cpu); + vmx->host_pkru = read_pkru(); + vmx->host_debugctlmsr = get_debugctlmsr(); +} + +static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) +{ + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + + if (!kvm_arch_has_assigned_device(vcpu->kvm) || + !irq_remapping_cap(IRQ_POSTING_CAP) || + !kvm_vcpu_apicv_active(vcpu)) + return; + + /* Set SN when the vCPU is preempted */ + if (vcpu->preempted) + pi_set_sn(pi_desc); +} + +void vmx_vcpu_put(struct kvm_vcpu *vcpu) +{ + vmx_vcpu_pi_put(vcpu); + + vmx_prepare_switch_to_host(to_vmx(vcpu)); +} + +static bool emulation_required(struct kvm_vcpu *vcpu) +{ + return emulate_invalid_guest_state && !guest_state_valid(vcpu); +} + +static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); + +unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) +{ + unsigned long rflags, save_rflags; + + if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { + __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + rflags = vmcs_readl(GUEST_RFLAGS); + if (to_vmx(vcpu)->rmode.vm86_active) { + rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; + save_rflags = to_vmx(vcpu)->rmode.save_rflags; + rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; + } + to_vmx(vcpu)->rflags = rflags; + } + return to_vmx(vcpu)->rflags; +} + +void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) +{ + unsigned long old_rflags = vmx_get_rflags(vcpu); + + __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + to_vmx(vcpu)->rflags = rflags; + if (to_vmx(vcpu)->rmode.vm86_active) { + to_vmx(vcpu)->rmode.save_rflags = rflags; + rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; + } + vmcs_writel(GUEST_RFLAGS, rflags); + + if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) + to_vmx(vcpu)->emulation_required = emulation_required(vcpu); +} + +u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) +{ + u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); + int ret = 0; + + if (interruptibility & GUEST_INTR_STATE_STI) + ret |= KVM_X86_SHADOW_INT_STI; + if (interruptibility & GUEST_INTR_STATE_MOV_SS) + ret |= KVM_X86_SHADOW_INT_MOV_SS; + + return ret; +} + +void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) +{ + u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); + u32 interruptibility = interruptibility_old; + + interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); + + if (mask & KVM_X86_SHADOW_INT_MOV_SS) + interruptibility |= GUEST_INTR_STATE_MOV_SS; + else if (mask & KVM_X86_SHADOW_INT_STI) + interruptibility |= GUEST_INTR_STATE_STI; + + if (interruptibility != interruptibility_old) + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); +} + +static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long value; + + /* + * Any MSR write that attempts to change bits marked reserved will + * cause a #GP fault. + */ + if (data & vmx->pt_desc.ctl_bitmask) + return 1; + + /* + * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will + * result in a #GP unless the same write also clears TraceEn.
+ */ + if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && + ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) + return 1; + + /* + * A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears the ToPA and + * FabricEn bits will cause a #GP if + * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0 + */ + if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) && + !(data & RTIT_CTL_FABRIC_EN) && + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_single_range_output)) + return 1; + + /* + * Check the MTCFreq, CycThresh and PSBFreq encodings: any MSR write + * that uses encodings marked reserved will cause a #GP fault. + */ + value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); + if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && + !test_bit((data & RTIT_CTL_MTC_RANGE) >> + RTIT_CTL_MTC_RANGE_OFFSET, &value)) + return 1; + value = intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_cycle_thresholds); + if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && + !test_bit((data & RTIT_CTL_CYC_THRESH) >> + RTIT_CTL_CYC_THRESH_OFFSET, &value)) + return 1; + value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); + if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && + !test_bit((data & RTIT_CTL_PSB_FREQ) >> + RTIT_CTL_PSB_FREQ_OFFSET, &value)) + return 1; + + /* + * If an ADDRx_CFG field is reserved or its encoding is greater than 2, + * the write will cause a #GP fault. + */ + value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; + if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) + return 1; + value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; + if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) + return 1; + value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; + if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) + return 1; + value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; + if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) + return 1; + + return 0; +} + + +static void skip_emulated_instruction(struct kvm_vcpu *vcpu) +{ + unsigned long rip; + + rip = kvm_rip_read(vcpu); + rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + kvm_rip_write(vcpu, rip); + + /* skipping an emulated instruction also counts */ + vmx_set_interrupt_shadow(vcpu, 0); +} + +static void vmx_clear_hlt(struct kvm_vcpu *vcpu) +{ + /* + * Ensure that we clear the HLT state in the VMCS. We don't need to + * explicitly skip the instruction because if the HLT state is set, + * then the instruction is already executing and RIP has already been + * advanced.
+ */ + if (kvm_hlt_in_guest(vcpu->kvm) && + vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) + vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); +} + +static void vmx_queue_exception(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned nr = vcpu->arch.exception.nr; + bool has_error_code = vcpu->arch.exception.has_error_code; + u32 error_code = vcpu->arch.exception.error_code; + u32 intr_info = nr | INTR_INFO_VALID_MASK; + + kvm_deliver_exception_payload(vcpu); + + if (has_error_code) { + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); + intr_info |= INTR_INFO_DELIVER_CODE_MASK; + } + + if (vmx->rmode.vm86_active) { + int inc_eip = 0; + if (kvm_exception_is_soft(nr)) + inc_eip = vcpu->arch.event_exit_inst_len; + if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return; + } + + WARN_ON_ONCE(vmx->emulation_required); + + if (kvm_exception_is_soft(nr)) { + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmx->vcpu.arch.event_exit_inst_len); + intr_info |= INTR_TYPE_SOFT_EXCEPTION; + } else + intr_info |= INTR_TYPE_HARD_EXCEPTION; + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); + + vmx_clear_hlt(vcpu); +} + +static bool vmx_rdtscp_supported(void) +{ + return cpu_has_vmx_rdtscp(); +} + +static bool vmx_invpcid_supported(void) +{ + return cpu_has_vmx_invpcid(); +} + +/* + * Swap MSR entry in host/guest MSR entry array. + */ +static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) +{ + struct shared_msr_entry tmp; + + tmp = vmx->guest_msrs[to]; + vmx->guest_msrs[to] = vmx->guest_msrs[from]; + vmx->guest_msrs[from] = tmp; +} + +/* + * Set up the vmcs to automatically save and restore system + * msrs. Don't touch the 64-bit msrs if the guest is in legacy + * mode, as fiddling with msrs is very expensive. + */ +static void setup_msrs(struct vcpu_vmx *vmx) +{ + int save_nmsrs, index; + + save_nmsrs = 0; +#ifdef CONFIG_X86_64 + /* + * The SYSCALL MSRs are only needed on long mode guests, and only + * when EFER.SCE is set. + */ + if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { + index = __find_msr_index(vmx, MSR_STAR); + if (index >= 0) + move_msr_up(vmx, index, save_nmsrs++); + index = __find_msr_index(vmx, MSR_LSTAR); + if (index >= 0) + move_msr_up(vmx, index, save_nmsrs++); + index = __find_msr_index(vmx, MSR_SYSCALL_MASK); + if (index >= 0) + move_msr_up(vmx, index, save_nmsrs++); + } +#endif + index = __find_msr_index(vmx, MSR_EFER); + if (index >= 0 && update_transition_efer(vmx, index)) + move_msr_up(vmx, index, save_nmsrs++); + index = __find_msr_index(vmx, MSR_TSC_AUX); + if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) + move_msr_up(vmx, index, save_nmsrs++); + + vmx->save_nmsrs = save_nmsrs; + vmx->guest_msrs_dirty = true; + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(&vmx->vcpu); +} + +static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + if (is_guest_mode(vcpu) && + (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) + return vcpu->arch.tsc_offset - vmcs12->tsc_offset; + + return vcpu->arch.tsc_offset; +} + +static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + u64 g_tsc_offset = 0; + + /* + * We're here if L1 chose not to trap WRMSR to TSC. 
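+ * (A worked example with made-up numbers: if L1's write lands as offset
+ * T while L2 runs with vmcs12->tsc_offset = O, the hardware TSC_OFFSET
+ * is programmed to T + O, so L2_tsc = host_tsc + T + O = L1_tsc + O;
+ * L2 keeps its own offset O on top of whatever TSC L1 just set.)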
According + * to the spec, this should set L1's TSC; the offset that L1 + * set for L2 remains unchanged, and still needs to be added + * to the newly set TSC to get L2's TSC. + */ + if (is_guest_mode(vcpu) && + (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) + g_tsc_offset = vmcs12->tsc_offset; + + trace_kvm_write_tsc_offset(vcpu->vcpu_id, + vcpu->arch.tsc_offset - g_tsc_offset, + offset); + vmcs_write64(TSC_OFFSET, offset + g_tsc_offset); + return offset + g_tsc_offset; +} + +/* + * nested_vmx_allowed() checks whether a guest should be allowed to use VMX + * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for + * all guests if the "nested" module option is off, and can also be disabled + * for a single guest by disabling its VMX cpuid bit. + */ +bool nested_vmx_allowed(struct kvm_vcpu *vcpu) +{ + return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); +} + +static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, + uint64_t val) +{ + uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; + + return !(val & ~valid_bits); +} + +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) +{ + switch (msr->index) { + case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + if (!nested) + return 1; + return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); + default: + return 1; + } + + return 0; +} + +/* + * Reads an MSR value (msr_info->index) into msr_info->data. + * Returns 0 on success, non-0 otherwise. + * Assumes vcpu_load() was already called. + */ +static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct shared_msr_entry *msr; + u32 index; + + switch (msr_info->index) { +#ifdef CONFIG_X86_64 + case MSR_FS_BASE: + msr_info->data = vmcs_readl(GUEST_FS_BASE); + break; + case MSR_GS_BASE: + msr_info->data = vmcs_readl(GUEST_GS_BASE); + break; + case MSR_KERNEL_GS_BASE: + msr_info->data = vmx_read_guest_kernel_gs_base(vmx); + break; +#endif + case MSR_EFER: + return kvm_get_msr_common(vcpu, msr_info); + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + msr_info->data = to_vmx(vcpu)->spec_ctrl; + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = to_vmx(vcpu)->arch_capabilities; + break; + case MSR_IA32_SYSENTER_CS: + msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); + break; + case MSR_IA32_SYSENTER_EIP: + msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); + break; + case MSR_IA32_SYSENTER_ESP: + msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); + break; + case MSR_IA32_BNDCFGS: + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) + return 1; + msr_info->data = vmcs_read64(GUEST_BNDCFGS); + break; + case MSR_IA32_MCG_EXT_CTL: + if (!msr_info->host_initiated && + !(vmx->msr_ia32_feature_control & + FEATURE_CONTROL_LMCE)) + return 1; + msr_info->data = vcpu->arch.mcg_ext_ctl; + break; + case MSR_IA32_FEATURE_CONTROL: + msr_info->data = vmx->msr_ia32_feature_control; + break; + case MSR_IA32_VMX_BASIC ...
MSR_IA32_VMX_VMFUNC: + if (!nested_vmx_allowed(vcpu)) + return 1; + return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, + &msr_info->data); + case MSR_IA32_XSS: + if (!vmx_xsaves_supported()) + return 1; + msr_info->data = vcpu->arch.ia32_xss; + break; + case MSR_IA32_RTIT_CTL: + if (pt_mode != PT_MODE_HOST_GUEST) + return 1; + msr_info->data = vmx->pt_desc.guest.ctl; + break; + case MSR_IA32_RTIT_STATUS: + if (pt_mode != PT_MODE_HOST_GUEST) + return 1; + msr_info->data = vmx->pt_desc.guest.status; + break; + case MSR_IA32_RTIT_CR3_MATCH: + if ((pt_mode != PT_MODE_HOST_GUEST) || + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_cr3_filtering)) + return 1; + msr_info->data = vmx->pt_desc.guest.cr3_match; + break; + case MSR_IA32_RTIT_OUTPUT_BASE: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (!intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_topa_output) && + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_single_range_output))) + return 1; + msr_info->data = vmx->pt_desc.guest.output_base; + break; + case MSR_IA32_RTIT_OUTPUT_MASK: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (!intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_topa_output) && + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_single_range_output))) + return 1; + msr_info->data = vmx->pt_desc.guest.output_mask; + break; + case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: + index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; + if ((pt_mode != PT_MODE_HOST_GUEST) || + (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_num_address_ranges))) + return 1; + if (index % 2) + msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; + else + msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; + break; + case MSR_TSC_AUX: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) + return 1; + /* Otherwise falls through */ + default: + msr = find_msr_entry(vmx, msr_info->index); + if (msr) { + msr_info->data = msr->data; + break; + } + return kvm_get_msr_common(vcpu, msr_info); + } + + return 0; +} + +/* + * Writes an MSR value into the appropriate "register". + * Returns 0 on success, non-0 otherwise. + * Assumes vcpu_load() was already called.
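+ *
+ * A minimal sketch of the calling convention (hypothetical caller, for
+ * illustration only):
+ *
+ *	struct msr_data m = { .index = MSR_IA32_SYSENTER_CS,
+ *			      .data = val, .host_initiated = false };
+ *	if (vmx_set_msr(vcpu, &m))
+ *		... inject #GP into the guest ...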
+ */ +static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct shared_msr_entry *msr; + int ret = 0; + u32 msr_index = msr_info->index; + u64 data = msr_info->data; + u32 index; + + switch (msr_index) { + case MSR_EFER: + ret = kvm_set_msr_common(vcpu, msr_info); + break; +#ifdef CONFIG_X86_64 + case MSR_FS_BASE: + vmx_segment_cache_clear(vmx); + vmcs_writel(GUEST_FS_BASE, data); + break; + case MSR_GS_BASE: + vmx_segment_cache_clear(vmx); + vmcs_writel(GUEST_GS_BASE, data); + break; + case MSR_KERNEL_GS_BASE: + vmx_write_guest_kernel_gs_base(vmx, data); + break; +#endif + case MSR_IA32_SYSENTER_CS: + vmcs_write32(GUEST_SYSENTER_CS, data); + break; + case MSR_IA32_SYSENTER_EIP: + vmcs_writel(GUEST_SYSENTER_EIP, data); + break; + case MSR_IA32_SYSENTER_ESP: + vmcs_writel(GUEST_SYSENTER_ESP, data); + break; + case MSR_IA32_BNDCFGS: + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) + return 1; + if (is_noncanonical_address(data & PAGE_MASK, vcpu) || + (data & MSR_IA32_BNDCFGS_RSVD)) + return 1; + vmcs_write64(GUEST_BNDCFGS, data); + break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) + return 1; + + vmx->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. We update the vmcs01 here for L1 as well + * since it will end up touching the MSR anyway now. + */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_RW); + break; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. 
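+ *
+ * (IA32_PRED_CMD is a write-only command MSR with no readable state,
+ * which is why only the write intercept is cleared below and nothing
+ * needs to be saved or restored across switches.)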
+ */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, + MSR_TYPE_W); + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vmx->arch_capabilities = data; + break; + case MSR_IA32_CR_PAT: + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vmcs_write64(GUEST_IA32_PAT, data); + vcpu->arch.pat = data; + break; + } + ret = kvm_set_msr_common(vcpu, msr_info); + break; + case MSR_IA32_TSC_ADJUST: + ret = kvm_set_msr_common(vcpu, msr_info); + break; + case MSR_IA32_MCG_EXT_CTL: + if ((!msr_info->host_initiated && + !(to_vmx(vcpu)->msr_ia32_feature_control & + FEATURE_CONTROL_LMCE)) || + (data & ~MCG_EXT_CTL_LMCE_EN)) + return 1; + vcpu->arch.mcg_ext_ctl = data; + break; + case MSR_IA32_FEATURE_CONTROL: + if (!vmx_feature_control_msr_valid(vcpu, data) || + (to_vmx(vcpu)->msr_ia32_feature_control & + FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) + return 1; + vmx->msr_ia32_feature_control = data; + if (msr_info->host_initiated && data == 0) + vmx_leave_nested(vcpu); + break; + case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + if (!msr_info->host_initiated) + return 1; /* they are read-only */ + if (!nested_vmx_allowed(vcpu)) + return 1; + return vmx_set_vmx_msr(vcpu, msr_index, data); + case MSR_IA32_XSS: + if (!vmx_xsaves_supported()) + return 1; + /* + * The only supported bit as of Skylake is bit 8, but + * it is not supported on KVM. + */ + if (data != 0) + return 1; + vcpu->arch.ia32_xss = data; + if (vcpu->arch.ia32_xss != host_xss) + add_atomic_switch_msr(vmx, MSR_IA32_XSS, + vcpu->arch.ia32_xss, host_xss, false); + else + clear_atomic_switch_msr(vmx, MSR_IA32_XSS); + break; + case MSR_IA32_RTIT_CTL: + if ((pt_mode != PT_MODE_HOST_GUEST) || + vmx_rtit_ctl_check(vcpu, data) || + vmx->nested.vmxon) + return 1; + vmcs_write64(GUEST_IA32_RTIT_CTL, data); + vmx->pt_desc.guest.ctl = data; + pt_update_intercept_for_msr(vmx); + break; + case MSR_IA32_RTIT_STATUS: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || + (data & MSR_IA32_RTIT_STATUS_MASK)) + return 1; + vmx->pt_desc.guest.status = data; + break; + case MSR_IA32_RTIT_CR3_MATCH: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_cr3_filtering)) + return 1; + vmx->pt_desc.guest.cr3_match = data; + break; + case MSR_IA32_RTIT_OUTPUT_BASE: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || + (!intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_topa_output) && + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_single_range_output)) || + (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK)) + return 1; + vmx->pt_desc.guest.output_base = data; + break; + case MSR_IA32_RTIT_OUTPUT_MASK: + if ((pt_mode != PT_MODE_HOST_GUEST) || + (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || + (!intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_topa_output) && + !intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_single_range_output))) + return 1; + vmx->pt_desc.guest.output_mask = data; + break; + case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: + index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; + if ((pt_mode != PT_MODE_HOST_GUEST) || + (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || + (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_num_address_ranges))) + return 1; + if (index % 2) + vmx->pt_desc.guest.addr_b[index / 2] = data; + else + vmx->pt_desc.guest.addr_a[index / 2] = data; + break; + case MSR_TSC_AUX: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) + return 1; + /* Check the reserved bits: the upper 32 bits must be zero */ + if ((data >> 32) != 0) + return 1; + /* Otherwise falls through */ + default: + msr = find_msr_entry(vmx, msr_index); + if (msr) { + u64 old_msr_data = msr->data; + msr->data = data; + if (msr - vmx->guest_msrs < vmx->save_nmsrs) { + preempt_disable(); + ret = kvm_set_shared_msr(msr->index, msr->data, + msr->mask); + preempt_enable(); + if (ret) + msr->data = old_msr_data; + } + break; + } + ret = kvm_set_msr_common(vcpu, msr_info); + } + + return ret; +} + +static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) +{ + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); + switch (reg) { + case VCPU_REGS_RSP: + vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); + break; + case VCPU_REGS_RIP: + vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); + break; + case VCPU_EXREG_PDPTR: + if (enable_ept) + ept_save_pdptrs(vcpu); + break; + default: + break; + } +} + +static __init int cpu_has_kvm_support(void) +{ + return cpu_has_vmx(); +} + +static __init int vmx_disabled_by_bios(void) +{ + u64 msr; + + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + if (msr & FEATURE_CONTROL_LOCKED) { + /* launched w/ TXT and VMX disabled */ + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) + && tboot_enabled()) + return 1; + /* launched w/o TXT and VMX only enabled w/ TXT */ + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) + && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) + && !tboot_enabled()) { + printk(KERN_WARNING "kvm: disable TXT in the BIOS or " + "activate TXT before enabling KVM\n"); + return 1; + } + /* launched w/o TXT and VMX disabled */ + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) + && !tboot_enabled()) + return 1; + } + + return 0; +} + +static void kvm_cpu_vmxon(u64 addr) +{ + cr4_set_bits(X86_CR4_VMXE); + intel_pt_handle_vmx(1); + + asm volatile ("vmxon %0" : : "m"(addr)); +} + +static int hardware_enable(void) +{ + int cpu = raw_smp_processor_id(); + u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); + u64 old, test_bits; + + if (cr4_read_shadow() & X86_CR4_VMXE) + return -EBUSY; + + /* + * This can happen if we hot-added a CPU but failed to allocate + * the VP assist page for it. + */ + if (static_branch_unlikely(&enable_evmcs) && + !hv_get_vp_assist_page(cpu)) + return -EFAULT; + + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); + INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); + spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + + /* + * Now we can enable the vmclear operation in kdump + * since the loaded_vmcss_on_cpu list on this cpu + * has been initialized. + * + * Though the cpu is not in VMX operation yet, there + * is no problem in enabling the vmclear operation, + * since the loaded_vmcss_on_cpu list is empty.
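+ *
+ * A worked example of the FEATURE_CONTROL check just below, assuming a
+ * non-TXT boot: FEATURE_CONTROL_LOCKED is bit 0 and
+ * FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX is bit 2, so test_bits is
+ * 0x5. If the BIOS left the MSR unlocked (old = 0), KVM writes 0x5,
+ * enabling VMXON and locking the MSR itself; if the BIOS already
+ * programmed 0x5, no write is needed.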
+ */ + crash_enable_local_vmclear(cpu); + + rdmsrl(MSR_IA32_FEATURE_CONTROL, old); + + test_bits = FEATURE_CONTROL_LOCKED; + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + if (tboot_enabled()) + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; + + if ((old & test_bits) != test_bits) { + /* enable and lock */ + wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); + } + kvm_cpu_vmxon(phys_addr); + if (enable_ept) + ept_sync_global(); + + return 0; +} + +static void vmclear_local_loaded_vmcss(void) +{ + int cpu = raw_smp_processor_id(); + struct loaded_vmcs *v, *n; + + list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), + loaded_vmcss_on_cpu_link) + __loaded_vmcs_clear(v); +} + + +/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() + * tricks. + */ +static void kvm_cpu_vmxoff(void) +{ + asm volatile (__ex("vmxoff")); + + intel_pt_handle_vmx(0); + cr4_clear_bits(X86_CR4_VMXE); +} + +static void hardware_disable(void) +{ + vmclear_local_loaded_vmcss(); + kvm_cpu_vmxoff(); +} + +static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, + u32 msr, u32 *result) +{ + u32 vmx_msr_low, vmx_msr_high; + u32 ctl = ctl_min | ctl_opt; + + rdmsr(msr, vmx_msr_low, vmx_msr_high); + + ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ + ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ + + /* Ensure minimum (required) set of control bits are supported. */ + if (ctl_min & ~ctl) + return -EIO; + + *result = ctl; + return 0; +} + +static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, + struct vmx_capability *vmx_cap) +{ + u32 vmx_msr_low, vmx_msr_high; + u32 min, opt, min2, opt2; + u32 _pin_based_exec_control = 0; + u32 _cpu_based_exec_control = 0; + u32 _cpu_based_2nd_exec_control = 0; + u32 _vmexit_control = 0; + u32 _vmentry_control = 0; + + memset(vmcs_conf, 0, sizeof(*vmcs_conf)); + min = CPU_BASED_HLT_EXITING | +#ifdef CONFIG_X86_64 + CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING | +#endif + CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_UNCOND_IO_EXITING | + CPU_BASED_MOV_DR_EXITING | + CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_MWAIT_EXITING | + CPU_BASED_MONITOR_EXITING | + CPU_BASED_INVLPG_EXITING | + CPU_BASED_RDPMC_EXITING; + + opt = CPU_BASED_TPR_SHADOW | + CPU_BASED_USE_MSR_BITMAPS | + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, + &_cpu_based_exec_control) < 0) + return -EIO; +#ifdef CONFIG_X86_64 + if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) + _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & + ~CPU_BASED_CR8_STORE_EXITING; +#endif + if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { + min2 = 0; + opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_WBINVD_EXITING | + SECONDARY_EXEC_ENABLE_VPID | + SECONDARY_EXEC_ENABLE_EPT | + SECONDARY_EXEC_UNRESTRICTED_GUEST | + SECONDARY_EXEC_PAUSE_LOOP_EXITING | + SECONDARY_EXEC_DESC | + SECONDARY_EXEC_RDTSCP | + SECONDARY_EXEC_ENABLE_INVPCID | + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_SHADOW_VMCS | + SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_RDSEED_EXITING | + SECONDARY_EXEC_RDRAND_EXITING | + SECONDARY_EXEC_ENABLE_PML | + SECONDARY_EXEC_TSC_SCALING | + SECONDARY_EXEC_PT_USE_GPA | + SECONDARY_EXEC_PT_CONCEAL_VMX | + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_ENCLS_EXITING; + if (adjust_vmx_controls(min2, opt2, + 
MSR_IA32_VMX_PROCBASED_CTLS2, + &_cpu_based_2nd_exec_control) < 0) + return -EIO; + } +#ifndef CONFIG_X86_64 + if (!(_cpu_based_2nd_exec_control & + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) + _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; +#endif + + if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) + _cpu_based_2nd_exec_control &= ~( + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); + + rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, + &vmx_cap->ept, &vmx_cap->vpid); + + if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { + /* CR3 accesses and invlpg don't need to cause VM Exits when EPT + is enabled */ + _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_INVLPG_EXITING); + } else if (vmx_cap->ept) { + vmx_cap->ept = 0; + pr_warn_once("EPT CAP should not exist if the 1-setting of the " + "enable-EPT VM-execution control is not supported\n"); + } + if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && + vmx_cap->vpid) { + vmx_cap->vpid = 0; + pr_warn_once("VPID CAP should not exist if the 1-setting of the " + "enable-VPID VM-execution control is not supported\n"); + } + + min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; +#ifdef CONFIG_X86_64 + min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; +#endif + opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VM_EXIT_SAVE_IA32_PAT | + VM_EXIT_LOAD_IA32_PAT | + VM_EXIT_LOAD_IA32_EFER | + VM_EXIT_CLEAR_BNDCFGS | + VM_EXIT_PT_CONCEAL_PIP | + VM_EXIT_CLEAR_IA32_RTIT_CTL; + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, + &_vmexit_control) < 0) + return -EIO; + + min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; + opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | + PIN_BASED_VMX_PREEMPTION_TIMER; + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, + &_pin_based_exec_control) < 0) + return -EIO; + + if (cpu_has_broken_vmx_preemption_timer()) + _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + if (!(_cpu_based_2nd_exec_control & + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) + _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; + + min = VM_ENTRY_LOAD_DEBUG_CONTROLS; + opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VM_ENTRY_LOAD_IA32_PAT | + VM_ENTRY_LOAD_IA32_EFER | + VM_ENTRY_LOAD_BNDCFGS | + VM_ENTRY_PT_CONCEAL_PIP | + VM_ENTRY_LOAD_IA32_RTIT_CTL; + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, + &_vmentry_control) < 0) + return -EIO; + + /* + * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they + * can't be used due to an erratum where VM Exit may incorrectly clear + * IA32_PERF_GLOBAL_CTRL[34:32]. Work around the erratum by using the + * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. + */ + if (boot_cpu_data.x86 == 0x6) { + switch (boot_cpu_data.x86_model) { + case 26: /* AAK155 */ + case 30: /* AAP115 */ + case 37: /* AAT100 */ + case 44: /* BC86,AAY89,BD102 */ + case 46: /* BA97 */ + _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; + pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " + "does not work properly. Using workaround\n"); + break; + default: + break; + } + } + + + rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); + + /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ + if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) + return -EIO; + +#ifdef CONFIG_X86_64 + /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0.
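+ *
+ * Bit 48 of the 64-bit MSR is bit 16 of the high dword returned by
+ * rdmsr(), hence the 1u<<16 test below. For orientation, the
+ * MSR_IA32_VMX_BASIC fields decoded by this function (per the SDM):
+ *
+ *	[30:0]	VMCS revision identifier -> vmcs_conf->revision_id
+ *	[44:32]	VMCS region size	 -> vmcs_conf->size
+ *	[48]	physical addresses limited to 32 bits (must be 0 here)
+ *	[53:50]	VMCS memory type	 (must be 6, i.e. write-back)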
*/ + if (vmx_msr_high & (1u<<16)) + return -EIO; +#endif + + /* Require Write-Back (WB) memory type for VMCS accesses. */ + if (((vmx_msr_high >> 18) & 15) != 6) + return -EIO; + + vmcs_conf->size = vmx_msr_high & 0x1fff; + vmcs_conf->order = get_order(vmcs_conf->size); + vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; + + vmcs_conf->revision_id = vmx_msr_low; + + vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; + vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; + vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; + vmcs_conf->vmexit_ctrl = _vmexit_control; + vmcs_conf->vmentry_ctrl = _vmentry_control; + + if (static_branch_unlikely(&enable_evmcs)) + evmcs_sanitize_exec_ctrls(vmcs_conf); + + return 0; +} + +struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu) +{ + int node = cpu_to_node(cpu); + struct page *pages; + struct vmcs *vmcs; + + pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); + if (!pages) + return NULL; + vmcs = page_address(pages); + memset(vmcs, 0, vmcs_config.size); + + /* KVM supports Enlightened VMCS v1 only */ + if (static_branch_unlikely(&enable_evmcs)) + vmcs->hdr.revision_id = KVM_EVMCS_VERSION; + else + vmcs->hdr.revision_id = vmcs_config.revision_id; + + if (shadow) + vmcs->hdr.shadow_vmcs = 1; + return vmcs; +} + +void free_vmcs(struct vmcs *vmcs) +{ + free_pages((unsigned long)vmcs, vmcs_config.order); +} + +/* + * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded + */ +void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + if (!loaded_vmcs->vmcs) + return; + loaded_vmcs_clear(loaded_vmcs); + free_vmcs(loaded_vmcs->vmcs); + loaded_vmcs->vmcs = NULL; + if (loaded_vmcs->msr_bitmap) + free_page((unsigned long)loaded_vmcs->msr_bitmap); + WARN_ON(loaded_vmcs->shadow_vmcs != NULL); +} + +int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + loaded_vmcs->vmcs = alloc_vmcs(false); + if (!loaded_vmcs->vmcs) + return -ENOMEM; + + loaded_vmcs->shadow_vmcs = NULL; + loaded_vmcs_init(loaded_vmcs); + + if (cpu_has_vmx_msr_bitmap()) { + loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!loaded_vmcs->msr_bitmap) + goto out_vmcs; + memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_HYPERV) && + static_branch_unlikely(&enable_evmcs) && + (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { + struct hv_enlightened_vmcs *evmcs = + (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; + + evmcs->hv_enlightenments_control.msr_bitmap = 1; + } + } + + memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); + + return 0; + +out_vmcs: + free_loaded_vmcs(loaded_vmcs); + return -ENOMEM; +} + +static void free_kvm_area(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + free_vmcs(per_cpu(vmxarea, cpu)); + per_cpu(vmxarea, cpu) = NULL; + } +} + +static __init int alloc_kvm_area(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct vmcs *vmcs; + + vmcs = alloc_vmcs_cpu(false, cpu); + if (!vmcs) { + free_kvm_area(); + return -ENOMEM; + } + + /* + * When eVMCS is enabled, alloc_vmcs_cpu() sets + * vmcs->revision_id to KVM_EVMCS_VERSION instead of + * revision_id reported by MSR_IA32_VMX_BASIC. + * + * However, even though not explicitly documented by + * TLFS, VMXArea passed as VMXON argument should + * still be marked with revision_id reported by + * physical CPU. 
+ */ + if (static_branch_unlikely(&enable_evmcs)) + vmcs->hdr.revision_id = vmcs_config.revision_id; + + per_cpu(vmxarea, cpu) = vmcs; + } + return 0; +} + +static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, + struct kvm_segment *save) +{ + if (!emulate_invalid_guest_state) { + /* + * CS and SS RPL should be equal during guest entry according + * to VMX spec, but in reality it is not always so. Since vcpu + * is in the middle of the transition from real mode to + * protected mode it is safe to assume that RPL 0 is a good + * default value. + */ + if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) + save->selector &= ~SEGMENT_RPL_MASK; + save->dpl = save->selector & SEGMENT_RPL_MASK; + save->s = 1; + } + vmx_set_segment(vcpu, save, seg); +} + +static void enter_pmode(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * Update the real-mode segment cache. It may not be up-to-date if a + * segment register was written while the vcpu was in guest mode. + */ + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); + + vmx->rmode.vm86_active = 0; + + vmx_segment_cache_clear(vmx); + + vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); + + flags = vmcs_readl(GUEST_RFLAGS); + flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; + flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; + vmcs_writel(GUEST_RFLAGS, flags); + + vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | + (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); + + update_exception_bitmap(vcpu); + + fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); + fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); + fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); + fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); + fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); + fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); +} + +static void fix_rmode_seg(int seg, struct kvm_segment *save) +{ + const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; + struct kvm_segment var = *save; + + var.dpl = 0x3; + if (seg == VCPU_SREG_CS) + var.type = 0x3; + + if (!emulate_invalid_guest_state) { + var.selector = var.base >> 4; + var.base = var.base & 0xffff0; + var.limit = 0xffff; + var.g = 0; + var.db = 0; + var.present = 1; + var.s = 1; + var.l = 0; + var.unusable = 0; + var.type = 0x3; + var.avl = 0; + if (save->base & 0xf) + printk_once(KERN_WARNING "kvm: segment base is not " + "paragraph aligned when entering " + "protected mode (seg=%d)", seg); + } + + vmcs_write16(sf->selector, var.selector); + vmcs_writel(sf->base, var.base); + vmcs_write32(sf->limit, var.limit); + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); +} + +static void enter_rmode(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); + + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); +
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); + + vmx->rmode.vm86_active = 1; + + /* + * Very old userspace does not call KVM_SET_TSS_ADDR before entering + * vcpu. Warn the user that an update is overdue. + */ + if (!kvm_vmx->tss_addr) + printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be " + "called before entering vcpu\n"); + + vmx_segment_cache_clear(vmx); + + vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); + vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); + vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); + + flags = vmcs_readl(GUEST_RFLAGS); + vmx->rmode.save_rflags = flags; + + flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; + + vmcs_writel(GUEST_RFLAGS, flags); + vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); + update_exception_bitmap(vcpu); + + fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); + fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); + fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); + fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); + fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); + fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); + + kvm_mmu_reset_context(vcpu); +} + +void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); + + if (!msr) + return; + + vcpu->arch.efer = efer; + if (efer & EFER_LMA) { + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); + msr->data = efer; + } else { + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); + + msr->data = efer & ~EFER_LME; + } + setup_msrs(vmx); +} + +#ifdef CONFIG_X86_64 + +static void enter_lmode(struct kvm_vcpu *vcpu) +{ + u32 guest_tr_ar; + + vmx_segment_cache_clear(to_vmx(vcpu)); + + guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); + if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { + pr_debug_ratelimited("%s: tss fixup for long mode.\n", + __func__); + vmcs_write32(GUEST_TR_AR_BYTES, + (guest_tr_ar & ~VMX_AR_TYPE_MASK) + | VMX_AR_TYPE_BUSY_64_TSS); + } + vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); +} + +static void exit_lmode(struct kvm_vcpu *vcpu) +{ + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); + vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); +} + +#endif + +static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) +{ + int vpid = to_vmx(vcpu)->vpid; + + if (!vpid_sync_vcpu_addr(vpid, addr)) + vpid_sync_context(vpid); + + /* + * If VPIDs are not supported or enabled, then the above is a no-op. + * But we don't really need a TLB flush in that case anyway, because + * each VM entry/exit includes an implicit flush when VPID is 0.
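+ *
+ * (vpid_sync_vcpu_addr() above performs a single-address INVVPID when
+ * the CPU supports that extent and reports failure otherwise, in which
+ * case vpid_sync_context() falls back to flushing every mapping tagged
+ * with this VPID.)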
+ */ +} + +static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) +{ + ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; + + vcpu->arch.cr0 &= ~cr0_guest_owned_bits; + vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; +} + +static void vmx_decache_cr3(struct kvm_vcpu *vcpu) +{ + if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); +} + +static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) +{ + ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; + + vcpu->arch.cr4 &= ~cr4_guest_owned_bits; + vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; +} + +static void ept_load_pdptrs(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + + if (!test_bit(VCPU_EXREG_PDPTR, + (unsigned long *)&vcpu->arch.regs_dirty)) + return; + + if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { + vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); + vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); + vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); + vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); + } +} + +void ept_save_pdptrs(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + + if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { + mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); + mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); + mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); + mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); + } + + __set_bit(VCPU_EXREG_PDPTR, + (unsigned long *)&vcpu->arch.regs_avail); + __set_bit(VCPU_EXREG_PDPTR, + (unsigned long *)&vcpu->arch.regs_dirty); +} + +static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, + unsigned long cr0, + struct kvm_vcpu *vcpu) +{ + if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + vmx_decache_cr3(vcpu); + if (!(cr0 & X86_CR0_PG)) { + /* From paging/starting to nonpaging */ + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, + vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | + (CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING)); + vcpu->arch.cr0 = cr0; + vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); + } else if (!is_paging(vcpu)) { + /* From nonpaging to paging */ + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, + vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & + ~(CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING)); + vcpu->arch.cr0 = cr0; + vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); + } + + if (!(cr0 & X86_CR0_WP)) + *hw_cr0 &= ~X86_CR0_WP; +} + +void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long hw_cr0; + + hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); + if (enable_unrestricted_guest) + hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; + else { + hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; + + if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) + enter_pmode(vcpu); + + if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) + enter_rmode(vcpu); + } + +#ifdef CONFIG_X86_64 + if (vcpu->arch.efer & EFER_LME) { + if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) + enter_lmode(vcpu); + if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) + exit_lmode(vcpu); + } +#endif + + if (enable_ept && !enable_unrestricted_guest) + ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); + + vmcs_writel(CR0_READ_SHADOW, cr0); + vmcs_writel(GUEST_CR0, hw_cr0); + vcpu->arch.cr0 = cr0; + + /* depends on vcpu->arch.cr0 to be set to a new value */ + vmx->emulation_required = emulation_required(vcpu); +} + +static int get_ept_level(struct kvm_vcpu *vcpu) +{ + if 
(cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) + return 5; + return 4; +} + +u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) +{ + u64 eptp = VMX_EPTP_MT_WB; + + eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; + + if (enable_ept_ad_bits && + (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) + eptp |= VMX_EPTP_AD_ENABLE_BIT; + eptp |= (root_hpa & PAGE_MASK); + + return eptp; +} + +void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long guest_cr3; + u64 eptp; + + guest_cr3 = cr3; + if (enable_ept) { + eptp = construct_eptp(vcpu, cr3); + vmcs_write64(EPT_POINTER, eptp); + + if (kvm_x86_ops->tlb_remote_flush) { + spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); + to_vmx(vcpu)->ept_pointer = eptp; + to_kvm_vmx(kvm)->ept_pointers_match + = EPT_POINTERS_CHECK; + spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); + } + + if (enable_unrestricted_guest || is_paging(vcpu) || + is_guest_mode(vcpu)) + guest_cr3 = kvm_read_cr3(vcpu); + else + guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; + ept_load_pdptrs(vcpu); + } + + vmcs_writel(GUEST_CR3, guest_cr3); +} + +int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +{ + /* + * Pass through host's Machine Check Enable value to hw_cr4, which + * is in force while we are in guest mode. Do not let guests control + * this bit, even if host CR4.MCE == 0. + */ + unsigned long hw_cr4; + + hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); + if (enable_unrestricted_guest) + hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; + else if (to_vmx(vcpu)->rmode.vm86_active) + hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; + else + hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; + + if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { + if (cr4 & X86_CR4_UMIP) { + vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_DESC); + hw_cr4 &= ~X86_CR4_UMIP; + } else if (!is_guest_mode(vcpu) || + !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_DESC); + } + + if (cr4 & X86_CR4_VMXE) { + /* + * To use VMXON (and later other VMX instructions), a guest + * must first be able to turn on cr4.VMXE (see handle_vmon()). + * So basically the check on whether to allow nested VMX + * is here. We operate under the default treatment of SMM, + * so VMX cannot be enabled under SMM. + */ + if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) + return 1; + } + + if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) + return 1; + + vcpu->arch.cr4 = cr4; + + if (!enable_unrestricted_guest) { + if (enable_ept) { + if (!is_paging(vcpu)) { + hw_cr4 &= ~X86_CR4_PAE; + hw_cr4 |= X86_CR4_PSE; + } else if (!(cr4 & X86_CR4_PAE)) { + hw_cr4 &= ~X86_CR4_PAE; + } + } + + /* + * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in + * hardware. To emulate this behavior, SMEP/SMAP/PKU needs + * to be manually disabled when guest switches to non-paging + * mode. + * + * If !enable_unrestricted_guest, the CPU is always running + * with CR0.PG=1 and CR4 needs to be modified. + * If enable_unrestricted_guest, the CPU automatically + * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 
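+ *
+ * A worked example: with EPT on and unrestricted guest off, a guest
+ * running with CR0.PG=0 that writes CR4 = PAE|SMEP keeps both bits in
+ * CR4_READ_SHADOW (what it reads back), while the value loaded into
+ * GUEST_CR4 below has PAE and SMEP cleared and PSE set, matching what
+ * a real CPU would effectively provide with paging off.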
+ */ + if (!is_paging(vcpu)) + hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); + } + + vmcs_writel(CR4_READ_SHADOW, cr4); + vmcs_writel(GUEST_CR4, hw_cr4); + return 0; +} + +void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 ar; + + if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { + *var = vmx->rmode.segs[seg]; + if (seg == VCPU_SREG_TR + || var->selector == vmx_read_guest_seg_selector(vmx, seg)) + return; + var->base = vmx_read_guest_seg_base(vmx, seg); + var->selector = vmx_read_guest_seg_selector(vmx, seg); + return; + } + var->base = vmx_read_guest_seg_base(vmx, seg); + var->limit = vmx_read_guest_seg_limit(vmx, seg); + var->selector = vmx_read_guest_seg_selector(vmx, seg); + ar = vmx_read_guest_seg_ar(vmx, seg); + var->unusable = (ar >> 16) & 1; + var->type = ar & 15; + var->s = (ar >> 4) & 1; + var->dpl = (ar >> 5) & 3; + /* + * Some userspaces do not preserve the unusable property. Since a usable + * segment has to be present according to the VMX spec, we can use the + * present property to work around the userspace bug by making an + * unusable segment always nonpresent. vmx_segment_access_rights() + * already marks a nonpresent segment as unusable. + */ + var->present = !var->unusable; + var->avl = (ar >> 12) & 1; + var->l = (ar >> 13) & 1; + var->db = (ar >> 14) & 1; + var->g = (ar >> 15) & 1; +} + +static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) +{ + struct kvm_segment s; + + if (to_vmx(vcpu)->rmode.vm86_active) { + vmx_get_segment(vcpu, &s, seg); + return s.base; + } + return vmx_read_guest_seg_base(to_vmx(vcpu), seg); +} + +int vmx_get_cpl(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (unlikely(vmx->rmode.vm86_active)) + return 0; + else { + int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); + return VMX_AR_DPL(ar); + } +} + +static u32 vmx_segment_access_rights(struct kvm_segment *var) +{ + u32 ar; + + if (var->unusable || !var->present) + ar = 1 << 16; + else { + ar = var->type & 15; + ar |= (var->s & 1) << 4; + ar |= (var->dpl & 3) << 5; + ar |= (var->present & 1) << 7; + ar |= (var->avl & 1) << 12; + ar |= (var->l & 1) << 13; + ar |= (var->db & 1) << 14; + ar |= (var->g & 1) << 15; + } + + return ar; +} + +void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; + + vmx_segment_cache_clear(vmx); + + if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { + vmx->rmode.segs[seg] = *var; + if (seg == VCPU_SREG_TR) + vmcs_write16(sf->selector, var->selector); + else if (var->s) + fix_rmode_seg(seg, &vmx->rmode.segs[seg]); + goto out; + } + + vmcs_writel(sf->base, var->base); + vmcs_write32(sf->limit, var->limit); + vmcs_write16(sf->selector, var->selector); + + /* + * Fix the "Accessed" bit in the AR field of segment registers for older + * qemu binaries. + * The IA32 architecture specifies that at the time of processor reset + * the "Accessed" bit in the AR field of segment registers is 1, but qemu + * sets it to 0 in its userland code. This causes an invalid-guest-state + * vmexit when "unrestricted guest" mode is turned on. + * A fix for this setup issue in cpu_reset is being pushed to the qemu + * tree. Newer qemu binaries with that qemu fix would not need this + * kvm hack.
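+ *
+ * A worked example of the encoding produced by
+ * vmx_segment_access_rights() above: a flat 64-bit ring-0 code segment
+ * (type=0xb, s=1, dpl=0, present=1, avl=0, l=1, db=0, g=1) packs to
+ *
+ *	ar = 0xb | (1 << 4) | (1 << 7) | (1 << 13) | (1 << 15) = 0xa09b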
+ */ + if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) + var->type |= 0x1; /* Accessed */ + + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); + +out: + vmx->emulation_required = emulation_required(vcpu); +} + +static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) +{ + u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); + + *db = (ar >> 14) & 1; + *l = (ar >> 13) & 1; +} + +static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) +{ + dt->size = vmcs_read32(GUEST_IDTR_LIMIT); + dt->address = vmcs_readl(GUEST_IDTR_BASE); +} + +static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) +{ + vmcs_write32(GUEST_IDTR_LIMIT, dt->size); + vmcs_writel(GUEST_IDTR_BASE, dt->address); +} + +static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) +{ + dt->size = vmcs_read32(GUEST_GDTR_LIMIT); + dt->address = vmcs_readl(GUEST_GDTR_BASE); +} + +static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) +{ + vmcs_write32(GUEST_GDTR_LIMIT, dt->size); + vmcs_writel(GUEST_GDTR_BASE, dt->address); +} + +static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) +{ + struct kvm_segment var; + u32 ar; + + vmx_get_segment(vcpu, &var, seg); + var.dpl = 0x3; + if (seg == VCPU_SREG_CS) + var.type = 0x3; + ar = vmx_segment_access_rights(&var); + + if (var.base != (var.selector << 4)) + return false; + if (var.limit != 0xffff) + return false; + if (ar != 0xf3) + return false; + + return true; +} + +static bool code_segment_valid(struct kvm_vcpu *vcpu) +{ + struct kvm_segment cs; + unsigned int cs_rpl; + + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); + cs_rpl = cs.selector & SEGMENT_RPL_MASK; + + if (cs.unusable) + return false; + if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) + return false; + if (!cs.s) + return false; + if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { + if (cs.dpl > cs_rpl) + return false; + } else { + if (cs.dpl != cs_rpl) + return false; + } + if (!cs.present) + return false; + + /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ + return true; +} + +static bool stack_segment_valid(struct kvm_vcpu *vcpu) +{ + struct kvm_segment ss; + unsigned int ss_rpl; + + vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); + ss_rpl = ss.selector & SEGMENT_RPL_MASK; + + if (ss.unusable) + return true; + if (ss.type != 3 && ss.type != 7) + return false; + if (!ss.s) + return false; + if (ss.dpl != ss_rpl) /* DPL != RPL */ + return false; + if (!ss.present) + return false; + + return true; +} + +static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) +{ + struct kvm_segment var; + unsigned int rpl; + + vmx_get_segment(vcpu, &var, seg); + rpl = var.selector & SEGMENT_RPL_MASK; + + if (var.unusable) + return true; + if (!var.s) + return false; + if (!var.present) + return false; + if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { + if (var.dpl < rpl) /* DPL < RPL */ + return false; + } + + /* TODO: Add other members to kvm_segment_field to allow checking for other access + * rights flags + */ + return true; +} + +static bool tr_valid(struct kvm_vcpu *vcpu) +{ + struct kvm_segment tr; + + vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); + + if (tr.unusable) + return false; + if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ + return false; + if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ + return false; + if (!tr.present) + return false; + + return true; +} + +static bool ldtr_valid(struct kvm_vcpu *vcpu) +{ + struct 
kvm_segment ldtr; + + vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); + + if (ldtr.unusable) + return true; + if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ + return false; + if (ldtr.type != 2) + return false; + if (!ldtr.present) + return false; + + return true; +} + +static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) +{ + struct kvm_segment cs, ss; + + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); + vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); + + return ((cs.selector & SEGMENT_RPL_MASK) == + (ss.selector & SEGMENT_RPL_MASK)); +} + +/* + * Check if guest state is valid. Returns true if valid, false if + * not. + * We assume that registers are always usable + */ +static bool guest_state_valid(struct kvm_vcpu *vcpu) +{ + if (enable_unrestricted_guest) + return true; + + /* real mode guest state checks */ + if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { + if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) + return false; + if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) + return false; + if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) + return false; + if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) + return false; + if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) + return false; + if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) + return false; + } else { + /* protected mode guest state checks */ + if (!cs_ss_rpl_check(vcpu)) + return false; + if (!code_segment_valid(vcpu)) + return false; + if (!stack_segment_valid(vcpu)) + return false; + if (!data_segment_valid(vcpu, VCPU_SREG_DS)) + return false; + if (!data_segment_valid(vcpu, VCPU_SREG_ES)) + return false; + if (!data_segment_valid(vcpu, VCPU_SREG_FS)) + return false; + if (!data_segment_valid(vcpu, VCPU_SREG_GS)) + return false; + if (!tr_valid(vcpu)) + return false; + if (!ldtr_valid(vcpu)) + return false; + } + /* TODO: + * - Add checks on RIP + * - Add checks on RFLAGS + */ + + return true; +} + +static int init_rmode_tss(struct kvm *kvm) +{ + gfn_t fn; + u16 data = 0; + int idx, r; + + idx = srcu_read_lock(&kvm->srcu); + fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; + r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); + if (r < 0) + goto out; + data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; + r = kvm_write_guest_page(kvm, fn++, &data, + TSS_IOPB_BASE_OFFSET, sizeof(u16)); + if (r < 0) + goto out; + r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); + if (r < 0) + goto out; + r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); + if (r < 0) + goto out; + data = ~0; + r = kvm_write_guest_page(kvm, fn, &data, + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, + sizeof(u8)); +out: + srcu_read_unlock(&kvm->srcu, idx); + return r; +} + +static int init_rmode_identity_map(struct kvm *kvm) +{ + struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); + int i, idx, r = 0; + kvm_pfn_t identity_map_pfn; + u32 tmp; + + /* Protect kvm_vmx->ept_identity_pagetable_done. 
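+ *
+ * The loop further below builds a one-page identity map out of 4 MiB
+ * PSE entries: entry i covers guest physical [i << 22, (i + 1) << 22),
+ * e.g. entry 1 is 0x00400000 | PRESENT|RW|USER|ACCESSED|DIRTY|PSE.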
*/ + mutex_lock(&kvm->slots_lock); + + if (likely(kvm_vmx->ept_identity_pagetable_done)) + goto out2; + + if (!kvm_vmx->ept_identity_map_addr) + kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; + identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; + + r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, + kvm_vmx->ept_identity_map_addr, PAGE_SIZE); + if (r < 0) + goto out2; + + idx = srcu_read_lock(&kvm->srcu); + r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); + if (r < 0) + goto out; + /* Set up identity-mapping pagetable for EPT in real mode */ + for (i = 0; i < PT32_ENT_PER_PAGE; i++) { + tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | + _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); + r = kvm_write_guest_page(kvm, identity_map_pfn, + &tmp, i * sizeof(tmp), sizeof(tmp)); + if (r < 0) + goto out; + } + kvm_vmx->ept_identity_pagetable_done = true; + +out: + srcu_read_unlock(&kvm->srcu, idx); + +out2: + mutex_unlock(&kvm->slots_lock); + return r; +} + +static void seg_setup(int seg) +{ + const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; + unsigned int ar; + + vmcs_write16(sf->selector, 0); + vmcs_writel(sf->base, 0); + vmcs_write32(sf->limit, 0xffff); + ar = 0x93; + if (seg == VCPU_SREG_CS) + ar |= 0x08; /* code segment */ + + vmcs_write32(sf->ar_bytes, ar); +} + +static int alloc_apic_access_page(struct kvm *kvm) +{ + struct page *page; + int r = 0; + + mutex_lock(&kvm->slots_lock); + if (kvm->arch.apic_access_page_done) + goto out; + r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, + APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); + if (r) + goto out; + + page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); + if (is_error_page(page)) { + r = -EFAULT; + goto out; + } + + /* + * Do not pin the page in memory, so that memory hot-unplug + * is able to migrate it. + */ + put_page(page); + kvm->arch.apic_access_page_done = true; +out: + mutex_unlock(&kvm->slots_lock); + return r; +} + +int allocate_vpid(void) +{ + int vpid; + + if (!enable_vpid) + return 0; + spin_lock(&vmx_vpid_lock); + vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); + if (vpid < VMX_NR_VPIDS) + __set_bit(vpid, vmx_vpid_bitmap); + else + vpid = 0; + spin_unlock(&vmx_vpid_lock); + return vpid; +} + +void free_vpid(int vpid) +{ + if (!enable_vpid || vpid == 0) + return; + spin_lock(&vmx_vpid_lock); + __clear_bit(vpid, vmx_vpid_bitmap); + spin_unlock(&vmx_vpid_lock); +} + +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + if (static_branch_unlikely(&enable_evmcs)) + evmcs_touch_msr_bitmap(); + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
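+ *
+ * As a hedged illustration (msr_bitmap_bit() is a hypothetical helper,
+ * not part of this file), the layout boils down to:
+ *
+ *	static unsigned long msr_bitmap_bit(u32 msr, bool write)
+ *	{
+ *		unsigned long bit = write ? 0x800 * 8 : 0;
+ *
+ *		if (msr > 0x1fff)
+ *			bit += 0x400 * 8 + (msr & 0x1fff);
+ *		else
+ *			bit += msr;
+ *		return bit;
+ *	}
+ *
+ * e.g. intercepting writes to MSR 0xc0000082 toggles bit 0x82 of the
+ * write-high quadrant that starts at byte offset 0xc00.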
+ */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __clear_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __clear_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __clear_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __clear_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + if (static_branch_unlikely(&enable_evmcs)) + evmcs_touch_msr_bitmap(); + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. + */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __set_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __set_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __set_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __set_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type, bool value) +{ + if (value) + vmx_enable_intercept_for_msr(msr_bitmap, msr, type); + else + vmx_disable_intercept_for_msr(msr_bitmap, msr, type); +} + +static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) +{ + u8 mode = 0; + + if (cpu_has_secondary_exec_ctrls() && + (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { + mode |= MSR_BITMAP_MODE_X2APIC; + if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) + mode |= MSR_BITMAP_MODE_X2APIC_APICV; + } + + return mode; +} + +static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, + u8 mode) +{ + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; + } + + if (mode & MSR_BITMAP_MODE_X2APIC) { + /* + * TPR reads and writes can be virtualized even if virtual interrupt + * delivery is not in use. 
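+ * (For scale: with 64-bit longs, the loop above covers the 0x100
+ * x2APIC MSRs 0x800..0x8ff in 0x100 / 64 = 4 bitmap words per
+ * quadrant, defaulting reads to "intercept unless APICv is active"
+ * and writes to "always intercept", before the exceptions below are
+ * carved out.)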
+ */ + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); + if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { + vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); + } + } +} + +void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; + u8 mode = vmx_msr_bitmap_mode(vcpu); + u8 changed = mode ^ vmx->msr_bitmap_mode; + + if (!changed) + return; + + if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) + vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); + + vmx->msr_bitmap_mode = mode; +} + +void pt_update_intercept_for_msr(struct vcpu_vmx *vmx) +{ + unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; + bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); + u32 i; + + vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS, + MSR_TYPE_RW, flag); + vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE, + MSR_TYPE_RW, flag); + vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK, + MSR_TYPE_RW, flag); + vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH, + MSR_TYPE_RW, flag); + for (i = 0; i < vmx->pt_desc.addr_range; i++) { + vmx_set_intercept_for_msr(msr_bitmap, + MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); + vmx_set_intercept_for_msr(msr_bitmap, + MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); + } +} + +static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) +{ + return enable_apicv; +} + +static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + void *vapic_page; + u32 vppr; + int rvi; + + if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || + !nested_cpu_has_vid(get_vmcs12(vcpu)) || + WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) + return false; + + rvi = vmx_get_rvi(); + + vapic_page = kmap(vmx->nested.virtual_apic_page); + vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); + kunmap(vmx->nested.virtual_apic_page); + + return ((rvi & 0xf0) > (vppr & 0xf0)); +} + +static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, + bool nested) +{ +#ifdef CONFIG_SMP + int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; + + if (vcpu->mode == IN_GUEST_MODE) { + /* + * The vector of the interrupt to be delivered to the vcpu was + * already set in PIR before this function was called. + * + * The following cases can be reached in this block, and + * we always send a notification event, as explained below. + * + * Case 1: the vcpu stays in non-root mode. Sending a + * notification event posts the interrupt to the vcpu. + * + * Case 2: the vcpu exits to root mode and is still + * runnable. PIR will be synced to vIRR before the + * next vcpu entry. Sending a notification event in + * this case has no effect, as the vcpu is no longer in + * non-root mode. + * + * Case 3: the vcpu exits to root mode and is blocked. + * vcpu_block() has already synced PIR to vIRR and + * never blocks the vcpu if vIRR is not clear. Therefore, + * a vcpu that blocked here cannot be waiting on any + * interrupt requested in PIR, and sending the (ineffective) + * notification event is safe.
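+ *
+ * A minimal user-space model of the producer side (illustrative
+ * only; C11 atomics stand in for the kernel's locked bitops):
+ *
+ *	_Atomic unsigned long pir[256 / (8 * sizeof(long))];
+ *	_Atomic unsigned char on;
+ *
+ *	static bool post(unsigned int vec)
+ *	{
+ *		unsigned int bits = 8 * sizeof(long);
+ *
+ *		atomic_fetch_or(&pir[vec / bits], 1UL << (vec % bits));
+ *		return !atomic_exchange(&on, 1);
+ *	}
+ *
+ * post() returns true only for the poster that must actually send
+ * the notification IPI, mirroring pi_test_and_set_pir() and
+ * pi_test_and_set_on() in vmx_deliver_posted_interrupt() below.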
+ */ + + apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); + return true; + } +#endif + return false; +} + +static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, + int vector) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (is_guest_mode(vcpu) && + vector == vmx->nested.posted_intr_nv) { + /* + * If the posted interrupt is not recognized by hardware, + * it will be delivered on the next vmentry. + */ + vmx->nested.pi_pending = true; + kvm_make_request(KVM_REQ_EVENT, vcpu); + /* PIR and ON have already been set by L1. */ + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) + kvm_vcpu_kick(vcpu); + return 0; + } + return -1; +} +/* + * Send an interrupt to a vcpu via posted interrupts. + * 1. If the target vcpu is running (non-root mode), send a posted interrupt + * notification to the vcpu and hardware will sync PIR to vIRR atomically. + * 2. If the target vcpu isn't running (root mode), kick it to pick up the + * interrupt from PIR on the next vmentry. + */ +static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int r; + + r = vmx_deliver_nested_posted_interrupt(vcpu, vector); + if (!r) + return; + + if (pi_test_and_set_pir(vector, &vmx->pi_desc)) + return; + + /* If a previous notification has sent the IPI, nothing to do. */ + if (pi_test_and_set_on(&vmx->pi_desc)) + return; + + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) + kvm_vcpu_kick(vcpu); +} + +/* + * Set up the vmcs's constant host-state fields, i.e., host-state fields that + * will not change in the lifetime of the guest. + * Note that host-state that does change is set elsewhere. E.g., host-state + * that is set differently for each CPU is set in vmx_vcpu_load(), not here. + */ +void vmx_set_constant_host_state(struct vcpu_vmx *vmx) +{ + u32 low32, high32; + unsigned long tmpl; + struct desc_ptr dt; + unsigned long cr0, cr3, cr4; + + cr0 = read_cr0(); + WARN_ON(cr0 & X86_CR0_TS); + vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ + + /* + * Save the most likely value for this task's CR3 in the VMCS. + * We can't use __get_current_cr3_fast() because we're not atomic. + */ + cr3 = __read_cr3(); + vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ + vmx->loaded_vmcs->host_state.cr3 = cr3; + + /* Save the most likely value for this task's CR4 in the VMCS. */ + cr4 = cr4_read_shadow(); + vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ + vmx->loaded_vmcs->host_state.cr4 = cr4; + + vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ +#ifdef CONFIG_X86_64 + /* + * Load null selectors, so we can avoid reloading them in + * vmx_prepare_switch_to_host(), in case userspace uses + * the null selectors too (the expected case).
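+ * (On 32-bit hosts there is no such shortcut, so the #else branch
+ * below loads __KERNEL_DS into DS and ES instead.)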
+ */ + vmcs_write16(HOST_DS_SELECTOR, 0); + vmcs_write16(HOST_ES_SELECTOR, 0); +#else + vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ +#endif + vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ + + store_idt(&dt); + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ + vmx->host_idt_base = dt.address; + + vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ + + rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); + vmcs_write32(HOST_IA32_SYSENTER_CS, low32); + rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); + vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ + + if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { + rdmsr(MSR_IA32_CR_PAT, low32, high32); + vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); + } + + if (cpu_has_load_ia32_efer()) + vmcs_write64(HOST_IA32_EFER, host_efer); +} + +void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) +{ + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; + if (enable_ept) + vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; + if (is_guest_mode(&vmx->vcpu)) + vmx->vcpu.arch.cr4_guest_owned_bits &= + ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; + vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); +} + +static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) +{ + u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; + + if (!kvm_vcpu_apicv_active(&vmx->vcpu)) + pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; + + if (!enable_vnmi) + pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; + + /* Enable the preemption timer dynamically */ + pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + return pin_based_exec_ctrl; +} + +static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); + if (cpu_has_secondary_exec_ctrls()) { + if (kvm_vcpu_apicv_active(vcpu)) + vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); + else + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); + } + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); +} + +u32 vmx_exec_control(struct vcpu_vmx *vmx) +{ + u32 exec_control = vmcs_config.cpu_based_exec_ctrl; + + if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) + exec_control &= ~CPU_BASED_MOV_DR_EXITING; + + if (!cpu_need_tpr_shadow(&vmx->vcpu)) { + exec_control &= ~CPU_BASED_TPR_SHADOW; +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_STORE_EXITING | + CPU_BASED_CR8_LOAD_EXITING; +#endif + } + if (!enable_ept) + exec_control |= CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_INVLPG_EXITING; + if (kvm_mwait_in_guest(vmx->vcpu.kvm)) + exec_control &= ~(CPU_BASED_MWAIT_EXITING | + CPU_BASED_MONITOR_EXITING); + if (kvm_hlt_in_guest(vmx->vcpu.kvm)) + exec_control &= ~CPU_BASED_HLT_EXITING; + return exec_control; +} + + +static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) +{ + struct kvm_vcpu *vcpu = &vmx->vcpu; + + u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; + + if (pt_mode == PT_MODE_SYSTEM) + exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); + if (!cpu_need_virtualize_apic_accesses(vcpu)) + exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + if (vmx->vpid == 0) + exec_control 
&= ~SECONDARY_EXEC_ENABLE_VPID; + if (!enable_ept) { + exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; + enable_unrestricted_guest = 0; + } + if (!enable_unrestricted_guest) + exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; + if (kvm_pause_in_guest(vmx->vcpu.kvm)) + exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; + if (!kvm_vcpu_apicv_active(vcpu)) + exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); + exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + + /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, + * in vmx_set_cr4. */ + exec_control &= ~SECONDARY_EXEC_DESC; + + /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD + (handle_vmptrld). + We can NOT enable shadow_vmcs here because we don't have yet + a current VMCS12 + */ + exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; + + if (!enable_pml) + exec_control &= ~SECONDARY_EXEC_ENABLE_PML; + + if (vmx_xsaves_supported()) { + /* Exposing XSAVES only when XSAVE is exposed */ + bool xsaves_enabled = + guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); + + if (!xsaves_enabled) + exec_control &= ~SECONDARY_EXEC_XSAVES; + + if (nested) { + if (xsaves_enabled) + vmx->nested.msrs.secondary_ctls_high |= + SECONDARY_EXEC_XSAVES; + else + vmx->nested.msrs.secondary_ctls_high &= + ~SECONDARY_EXEC_XSAVES; + } + } + + if (vmx_rdtscp_supported()) { + bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); + if (!rdtscp_enabled) + exec_control &= ~SECONDARY_EXEC_RDTSCP; + + if (nested) { + if (rdtscp_enabled) + vmx->nested.msrs.secondary_ctls_high |= + SECONDARY_EXEC_RDTSCP; + else + vmx->nested.msrs.secondary_ctls_high &= + ~SECONDARY_EXEC_RDTSCP; + } + } + + if (vmx_invpcid_supported()) { + /* Exposing INVPCID only when PCID is exposed */ + bool invpcid_enabled = + guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && + guest_cpuid_has(vcpu, X86_FEATURE_PCID); + + if (!invpcid_enabled) { + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; + guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); + } + + if (nested) { + if (invpcid_enabled) + vmx->nested.msrs.secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_INVPCID; + else + vmx->nested.msrs.secondary_ctls_high &= + ~SECONDARY_EXEC_ENABLE_INVPCID; + } + } + + if (vmx_rdrand_supported()) { + bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); + if (rdrand_enabled) + exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING; + + if (nested) { + if (rdrand_enabled) + vmx->nested.msrs.secondary_ctls_high |= + SECONDARY_EXEC_RDRAND_EXITING; + else + vmx->nested.msrs.secondary_ctls_high &= + ~SECONDARY_EXEC_RDRAND_EXITING; + } + } + + if (vmx_rdseed_supported()) { + bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); + if (rdseed_enabled) + exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING; + + if (nested) { + if (rdseed_enabled) + vmx->nested.msrs.secondary_ctls_high |= + SECONDARY_EXEC_RDSEED_EXITING; + else + vmx->nested.msrs.secondary_ctls_high &= + ~SECONDARY_EXEC_RDSEED_EXITING; + } + } + + vmx->secondary_exec_control = exec_control; +} + +static void ept_set_mmio_spte_mask(void) +{ + /* + * EPT Misconfigurations can be generated if the value of bits 2:0 + * of an EPT paging-structure entry is 110b (write/execute). + */ + kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, + VMX_EPT_MISCONFIG_WX_VALUE); +} + +#define VMX_XSS_EXIT_BITMAP 0 + +/* + * Sets up the vmcs for emulated real mode. 
+ */ +static void vmx_vcpu_setup(struct vcpu_vmx *vmx) +{ + int i; + + if (nested) + nested_vmx_vcpu_setup(); + + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); + + vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ + + /* Control */ + vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); + vmx->hv_deadline_tsc = -1; + + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); + + if (cpu_has_secondary_exec_ctrls()) { + vmx_compute_secondary_exec_control(vmx); + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + vmx->secondary_exec_control); + } + + if (kvm_vcpu_apicv_active(&vmx->vcpu)) { + vmcs_write64(EOI_EXIT_BITMAP0, 0); + vmcs_write64(EOI_EXIT_BITMAP1, 0); + vmcs_write64(EOI_EXIT_BITMAP2, 0); + vmcs_write64(EOI_EXIT_BITMAP3, 0); + + vmcs_write16(GUEST_INTR_STATUS, 0); + + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); + vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); + } + + if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { + vmcs_write32(PLE_GAP, ple_gap); + vmx->ple_window = ple_window; + vmx->ple_window_dirty = true; + } + + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); + vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ + + vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ + vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ + vmx_set_constant_host_state(vmx); + vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ + vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ + + if (cpu_has_vmx_vmfunc()) + vmcs_write64(VM_FUNCTION_CONTROL, 0); + + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); + + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) + vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); + + for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { + u32 index = vmx_msr_index[i]; + u32 data_low, data_high; + int j = vmx->nmsrs; + + if (rdmsr_safe(index, &data_low, &data_high) < 0) + continue; + if (wrmsr_safe(index, data_low, data_high) < 0) + continue; + vmx->guest_msrs[j].index = i; + vmx->guest_msrs[j].data = 0; + vmx->guest_msrs[j].mask = -1ull; + ++vmx->nmsrs; + } + + vmx->arch_capabilities = kvm_get_arch_capabilities(); + + vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); + + /* 22.2.1, 20.8.1 */ + vm_entry_controls_init(vmx, vmx_vmentry_ctrl()); + + vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; + vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); + + set_cr4_guest_host_mask(vmx); + + if (vmx_xsaves_supported()) + vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); + + if (enable_pml) { + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + } + + if (cpu_has_vmx_encls_vmexit()) + vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); + + if (pt_mode == PT_MODE_HOST_GUEST) { + memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); + /* Bit[6~0] are forced to 1, writes are ignored. 
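+ * (Hence the 0x7F below: it pre-sets exactly those seven always-one
+ * bits, 6:0, of the RTIT output mask.)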
*/ + vmx->pt_desc.guest.output_mask = 0x7F; + vmcs_write64(GUEST_IA32_RTIT_CTL, 0); + } +} + +static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct msr_data apic_base_msr; + u64 cr0; + + vmx->rmode.vm86_active = 0; + vmx->spec_ctrl = 0; + + vcpu->arch.microcode_version = 0x100000000ULL; + vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); + kvm_set_cr8(vcpu, 0); + + if (!init_event) { + apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | + MSR_IA32_APICBASE_ENABLE; + if (kvm_vcpu_is_reset_bsp(vcpu)) + apic_base_msr.data |= MSR_IA32_APICBASE_BSP; + apic_base_msr.host_initiated = true; + kvm_set_apic_base(vcpu, &apic_base_msr); + } + + vmx_segment_cache_clear(vmx); + + seg_setup(VCPU_SREG_CS); + vmcs_write16(GUEST_CS_SELECTOR, 0xf000); + vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); + + seg_setup(VCPU_SREG_DS); + seg_setup(VCPU_SREG_ES); + seg_setup(VCPU_SREG_FS); + seg_setup(VCPU_SREG_GS); + seg_setup(VCPU_SREG_SS); + + vmcs_write16(GUEST_TR_SELECTOR, 0); + vmcs_writel(GUEST_TR_BASE, 0); + vmcs_write32(GUEST_TR_LIMIT, 0xffff); + vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); + + vmcs_write16(GUEST_LDTR_SELECTOR, 0); + vmcs_writel(GUEST_LDTR_BASE, 0); + vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); + vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); + + if (!init_event) { + vmcs_write32(GUEST_SYSENTER_CS, 0); + vmcs_writel(GUEST_SYSENTER_ESP, 0); + vmcs_writel(GUEST_SYSENTER_EIP, 0); + vmcs_write64(GUEST_IA32_DEBUGCTL, 0); + } + + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); + kvm_rip_write(vcpu, 0xfff0); + + vmcs_writel(GUEST_GDTR_BASE, 0); + vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); + + vmcs_writel(GUEST_IDTR_BASE, 0); + vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); + + vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); + if (kvm_mpx_supported()) + vmcs_write64(GUEST_BNDCFGS, 0); + + setup_msrs(vmx); + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ + + if (cpu_has_vmx_tpr_shadow() && !init_event) { + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); + if (cpu_need_tpr_shadow(vcpu)) + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, + __pa(vcpu->arch.apic->regs)); + vmcs_write32(TPR_THRESHOLD, 0); + } + + kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); + + if (vmx->vpid != 0) + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); + + cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; + vmx->vcpu.arch.cr0 = cr0; + vmx_set_cr0(vcpu, cr0); /* enter rmode */ + vmx_set_cr4(vcpu, 0); + vmx_set_efer(vcpu, 0); + + update_exception_bitmap(vcpu); + + vpid_sync_context(vmx->vpid); + if (init_event) + vmx_clear_hlt(vcpu); +} + +static void enable_irq_window(struct kvm_vcpu *vcpu) +{ + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_VIRTUAL_INTR_PENDING); +} + +static void enable_nmi_window(struct kvm_vcpu *vcpu) +{ + if (!enable_vnmi || + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { + enable_irq_window(vcpu); + return; + } + + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_VIRTUAL_NMI_PENDING); +} + +static void vmx_inject_irq(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + uint32_t intr; + int irq = vcpu->arch.interrupt.nr; + + trace_kvm_inj_virq(irq); + + ++vcpu->stat.irq_injections; + if (vmx->rmode.vm86_active) { + int inc_eip = 0; + if (vcpu->arch.interrupt.soft) + inc_eip = vcpu->arch.event_exit_inst_len; + if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return; + 
} + intr = irq | INTR_INFO_VALID_MASK; + if (vcpu->arch.interrupt.soft) { + intr |= INTR_TYPE_SOFT_INTR; + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmx->vcpu.arch.event_exit_inst_len); + } else + intr |= INTR_TYPE_EXT_INTR; + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); + + vmx_clear_hlt(vcpu); +} + +static void vmx_inject_nmi(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!enable_vnmi) { + /* + * Tracking the NMI-blocked state in software is built upon + * finding the next open IRQ window. This, in turn, depends on + * well-behaving guests: They have to keep IRQs disabled at + * least as long as the NMI handler runs. Otherwise we may + * cause NMI nesting, maybe breaking the guest. But as this is + * highly unlikely, we can live with the residual risk. + */ + vmx->loaded_vmcs->soft_vnmi_blocked = 1; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + + ++vcpu->stat.nmi_injections; + vmx->loaded_vmcs->nmi_known_unmasked = false; + + if (vmx->rmode.vm86_active) { + if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return; + } + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); + + vmx_clear_hlt(vcpu); +} + +bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + bool masked; + + if (!enable_vnmi) + return vmx->loaded_vmcs->soft_vnmi_blocked; + if (vmx->loaded_vmcs->nmi_known_unmasked) + return false; + masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; + vmx->loaded_vmcs->nmi_known_unmasked = !masked; + return masked; +} + +void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!enable_vnmi) { + if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { + vmx->loaded_vmcs->soft_vnmi_blocked = masked; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + } else { + vmx->loaded_vmcs->nmi_known_unmasked = !masked; + if (masked) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + } +} + +static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) +{ + if (to_vmx(vcpu)->nested.nested_run_pending) + return 0; + + if (!enable_vnmi && + to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) + return 0; + + return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI + | GUEST_INTR_STATE_NMI)); +} + +static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) +{ + return (!to_vmx(vcpu)->nested.nested_run_pending && + vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); +} + +static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) +{ + int ret; + + if (enable_unrestricted_guest) + return 0; + + ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, + PAGE_SIZE * 3); + if (ret) + return ret; + to_kvm_vmx(kvm)->tss_addr = addr; + return init_rmode_tss(kvm); +} + +static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) +{ + to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; + return 0; +} + +static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) +{ + switch (vec) { + case BP_VECTOR: + /* + * Update instruction length as we may reinject the exception + * from user space while in guest debugging mode. 
+ */ + to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + return false; + /* fall through */ + case DB_VECTOR: + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return false; + /* fall through */ + case DE_VECTOR: + case OF_VECTOR: + case BR_VECTOR: + case UD_VECTOR: + case DF_VECTOR: + case SS_VECTOR: + case GP_VECTOR: + case MF_VECTOR: + return true; + } + return false; +} + +static int handle_rmode_exception(struct kvm_vcpu *vcpu, + int vec, u32 err_code) +{ + /* + * An instruction with the address-size override prefix (opcode 0x67) + * causes a #SS fault with error code 0 in VM86 mode. + */ + if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { + if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + return kvm_vcpu_halt(vcpu); + } + return 1; + } + return 0; + } + + /* + * Forward all other exceptions that are valid in real mode. + * FIXME: Breaks guest debugging in real mode, needs to be fixed with + * the required debugging infrastructure rework. + */ + kvm_queue_exception(vcpu, vec); + return 1; +} + +/* + * Trigger a machine check on the host. We assume all the MSRs are already set up + * by the CPU and that we still run on the same CPU as the MCE occurred on. + * We pass a fake environment to the machine check handler because we want + * the guest to always be treated like user space, no matter what context + * it used internally. + */ +static void kvm_machine_check(void) +{ +#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) + struct pt_regs regs = { + .cs = 3, /* Fake ring 3 no matter what the guest ran on */ + .flags = X86_EFLAGS_IF, + }; + + do_machine_check(&regs, 0); +#endif +} + +static int handle_machine_check(struct kvm_vcpu *vcpu) +{ + /* already handled by vcpu_run */ + return 1; +} + +static int handle_exception(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_run *kvm_run = vcpu->run; + u32 intr_info, ex_no, error_code; + unsigned long cr2, rip, dr6; + u32 vect_info; + enum emulation_result er; + + vect_info = vmx->idt_vectoring_info; + intr_info = vmx->exit_intr_info; + + if (is_machine_check(intr_info)) + return handle_machine_check(vcpu); + + if (is_nmi(intr_info)) + return 1; /* already handled by vmx_vcpu_run() */ + + if (is_invalid_opcode(intr_info)) + return handle_ud(vcpu); + + error_code = 0; + if (intr_info & INTR_INFO_DELIVER_CODE_MASK) + error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); + + if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { + WARN_ON_ONCE(!enable_vmware_backdoor); + er = kvm_emulate_instruction(vcpu, + EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); + if (er == EMULATE_USER_EXIT) + return 0; + else if (er != EMULATE_DONE) + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); + return 1; + } + + /* + * A #PF with PFEC.RSVD = 1 indicates the guest is accessing + * MMIO; it is better to report an internal error. + * See the comments in vmx_handle_exit.
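+ * (With shadow paging, KVM's MMIO SPTEs deliberately set reserved
+ * bits, so PFEC.RSVD = 1 normally means an MMIO access rather than
+ * an ordinary fault on guest memory.)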
+ */ + if ((vect_info & VECTORING_INFO_VALID_MASK) && + !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; + vcpu->run->internal.ndata = 3; + vcpu->run->internal.data[0] = vect_info; + vcpu->run->internal.data[1] = intr_info; + vcpu->run->internal.data[2] = error_code; + return 0; + } + + if (is_page_fault(intr_info)) { + cr2 = vmcs_readl(EXIT_QUALIFICATION); + /* EPT won't cause page fault directly */ + WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); + return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); + } + + ex_no = intr_info & INTR_INFO_VECTOR_MASK; + + if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) + return handle_rmode_exception(vcpu, ex_no, error_code); + + switch (ex_no) { + case AC_VECTOR: + kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); + return 1; + case DB_VECTOR: + dr6 = vmcs_readl(EXIT_QUALIFICATION); + if (!(vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= dr6 | DR6_RTM; + if (is_icebp(intr_info)) + skip_emulated_instruction(vcpu); + + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; + kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); + /* fall through */ + case BP_VECTOR: + /* + * Update instruction length as we may reinject #BP from + * user space while in guest debugging mode. Reading it for + * #DB as well causes no harm, it is not used in that case. + */ + vmx->vcpu.arch.event_exit_inst_len = + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + kvm_run->exit_reason = KVM_EXIT_DEBUG; + rip = kvm_rip_read(vcpu); + kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; + kvm_run->debug.arch.exception = ex_no; + break; + default: + kvm_run->exit_reason = KVM_EXIT_EXCEPTION; + kvm_run->ex.exception = ex_no; + kvm_run->ex.error_code = error_code; + break; + } + return 0; +} + +static int handle_external_interrupt(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.irq_exits; + return 1; +} + +static int handle_triple_fault(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + vcpu->mmio_needed = 0; + return 0; +} + +static int handle_io(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification; + int size, in, string; + unsigned port; + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + string = (exit_qualification & 16) != 0; + + ++vcpu->stat.io_exits; + + if (string) + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; + + port = exit_qualification >> 16; + size = (exit_qualification & 7) + 1; + in = (exit_qualification & 8) != 0; + + return kvm_fast_pio(vcpu, size, port, in); +} + +static void +vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) +{ + /* + * Patch in the VMCALL instruction: + */ + hypercall[0] = 0x0f; + hypercall[1] = 0x01; + hypercall[2] = 0xc1; +} + +/* called to set cr0 as appropriate for a mov-to-cr0 exit. */ +static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) +{ + if (is_guest_mode(vcpu)) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned long orig_val = val; + + /* + * We get here when L2 changed cr0 in a way that did not change + * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), + * but did change L0 shadowed bits. So we first calculate the + * effective cr0 value that L1 would like to write into the + * hardware. 
It consists of the L2-owned bits from the new + * value combined with the L1-owned bits from L1's guest_cr0. + */ + val = (val & ~vmcs12->cr0_guest_host_mask) | + (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); + + if (!nested_guest_cr0_valid(vcpu, val)) + return 1; + + if (kvm_set_cr0(vcpu, val)) + return 1; + vmcs_writel(CR0_READ_SHADOW, orig_val); + return 0; + } else { + if (to_vmx(vcpu)->nested.vmxon && + !nested_host_cr0_valid(vcpu, val)) + return 1; + + return kvm_set_cr0(vcpu, val); + } +} + +static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) +{ + if (is_guest_mode(vcpu)) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned long orig_val = val; + + /* analogously to handle_set_cr0 */ + val = (val & ~vmcs12->cr4_guest_host_mask) | + (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); + if (kvm_set_cr4(vcpu, val)) + return 1; + vmcs_writel(CR4_READ_SHADOW, orig_val); + return 0; + } else + return kvm_set_cr4(vcpu, val); +} + +static int handle_desc(struct kvm_vcpu *vcpu) +{ + WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; +} + +static int handle_cr(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification, val; + int cr; + int reg; + int err; + int ret; + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + cr = exit_qualification & 15; + reg = (exit_qualification >> 8) & 15; + switch ((exit_qualification >> 4) & 3) { + case 0: /* mov to cr */ + val = kvm_register_readl(vcpu, reg); + trace_kvm_cr_write(cr, val); + switch (cr) { + case 0: + err = handle_set_cr0(vcpu, val); + return kvm_complete_insn_gp(vcpu, err); + case 3: + WARN_ON_ONCE(enable_unrestricted_guest); + err = kvm_set_cr3(vcpu, val); + return kvm_complete_insn_gp(vcpu, err); + case 4: + err = handle_set_cr4(vcpu, val); + return kvm_complete_insn_gp(vcpu, err); + case 8: { + u8 cr8_prev = kvm_get_cr8(vcpu); + u8 cr8 = (u8)val; + err = kvm_set_cr8(vcpu, cr8); + ret = kvm_complete_insn_gp(vcpu, err); + if (lapic_in_kernel(vcpu)) + return ret; + if (cr8_prev <= cr8) + return ret; + /* + * TODO: we might be squashing a + * KVM_GUESTDBG_SINGLESTEP-triggered + * KVM_EXIT_DEBUG here. 
+ */ + vcpu->run->exit_reason = KVM_EXIT_SET_TPR; + return 0; + } + } + break; + case 2: /* clts */ + WARN_ONCE(1, "Guest should always own CR0.TS"); + vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); + trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); + return kvm_skip_emulated_instruction(vcpu); + case 1: /*mov from cr*/ + switch (cr) { + case 3: + WARN_ON_ONCE(enable_unrestricted_guest); + val = kvm_read_cr3(vcpu); + kvm_register_write(vcpu, reg, val); + trace_kvm_cr_read(cr, val); + return kvm_skip_emulated_instruction(vcpu); + case 8: + val = kvm_get_cr8(vcpu); + kvm_register_write(vcpu, reg, val); + trace_kvm_cr_read(cr, val); + return kvm_skip_emulated_instruction(vcpu); + } + break; + case 3: /* lmsw */ + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; + trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); + kvm_lmsw(vcpu, val); + + return kvm_skip_emulated_instruction(vcpu); + default: + break; + } + vcpu->run->exit_reason = 0; + vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", + (int)(exit_qualification >> 4) & 3, cr); + return 0; +} + +static int handle_dr(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification; + int dr, dr7, reg; + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + dr = exit_qualification & DEBUG_REG_ACCESS_NUM; + + /* First, if DR does not exist, trigger UD */ + if (!kvm_require_dr(vcpu, dr)) + return 1; + + /* Do not handle if the CPL > 0, will trigger GP on re-entry */ + if (!kvm_require_cpl(vcpu, 0)) + return 1; + dr7 = vmcs_readl(GUEST_DR7); + if (dr7 & DR7_GD) { + /* + * As the vm-exit takes precedence over the debug trap, we + * need to emulate the latter, either for the host or the + * guest debugging itself. + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { + vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; + vcpu->run->debug.arch.dr7 = dr7; + vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); + vcpu->run->debug.arch.exception = DB_VECTOR; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + return 0; + } else { + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= DR6_BD | DR6_RTM; + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + } + + if (vcpu->guest_debug == 0) { + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_MOV_DR_EXITING); + + /* + * No more DR vmexits; force a reload of the debug registers + * and reenter on this instruction. The next vmexit will + * retrieve the full state of the debug registers. 
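+ * (vmx_sync_dirty_debug_regs() below is the other half of this
+ * dance: it reads the debug registers back, clears
+ * KVM_DEBUGREG_WONT_EXIT and re-enables CPU_BASED_MOV_DR_EXITING.)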
+ */ + vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; + return 1; + } + + reg = DEBUG_REG_ACCESS_REG(exit_qualification); + if (exit_qualification & TYPE_MOV_FROM_DR) { + unsigned long val; + + if (kvm_get_dr(vcpu, dr, &val)) + return 1; + kvm_register_write(vcpu, reg, val); + } else + if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) + return 1; + + return kvm_skip_emulated_instruction(vcpu); +} + +static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.dr6; +} + +static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) +{ +} + +static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) +{ + get_debugreg(vcpu->arch.db[0], 0); + get_debugreg(vcpu->arch.db[1], 1); + get_debugreg(vcpu->arch.db[2], 2); + get_debugreg(vcpu->arch.db[3], 3); + get_debugreg(vcpu->arch.dr6, 6); + vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); + + vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); +} + +static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) +{ + vmcs_writel(GUEST_DR7, val); +} + +static int handle_cpuid(struct kvm_vcpu *vcpu) +{ + return kvm_emulate_cpuid(vcpu); +} + +static int handle_rdmsr(struct kvm_vcpu *vcpu) +{ + u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; + struct msr_data msr_info; + + msr_info.index = ecx; + msr_info.host_initiated = false; + if (vmx_get_msr(vcpu, &msr_info)) { + trace_kvm_msr_read_ex(ecx); + kvm_inject_gp(vcpu, 0); + return 1; + } + + trace_kvm_msr_read(ecx, msr_info.data); + + /* FIXME: handling of bits 32:63 of rax, rdx */ + vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; + vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; + return kvm_skip_emulated_instruction(vcpu); +} + +static int handle_wrmsr(struct kvm_vcpu *vcpu) +{ + struct msr_data msr; + u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; + u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) + | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); + + msr.data = data; + msr.index = ecx; + msr.host_initiated = false; + if (kvm_set_msr(vcpu, &msr) != 0) { + trace_kvm_msr_write_ex(ecx, data); + kvm_inject_gp(vcpu, 0); + return 1; + } + + trace_kvm_msr_write(ecx, data); + return kvm_skip_emulated_instruction(vcpu); +} + +static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) +{ + kvm_apic_update_ppr(vcpu); + return 1; +} + +static int handle_interrupt_window(struct kvm_vcpu *vcpu) +{ + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_VIRTUAL_INTR_PENDING); + + kvm_make_request(KVM_REQ_EVENT, vcpu); + + ++vcpu->stat.irq_window_exits; + return 1; +} + +static int handle_halt(struct kvm_vcpu *vcpu) +{ + return kvm_emulate_halt(vcpu); +} + +static int handle_vmcall(struct kvm_vcpu *vcpu) +{ + return kvm_emulate_hypercall(vcpu); +} + +static int handle_invd(struct kvm_vcpu *vcpu) +{ + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; +} + +static int handle_invlpg(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + kvm_mmu_invlpg(vcpu, exit_qualification); + return kvm_skip_emulated_instruction(vcpu); +} + +static int handle_rdpmc(struct kvm_vcpu *vcpu) +{ + int err; + + err = kvm_rdpmc(vcpu); + return kvm_complete_insn_gp(vcpu, err); +} + +static int handle_wbinvd(struct kvm_vcpu *vcpu) +{ + return kvm_emulate_wbinvd(vcpu); +} + +static int handle_xsetbv(struct kvm_vcpu *vcpu) +{ + u64 new_bv = kvm_read_edx_eax(vcpu); + u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); + + if (kvm_set_xcr(vcpu, index, new_bv) == 0) + return 
kvm_skip_emulated_instruction(vcpu); + return 1; +} + +static int handle_xsaves(struct kvm_vcpu *vcpu) +{ + kvm_skip_emulated_instruction(vcpu); + WARN(1, "this should never happen\n"); + return 1; +} + +static int handle_xrstors(struct kvm_vcpu *vcpu) +{ + kvm_skip_emulated_instruction(vcpu); + WARN(1, "this should never happen\n"); + return 1; +} + +static int handle_apic_access(struct kvm_vcpu *vcpu) +{ + if (likely(fasteoi)) { + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + int access_type, offset; + + access_type = exit_qualification & APIC_ACCESS_TYPE; + offset = exit_qualification & APIC_ACCESS_OFFSET; + /* + * A sane guest uses MOV to write the EOI register, and the + * written value does not matter, so short-circuit here and + * avoid heavy instruction emulation. + */ + if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && + (offset == APIC_EOI)) { + kvm_lapic_set_eoi(vcpu); + return kvm_skip_emulated_instruction(vcpu); + } + } + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; +} + +static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + int vector = exit_qualification & 0xff; + + /* EOI-induced VM exits are trap-like, so there is no need to adjust the IP */ + kvm_apic_set_eoi_accelerated(vcpu, vector); + return 1; +} + +static int handle_apic_write(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 offset = exit_qualification & 0xfff; + + /* APIC-write VM exits are trap-like, so there is no need to adjust the IP */ + kvm_apic_write_nodecode(vcpu, offset); + return 1; +} + +static int handle_task_switch(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qualification; + bool has_error_code = false; + u32 error_code = 0; + u16 tss_selector; + int reason, type, idt_v, idt_index; + + idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); + idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); + type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + reason = (u32)exit_qualification >> 30; + if (reason == TASK_SWITCH_GATE && idt_v) { + switch (type) { + case INTR_TYPE_NMI_INTR: + vcpu->arch.nmi_injected = false; + vmx_set_nmi_mask(vcpu, true); + break; + case INTR_TYPE_EXT_INTR: + case INTR_TYPE_SOFT_INTR: + kvm_clear_interrupt_queue(vcpu); + break; + case INTR_TYPE_HARD_EXCEPTION: + if (vmx->idt_vectoring_info & + VECTORING_INFO_DELIVER_CODE_MASK) { + has_error_code = true; + error_code = + vmcs_read32(IDT_VECTORING_ERROR_CODE); + } + /* fall through */ + case INTR_TYPE_SOFT_EXCEPTION: + kvm_clear_exception_queue(vcpu); + break; + default: + break; + } + } + tss_selector = exit_qualification; + + if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && + type != INTR_TYPE_EXT_INTR && + type != INTR_TYPE_NMI_INTR)) + skip_emulated_instruction(vcpu); + + if (kvm_task_switch(vcpu, tss_selector, + type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, + has_error_code, error_code) == EMULATE_FAIL) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; + } + + /* + * TODO: What about debug traps on tss switch? + * Are we supposed to inject them and update dr6?
+ */ + + return 1; +} + +static int handle_ept_violation(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification; + gpa_t gpa; + u64 error_code; + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + /* + * EPT violation happened while executing iret from NMI, + * "blocked by NMI" bit has to be set before next VM entry. + * There are errata that may cause this bit to not be set: + * AAK134, BY25. + */ + if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + enable_vnmi && + (exit_qualification & INTR_INFO_UNBLOCK_NMI)) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); + + gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); + trace_kvm_page_fault(gpa, exit_qualification); + + /* Is it a read fault? */ + error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) + ? PFERR_USER_MASK : 0; + /* Is it a write fault? */ + error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) + ? PFERR_WRITE_MASK : 0; + /* Is it a fetch fault? */ + error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) + ? PFERR_FETCH_MASK : 0; + /* ept page table entry is present? */ + error_code |= (exit_qualification & + (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | + EPT_VIOLATION_EXECUTABLE)) + ? PFERR_PRESENT_MASK : 0; + + error_code |= (exit_qualification & 0x100) != 0 ? + PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; + + vcpu->arch.exit_qualification = exit_qualification; + return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); +} + +static int handle_ept_misconfig(struct kvm_vcpu *vcpu) +{ + gpa_t gpa; + + /* + * A nested guest cannot optimize MMIO vmexits, because we have an + * nGPA here instead of the required GPA. + */ + gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); + if (!is_guest_mode(vcpu) && + !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { + trace_kvm_fast_mmio(gpa); + /* + * Doing kvm_skip_emulated_instruction() depends on undefined + * behavior: Intel's manual doesn't mandate + * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG + * occurs and while on real hardware it was observed to be set, + * other hypervisors (namely Hyper-V) don't set it, we end up + * advancing IP with some random value. Disable fast mmio when + * running nested and keep it for real hardware in hope that + * VM_EXIT_INSTRUCTION_LEN will always be set correctly. + */ + if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) + return kvm_skip_emulated_instruction(vcpu); + else + return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == + EMULATE_DONE; + } + + return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); +} + +static int handle_nmi_window(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(!enable_vnmi); + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_VIRTUAL_NMI_PENDING); + ++vcpu->stat.nmi_window_exits; + kvm_make_request(KVM_REQ_EVENT, vcpu); + + return 1; +} + +static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + enum emulation_result err = EMULATE_DONE; + int ret = 1; + u32 cpu_exec_ctrl; + bool intr_window_requested; + unsigned count = 130; + + /* + * We should never reach the point where we are emulating L2 + * due to invalid guest state as that means we incorrectly + * allowed a nested VMEntry with an invalid vmcs12. 
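+ * (The loop below emulates at most 130 instructions per invocation,
+ * checking between instructions for an open interrupt window,
+ * pending events, pending signals and the need to reschedule.)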
+ */ + WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); + + cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; + + while (vmx->emulation_required && count-- != 0) { + if (intr_window_requested && vmx_interrupt_allowed(vcpu)) + return handle_interrupt_window(&vmx->vcpu); + + if (kvm_test_request(KVM_REQ_EVENT, vcpu)) + return 1; + + err = kvm_emulate_instruction(vcpu, 0); + + if (err == EMULATE_USER_EXIT) { + ++vcpu->stat.mmio_exits; + ret = 0; + goto out; + } + + if (err != EMULATE_DONE) + goto emulation_error; + + if (vmx->emulation_required && !vmx->rmode.vm86_active && + vcpu->arch.exception.pending) + goto emulation_error; + + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + ret = kvm_vcpu_halt(vcpu); + goto out; + } + + if (signal_pending(current)) + goto out; + if (need_resched()) + schedule(); + } + +out: + return ret; + +emulation_error: + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; +} + +static void grow_ple_window(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int old = vmx->ple_window; + + vmx->ple_window = __grow_ple_window(old, ple_window, + ple_window_grow, + ple_window_max); + + if (vmx->ple_window != old) + vmx->ple_window_dirty = true; + + trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); +} + +static void shrink_ple_window(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int old = vmx->ple_window; + + vmx->ple_window = __shrink_ple_window(old, ple_window, + ple_window_shrink, + ple_window); + + if (vmx->ple_window != old) + vmx->ple_window_dirty = true; + + trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); +} + +/* + * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. + */ +static void wakeup_handler(void) +{ + struct kvm_vcpu *vcpu; + int cpu = smp_processor_id(); + + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), + blocked_vcpu_list) { + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + + if (pi_test_on(pi_desc) == 1) + kvm_vcpu_kick(vcpu); + } + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); +} + +static void vmx_enable_tdp(void) +{ + kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, + enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, + enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, + 0ull, VMX_EPT_EXECUTABLE_MASK, + cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, + VMX_EPT_RWX_MASK, 0ull); + + ept_set_mmio_spte_mask(); + kvm_enable_tdp(); +} + +/* + * Indicates that the vcpu is busy-waiting on a spinlock. We never enable plain + * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting. + */ +static int handle_pause(struct kvm_vcpu *vcpu) +{ + if (!kvm_pause_in_guest(vcpu->kvm)) + grow_ple_window(vcpu); + + /* + * The Intel SDM, Vol. 3, ch. 25.1.3, says: the "PAUSE-loop exiting" + * VM-execution control is ignored if CPL > 0. OTOH, KVM + * never sets PAUSE_EXITING and only sets PLE if supported, + * so the vcpu must be at CPL=0 if it gets a PAUSE exit.
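+ * (That is why kvm_vcpu_on_spin() below can be told unconditionally
+ * that the spinning vcpu was in kernel mode.)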
+ */ + kvm_vcpu_on_spin(vcpu, true); + return kvm_skip_emulated_instruction(vcpu); +} + +static int handle_nop(struct kvm_vcpu *vcpu) +{ + return kvm_skip_emulated_instruction(vcpu); +} + +static int handle_mwait(struct kvm_vcpu *vcpu) +{ + printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); + return handle_nop(vcpu); +} + +static int handle_invalid_op(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + +static int handle_monitor_trap(struct kvm_vcpu *vcpu) +{ + return 1; +} + +static int handle_monitor(struct kvm_vcpu *vcpu) +{ + printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); + return handle_nop(vcpu); +} + +static int handle_invpcid(struct kvm_vcpu *vcpu) +{ + u32 vmx_instruction_info; + unsigned long type; + bool pcid_enabled; + gva_t gva; + struct x86_exception e; + unsigned i; + unsigned long roots_to_free = 0; + struct { + u64 pcid; + u64 gla; + } operand; + + if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + + if (type > 3) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + /* According to the Intel instruction reference, the memory operand + * is read even if it isn't needed (e.g., for type==all) + */ + if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), + vmx_instruction_info, false, &gva)) + return 1; + + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + + if (operand.pcid >> 12 != 0) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); + + switch (type) { + case INVPCID_TYPE_INDIV_ADDR: + if ((!pcid_enabled && (operand.pcid != 0)) || + is_noncanonical_address(operand.gla, vcpu)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); + return kvm_skip_emulated_instruction(vcpu); + + case INVPCID_TYPE_SINGLE_CTXT: + if (!pcid_enabled && (operand.pcid != 0)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + if (kvm_get_active_pcid(vcpu) == operand.pcid) { + kvm_mmu_sync_roots(vcpu); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) + == operand.pcid) + roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); + + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); + /* + * If neither the current cr3 nor any of the prev_roots use the + * given PCID, then nothing needs to be done here because a + * resync will happen anyway before switching to any other CR3. + */ + + return kvm_skip_emulated_instruction(vcpu); + + case INVPCID_TYPE_ALL_NON_GLOBAL: + /* + * Currently, KVM doesn't mark global entries in the shadow + * page tables, so a non-global flush just degenerates to a + * global flush. If needed, we could optimize this later by + * keeping track of global entries in shadow page tables. 
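+ * (Hence the fall-through below: both INVPCID_TYPE_ALL_NON_GLOBAL
+ * and INVPCID_TYPE_ALL_INCL_GLOBAL end up in kvm_mmu_unload().)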
+ */ + + /* fall-through */ + case INVPCID_TYPE_ALL_INCL_GLOBAL: + kvm_mmu_unload(vcpu); + return kvm_skip_emulated_instruction(vcpu); + + default: + BUG(); /* We have already checked above that type <= 3 */ + } +} + +static int handle_pml_full(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification; + + trace_kvm_pml_full(vcpu->vcpu_id); + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + /* + * If the PML buffer became full while executing iret from NMI, + * the "blocked by NMI" bit has to be set before the next VM entry. + */ + if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + enable_vnmi && + (exit_qualification & INTR_INFO_UNBLOCK_NMI)) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + + /* + * The PML buffer was already flushed at the beginning of the VMEXIT. + * Nothing to do here, and there's no userspace involvement needed + * for PML. + */ + return 1; +} + +static int handle_preemption_timer(struct kvm_vcpu *vcpu) +{ + if (!to_vmx(vcpu)->req_immediate_exit) + kvm_lapic_expired_hv_timer(vcpu); + return 1; +} + +/* + * When nested=0, all VMX instruction VM exits land here. The handlers + * are overwritten by nested_vmx_setup() when nested=1. + */ +static int handle_vmx_instruction(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + +static int handle_encls(struct kvm_vcpu *vcpu) +{ + /* + * SGX virtualization is not yet supported. There is no software + * enable bit for SGX, so we have to trap ENCLS and inject a #UD + * to prevent the guest from executing ENCLS. + */ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + +/* + * The exit handlers return 1 if the exit was handled fully and guest execution + * may resume. Otherwise they set the kvm_run parameter to indicate to + * userspace what needs to be done, and return 0.
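+ *
+ * Dispatch itself (a simplified sketch of vmx_handle_exit(), which
+ * lives outside this hunk) is a bounds-checked table lookup:
+ *
+ *	u32 reason = vmcs_read32(VM_EXIT_REASON);
+ *
+ *	if (reason < kvm_vmx_max_exit_handlers &&
+ *	    kvm_vmx_exit_handlers[reason])
+ *		return kvm_vmx_exit_handlers[reason](vcpu);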
+ */ +static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { + [EXIT_REASON_EXCEPTION_NMI] = handle_exception, + [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, + [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, + [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, + [EXIT_REASON_IO_INSTRUCTION] = handle_io, + [EXIT_REASON_CR_ACCESS] = handle_cr, + [EXIT_REASON_DR_ACCESS] = handle_dr, + [EXIT_REASON_CPUID] = handle_cpuid, + [EXIT_REASON_MSR_READ] = handle_rdmsr, + [EXIT_REASON_MSR_WRITE] = handle_wrmsr, + [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, + [EXIT_REASON_HLT] = handle_halt, + [EXIT_REASON_INVD] = handle_invd, + [EXIT_REASON_INVLPG] = handle_invlpg, + [EXIT_REASON_RDPMC] = handle_rdpmc, + [EXIT_REASON_VMCALL] = handle_vmcall, + [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, + [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, + [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, + [EXIT_REASON_VMPTRST] = handle_vmx_instruction, + [EXIT_REASON_VMREAD] = handle_vmx_instruction, + [EXIT_REASON_VMRESUME] = handle_vmx_instruction, + [EXIT_REASON_VMWRITE] = handle_vmx_instruction, + [EXIT_REASON_VMOFF] = handle_vmx_instruction, + [EXIT_REASON_VMON] = handle_vmx_instruction, + [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, + [EXIT_REASON_APIC_ACCESS] = handle_apic_access, + [EXIT_REASON_APIC_WRITE] = handle_apic_write, + [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, + [EXIT_REASON_WBINVD] = handle_wbinvd, + [EXIT_REASON_XSETBV] = handle_xsetbv, + [EXIT_REASON_TASK_SWITCH] = handle_task_switch, + [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, + [EXIT_REASON_GDTR_IDTR] = handle_desc, + [EXIT_REASON_LDTR_TR] = handle_desc, + [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, + [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, + [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, + [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, + [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, + [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, + [EXIT_REASON_INVEPT] = handle_vmx_instruction, + [EXIT_REASON_INVVPID] = handle_vmx_instruction, + [EXIT_REASON_RDRAND] = handle_invalid_op, + [EXIT_REASON_RDSEED] = handle_invalid_op, + [EXIT_REASON_XSAVES] = handle_xsaves, + [EXIT_REASON_XRSTORS] = handle_xrstors, + [EXIT_REASON_PML_FULL] = handle_pml_full, + [EXIT_REASON_INVPCID] = handle_invpcid, + [EXIT_REASON_VMFUNC] = handle_vmx_instruction, + [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, + [EXIT_REASON_ENCLS] = handle_encls, +}; + +static const int kvm_vmx_max_exit_handlers = + ARRAY_SIZE(kvm_vmx_exit_handlers); + +static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) +{ + *info1 = vmcs_readl(EXIT_QUALIFICATION); + *info2 = vmcs_read32(VM_EXIT_INTR_INFO); +} + +static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) +{ + if (vmx->pml_pg) { + __free_page(vmx->pml_pg); + vmx->pml_pg = NULL; + } +} + +static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u64 *pml_buf; + u16 pml_idx; + + pml_idx = vmcs_read16(GUEST_PML_INDEX); + + /* Do nothing if PML buffer is empty */ + if (pml_idx == (PML_ENTITY_NUM - 1)) + return; + + /* PML index always points to next available PML buffer entity */ + if (pml_idx >= PML_ENTITY_NUM) + pml_idx = 0; + else + pml_idx++; + + pml_buf = page_address(vmx->pml_pg); + for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { + u64 gpa; + + gpa = pml_buf[pml_idx]; + WARN_ON(gpa & (PAGE_SIZE - 1)); + kvm_vcpu_mark_page_dirty(vcpu, gpa >> 
PAGE_SHIFT); + } + + /* reset PML index */ + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); +} + +/* + * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. + * Called before reporting dirty_bitmap to userspace. + */ +static void kvm_flush_pml_buffers(struct kvm *kvm) +{ + int i; + struct kvm_vcpu *vcpu; + /* + * We only need to kick vcpu out of guest mode here, as PML buffer + * is flushed at beginning of all VMEXITs, and it's obvious that only + * vcpus running in guest are possible to have unflushed GPAs in PML + * buffer. + */ + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vcpu_kick(vcpu); +} + +static void vmx_dump_sel(char *name, uint32_t sel) +{ + pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", + name, vmcs_read16(sel), + vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), + vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), + vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); +} + +static void vmx_dump_dtsel(char *name, uint32_t limit) +{ + pr_err("%s limit=0x%08x, base=0x%016lx\n", + name, vmcs_read32(limit), + vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); +} + +static void dump_vmcs(void) +{ + u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); + u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); + u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); + u32 secondary_exec_control = 0; + unsigned long cr4 = vmcs_readl(GUEST_CR4); + u64 efer = vmcs_read64(GUEST_IA32_EFER); + int i, n; + + if (cpu_has_secondary_exec_ctrls()) + secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + + pr_err("*** Guest State ***\n"); + pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", + vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), + vmcs_readl(CR0_GUEST_HOST_MASK)); + pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", + cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); + pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); + if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && + (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) + { + pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", + vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); + pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", + vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); + } + pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", + vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); + pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", + vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); + pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", + vmcs_readl(GUEST_SYSENTER_ESP), + vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); + vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); + vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); + vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); + vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); + vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); + vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); + vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); + vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); + vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); + vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); + if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || + (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) + pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", + efer, vmcs_read64(GUEST_IA32_PAT)); + pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", + vmcs_read64(GUEST_IA32_DEBUGCTL), + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); + if 
(cpu_has_load_perf_global_ctrl() && + vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) + pr_err("PerfGlobCtl = 0x%016llx\n", + vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); + if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) + pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); + pr_err("Interruptibility = %08x ActivityState = %08x\n", + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), + vmcs_read32(GUEST_ACTIVITY_STATE)); + if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) + pr_err("InterruptStatus = %04x\n", + vmcs_read16(GUEST_INTR_STATUS)); + + pr_err("*** Host State ***\n"); + pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", + vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); + pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", + vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), + vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), + vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), + vmcs_read16(HOST_TR_SELECTOR)); + pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", + vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), + vmcs_readl(HOST_TR_BASE)); + pr_err("GDTBase=%016lx IDTBase=%016lx\n", + vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); + pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", + vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), + vmcs_readl(HOST_CR4)); + pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", + vmcs_readl(HOST_IA32_SYSENTER_ESP), + vmcs_read32(HOST_IA32_SYSENTER_CS), + vmcs_readl(HOST_IA32_SYSENTER_EIP)); + if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) + pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", + vmcs_read64(HOST_IA32_EFER), + vmcs_read64(HOST_IA32_PAT)); + if (cpu_has_load_perf_global_ctrl() && + vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) + pr_err("PerfGlobCtl = 0x%016llx\n", + vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); + + pr_err("*** Control State ***\n"); + pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", + pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); + pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); + pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", + vmcs_read32(EXCEPTION_BITMAP), + vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), + vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); + pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", + vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), + vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), + vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); + pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); + pr_err(" reason=%08x qualification=%016lx\n", + vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); + pr_err("IDTVectoring: info=%08x errcode=%08x\n", + vmcs_read32(IDT_VECTORING_INFO_FIELD), + vmcs_read32(IDT_VECTORING_ERROR_CODE)); + pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); + if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) + pr_err("TSC Multiplier = 0x%016llx\n", + vmcs_read64(TSC_MULTIPLIER)); + if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) + pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); + if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) + pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); + if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) + pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); + n = vmcs_read32(CR3_TARGET_COUNT); + for (i = 0; i + 1 < n; i += 4) + pr_err("CR3 
target%u=%016lx target%u=%016lx\n",
+		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
+		       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
+	if (i < n)
+		pr_err("CR3 target%u=%016lx\n",
+		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
+	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+		pr_err("PLE Gap=%08x Window=%08x\n",
+		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
+	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
+		pr_err("Virtual processor ID = 0x%04x\n",
+		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
+}
+
+/*
+ * The guest has exited.  See if we can fix it or if we need userspace
+ * assistance.
+ */
+static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 exit_reason = vmx->exit_reason;
+	u32 vectoring_info = vmx->idt_vectoring_info;
+
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
+	/*
+	 * Flush the logged GPAs out of the PML buffer so that dirty_bitmap
+	 * is up to date.  A further benefit: in kvm_vm_ioctl_get_dirty_log,
+	 * before querying dirty_bitmap, we only need to kick all vcpus out
+	 * of guest mode, because once a vcpu is back in root mode its PML
+	 * buffer must already have been flushed.
+	 */
+	if (enable_pml)
+		vmx_flush_pml_buffer(vcpu);
+
+	/* If guest state is invalid, start emulating */
+	if (vmx->emulation_required)
+		return handle_invalid_guest_state(vcpu);
+
+	if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
+		return nested_vmx_reflect_vmexit(vcpu, exit_reason);
+
+	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+		dump_vmcs();
+		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		vcpu->run->fail_entry.hardware_entry_failure_reason
+			= exit_reason;
+		return 0;
+	}
+
+	if (unlikely(vmx->fail)) {
+		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		vcpu->run->fail_entry.hardware_entry_failure_reason
+			= vmcs_read32(VM_INSTRUCTION_ERROR);
+		return 0;
+	}
+
+	/*
+	 * Note:
+	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
+	 * event delivery, since that indicates the guest is accessing MMIO.
+	 * The VM-exit would trigger again after returning to the guest,
+	 * causing an infinite loop.
+	 */
+	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+			exit_reason != EXIT_REASON_EPT_VIOLATION &&
+			exit_reason != EXIT_REASON_PML_FULL &&
+			exit_reason != EXIT_REASON_TASK_SWITCH)) {
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+		vcpu->run->internal.ndata = 3;
+		vcpu->run->internal.data[0] = vectoring_info;
+		vcpu->run->internal.data[1] = exit_reason;
+		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+			vcpu->run->internal.ndata++;
+			vcpu->run->internal.data[3] =
+				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+		}
+		return 0;
+	}
+
+	if (unlikely(!enable_vnmi &&
+		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
+		if (vmx_interrupt_allowed(vcpu)) {
+			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+			   vcpu->arch.nmi_pending) {
+			/*
+			 * This CPU doesn't give us a way to find the end of an
+			 * NMI-blocked window if the guest runs with IRQs
+			 * disabled.  So we pull the trigger after 1 s of
+			 * futile waiting, but inform the user about this.
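+			 * Only KVM's software bookkeeping is cleared here;
+			 * without enable_vnmi there is no hardware
+			 * NMI-blocking state to undo.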
+			 */
+			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+			       "state on VCPU %d after 1 s timeout\n",
+			       __func__, vcpu->vcpu_id);
+			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+		}
+	}
+
+	if (exit_reason < kvm_vmx_max_exit_handlers
+	    && kvm_vmx_exit_handlers[exit_reason])
+		return kvm_vmx_exit_handlers[exit_reason](vcpu);
+	else {
+		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+				exit_reason);
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+}
+
+/*
+ * Software based L1D cache flush which is used when microcode providing
+ * the cache control MSR is not loaded.
+ *
+ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
+ * flush it we are required to read in 64 KiB because the replacement
+ * algorithm is not exactly LRU.  This could be sized at runtime via
+ * topology information, but as all relevant affected CPUs have a 32 KiB
+ * L1D cache there is no point in doing so.
+ */
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+{
+	int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+	/*
+	 * This code is only executed when the flush mode is 'cond' or
+	 * 'always'.
+	 */
+	if (static_branch_likely(&vmx_l1d_flush_cond)) {
+		bool flush_l1d;
+
+		/*
+		 * Clear the per-vcpu flush bit, it gets set again
+		 * either from vcpu_run() or from one of the unsafe
+		 * VMEXIT handlers.
+		 */
+		flush_l1d = vcpu->arch.l1tf_flush_l1d;
+		vcpu->arch.l1tf_flush_l1d = false;
+
+		/*
+		 * Clear the per-cpu flush bit, it gets set again from
+		 * the interrupt handlers.
+		 */
+		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+		kvm_clear_cpu_l1tf_flush_l1d();
+
+		if (!flush_l1d)
+			return;
+	}
+
+	vcpu->stat.l1d_flush++;
+
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+		return;
+	}
+
+	asm volatile(
+		/* First ensure the pages are in the TLB */
+		"xorl %%eax, %%eax\n"
+		".Lpopulate_tlb:\n\t"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+		"addl $4096, %%eax\n\t"
+		"cmpl %%eax, %[size]\n\t"
+		"jne .Lpopulate_tlb\n\t"
+		"xorl %%eax, %%eax\n\t"
+		"cpuid\n\t"
+		/* Now fill the cache */
+		"xorl %%eax, %%eax\n"
+		".Lfill_cache:\n"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+		"addl $64, %%eax\n\t"
+		"cmpl %%eax, %[size]\n\t"
+		"jne .Lfill_cache\n\t"
+		"lfence\n"
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
+		    [size] "r" (size)
+		: "eax", "ebx", "ecx", "edx");
+}
+
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (is_guest_mode(vcpu) &&
+		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+		return;
+
+	if (irr == -1 || tpr < irr) {
+		vmcs_write32(TPR_THRESHOLD, 0);
+		return;
+	}
+
+	vmcs_write32(TPR_THRESHOLD, irr);
+}
+
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+	u32 sec_exec_control;
+
+	if (!lapic_in_kernel(vcpu))
+		return;
+
+	if (!flexpriority_enabled &&
+	    !cpu_has_vmx_virtualize_x2apic_mode())
+		return;
+
+	/* Postpone execution until vmcs01 is the current VMCS.
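+	 * The deferred update is applied once vmcs01 is reloaded on the
+	 * next nested VM-exit.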
*/ + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; + return; + } + + sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); + + switch (kvm_get_apic_mode(vcpu)) { + case LAPIC_MODE_INVALID: + WARN_ONCE(true, "Invalid local APIC state"); + case LAPIC_MODE_DISABLED: + break; + case LAPIC_MODE_XAPIC: + if (flexpriority_enabled) { + sec_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + vmx_flush_tlb(vcpu, true); + } + break; + case LAPIC_MODE_X2APIC: + if (cpu_has_vmx_virtualize_x2apic_mode()) + sec_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + break; + } + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); + + vmx_update_msr_bitmap(vcpu); +} + +static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) +{ + if (!is_guest_mode(vcpu)) { + vmcs_write64(APIC_ACCESS_ADDR, hpa); + vmx_flush_tlb(vcpu, true); + } +} + +static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) +{ + u16 status; + u8 old; + + if (max_isr == -1) + max_isr = 0; + + status = vmcs_read16(GUEST_INTR_STATUS); + old = status >> 8; + if (max_isr != old) { + status &= 0xff; + status |= max_isr << 8; + vmcs_write16(GUEST_INTR_STATUS, status); + } +} + +static void vmx_set_rvi(int vector) +{ + u16 status; + u8 old; + + if (vector == -1) + vector = 0; + + status = vmcs_read16(GUEST_INTR_STATUS); + old = (u8)status & 0xff; + if ((u8)vector != old) { + status &= ~0xff; + status |= (u8)vector; + vmcs_write16(GUEST_INTR_STATUS, status); + } +} + +static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) +{ + /* + * When running L2, updating RVI is only relevant when + * vmcs12 virtual-interrupt-delivery enabled. + * However, it can be enabled only when L1 also + * intercepts external-interrupts and in that case + * we should not update vmcs02 RVI but instead intercept + * interrupt. Therefore, do nothing when running L2. + */ + if (!is_guest_mode(vcpu)) + vmx_set_rvi(max_irr); +} + +static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int max_irr; + bool max_irr_updated; + + WARN_ON(!vcpu->arch.apicv_active); + if (pi_test_on(&vmx->pi_desc)) { + pi_clear_on(&vmx->pi_desc); + /* + * IOMMU can write to PIR.ON, so the barrier matters even on UP. + * But on x86 this is just a compiler barrier anyway. + */ + smp_mb__after_atomic(); + max_irr_updated = + kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); + + /* + * If we are running L2 and L1 has a new pending interrupt + * which can be injected, we should re-evaluate + * what should be done with this new L1 interrupt. + * If L1 intercepts external-interrupts, we should + * exit from L2 to L1. Otherwise, interrupt should be + * delivered directly to L2. 
+ */ + if (is_guest_mode(vcpu) && max_irr_updated) { + if (nested_exit_on_intr(vcpu)) + kvm_vcpu_exiting_guest_mode(vcpu); + else + kvm_make_request(KVM_REQ_EVENT, vcpu); + } + } else { + max_irr = kvm_lapic_find_highest_irr(vcpu); + } + vmx_hwapic_irr_update(vcpu, max_irr); + return max_irr; +} + +static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) +{ + if (!kvm_vcpu_apicv_active(vcpu)) + return; + + vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); + vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); + vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); + vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); +} + +static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + pi_clear_on(&vmx->pi_desc); + memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); +} + +static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) +{ + u32 exit_intr_info = 0; + u16 basic_exit_reason = (u16)vmx->exit_reason; + + if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY + || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) + return; + + if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + vmx->exit_intr_info = exit_intr_info; + + /* if exit due to PF check for async PF */ + if (is_page_fault(exit_intr_info)) + vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); + + /* Handle machine checks before interrupts are enabled */ + if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || + is_machine_check(exit_intr_info)) + kvm_machine_check(); + + /* We need to handle NMIs before interrupts are enabled */ + if (is_nmi(exit_intr_info)) { + kvm_before_interrupt(&vmx->vcpu); + asm("int $2"); + kvm_after_interrupt(&vmx->vcpu); + } +} + +static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + + if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) + == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { + unsigned int vector; + unsigned long entry; + gate_desc *desc; + struct vcpu_vmx *vmx = to_vmx(vcpu); +#ifdef CONFIG_X86_64 + unsigned long tmp; +#endif + + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; + desc = (gate_desc *)vmx->host_idt_base + vector; + entry = gate_offset(desc); + asm volatile( +#ifdef CONFIG_X86_64 + "mov %%" _ASM_SP ", %[sp]\n\t" + "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" + "push $%c[ss]\n\t" + "push %[sp]\n\t" +#endif + "pushf\n\t" + __ASM_SIZE(push) " $%c[cs]\n\t" + CALL_NOSPEC + : +#ifdef CONFIG_X86_64 + [sp]"=&r"(tmp), +#endif + ASM_CALL_CONSTRAINT + : + THUNK_TARGET(entry), + [ss]"i"(__KERNEL_DS), + [cs]"i"(__KERNEL_CS) + ); + } +} +STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); + +static bool vmx_has_emulated_msr(int index) +{ + switch (index) { + case MSR_IA32_SMBASE: + /* + * We cannot do SMM unless we can run the guest in big + * real mode. + */ + return enable_unrestricted_guest || emulate_invalid_guest_state; + case MSR_AMD64_VIRT_SPEC_CTRL: + /* This is AMD only. 
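+		 * Reporting it as not emulated keeps it out of the MSR
+		 * lists exposed to userspace on Intel hosts.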
*/ + return false; + default: + return true; + } +} + +static bool vmx_pt_supported(void) +{ + return pt_mode == PT_MODE_HOST_GUEST; +} + +static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) +{ + u32 exit_intr_info; + bool unblock_nmi; + u8 vector; + bool idtv_info_valid; + + idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; + + if (enable_vnmi) { + if (vmx->loaded_vmcs->nmi_known_unmasked) + return; + /* + * Can't use vmx->exit_intr_info since we're not sure what + * the exit reason is. + */ + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; + /* + * SDM 3: 27.7.1.2 (September 2008) + * Re-set bit "block by NMI" before VM entry if vmexit caused by + * a guest IRET fault. + * SDM 3: 23.2.2 (September 2008) + * Bit 12 is undefined in any of the following cases: + * If the VM exit sets the valid bit in the IDT-vectoring + * information field. + * If the VM exit is due to a double fault. + */ + if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && + vector != DF_VECTOR && !idtv_info_valid) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) + & GUEST_INTR_STATE_NMI); + } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->vnmi_blocked_time += + ktime_to_ns(ktime_sub(ktime_get(), + vmx->loaded_vmcs->entry_time)); +} + +static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, + u32 idt_vectoring_info, + int instr_len_field, + int error_code_field) +{ + u8 vector; + int type; + bool idtv_info_valid; + + idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; + + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); + + if (!idtv_info_valid) + return; + + kvm_make_request(KVM_REQ_EVENT, vcpu); + + vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; + type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; + + switch (type) { + case INTR_TYPE_NMI_INTR: + vcpu->arch.nmi_injected = true; + /* + * SDM 3: 27.7.1.2 (September 2008) + * Clear bit "block by NMI" before VM entry if a NMI + * delivery faulted. 
+ */ + vmx_set_nmi_mask(vcpu, false); + break; + case INTR_TYPE_SOFT_EXCEPTION: + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); + /* fall through */ + case INTR_TYPE_HARD_EXCEPTION: + if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { + u32 err = vmcs_read32(error_code_field); + kvm_requeue_exception_e(vcpu, vector, err); + } else + kvm_requeue_exception(vcpu, vector); + break; + case INTR_TYPE_SOFT_INTR: + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); + /* fall through */ + case INTR_TYPE_EXT_INTR: + kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); + break; + default: + break; + } +} + +static void vmx_complete_interrupts(struct vcpu_vmx *vmx) +{ + __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, + VM_EXIT_INSTRUCTION_LEN, + IDT_VECTORING_ERROR_CODE); +} + +static void vmx_cancel_injection(struct kvm_vcpu *vcpu) +{ + __vmx_complete_interrupts(vcpu, + vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), + VM_ENTRY_INSTRUCTION_LEN, + VM_ENTRY_EXCEPTION_ERROR_CODE); + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); +} + +static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) +{ + int i, nr_msrs; + struct perf_guest_switch_msr *msrs; + + msrs = perf_guest_get_msrs(&nr_msrs); + + if (!msrs) + return; + + for (i = 0; i < nr_msrs; i++) + if (msrs[i].host == msrs[i].guest) + clear_atomic_switch_msr(vmx, msrs[i].msr); + else + add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, + msrs[i].host, false); +} + +static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); + if (!vmx->loaded_vmcs->hv_timer_armed) + vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = true; +} + +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u64 tscl; + u32 delta_tsc; + + if (vmx->req_immediate_exit) { + vmx_arm_hv_timer(vmx, 0); + return; + } + + if (vmx->hv_deadline_tsc != -1) { + tscl = rdtsc(); + if (vmx->hv_deadline_tsc > tscl) + /* set_hv_timer ensures the delta fits in 32-bits */ + delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> + cpu_preemption_timer_multi); + else + delta_tsc = 0; + + vmx_arm_hv_timer(vmx, delta_tsc); + return; + } + + if (vmx->loaded_vmcs->hv_timer_armed) + vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = false; +} + +static void vmx_vcpu_run(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long cr3, cr4, evmcs_rsp; + + /* Record the guest's net vcpu time for enforced NMI injections. 
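+	 * vmx_recover_nmi_blocking() accumulates the blocked time and
+	 * vmx_handle_exit() compares it against the 1 s timeout.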
*/ + if (unlikely(!enable_vnmi && + vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->entry_time = ktime_get(); + + /* Don't enter VMX if guest state is invalid, let the exit handler + start emulation until we arrive back to a valid state */ + if (vmx->emulation_required) + return; + + if (vmx->ple_window_dirty) { + vmx->ple_window_dirty = false; + vmcs_write32(PLE_WINDOW, vmx->ple_window); + } + + if (vmx->nested.need_vmcs12_sync) + nested_sync_from_vmcs12(vcpu); + + if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) + vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); + if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) + vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); + + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { + vmcs_writel(HOST_CR3, cr3); + vmx->loaded_vmcs->host_state.cr3 = cr3; + } + + cr4 = cr4_read_shadow(); + if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + + /* When single-stepping over STI and MOV SS, we must clear the + * corresponding interruptibility bits in the guest state. Otherwise + * vmentry fails as it then expects bit 14 (BS) in pending debug + * exceptions being set, but that's not correct for the guest debugging + * case. */ + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmx_set_interrupt_shadow(vcpu, 0); + + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && + vcpu->arch.pkru != vmx->host_pkru) + __write_pkru(vcpu->arch.pkru); + + pt_guest_enter(vmx); + + atomic_switch_perf_msrs(vmx); + + vmx_update_hv_timer(vcpu); + + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); + + vmx->__launched = vmx->loaded_vmcs->launched; + + evmcs_rsp = static_branch_unlikely(&enable_evmcs) ? + (unsigned long)¤t_evmcs->host_rsp : 0; + + if (static_branch_unlikely(&vmx_l1d_should_flush)) + vmx_l1d_flush(vcpu); + + asm( + /* Store host registers */ + "push %%" _ASM_DX "; push %%" _ASM_BP ";" + "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ + "push %%" _ASM_CX " \n\t" + "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ + "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t" + "je 1f \n\t" + "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t" + /* Avoid VMWRITE when Enlightened VMCS is in use */ + "test %%" _ASM_SI ", %%" _ASM_SI " \n\t" + "jz 2f \n\t" + "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t" + "jmp 1f \n\t" + "2: \n\t" + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" + "1: \n\t" + "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ + + /* Reload cr2 if changed */ + "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t" + "mov %%cr2, %%" _ASM_DX " \n\t" + "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" + "je 3f \n\t" + "mov %%" _ASM_AX", %%cr2 \n\t" + "3: \n\t" + /* Check if vmlaunch or vmresume is needed */ + "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t" + /* Load guest registers. Don't clobber flags. 
*/ + "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t" + "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t" + "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t" + "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t" + "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t" + "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t" +#ifdef CONFIG_X86_64 + "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t" + "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t" + "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t" + "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t" + "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t" + "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t" + "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t" + "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t" +#endif + /* Load guest RCX. This kills the vmx_vcpu pointer! */ + "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t" + + /* Enter guest mode */ + "call vmx_vmenter\n\t" + + /* Save guest's RCX to the stack placeholder (see above) */ + "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t" + + /* Load host's RCX, i.e. the vmx_vcpu pointer */ + "pop %%" _ASM_CX " \n\t" + + /* Set vmx->fail based on EFLAGS.{CF,ZF} */ + "setbe %c[fail](%%" _ASM_CX ")\n\t" + + /* Save all guest registers, including RCX from the stack */ + "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t" + __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t" +#ifdef CONFIG_X86_64 + "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t" + "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t" + "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t" + "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t" + "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t" + "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t" + "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t" + "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t" + /* + * Clear host registers marked as clobbered to prevent + * speculative use. 
+ */ + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" +#endif + "mov %%cr2, %%" _ASM_AX " \n\t" + "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t" + + "xor %%eax, %%eax \n\t" + "xor %%ebx, %%ebx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" + "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" + : ASM_CALL_CONSTRAINT + : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), + [launched]"i"(offsetof(struct vcpu_vmx, __launched)), + [fail]"i"(offsetof(struct vcpu_vmx, fail)), + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), + [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), + [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), + [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), + [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), + [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), + [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), + [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), +#ifdef CONFIG_X86_64 + [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), + [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), + [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), + [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), + [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), + [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), + [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), + [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), +#endif + [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), + [wordsize]"i"(sizeof(ulong)) + : "cc", "memory" +#ifdef CONFIG_X86_64 + , "rax", "rbx", "rdi" + , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" +#else + , "eax", "ebx", "edi" +#endif + ); + + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); + + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + + /* All fields are clean at this point */ + if (static_branch_unlikely(&enable_evmcs)) + current_evmcs->hv_clean_fields |= + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ + if (vmx->host_debugctlmsr) + update_debugctlmsr(vmx->host_debugctlmsr); + +#ifndef CONFIG_X86_64 + /* + * The sysexit path does not restore ds/es, so we must set them to + * a reasonable value ourselves. + * + * We can't defer this to vmx_prepare_switch_to_host() since that + * function may be executed in interrupt context, which saves and + * restore segments around it, nullifying its effect. 
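+	 * (On 64-bit, vmx_prepare_switch_to_host() restores the host
+	 * segment selectors itself.)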
+ */ + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); +#endif + + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) + | (1 << VCPU_EXREG_RFLAGS) + | (1 << VCPU_EXREG_PDPTR) + | (1 << VCPU_EXREG_SEGMENTS) + | (1 << VCPU_EXREG_CR3)); + vcpu->arch.regs_dirty = 0; + + pt_guest_exit(vmx); + + /* + * eager fpu is enabled if PKEY is supported and CR4 is switched + * back on host, so it is safe to read guest PKRU from current + * XSAVE. + */ + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { + vcpu->arch.pkru = __read_pkru(); + if (vcpu->arch.pkru != vmx->host_pkru) + __write_pkru(vmx->host_pkru); + } + + vmx->nested.nested_run_pending = 0; + vmx->idt_vectoring_info = 0; + + vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); + if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + return; + + vmx->loaded_vmcs->launched = 1; + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); + + vmx_complete_atomic_exit(vmx); + vmx_recover_nmi_blocking(vmx); + vmx_complete_interrupts(vmx); +} +STACK_FRAME_NON_STANDARD(vmx_vcpu_run); + +static struct kvm *vmx_vm_alloc(void) +{ + struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx)); + return &kvm_vmx->kvm; +} + +static void vmx_vm_free(struct kvm *kvm) +{ + vfree(to_kvm_vmx(kvm)); +} + +static void vmx_free_vcpu(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (enable_pml) + vmx_destroy_pml_buffer(vmx); + free_vpid(vmx->vpid); + leave_guest_mode(vcpu); + nested_vmx_free_vcpu(vcpu); + free_loaded_vmcs(vmx->loaded_vmcs); + kfree(vmx->guest_msrs); + kvm_vcpu_uninit(vcpu); + kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); + kmem_cache_free(kvm_vcpu_cache, vmx); +} + +static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) +{ + int err; + struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + unsigned long *msr_bitmap; + int cpu; + + if (!vmx) + return ERR_PTR(-ENOMEM); + + vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL); + if (!vmx->vcpu.arch.guest_fpu) { + printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); + err = -ENOMEM; + goto free_partial_vcpu; + } + + vmx->vpid = allocate_vpid(); + + err = kvm_vcpu_init(&vmx->vcpu, kvm, id); + if (err) + goto free_vcpu; + + err = -ENOMEM; + + /* + * If PML is turned on, failure on enabling PML just results in failure + * of creating the vcpu, therefore we can simplify PML logic (by + * avoiding dealing with cases, such as enabling PML partially on vcpus + * for the guest, etc. 
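+	 * On any allocation failure below, creation of the vcpu simply
+	 * fails.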
+ */ + if (enable_pml) { + vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!vmx->pml_pg) + goto uninit_vcpu; + } + + vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); + BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) + > PAGE_SIZE); + + if (!vmx->guest_msrs) + goto free_pml; + + err = alloc_loaded_vmcs(&vmx->vmcs01); + if (err < 0) + goto free_msrs; + + msr_bitmap = vmx->vmcs01.msr_bitmap; + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); + vmx->msr_bitmap_mode = 0; + + vmx->loaded_vmcs = &vmx->vmcs01; + cpu = get_cpu(); + vmx_vcpu_load(&vmx->vcpu, cpu); + vmx->vcpu.cpu = cpu; + vmx_vcpu_setup(vmx); + vmx_vcpu_put(&vmx->vcpu); + put_cpu(); + if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { + err = alloc_apic_access_page(kvm); + if (err) + goto free_vmcs; + } + + if (enable_ept && !enable_unrestricted_guest) { + err = init_rmode_identity_map(kvm); + if (err) + goto free_vmcs; + } + + if (nested) + nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, + vmx_capability.ept, + kvm_vcpu_apicv_active(&vmx->vcpu)); + else + memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); + + vmx->nested.posted_intr_nv = -1; + vmx->nested.current_vmptr = -1ull; + + vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; + + /* + * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR + * or POSTED_INTR_WAKEUP_VECTOR. + */ + vmx->pi_desc.nv = POSTED_INTR_VECTOR; + vmx->pi_desc.sn = 1; + + vmx->ept_pointer = INVALID_PAGE; + + return &vmx->vcpu; + +free_vmcs: + free_loaded_vmcs(vmx->loaded_vmcs); +free_msrs: + kfree(vmx->guest_msrs); +free_pml: + vmx_destroy_pml_buffer(vmx); +uninit_vcpu: + kvm_vcpu_uninit(&vmx->vcpu); +free_vcpu: + free_vpid(vmx->vpid); + kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); +free_partial_vcpu: + kmem_cache_free(kvm_vcpu_cache, vmx); + return ERR_PTR(err); +} + +#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" +#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" + +static int vmx_vm_init(struct kvm *kvm) +{ + spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); + + if (!ple_gap) + kvm->arch.pause_in_guest = true; + + if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: + /* 'I explicitly don't care' is set */ + break; + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + case L1TF_MITIGATION_FULL: + /* + * Warn upon starting the first VM in a potentially + * insecure environment. 
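+			 * pr_warn_once() limits each message to once per
+			 * boot.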
+ */ + if (cpu_smt_control == CPU_SMT_ENABLED) + pr_warn_once(L1TF_MSG_SMT); + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) + pr_warn_once(L1TF_MSG_L1D); + break; + case L1TF_MITIGATION_FULL_FORCE: + /* Flush is enforced */ + break; + } + } + return 0; +} + +static void __init vmx_check_processor_compat(void *rtn) +{ + struct vmcs_config vmcs_conf; + struct vmx_capability vmx_cap; + + *(int *)rtn = 0; + if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) + *(int *)rtn = -EIO; + if (nested) + nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, + enable_apicv); + if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { + printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", + smp_processor_id()); + *(int *)rtn = -EIO; + } +} + +static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + u8 cache; + u64 ipat = 0; + + /* For VT-d and EPT combination + * 1. MMIO: always map as UC + * 2. EPT with VT-d: + * a. VT-d without snooping control feature: can't guarantee the + * result, try to trust guest. + * b. VT-d with snooping control feature: snooping control feature of + * VT-d engine can guarantee the cache correctness. Just set it + * to WB to keep consistent with host. So the same as item 3. + * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep + * consistent with host MTRR + */ + if (is_mmio) { + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + ipat = VMX_EPT_IPAT_BIT; + cache = MTRR_TYPE_WRBACK; + goto exit; + } + + if (kvm_read_cr0(vcpu) & X86_CR0_CD) { + ipat = VMX_EPT_IPAT_BIT; + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cache = MTRR_TYPE_WRBACK; + else + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); + +exit: + return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; +} + +static int vmx_get_lpage_level(void) +{ + if (enable_ept && !cpu_has_vmx_ept_1g_page()) + return PT_DIRECTORY_LEVEL; + else + /* For shadow and EPT supported 1GB page */ + return PT_PDPE_LEVEL; +} + +static void vmcs_set_secondary_exec_control(u32 new_ctl) +{ + /* + * These bits in the secondary execution controls field + * are dynamic, the others are mostly based on the hypervisor + * architecture and the guest's CPUID. Do not touch the + * dynamic bits. + */ + u32 mask = + SECONDARY_EXEC_SHADOW_VMCS | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_DESC; + + u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + (new_ctl & ~mask) | (cur_ctl & mask)); +} + +/* + * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits + * (indicating "allowed-1") if they are supported in the guest's CPUID. 
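+ * CR0 is left fully permissive; only CR4 bits are gated on guest
+ * CPUID below.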
+ */ +static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_cpuid_entry2 *entry; + + vmx->nested.msrs.cr0_fixed1 = 0xffffffff; + vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; + +#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ + if (entry && (entry->_reg & (_cpuid_mask))) \ + vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ +} while (0) + + entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); + cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); + cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); + cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); + cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); + cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); + cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); + cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); + cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); + cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); + cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); + cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); + cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); + cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); + cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); + + entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); + cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); + cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); + cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); + cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); + cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); + +#undef cr4_fixed1_update +} + +static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (kvm_mpx_supported()) { + bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); + + if (mpx_enabled) { + vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; + vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; + } else { + vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; + vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; + } + } +} + +static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_cpuid_entry2 *best = NULL; + int i; + + for (i = 0; i < PT_CPUID_LEAVES; i++) { + best = kvm_find_cpuid_entry(vcpu, 0x14, i); + if (!best) + return; + vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; + vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; + vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; + vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; + } + + /* Get the number of configurable Address Ranges for filtering */ + vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, + PT_CAP_num_address_ranges); + + /* Initialize and clear the no dependency bits */ + vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | + RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC); + + /* + * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise + * will inject an #GP + */ + if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) + vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; + + /* + * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and + * PSBFreq can be set + */ + if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) + vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | + 
RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
+
+	/*
+	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn, BranchEn and
+	 * MTCFreq can be set
+	 */
+	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
+		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
+				RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE);
+
+	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
+	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
+		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
+						RTIT_CTL_PTW_EN);
+
+	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
+	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
+		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
+
+	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
+	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
+		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
+
+	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
+	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
+		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
+
+	/* Unmask the address-range configuration bits */
+	for (i = 0; i < vmx->pt_desc.addr_range; i++)
+		vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4));
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (cpu_has_secondary_exec_ctrls()) {
+		vmx_compute_secondary_exec_control(vmx);
+		vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
+	}
+
+	if (nested_vmx_allowed(vcpu))
+		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+			FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+	else
+		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+	if (nested_vmx_allowed(vcpu)) {
+		nested_vmx_cr_fixed1_bits_update(vcpu);
+		nested_vmx_entry_exit_ctls_update(vcpu);
+	}
+
+	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
+			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
+		update_intel_pt_cfg(vcpu);
+}
+
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+	if (func == 1 && nested)
+		entry->ecx |= bit(X86_FEATURE_VMX);
+}
+
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+	to_vmx(vcpu)->req_immediate_exit = true;
+}
+
+static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
+	/*
+	 * RDPID causes #UD if disabled through secondary execution controls.
+	 * Because it is marked as EmulateOnUD, we need to intercept it here.
+	 */
+	if (info->intercept == x86_intercept_rdtscp &&
+	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+		ctxt->exception.vector = UD_VECTOR;
+		ctxt->exception.error_code_valid = false;
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	/* TODO: check more intercepts...
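+	 * Anything not handled above simply lets emulation continue.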
*/ + return X86EMUL_CONTINUE; +} + +#ifdef CONFIG_X86_64 +/* (a << shift) / divisor, return 1 if overflow otherwise 0 */ +static inline int u64_shl_div_u64(u64 a, unsigned int shift, + u64 divisor, u64 *result) +{ + u64 low = a << shift, high = a >> (64 - shift); + + /* To avoid the overflow on divq */ + if (high >= divisor) + return 1; + + /* Low hold the result, high hold rem which is discarded */ + asm("divq %2\n\t" : "=a" (low), "=d" (high) : + "rm" (divisor), "0" (low), "1" (high)); + *result = low; + + return 0; +} + +static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) +{ + struct vcpu_vmx *vmx; + u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; + + if (kvm_mwait_in_guest(vcpu->kvm)) + return -EOPNOTSUPP; + + vmx = to_vmx(vcpu); + tscl = rdtsc(); + guest_tscl = kvm_read_l1_tsc(vcpu, tscl); + delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; + lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns); + + if (delta_tsc > lapic_timer_advance_cycles) + delta_tsc -= lapic_timer_advance_cycles; + else + delta_tsc = 0; + + /* Convert to host delta tsc if tsc scaling is enabled */ + if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && + u64_shl_div_u64(delta_tsc, + kvm_tsc_scaling_ratio_frac_bits, + vcpu->arch.tsc_scaling_ratio, + &delta_tsc)) + return -ERANGE; + + /* + * If the delta tsc can't fit in the 32 bit after the multi shift, + * we can't use the preemption timer. + * It's possible that it fits on later vmentries, but checking + * on every vmentry is costly so we just use an hrtimer. + */ + if (delta_tsc >> (cpu_preemption_timer_multi + 32)) + return -ERANGE; + + vmx->hv_deadline_tsc = tscl + delta_tsc; + return delta_tsc == 0; +} + +static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) +{ + to_vmx(vcpu)->hv_deadline_tsc = -1; +} +#endif + +static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) +{ + if (!kvm_pause_in_guest(vcpu->kvm)) + shrink_ple_window(vcpu); +} + +static void vmx_slot_enable_log_dirty(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_mmu_slot_leaf_clear_dirty(kvm, slot); + kvm_mmu_slot_largepage_remove_write_access(kvm, slot); +} + +static void vmx_slot_disable_log_dirty(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_mmu_slot_set_dirty(kvm, slot); +} + +static void vmx_flush_log_dirty(struct kvm *kvm) +{ + kvm_flush_pml_buffers(kvm); +} + +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12; + struct vcpu_vmx *vmx = to_vmx(vcpu); + gpa_t gpa; + struct page *page = NULL; + u64 *pml_address; + + if (is_guest_mode(vcpu)) { + WARN_ON_ONCE(vmx->nested.pml_full); + + /* + * Check if PML is enabled for the nested guest. + * Whether eptp bit 6 is set is already checked + * as part of A/D emulation. 
+		 */
+		vmcs12 = get_vmcs12(vcpu);
+		if (!nested_cpu_has_pml(vmcs12))
+			return 0;
+
+		if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+			vmx->nested.pml_full = true;
+			return 1;
+		}
+
+		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+
+		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
+		if (is_error_page(page))
+			return 0;
+
+		pml_address = kmap(page);
+		pml_address[vmcs12->guest_pml_index--] = gpa;
+		kunmap(page);
+		kvm_release_page_clean(page);
+	}
+
+	return 0;
+}
+
+static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+					   struct kvm_memory_slot *memslot,
+					   gfn_t offset, unsigned long mask)
+{
+	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+}
+
+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+	struct pi_desc old, new;
+	unsigned int dest;
+
+	do {
+		old.control = new.control = pi_desc->control;
+		WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+		     "Wakeup handler not enabled while the VCPU is blocked\n");
+
+		dest = cpu_physical_id(vcpu->cpu);
+
+		if (x2apic_enabled())
+			new.ndst = dest;
+		else
+			new.ndst = (dest << 8) & 0xFF00;
+
+		/* set 'NV' to 'notification vector' */
+		new.nv = POSTED_INTR_VECTOR;
+	} while (cmpxchg64(&pi_desc->control, old.control,
+			   new.control) != old.control);
+
+	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+		list_del(&vcpu->blocked_vcpu_list);
+		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+		vcpu->pre_pcpu = -1;
+	}
+}
+
+/*
+ * This routine does the following things for a vCPU which is going
+ * to be blocked if VT-d PI is enabled.
+ * - Store the vCPU in the wakeup list, so that when interrupts happen
+ *   we can find the right vCPU to wake up.
+ * - Change the Posted-interrupt descriptor as below:
+ *      'NDST' <-- vcpu->pre_pcpu
+ *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
+ * - If 'ON' is set during this process, meaning at least one interrupt
+ *   is posted for this vCPU, we cannot block it; in this case, return 1,
+ *   otherwise return 0.
+ */
+static int pi_pre_block(struct kvm_vcpu *vcpu)
+{
+	unsigned int dest;
+	struct pi_desc old, new;
+	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
+	    !kvm_vcpu_apicv_active(vcpu))
+		return 0;
+
+	WARN_ON(irqs_disabled());
+	local_irq_disable();
+	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+		vcpu->pre_pcpu = vcpu->cpu;
+		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+		list_add_tail(&vcpu->blocked_vcpu_list,
+			      &per_cpu(blocked_vcpu_on_cpu,
+				       vcpu->pre_pcpu));
+		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+	}
+
+	do {
+		old.control = new.control = pi_desc->control;
+
+		WARN((pi_desc->sn == 1),
+		     "Warning: SN field of posted-interrupts "
+		     "is set before blocking\n");
+
+		/*
+		 * Since the vCPU can be preempted during this process,
+		 * vcpu->cpu could differ from pre_pcpu.  We need to set
+		 * pre_pcpu as the destination of the wakeup notification
+		 * event so that the wakeup handler can find the right vCPU
+		 * to wake up if an interrupt arrives while the vCPU is in
+		 * the blocked state.
+		 */
+		dest = cpu_physical_id(vcpu->pre_pcpu);
+
+		if (x2apic_enabled())
+			new.ndst = dest;
+		else
+			new.ndst = (dest << 8) & 0xFF00;
+
+		/* set 'NV' to 'wakeup vector' */
+		new.nv = POSTED_INTR_WAKEUP_VECTOR;
+	} while (cmpxchg64(&pi_desc->control, old.control,
+			   new.control) != old.control);
+
+	/* We should not block the vCPU if an interrupt is posted for it.
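+	 * __pi_post_block() reverts the wakeup-vector setup done above.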
*/ + if (pi_test_on(pi_desc) == 1) + __pi_post_block(vcpu); + + local_irq_enable(); + return (vcpu->pre_pcpu == -1); +} + +static int vmx_pre_block(struct kvm_vcpu *vcpu) +{ + if (pi_pre_block(vcpu)) + return 1; + + if (kvm_lapic_hv_timer_in_use(vcpu)) + kvm_lapic_switch_to_sw_timer(vcpu); + + return 0; +} + +static void pi_post_block(struct kvm_vcpu *vcpu) +{ + if (vcpu->pre_pcpu == -1) + return; + + WARN_ON(irqs_disabled()); + local_irq_disable(); + __pi_post_block(vcpu); + local_irq_enable(); +} + +static void vmx_post_block(struct kvm_vcpu *vcpu) +{ + if (kvm_x86_ops->set_hv_timer) + kvm_lapic_switch_to_hv_timer(vcpu); + + pi_post_block(vcpu); +} + +/* + * vmx_update_pi_irte - set IRTE for Posted-Interrupts + * + * @kvm: kvm + * @host_irq: host irq of the interrupt + * @guest_irq: gsi of the interrupt + * @set: set or unset PI + * returns 0 on success, < 0 on failure + */ +static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set) +{ + struct kvm_kernel_irq_routing_entry *e; + struct kvm_irq_routing_table *irq_rt; + struct kvm_lapic_irq irq; + struct kvm_vcpu *vcpu; + struct vcpu_data vcpu_info; + int idx, ret = 0; + + if (!kvm_arch_has_assigned_device(kvm) || + !irq_remapping_cap(IRQ_POSTING_CAP) || + !kvm_vcpu_apicv_active(kvm->vcpus[0])) + return 0; + + idx = srcu_read_lock(&kvm->irq_srcu); + irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } + + hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { + if (e->type != KVM_IRQ_ROUTING_MSI) + continue; + /* + * VT-d PI cannot post multicast/broadcast interrupts to + * a vCPU, so we keep using interrupt remapping for this + * kind of interrupt. + * + * For lowest-priority interrupts, we only support + * those with a single CPU as the destination, e.g. the + * user configures the interrupt via /proc/irq or uses + * irqbalance to make it single-CPU. + * + * Full lowest-priority interrupt support will be added later. + */ + + kvm_set_msi_irq(kvm, e, &irq); + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { + /* + * Make sure the IRTE is in remapped mode if + * we don't handle it in posted mode. 
+ */ + ret = irq_set_vcpu_affinity(host_irq, NULL); + if (ret < 0) { + printk(KERN_INFO + "failed to back to remapped mode, irq: %u\n", + host_irq); + goto out; + } + + continue; + } + + vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); + vcpu_info.vector = irq.vector; + + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, + vcpu_info.vector, vcpu_info.pi_desc_addr, set); + + if (set) + ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); + else + ret = irq_set_vcpu_affinity(host_irq, NULL); + + if (ret < 0) { + printk(KERN_INFO "%s: failed to update PI IRTE\n", + __func__); + goto out; + } + } + + ret = 0; +out: + srcu_read_unlock(&kvm->irq_srcu, idx); + return ret; +} + +static void vmx_setup_mce(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.mcg_cap & MCG_LMCE_P) + to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= + FEATURE_CONTROL_LMCE; + else + to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= + ~FEATURE_CONTROL_LMCE; +} + +static int vmx_smi_allowed(struct kvm_vcpu *vcpu) +{ + /* we need a nested vmexit to enter SMM, postpone if run is pending */ + if (to_vmx(vcpu)->nested.nested_run_pending) + return 0; + return 1; +} + +static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + vmx->nested.smm.guest_mode = is_guest_mode(vcpu); + if (vmx->nested.smm.guest_mode) + nested_vmx_vmexit(vcpu, -1, 0, 0); + + vmx->nested.smm.vmxon = vmx->nested.vmxon; + vmx->nested.vmxon = false; + vmx_clear_hlt(vcpu); + return 0; +} + +static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int ret; + + if (vmx->nested.smm.vmxon) { + vmx->nested.vmxon = true; + vmx->nested.smm.vmxon = false; + } + + if (vmx->nested.smm.guest_mode) { + vcpu->arch.hflags &= ~HF_SMM_MASK; + ret = nested_vmx_enter_non_root_mode(vcpu, false); + vcpu->arch.hflags |= HF_SMM_MASK; + if (ret) + return ret; + + vmx->nested.smm.guest_mode = false; + } + return 0; +} + +static int enable_smi_window(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static __init int hardware_setup(void) +{ + unsigned long host_bndcfgs; + int r, i; + + rdmsrl_safe(MSR_EFER, &host_efer); + + for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) + kvm_define_shared_msr(i, vmx_msr_index[i]); + + if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) + return -EIO; + + if (boot_cpu_has(X86_FEATURE_NX)) + kvm_enable_efer_bits(EFER_NX); + + if (boot_cpu_has(X86_FEATURE_MPX)) { + rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); + WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); + } + + if (boot_cpu_has(X86_FEATURE_XSAVES)) + rdmsrl(MSR_IA32_XSS, host_xss); + + if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || + !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) + enable_vpid = 0; + + if (!cpu_has_vmx_ept() || + !cpu_has_vmx_ept_4levels() || + !cpu_has_vmx_ept_mt_wb() || + !cpu_has_vmx_invept_global()) + enable_ept = 0; + + if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) + enable_ept_ad_bits = 0; + + if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) + enable_unrestricted_guest = 0; + + if (!cpu_has_vmx_flexpriority()) + flexpriority_enabled = 0; + + if (!cpu_has_virtual_nmis()) + enable_vnmi = 0; + + /* + * set_apic_access_page_addr() is used to reload apic access + * page upon invalidation. No need to do anything if not + * using the APIC_ACCESS_ADDR VMCS field. 
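hardware_setup() applies the same gating idiom throughout: when the CPU lacks a feature, the corresponding kvm_x86_ops callback is set to NULL and every caller tests the pointer before use. A reduced sketch of that shape, with made-up structure and function names:

#include <stdbool.h>
#include <stddef.h>

struct dirty_ops {
	void (*slot_enable)(void);
	void (*flush)(void);
};

static void pml_slot_enable(void) { /* program the PML buffer */ }
static void pml_flush(void)       { /* drain the PML buffer */ }

static struct dirty_ops ops = {
	.slot_enable = pml_slot_enable,
	.flush       = pml_flush,
};

static void setup(bool have_pml)
{
	if (!have_pml) {		/* feature absent: disable the hooks */
		ops.slot_enable = NULL;
		ops.flush = NULL;
	}
}

static void flush_log_dirty(void)
{
	if (ops.flush)			/* callers always test first */
		ops.flush();
}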
+ */ + if (!flexpriority_enabled) + kvm_x86_ops->set_apic_access_page_addr = NULL; + + if (!cpu_has_vmx_tpr_shadow()) + kvm_x86_ops->update_cr8_intercept = NULL; + + if (enable_ept && !cpu_has_vmx_ept_2m_page()) + kvm_disable_largepages(); + +#if IS_ENABLED(CONFIG_HYPERV) + if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH + && enable_ept) { + kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb; + kvm_x86_ops->tlb_remote_flush_with_range = + hv_remote_flush_tlb_with_range; + } +#endif + + if (!cpu_has_vmx_ple()) { + ple_gap = 0; + ple_window = 0; + ple_window_grow = 0; + ple_window_max = 0; + ple_window_shrink = 0; + } + + if (!cpu_has_vmx_apicv()) { + enable_apicv = 0; + kvm_x86_ops->sync_pir_to_irr = NULL; + } + + if (cpu_has_vmx_tsc_scaling()) { + kvm_has_tsc_control = true; + kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; + kvm_tsc_scaling_ratio_frac_bits = 48; + } + + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ + + if (enable_ept) + vmx_enable_tdp(); + else + kvm_disable_tdp(); + + /* + * Only enable PML when hardware supports PML feature, and both EPT + * and EPT A/D bit features are enabled -- PML depends on them to work. + */ + if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) + enable_pml = 0; + + if (!enable_pml) { + kvm_x86_ops->slot_enable_log_dirty = NULL; + kvm_x86_ops->slot_disable_log_dirty = NULL; + kvm_x86_ops->flush_log_dirty = NULL; + kvm_x86_ops->enable_log_dirty_pt_masked = NULL; + } + + if (!cpu_has_vmx_preemption_timer()) + kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; + + if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { + u64 vmx_msr; + + rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); + cpu_preemption_timer_multi = + vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; + } else { + kvm_x86_ops->set_hv_timer = NULL; + kvm_x86_ops->cancel_hv_timer = NULL; + } + + kvm_set_posted_intr_wakeup_handler(wakeup_handler); + + kvm_mce_cap_supported |= MCG_LMCE_P; + + if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) + return -EINVAL; + if (!enable_ept || !cpu_has_vmx_intel_pt()) + pt_mode = PT_MODE_SYSTEM; + + if (nested) { + nested_vmx_setup_ctls_msrs(&vmcs_config.nested, + vmx_capability.ept, enable_apicv); + + r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); + if (r) + return r; + } + + r = alloc_kvm_area(); + if (r) + nested_vmx_hardware_unsetup(); + return r; +} + +static __exit void hardware_unsetup(void) +{ + if (nested) + nested_vmx_hardware_unsetup(); + + free_kvm_area(); +} + +static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { + .cpu_has_kvm_support = cpu_has_kvm_support, + .disabled_by_bios = vmx_disabled_by_bios, + .hardware_setup = hardware_setup, + .hardware_unsetup = hardware_unsetup, + .check_processor_compatibility = vmx_check_processor_compat, + .hardware_enable = hardware_enable, + .hardware_disable = hardware_disable, + .cpu_has_accelerated_tpr = report_flexpriority, + .has_emulated_msr = vmx_has_emulated_msr, + + .vm_init = vmx_vm_init, + .vm_alloc = vmx_vm_alloc, + .vm_free = vmx_vm_free, + + .vcpu_create = vmx_create_vcpu, + .vcpu_free = vmx_free_vcpu, + .vcpu_reset = vmx_vcpu_reset, + + .prepare_guest_switch = vmx_prepare_switch_to_guest, + .vcpu_load = vmx_vcpu_load, + .vcpu_put = vmx_vcpu_put, + + .update_bp_intercept = update_exception_bitmap, + .get_msr_feature = vmx_get_msr_feature, + .get_msr = vmx_get_msr, + .set_msr = vmx_set_msr, + .get_segment_base = vmx_get_segment_base, + .get_segment = vmx_get_segment, + .set_segment = vmx_set_segment, + 
.get_cpl = vmx_get_cpl, + .get_cs_db_l_bits = vmx_get_cs_db_l_bits, + .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, + .decache_cr3 = vmx_decache_cr3, + .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, + .set_cr0 = vmx_set_cr0, + .set_cr3 = vmx_set_cr3, + .set_cr4 = vmx_set_cr4, + .set_efer = vmx_set_efer, + .get_idt = vmx_get_idt, + .set_idt = vmx_set_idt, + .get_gdt = vmx_get_gdt, + .set_gdt = vmx_set_gdt, + .get_dr6 = vmx_get_dr6, + .set_dr6 = vmx_set_dr6, + .set_dr7 = vmx_set_dr7, + .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, + .cache_reg = vmx_cache_reg, + .get_rflags = vmx_get_rflags, + .set_rflags = vmx_set_rflags, + + .tlb_flush = vmx_flush_tlb, + .tlb_flush_gva = vmx_flush_tlb_gva, + + .run = vmx_vcpu_run, + .handle_exit = vmx_handle_exit, + .skip_emulated_instruction = skip_emulated_instruction, + .set_interrupt_shadow = vmx_set_interrupt_shadow, + .get_interrupt_shadow = vmx_get_interrupt_shadow, + .patch_hypercall = vmx_patch_hypercall, + .set_irq = vmx_inject_irq, + .set_nmi = vmx_inject_nmi, + .queue_exception = vmx_queue_exception, + .cancel_injection = vmx_cancel_injection, + .interrupt_allowed = vmx_interrupt_allowed, + .nmi_allowed = vmx_nmi_allowed, + .get_nmi_mask = vmx_get_nmi_mask, + .set_nmi_mask = vmx_set_nmi_mask, + .enable_nmi_window = enable_nmi_window, + .enable_irq_window = enable_irq_window, + .update_cr8_intercept = update_cr8_intercept, + .set_virtual_apic_mode = vmx_set_virtual_apic_mode, + .set_apic_access_page_addr = vmx_set_apic_access_page_addr, + .get_enable_apicv = vmx_get_enable_apicv, + .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, + .load_eoi_exitmap = vmx_load_eoi_exitmap, + .apicv_post_state_restore = vmx_apicv_post_state_restore, + .hwapic_irr_update = vmx_hwapic_irr_update, + .hwapic_isr_update = vmx_hwapic_isr_update, + .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, + .sync_pir_to_irr = vmx_sync_pir_to_irr, + .deliver_posted_interrupt = vmx_deliver_posted_interrupt, + + .set_tss_addr = vmx_set_tss_addr, + .set_identity_map_addr = vmx_set_identity_map_addr, + .get_tdp_level = get_ept_level, + .get_mt_mask = vmx_get_mt_mask, + + .get_exit_info = vmx_get_exit_info, + + .get_lpage_level = vmx_get_lpage_level, + + .cpuid_update = vmx_cpuid_update, + + .rdtscp_supported = vmx_rdtscp_supported, + .invpcid_supported = vmx_invpcid_supported, + + .set_supported_cpuid = vmx_set_supported_cpuid, + + .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, + + .read_l1_tsc_offset = vmx_read_l1_tsc_offset, + .write_l1_tsc_offset = vmx_write_l1_tsc_offset, + + .set_tdp_cr3 = vmx_set_cr3, + + .check_intercept = vmx_check_intercept, + .handle_external_intr = vmx_handle_external_intr, + .mpx_supported = vmx_mpx_supported, + .xsaves_supported = vmx_xsaves_supported, + .umip_emulated = vmx_umip_emulated, + .pt_supported = vmx_pt_supported, + + .request_immediate_exit = vmx_request_immediate_exit, + + .sched_in = vmx_sched_in, + + .slot_enable_log_dirty = vmx_slot_enable_log_dirty, + .slot_disable_log_dirty = vmx_slot_disable_log_dirty, + .flush_log_dirty = vmx_flush_log_dirty, + .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, + .write_log_dirty = vmx_write_pml_buffer, + + .pre_block = vmx_pre_block, + .post_block = vmx_post_block, + + .pmu_ops = &intel_pmu_ops, + + .update_pi_irte = vmx_update_pi_irte, + +#ifdef CONFIG_X86_64 + .set_hv_timer = vmx_set_hv_timer, + .cancel_hv_timer = vmx_cancel_hv_timer, +#endif + + .setup_mce = vmx_setup_mce, + + .smi_allowed = vmx_smi_allowed, + .pre_enter_smm = vmx_pre_enter_smm, + 
.pre_leave_smm = vmx_pre_leave_smm, + .enable_smi_window = enable_smi_window, + + .check_nested_events = NULL, + .get_nested_state = NULL, + .set_nested_state = NULL, + .get_vmcs12_pages = NULL, + .nested_enable_evmcs = NULL, +}; + +static void vmx_cleanup_l1d_flush(void) +{ + if (vmx_l1d_flush_pages) { + free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); + vmx_l1d_flush_pages = NULL; + } + /* Restore state so sysfs ignores VMX */ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; +} + +static void vmx_exit(void) +{ +#ifdef CONFIG_KEXEC_CORE + RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); + synchronize_rcu(); +#endif + + kvm_exit(); + +#if IS_ENABLED(CONFIG_HYPERV) + if (static_branch_unlikely(&enable_evmcs)) { + int cpu; + struct hv_vp_assist_page *vp_ap; + /* + * Reset everything to support using non-enlightened VMCS + * access later (e.g. when we reload the module with + * enlightened_vmcs=0) + */ + for_each_online_cpu(cpu) { + vp_ap = hv_get_vp_assist_page(cpu); + + if (!vp_ap) + continue; + + vp_ap->current_nested_vmcs = 0; + vp_ap->enlighten_vmentry = 0; + } + + static_branch_disable(&enable_evmcs); + } +#endif + vmx_cleanup_l1d_flush(); +} +module_exit(vmx_exit); + +static int __init vmx_init(void) +{ + int r; + +#if IS_ENABLED(CONFIG_HYPERV) + /* + * Enlightened VMCS usage should be recommended and the host needs + * to support eVMCS v1 or above. We can also disable eVMCS support + * with module parameter. + */ + if (enlightened_vmcs && + ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && + (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= + KVM_EVMCS_VERSION) { + int cpu; + + /* Check that we have assist pages on all online CPUs */ + for_each_online_cpu(cpu) { + if (!hv_get_vp_assist_page(cpu)) { + enlightened_vmcs = false; + break; + } + } + + if (enlightened_vmcs) { + pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); + static_branch_enable(&enable_evmcs); + } + } else { + enlightened_vmcs = false; + } +#endif + + r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), + __alignof__(struct vcpu_vmx), THIS_MODULE); + if (r) + return r; + + /* + * Must be called after kvm_init() so enable_ept is properly set + * up. Hand the parameter mitigation value in which was stored in + * the pre module init parser. If no parameter was given, it will + * contain 'auto' which will be turned into the default 'cond' + * mitigation mode. 
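The "record the parameter early, act on it late" flow described in that comment can be sketched in a few lines. Names here are illustrative only; the real vmx.c setter also validates the string against a table of mitigation modes:

#include <stdio.h>
#include <string.h>

static char l1d_param[16] = "auto";	/* recorded by the param setter */

/* module-param setter: may run before init, so it only records the value */
static int l1d_param_set(const char *val)
{
	strncpy(l1d_param, val, sizeof(l1d_param) - 1);
	return 0;
}

/* called from module init, once the EPT configuration is known */
static void l1d_setup(void)
{
	if (!strcmp(l1d_param, "auto"))
		strcpy(l1d_param, "cond");	/* the default named above */
	printf("l1tf mitigation: %s\n", l1d_param);
}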
+ */ + if (boot_cpu_has(X86_BUG_L1TF)) { + r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); + if (r) { + vmx_exit(); + return r; + } + } + +#ifdef CONFIG_KEXEC_CORE + rcu_assign_pointer(crash_vmclear_loaded_vmcss, + crash_vmclear_local_loaded_vmcss); +#endif + vmx_check_vmcs12_offsets(); + + return 0; +} +module_init(vmx_init); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h new file mode 100644 index 000000000000..99328954c2fc --- /dev/null +++ b/arch/x86/kvm/vmx/vmx.h @@ -0,0 +1,519 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_H +#define __KVM_X86_VMX_H + +#include <linux/kvm_host.h> + +#include <asm/kvm.h> +#include <asm/intel_pt.h> + +#include "capabilities.h" +#include "ops.h" +#include "vmcs.h" + +extern const u32 vmx_msr_index[]; +extern u64 host_efer; + +#define MSR_TYPE_R 1 +#define MSR_TYPE_W 2 +#define MSR_TYPE_RW 3 + +#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) + +#define NR_AUTOLOAD_MSRS 8 + +struct vmx_msrs { + unsigned int nr; + struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; +}; + +struct shared_msr_entry { + unsigned index; + u64 data; + u64 mask; +}; + +enum segment_cache_field { + SEG_FIELD_SEL = 0, + SEG_FIELD_BASE = 1, + SEG_FIELD_LIMIT = 2, + SEG_FIELD_AR = 3, + + SEG_FIELD_NR = 4 +}; + +/* Posted-Interrupt Descriptor */ +struct pi_desc { + u32 pir[8]; /* Posted interrupt requested */ + union { + struct { + /* bit 256 - Outstanding Notification */ + u16 on : 1, + /* bit 257 - Suppress Notification */ + sn : 1, + /* bit 271:258 - Reserved */ + rsvd_1 : 14; + /* bit 279:272 - Notification Vector */ + u8 nv; + /* bit 287:280 - Reserved */ + u8 rsvd_2; + /* bit 319:288 - Notification Destination */ + u32 ndst; + }; + u64 control; + }; + u32 rsvd[6]; +} __aligned(64); + +#define RTIT_ADDR_RANGE 4 + +struct pt_ctx { + u64 ctl; + u64 status; + u64 output_base; + u64 output_mask; + u64 cr3_match; + u64 addr_a[RTIT_ADDR_RANGE]; + u64 addr_b[RTIT_ADDR_RANGE]; +}; + +struct pt_desc { + u64 ctl_bitmask; + u32 addr_range; + u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; + struct pt_ctx host; + struct pt_ctx guest; +}; + +/* + * The nested_vmx structure is part of vcpu_vmx, and holds information we need + * for correct emulation of VMX (i.e., nested VMX) on this vcpu. + */ +struct nested_vmx { + /* Has the level1 guest done vmxon? */ + bool vmxon; + gpa_t vmxon_ptr; + bool pml_full; + + /* The guest-physical address of the current VMCS L1 keeps for L2 */ + gpa_t current_vmptr; + /* + * Cache of the guest's VMCS, existing outside of guest memory. + * Loaded from guest memory during VMPTRLD. Flushed to guest + * memory during VMCLEAR and VMPTRLD. + */ + struct vmcs12 *cached_vmcs12; + /* + * Cache of the guest's shadow VMCS, existing outside of guest + * memory. Loaded from guest memory during VM entry. Flushed + * to guest memory during VM exit. + */ + struct vmcs12 *cached_shadow_vmcs12; + /* + * Indicates if the shadow vmcs or enlightened vmcs must be updated + * with the data held by struct vmcs12. + */ + bool need_vmcs12_sync; + bool dirty_vmcs12; + + /* + * vmcs02 has been initialized, i.e. state that is constant for + * vmcs02 has been written to the backing VMCS. Initialization + * is delayed until L1 actually attempts to run a nested VM. + */ + bool vmcs02_initialized; + + bool change_vmcs01_virtual_apic_mode; + + /* + * Enlightened VMCS has been enabled. It does not mean that L1 has to + * use it. However, VMX features available to L1 will be limited based + * on what the enlightened VMCS supports. 
+ */ + bool enlightened_vmcs_enabled; + + /* L2 must run next, and mustn't decide to exit to L1. */ + bool nested_run_pending; + + struct loaded_vmcs vmcs02; + + /* + * Guest pages referred to in the vmcs02 with host-physical + * pointers, so we must keep them pinned while L2 runs. + */ + struct page *apic_access_page; + struct page *virtual_apic_page; + struct page *pi_desc_page; + struct pi_desc *pi_desc; + bool pi_pending; + u16 posted_intr_nv; + + struct hrtimer preemption_timer; + bool preemption_timer_expired; + + /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ + u64 vmcs01_debugctl; + u64 vmcs01_guest_bndcfgs; + + u16 vpid02; + u16 last_vpid; + + struct nested_vmx_msrs msrs; + + /* SMM related state */ + struct { + /* in VMX operation on SMM entry? */ + bool vmxon; + /* in guest mode on SMM entry? */ + bool guest_mode; + } smm; + + gpa_t hv_evmcs_vmptr; + struct page *hv_evmcs_page; + struct hv_enlightened_vmcs *hv_evmcs; +}; + +struct vcpu_vmx { + struct kvm_vcpu vcpu; + unsigned long host_rsp; + u8 fail; + u8 msr_bitmap_mode; + u32 exit_intr_info; + u32 idt_vectoring_info; + ulong rflags; + struct shared_msr_entry *guest_msrs; + int nmsrs; + int save_nmsrs; + bool guest_msrs_dirty; + unsigned long host_idt_base; +#ifdef CONFIG_X86_64 + u64 msr_host_kernel_gs_base; + u64 msr_guest_kernel_gs_base; +#endif + + u64 arch_capabilities; + u64 spec_ctrl; + + u32 vm_entry_controls_shadow; + u32 vm_exit_controls_shadow; + u32 secondary_exec_control; + + /* + * loaded_vmcs points to the VMCS currently used in this vcpu. For a + * non-nested (L1) guest, it always points to vmcs01. For a nested + * guest (L2), it points to a different VMCS. loaded_cpu_state points + * to the VMCS whose state is loaded into the CPU registers that only + * need to be switched when transitioning to/from the kernel; a NULL + * value indicates that host state is loaded. + */ + struct loaded_vmcs vmcs01; + struct loaded_vmcs *loaded_vmcs; + struct loaded_vmcs *loaded_cpu_state; + bool __launched; /* temporary, used in vmx_vcpu_run */ + struct msr_autoload { + struct vmx_msrs guest; + struct vmx_msrs host; + } msr_autoload; + + struct { + int vm86_active; + ulong save_rflags; + struct kvm_segment segs[8]; + } rmode; + struct { + u32 bitmask; /* 4 bits per segment (1 bit per field) */ + struct kvm_save_segment { + u16 selector; + unsigned long base; + u32 limit; + u32 ar; + } seg[8]; + } segment_cache; + int vpid; + bool emulation_required; + + u32 exit_reason; + + /* Posted interrupt descriptor */ + struct pi_desc pi_desc; + + /* Support for a guest hypervisor (nested VMX) */ + struct nested_vmx nested; + + /* Dynamic PLE window. */ + int ple_window; + bool ple_window_dirty; + + bool req_immediate_exit; + + /* Support for PML */ +#define PML_ENTITY_NUM 512 + struct page *pml_pg; + + /* apic deadline value in host tsc */ + u64 hv_deadline_tsc; + + u64 current_tsc_ratio; + + u32 host_pkru; + + unsigned long host_debugctlmsr; + + /* + * Only bits masked by msr_ia32_feature_control_valid_bits can be set in + * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included + * in msr_ia32_feature_control_valid_bits. 
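The rule that comment states boils down to a mask test on every write. A hedged sketch with simplified types (the in-kernel check additionally refuses guest writes once FEATURE_CONTROL_LOCKED has been set):

#include <errno.h>
#include <stdint.h>

struct vcpu_state {
	uint64_t feature_control;
	uint64_t feature_control_valid_bits;	/* always contains LOCKED */
};

static int set_feature_control(struct vcpu_state *v, uint64_t data)
{
	if (data & ~v->feature_control_valid_bits)
		return -EINVAL;		/* KVM injects #GP here instead */
	v->feature_control = data;
	return 0;
}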
+ */ + u64 msr_ia32_feature_control; + u64 msr_ia32_feature_control_valid_bits; + u64 ept_pointer; + + struct pt_desc pt_desc; +}; + +enum ept_pointers_status { + EPT_POINTERS_CHECK = 0, + EPT_POINTERS_MATCH = 1, + EPT_POINTERS_MISMATCH = 2 +}; + +struct kvm_vmx { + struct kvm kvm; + + unsigned int tss_addr; + bool ept_identity_pagetable_done; + gpa_t ept_identity_map_addr; + + enum ept_pointers_status ept_pointers_match; + spinlock_t ept_pointer_lock; +}; + +bool nested_vmx_allowed(struct kvm_vcpu *vcpu); +void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void vmx_vcpu_put(struct kvm_vcpu *vcpu); +int allocate_vpid(void); +void free_vpid(int vpid); +void vmx_set_constant_host_state(struct vcpu_vmx *vmx); +void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); +int vmx_get_cpl(struct kvm_vcpu *vcpu); +unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu); +void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); +u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu); +void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask); +void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer); +void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); +void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); +int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +void set_cr4_guest_host_mask(struct vcpu_vmx *vmx); +void ept_save_pdptrs(struct kvm_vcpu *vcpu); +void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); +void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); +u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa); +void update_exception_bitmap(struct kvm_vcpu *vcpu); +void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); +bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); +void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); +void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); +struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr); +void pt_update_intercept_for_msr(struct vcpu_vmx *vmx); + +#define POSTED_INTR_ON 0 +#define POSTED_INTR_SN 1 + +static inline bool pi_test_and_set_on(struct pi_desc *pi_desc) +{ + return test_and_set_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); +} + +static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc) +{ + return test_and_clear_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); +} + +static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) +{ + return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); +} + +static inline void pi_clear_sn(struct pi_desc *pi_desc) +{ + return clear_bit(POSTED_INTR_SN, + (unsigned long *)&pi_desc->control); +} + +static inline void pi_set_sn(struct pi_desc *pi_desc) +{ + return set_bit(POSTED_INTR_SN, + (unsigned long *)&pi_desc->control); +} + +static inline void pi_clear_on(struct pi_desc *pi_desc) +{ + clear_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); +} + +static inline int pi_test_on(struct pi_desc *pi_desc) +{ + return test_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); +} + +static inline int pi_test_sn(struct pi_desc *pi_desc) +{ + return test_bit(POSTED_INTR_SN, + (unsigned long *)&pi_desc->control); +} + +static inline u8 vmx_get_rvi(void) +{ + return vmcs_read16(GUEST_INTR_STATUS) & 0xff; +} + +static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx) +{ + vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS); +} + +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) +{ + 
vmcs_write32(VM_ENTRY_CONTROLS, val); + vmx->vm_entry_controls_shadow = val; +} + +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) +{ + if (vmx->vm_entry_controls_shadow != val) + vm_entry_controls_init(vmx, val); +} + +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) +{ + return vmx->vm_entry_controls_shadow; +} + +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); +} + +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); +} + +static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx) +{ + vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS); +} + +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VM_EXIT_CONTROLS, val); + vmx->vm_exit_controls_shadow = val; +} + +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) +{ + if (vmx->vm_exit_controls_shadow != val) + vm_exit_controls_init(vmx, val); +} + +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) +{ + return vmx->vm_exit_controls_shadow; +} + +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); +} + +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); +} + +static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx) +{ + vmx->segment_cache.bitmask = 0; +} + +static inline u32 vmx_vmentry_ctrl(void) +{ + u32 vmentry_ctrl = vmcs_config.vmentry_ctrl; + if (pt_mode == PT_MODE_SYSTEM) + vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | VM_ENTRY_LOAD_IA32_RTIT_CTL); + /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ + return vmentry_ctrl & + ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER); +} + +static inline u32 vmx_vmexit_ctrl(void) +{ + u32 vmexit_ctrl = vmcs_config.vmexit_ctrl; + if (pt_mode == PT_MODE_SYSTEM) + vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | VM_EXIT_CLEAR_IA32_RTIT_CTL); + /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ + return vmexit_ctrl & + ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER); +} + +u32 vmx_exec_control(struct vcpu_vmx *vmx); + +static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm) +{ + return container_of(kvm, struct kvm_vmx, kvm); +} + +static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) +{ + return container_of(vcpu, struct vcpu_vmx, vcpu); +} + +static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) +{ + return &(to_vmx(vcpu)->pi_desc); +} + +struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu); +void free_vmcs(struct vmcs *vmcs); +int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); +void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); +void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs); +void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs); + +static inline struct vmcs *alloc_vmcs(bool shadow) +{ + return alloc_vmcs_cpu(shadow, raw_smp_processor_id()); +} + +u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa); + +static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid, + bool invalidate_gpa) +{ + if (enable_ept && (invalidate_gpa || !enable_vpid)) { + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + return; + ept_sync_context(construct_eptp(vcpu, + vcpu->arch.mmu->root_hpa)); + } else { + 
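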
vpid_sync_context(vpid); + } +} + +static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) +{ + __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); +} + +static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx) +{ + vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio; + vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); +} + +#endif /* __KVM_X86_VMX_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d02937760c3b..02c8e095a239 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -69,6 +69,7 @@ #include <asm/irq_remapping.h> #include <asm/mshyperv.h> #include <asm/hypervisor.h> +#include <asm/intel_pt.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -213,6 +214,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { u64 __read_mostly host_xcr0; +struct kmem_cache *x86_fpu_cache; +EXPORT_SYMBOL_GPL(x86_fpu_cache); + static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) @@ -1121,7 +1125,13 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES + MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES, + MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, + MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, + MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, + MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, + MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, + MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, }; static unsigned num_msrs_to_save; @@ -2426,6 +2436,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: break; case MSR_IA32_UCODE_REV: @@ -2721,6 +2732,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_BU_CFG2: case MSR_IA32_PERF_CTL: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: msr_info->data = 0; break; case MSR_F15H_PERF_CTL0 ... 
MSR_F15H_PERF_CTR5: @@ -2997,6 +3009,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_TLBFLUSH: case KVM_CAP_HYPERV_SEND_IPI: case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: + case KVM_CAP_HYPERV_CPUID: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: @@ -3008,7 +3021,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_TIME: case KVM_CAP_IOAPIC_POLARITY_IGNORED: case KVM_CAP_TSC_DEADLINE_TIMER: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_DISABLE_QUIRKS: case KVM_CAP_SET_BOOT_CPU_ID: case KVM_CAP_SPLIT_IRQCHIP: @@ -3630,7 +3642,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) { - struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; + struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; u64 xstate_bv = xsave->header.xfeatures; u64 valid; @@ -3672,7 +3684,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) { - struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; + struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); u64 valid; @@ -3720,7 +3732,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, fill_xsave((u8 *) guest_xsave->region, vcpu); } else { memcpy(guest_xsave->region, - &vcpu->arch.guest_fpu.state.fxsave, + &vcpu->arch.guest_fpu->state.fxsave, sizeof(struct fxregs_state)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XFEATURE_MASK_FPSSE; @@ -3750,7 +3762,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, if (xstate_bv & ~XFEATURE_MASK_FPSSE || mxcsr & ~mxcsr_feature_mask) return -EINVAL; - memcpy(&vcpu->arch.guest_fpu.state.fxsave, + memcpy(&vcpu->arch.guest_fpu->state.fxsave, guest_xsave->region, sizeof(struct fxregs_state)); } return 0; @@ -3828,6 +3840,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return kvm_hv_activate_synic(vcpu, cap->cap == KVM_CAP_HYPERV_SYNIC2); case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: + if (!kvm_x86_ops->nested_enable_evmcs) + return -ENOTTY; r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version); if (!r) { user_ptr = (void __user *)(uintptr_t)cap->args[0]; @@ -4190,6 +4204,25 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); break; } + case KVM_GET_SUPPORTED_HV_CPUID: { + struct kvm_cpuid2 __user *cpuid_arg = argp; + struct kvm_cpuid2 cpuid; + + r = -EFAULT; + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) + goto out; + + r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid, + cpuid_arg->entries); + if (r) + goto out; + + r = -EFAULT; + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) + goto out; + r = 0; + break; + } default: r = -EINVAL; } @@ -4394,7 +4427,34 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { - bool is_dirty = false; + bool flush = false; + int r; + + mutex_lock(&kvm->slots_lock); + + /* + * Flush potentially hardware-cached dirty pages to dirty_bitmap. + */ + if (kvm_x86_ops->flush_log_dirty) + kvm_x86_ops->flush_log_dirty(kvm); + + r = kvm_get_dirty_log_protect(kvm, log, &flush); + + /* + * All the TLBs can be flushed out of mmu lock, see the comments in + * kvm_mmu_slot_remove_write_access(). 
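From user space, the two handlers above sit behind the KVM_GET_DIRTY_LOG and, with the new API, KVM_CLEAR_DIRTY_LOG VM ioctls. A rough harvesting sketch, assuming a hypothetical VM fd whose memslot 0 spans npages guest pages:

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static void *fetch_dirty_bitmap(int vm_fd, unsigned long npages)
{
	struct kvm_dirty_log log;
	void *bitmap = calloc((npages + 63) / 64, 8);	/* 1 bit per page */

	if (!bitmap)
		return NULL;

	memset(&log, 0, sizeof(log));
	log.slot = 0;			/* hypothetical slot id */
	log.dirty_bitmap = bitmap;

	/* Kernel side: flush_log_dirty(), copy out, then re-protect. */
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;
}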
+ */ + lockdep_assert_held(&kvm->slots_lock); + if (flush) + kvm_flush_remote_tlbs(kvm); + + mutex_unlock(&kvm->slots_lock); + return r; +} + +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) +{ + bool flush = false; int r; mutex_lock(&kvm->slots_lock); @@ -4405,14 +4465,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) if (kvm_x86_ops->flush_log_dirty) kvm_x86_ops->flush_log_dirty(kvm); - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); + r = kvm_clear_dirty_log_protect(kvm, log, &flush); /* * All the TLBs can be flushed out of mmu lock, see the comments in * kvm_mmu_slot_remove_write_access(). */ lockdep_assert_held(&kvm->slots_lock); - if (is_dirty) + if (flush) kvm_flush_remote_tlbs(kvm); mutex_unlock(&kvm->slots_lock); @@ -4431,8 +4491,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, return 0; } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, - struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap) { int r; @@ -4765,15 +4825,6 @@ set_identity_unlock: r = 0; break; } - case KVM_ENABLE_CAP: { - struct kvm_enable_cap cap; - - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - goto out; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops->mem_enc_op) @@ -4842,6 +4893,30 @@ static void kvm_init_msr_list(void) if (!kvm_x86_ops->rdtscp_supported()) continue; break; + case MSR_IA32_RTIT_CTL: + case MSR_IA32_RTIT_STATUS: + if (!kvm_x86_ops->pt_supported()) + continue; + break; + case MSR_IA32_RTIT_CR3_MATCH: + if (!kvm_x86_ops->pt_supported() || + !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) + continue; + break; + case MSR_IA32_RTIT_OUTPUT_BASE: + case MSR_IA32_RTIT_OUTPUT_MASK: + if (!kvm_x86_ops->pt_supported() || + (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && + !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) + continue; + break; + case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: { + if (!kvm_x86_ops->pt_supported() || + msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >= + intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) + continue; + break; + } default: break; } @@ -6813,11 +6888,30 @@ int kvm_arch_init(void *opaque) goto out; } + /* + * KVM explicitly assumes that the guest has an FPU and + * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the + * vCPU's FPU state as a fxregs_state struct. 
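The FPU/FXSR prerequisite being enforced here corresponds to CPUID leaf 1, EDX bit 0 (FPU) and bit 24 (FXSR). A standalone x86 probe for the same two bits:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("FPU:  %s\n", (edx & (1u << 0))  ? "yes" : "no");
	printf("FXSR: %s\n", (edx & (1u << 24)) ? "yes" : "no");
	return 0;
}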
+ */ + if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { + printk(KERN_ERR "kvm: inadequate fpu\n"); + r = -EOPNOTSUPP; + goto out; + } + r = -ENOMEM; + x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu), + __alignof__(struct fpu), SLAB_ACCOUNT, + NULL); + if (!x86_fpu_cache) { + printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n"); + goto out; + } + shared_msrs = alloc_percpu(struct kvm_shared_msrs); if (!shared_msrs) { printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); - goto out; + goto out_free_x86_fpu_cache; } r = kvm_mmu_module_init(); @@ -6850,6 +6944,8 @@ int kvm_arch_init(void *opaque) out_free_percpu: free_percpu(shared_msrs); +out_free_x86_fpu_cache: + kmem_cache_destroy(x86_fpu_cache); out: return r; } @@ -6873,6 +6969,7 @@ void kvm_arch_exit(void) kvm_x86_ops = NULL; kvm_mmu_module_exit(); free_percpu(shared_msrs); + kmem_cache_destroy(x86_fpu_cache); } int kvm_vcpu_halt(struct kvm_vcpu *vcpu) @@ -7446,7 +7543,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm) static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { - if (!kvm_apic_hw_enabled(vcpu->arch.apic)) + if (!kvm_apic_present(vcpu)) return; bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); @@ -7996,9 +8093,9 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); + copy_fpregs_to_fpstate(¤t->thread.fpu); /* PKRU is separately restored in kvm_x86_ops->run. */ - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, ~XFEATURE_MASK_PKRU); preempt_enable(); trace_kvm_fpu(1); @@ -8008,8 +8105,8 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); - copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); + copy_fpregs_to_fpstate(vcpu->arch.guest_fpu); + copy_kernel_to_fpregs(¤t->thread.fpu.state); preempt_enable(); ++vcpu->stat.fpu_reload; trace_kvm_fpu(0); @@ -8503,7 +8600,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) vcpu_load(vcpu); - fxsave = &vcpu->arch.guest_fpu.state.fxsave; + fxsave = &vcpu->arch.guest_fpu->state.fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; @@ -8523,7 +8620,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) vcpu_load(vcpu); - fxsave = &vcpu->arch.guest_fpu.state.fxsave; + fxsave = &vcpu->arch.guest_fpu->state.fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; @@ -8579,9 +8676,9 @@ static int sync_regs(struct kvm_vcpu *vcpu) static void fx_init(struct kvm_vcpu *vcpu) { - fpstate_init(&vcpu->arch.guest_fpu.state); + fpstate_init(&vcpu->arch.guest_fpu->state); if (boot_cpu_has(X86_FEATURE_XSAVES)) - vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = + vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; /* @@ -8619,6 +8716,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { + vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); @@ -8705,11 +8803,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) */ if (init_event) kvm_put_guest_fpu(vcpu); - mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, + 
mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, XFEATURE_MASK_BNDREGS); if (mpx_state_buffer) memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state)); - mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, + mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, XFEATURE_MASK_BNDCSR); if (mpx_state_buffer) memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr)); @@ -8721,7 +8819,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) kvm_pmu_reset(vcpu); vcpu->arch.smbase = 0x30000; - vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; vcpu->arch.msr_misc_features_enables = 0; vcpu->arch.xcr0 = XFEATURE_MASK_FP; @@ -9280,7 +9377,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * with dirty logging disabled in order to eliminate unnecessary GPA * logging in PML buffer (and potential PML buffer full VMEXT). This * guarantees leaving PML enabled during guest's lifetime won't have - * any additonal overhead from PML when guest is running with dirty + * any additional overhead from PML when guest is running with dirty * logging disabled for memory slots. * * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index 225fe2f0bfec..cd84f067e41d 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -10,20 +10,9 @@ static int ptdump_show(struct seq_file *m, void *v) return 0; } -static int ptdump_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, ptdump_show, NULL); -} - -static const struct file_operations ptdump_fops = { - .owner = THIS_MODULE, - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump); -static int ptdump_show_curknl(struct seq_file *m, void *v) +static int ptdump_curknl_show(struct seq_file *m, void *v) { if (current->mm->pgd) { down_read(¤t->mm->mmap_sem); @@ -33,23 +22,12 @@ static int ptdump_show_curknl(struct seq_file *m, void *v) return 0; } -static int ptdump_open_curknl(struct inode *inode, struct file *filp) -{ - return single_open(filp, ptdump_show_curknl, NULL); -} - -static const struct file_operations ptdump_curknl_fops = { - .owner = THIS_MODULE, - .open = ptdump_open_curknl, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump_curknl); #ifdef CONFIG_PAGE_TABLE_ISOLATION static struct dentry *pe_curusr; -static int ptdump_show_curusr(struct seq_file *m, void *v) +static int ptdump_curusr_show(struct seq_file *m, void *v) { if (current->mm->pgd) { down_read(¤t->mm->mmap_sem); @@ -59,42 +37,20 @@ static int ptdump_show_curusr(struct seq_file *m, void *v) return 0; } -static int ptdump_open_curusr(struct inode *inode, struct file *filp) -{ - return single_open(filp, ptdump_show_curusr, NULL); -} - -static const struct file_operations ptdump_curusr_fops = { - .owner = THIS_MODULE, - .open = ptdump_open_curusr, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump_curusr); #endif #if defined(CONFIG_EFI) && defined(CONFIG_X86_64) static struct dentry *pe_efi; -static int ptdump_show_efi(struct seq_file *m, void *v) +static int ptdump_efi_show(struct seq_file *m, void *v) { if (efi_mm.pgd) ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false); return 0; } -static int ptdump_open_efi(struct inode *inode, struct file *filp) -{ - return single_open(filp, ptdump_show_efi, 
NULL); -} - -static const struct file_operations ptdump_efi_fops = { - .owner = THIS_MODULE, - .open = ptdump_open_efi, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump_efi); #endif static struct dentry *dir, *pe_knl, *pe_curknl; diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index fc37bbd23eb8..abcb8d00b014 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -55,10 +55,10 @@ struct addr_marker { enum address_markers_idx { USER_SPACE_NR = 0, KERNEL_SPACE_NR, - LOW_KERNEL_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) +#ifdef CONFIG_MODIFY_LDT_SYSCALL LDT_NR, #endif + LOW_KERNEL_NR, VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN @@ -66,9 +66,6 @@ enum address_markers_idx { KASAN_SHADOW_END_NR, #endif CPU_ENTRY_AREA_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) - LDT_NR, -#endif #ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, #endif @@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx) { #ifdef CONFIG_X86_64 /* - * ffff800000000000 - ffff87ffffffffff is reserved for - * the hypervisor. + * A hole in the beginning of kernel address space reserved + * for a hypervisor. */ - return (idx >= pgd_index(__PAGE_OFFSET) - 16) && - (idx < pgd_index(__PAGE_OFFSET)); + return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) && + (idx < pgd_index(GUARD_HOLE_END_ADDR)); #else return false; #endif diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 71d4b9d4d43f..2ff25ad33233 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -27,6 +27,7 @@ #include <asm/vm86.h> /* struct vm86 */ #include <asm/mmu_context.h> /* vma_pkey() */ #include <asm/efi.h> /* efi_recover_from_page_fault()*/ +#include <asm/desc.h> /* store_idt(), ... */ #define CREATE_TRACE_POINTS #include <asm/trace/exceptions.h> @@ -571,10 +572,55 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) return 0; } +static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) +{ + u32 offset = (index >> 3) * sizeof(struct desc_struct); + unsigned long addr; + struct ldttss_desc desc; + + if (index == 0) { + pr_alert("%s: NULL\n", name); + return; + } + + if (offset + sizeof(struct ldttss_desc) >= gdt->size) { + pr_alert("%s: 0x%hx -- out of bounds\n", name, index); + return; + } + + if (probe_kernel_read(&desc, (void *)(gdt->address + offset), + sizeof(struct ldttss_desc))) { + pr_alert("%s: 0x%hx -- GDT entry is not readable\n", + name, index); + return; + } + + addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); +#ifdef CONFIG_X86_64 + addr |= ((u64)desc.base3 << 32); +#endif + pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n", + name, index, addr, (desc.limit0 | (desc.limit1 << 16))); +} + +/* + * This helper function transforms the #PF error_code bits into + * "[PROT] [USER]" type of descriptive, almost human-readable error strings: + */ +static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt) +{ + if (error_code & mask) { + if (buf[0]) + strcat(buf, " "); + strcat(buf, txt); + } +} + static void -show_fault_oops(struct pt_regs *regs, unsigned long error_code, - unsigned long address) +show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) { + char err_txt[64]; + if (!oops_may_print()) return; @@ -602,6 +648,52 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, address < PAGE_SIZE ? 
"NULL pointer dereference" : "paging request", (void *)address); + err_txt[0] = 0; + + /* + * Note: length of these appended strings including the separation space and the + * zero delimiter must fit into err_txt[]. + */ + err_str_append(error_code, err_txt, X86_PF_PROT, "[PROT]" ); + err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]"); + err_str_append(error_code, err_txt, X86_PF_USER, "[USER]" ); + err_str_append(error_code, err_txt, X86_PF_RSVD, "[RSVD]" ); + err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]"); + err_str_append(error_code, err_txt, X86_PF_PK, "[PK]" ); + + pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]"); + + if (!(error_code & X86_PF_USER) && user_mode(regs)) { + struct desc_ptr idt, gdt; + u16 ldtr, tr; + + pr_alert("This was a system access from user code\n"); + + /* + * This can happen for quite a few reasons. The more obvious + * ones are faults accessing the GDT, or LDT. Perhaps + * surprisingly, if the CPU tries to deliver a benign or + * contributory exception from user code and gets a page fault + * during delivery, the page fault can be delivered as though + * it originated directly from user code. This could happen + * due to wrong permissions on the IDT, GDT, LDT, TSS, or + * kernel or IST stack. + */ + store_idt(&idt); + + /* Usable even on Xen PV -- it's just slow. */ + native_store_gdt(&gdt); + + pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n", + idt.address, idt.size, gdt.address, gdt.size); + + store_ldt(ldtr); + show_ldttss(&gdt, "LDTR", ldtr); + + store_tr(tr); + show_ldttss(&gdt, "TR", tr); + } + dump_pagetable(address); } @@ -621,16 +713,30 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code, tsk->comm, address); dump_pagetable(address); - tsk->thread.cr2 = address; - tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code; - if (__die("Bad pagetable", regs, error_code)) sig = 0; oops_end(flags, regs, sig); } +static void set_signal_archinfo(unsigned long address, + unsigned long error_code) +{ + struct task_struct *tsk = current; + + /* + * To avoid leaking information about the kernel page + * table layout, pretend that user-mode accesses to + * kernel addresses are always protection faults. + */ + if (address >= TASK_SIZE_MAX) + error_code |= X86_PF_PROT; + + tsk->thread.trap_nr = X86_TRAP_PF; + tsk->thread.error_code = error_code | X86_PF_USER; + tsk->thread.cr2 = address; +} + static noinline void no_context(struct pt_regs *regs, unsigned long error_code, unsigned long address, int signal, int si_code) @@ -639,6 +745,15 @@ no_context(struct pt_regs *regs, unsigned long error_code, unsigned long flags; int sig; + if (user_mode(regs)) { + /* + * This is an implicit supervisor-mode access from user + * mode. Bypass all the kernel-mode recovery code and just + * OOPS. + */ + goto oops; + } + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) { /* @@ -656,9 +771,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, * faulting through the emulate_vsyscall() logic. */ if (current->thread.sig_on_uaccess_err && signal) { - tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code | X86_PF_USER; - tsk->thread.cr2 = address; + set_signal_archinfo(address, error_code); /* XXX: hwpoison faults will set the wrong code. 
*/ force_sig_fault(signal, si_code, (void __user *)address, @@ -726,6 +839,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, if (IS_ENABLED(CONFIG_EFI)) efi_recover_from_page_fault(address); +oops: /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice: @@ -737,10 +851,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, if (task_stack_end_corrupted(tsk)) printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); - tsk->thread.cr2 = address; - tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code; - sig = SIGKILL; if (__die("Oops", regs, error_code)) sig = 0; @@ -794,7 +904,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; /* User mode accesses just cause a SIGSEGV */ - if (error_code & X86_PF_USER) { + if (user_mode(regs) && (error_code & X86_PF_USER)) { /* * It's possible to have interrupts off here: */ @@ -821,9 +931,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); - tsk->thread.cr2 = address; - tsk->thread.error_code = error_code; - tsk->thread.trap_nr = X86_TRAP_PF; + set_signal_archinfo(address, error_code); if (si_code == SEGV_PKUERR) force_sig_pkuerr((void __user *)address, pkey); @@ -937,9 +1045,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, if (is_prefetch(regs, error_code, address)) return; - tsk->thread.cr2 = address; - tsk->thread.error_code = error_code; - tsk->thread.trap_nr = X86_TRAP_PF; + set_signal_archinfo(address, error_code); #ifdef CONFIG_MEMORY_FAILURE if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { @@ -1148,23 +1254,6 @@ static int fault_in_kernel_space(unsigned long address) return address >= TASK_SIZE_MAX; } -static inline bool smap_violation(int error_code, struct pt_regs *regs) -{ - if (!IS_ENABLED(CONFIG_X86_SMAP)) - return false; - - if (!static_cpu_has(X86_FEATURE_SMAP)) - return false; - - if (error_code & X86_PF_USER) - return false; - - if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) - return false; - - return true; -} - /* * Called for all faults where 'address' is part of the kernel address * space. Might get called for faults that originate from *code* that @@ -1230,7 +1319,6 @@ void do_user_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, unsigned long address) { - unsigned long sw_error_code; struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; @@ -1252,10 +1340,16 @@ void do_user_addr_fault(struct pt_regs *regs, pgtable_bad(regs, hw_error_code, address); /* - * Check for invalid kernel (supervisor) access to user - * pages in the user address space. + * If SMAP is on, check for invalid kernel (supervisor) access to user + * pages in the user address space. The odd case here is WRUSS, + * which, according to the preliminary documentation, does not respect + * SMAP and will have the USER bit set so, in all cases, SMAP + * enforcement appears to be consistent with the USER bit. 
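The open-coded condition that replaces smap_violation() is three ANDed tests. Written out as a standalone predicate (EFLAGS.AC is bit 18):

#include <stdbool.h>

#define X86_PF_USER	(1UL << 2)
#define X86_EFLAGS_AC	(1UL << 18)

/* SMAP fault: supervisor access to a user page with EFLAGS.AC clear. */
static bool smap_fault(bool smap_enabled, unsigned long error_code,
		       unsigned long eflags)
{
	return smap_enabled &&
	       !(error_code & X86_PF_USER) &&
	       !(eflags & X86_EFLAGS_AC);
}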
*/ - if (unlikely(smap_violation(hw_error_code, regs))) { + if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) && + !(hw_error_code & X86_PF_USER) && + !(regs->flags & X86_EFLAGS_AC))) + { bad_area_nosemaphore(regs, hw_error_code, address); return; } @@ -1270,13 +1364,6 @@ void do_user_addr_fault(struct pt_regs *regs, } /* - * hw_error_code is literally the "page fault error code" passed to - * the kernel directly from the hardware. But, we will shortly be - * modifying it in software, so give it a new name. - */ - sw_error_code = hw_error_code; - - /* * It's safe to allow irq's after cr2 has been saved and the * vmalloc fault has been handled. * @@ -1285,26 +1372,6 @@ void do_user_addr_fault(struct pt_regs *regs, */ if (user_mode(regs)) { local_irq_enable(); - /* - * Up to this point, X86_PF_USER set in hw_error_code - * indicated a user-mode access. But, after this, - * X86_PF_USER in sw_error_code will indicate either - * that, *or* an implicit kernel(supervisor)-mode access - * which originated from user mode. - */ - if (!(hw_error_code & X86_PF_USER)) { - /* - * The CPU was in user mode, but the CPU says - * the fault was not a user-mode access. - * Must be an implicit kernel-mode access, - * which we do not expect to happen in the - * user address space. - */ - pr_warn_once("kernel-mode error from user-mode: %lx\n", - hw_error_code); - - sw_error_code |= X86_PF_USER; - } flags |= FAULT_FLAG_USER; } else { if (regs->flags & X86_EFLAGS_IF) @@ -1313,9 +1380,9 @@ void do_user_addr_fault(struct pt_regs *regs, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (sw_error_code & X86_PF_WRITE) + if (hw_error_code & X86_PF_WRITE) flags |= FAULT_FLAG_WRITE; - if (sw_error_code & X86_PF_INSTR) + if (hw_error_code & X86_PF_INSTR) flags |= FAULT_FLAG_INSTRUCTION; #ifdef CONFIG_X86_64 @@ -1328,7 +1395,7 @@ void do_user_addr_fault(struct pt_regs *regs, * The vsyscall page does not have a "real" VMA, so do this * emulation before we go searching for VMAs. */ - if ((sw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) { + if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) { if (emulate_vsyscall(regs, address)) return; } @@ -1344,18 +1411,15 @@ void do_user_addr_fault(struct pt_regs *regs, * Only do the expensive exception table search when we might be at * risk of a deadlock. This happens if we * 1. Failed to acquire mmap_sem, and - * 2. The access did not originate in userspace. Note: either the - * hardware or earlier page fault code may set X86_PF_USER - * in sw_error_code. + * 2. The access did not originate in userspace. */ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { - if (!(sw_error_code & X86_PF_USER) && - !search_exception_tables(regs->ip)) { + if (!user_mode(regs) && !search_exception_tables(regs->ip)) { /* * Fault from code in kernel from * which we do not expect faults. */ - bad_area_nosemaphore(regs, sw_error_code, address); + bad_area_nosemaphore(regs, hw_error_code, address); return; } retry: @@ -1371,29 +1435,17 @@ retry: vma = find_vma(mm, address); if (unlikely(!vma)) { - bad_area(regs, sw_error_code, address); + bad_area(regs, hw_error_code, address); return; } if (likely(vma->vm_start <= address)) goto good_area; if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - bad_area(regs, sw_error_code, address); + bad_area(regs, hw_error_code, address); return; } - if (sw_error_code & X86_PF_USER) { - /* - * Accessing the stack below %sp is always a bug. - * The large cushion allows instructions like enter - * and pusha to work. 
("enter $65535, $31" pushes - * 32 pointers and then decrements %sp by 65535.) - */ - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { - bad_area(regs, sw_error_code, address); - return; - } - } if (unlikely(expand_stack(vma, address))) { - bad_area(regs, sw_error_code, address); + bad_area(regs, hw_error_code, address); return; } @@ -1402,8 +1454,8 @@ retry: * we can handle it.. */ good_area: - if (unlikely(access_error(sw_error_code, vma))) { - bad_area_access_error(regs, sw_error_code, address, vma); + if (unlikely(access_error(hw_error_code, vma))) { + bad_area_access_error(regs, hw_error_code, address, vma); return; } @@ -1442,13 +1494,13 @@ good_area: return; /* Not returning to user mode? Handle exceptions or die: */ - no_context(regs, sw_error_code, address, SIGBUS, BUS_ADRERR); + no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR); return; } up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, sw_error_code, address, fault); + mm_fault_error(regs, hw_error_code, address, fault); return; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index ef99f3892e1f..427a955a2cf2 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void) pages = generic_max_swapfile_size(); - if (boot_cpu_has_bug(X86_BUG_L1TF)) { + if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) { /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ unsigned long long l1tf_limit = l1tf_pfn_limit(); /* diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 5fab264948c2..484c1b92f078 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -432,7 +432,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, E820_TYPE_RAM) && !e820__mapped_any(paddr & PAGE_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) - set_pte(pte, __pte(0)); + set_pte_safe(pte, __pte(0)); continue; } @@ -452,7 +452,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte); pages++; - set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); + set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE; } @@ -487,7 +487,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, E820_TYPE_RAM) && !e820__mapped_any(paddr & PMD_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) - set_pmd(pmd, __pmd(0)); + set_pmd_safe(pmd, __pmd(0)); continue; } @@ -524,7 +524,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, if (page_size_mask & (1<<PG_LEVEL_2M)) { pages++; spin_lock(&init_mm.page_table_lock); - set_pte((pte_t *)pmd, + set_pte_safe((pte_t *)pmd, pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT, __pgprot(pgprot_val(prot) | _PAGE_PSE))); spin_unlock(&init_mm.page_table_lock); @@ -536,7 +536,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot); spin_lock(&init_mm.page_table_lock); - pmd_populate_kernel(&init_mm, pmd, pte); + pmd_populate_kernel_safe(&init_mm, pmd, pte); spin_unlock(&init_mm.page_table_lock); } update_page_count(PG_LEVEL_2M, pages); @@ -573,7 +573,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, E820_TYPE_RAM) && !e820__mapped_any(paddr & PUD_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) - set_pud(pud, __pud(0)); + set_pud_safe(pud, 
__pud(0)); continue; } @@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, paddr_end, page_size_mask, prot); - __flush_tlb_all(); continue; } /* @@ -611,7 +610,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, if (page_size_mask & (1<<PG_LEVEL_1G)) { pages++; spin_lock(&init_mm.page_table_lock); - set_pte((pte_t *)pud, + set_pte_safe((pte_t *)pud, pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); spin_unlock(&init_mm.page_table_lock); @@ -624,10 +623,9 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, page_size_mask, prot); spin_lock(&init_mm.page_table_lock); - pud_populate(&init_mm, pud, pmd); + pud_populate_safe(&init_mm, pud, pmd); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); @@ -659,7 +657,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, E820_TYPE_RAM) && !e820__mapped_any(paddr & P4D_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) - set_p4d(p4d, __p4d(0)); + set_p4d_safe(p4d, __p4d(0)); continue; } @@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, paddr_last = phys_pud_init(pud, paddr, paddr_end, page_size_mask); - __flush_tlb_all(); continue; } @@ -677,10 +674,9 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, page_size_mask); spin_lock(&init_mm.page_table_lock); - p4d_populate(&init_mm, p4d, pud); + p4d_populate_safe(&init_mm, p4d, pud); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); return paddr_last; } @@ -723,9 +719,9 @@ kernel_physical_mapping_init(unsigned long paddr_start, spin_lock(&init_mm.page_table_lock); if (pgtable_l5_enabled()) - pgd_populate(&init_mm, pgd, p4d); + pgd_populate_safe(&init_mm, pgd, p4d); else - p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); + p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } @@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start, if (pgd_changed) sync_global_pgds(vaddr_start, vaddr_end - 1); - __flush_tlb_all(); - return paddr_last; } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 006f373f54ab..385afa2b9e17 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -381,13 +381,6 @@ void __init mem_encrypt_init(void) swiotlb_update_mem_attributes(); /* - * With SEV, DMA operations cannot use encryption, we need to use - * SWIOTLB to bounce buffer DMA operation. - */ - if (sev_active()) - dma_ops = &swiotlb_dma_ops; - - /* * With SEV, we need to unroll the rep string I/O instructions. 
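The init_64.c hunks above switch the boot-time page-table population to the _safe setter variants. Conceptually (a sketch; the real definitions live in arch/x86/include/asm/pgtable.h and follow this shape at each page-table level), a _safe setter is the plain setter plus a sanity check that an already-present entry is never silently repointed:

#define set_pte_safe(ptep, pte)						\
({									\
	/* Warn if a present PTE is changed to a different mapping. */	\
	WARN_ON_ONCE(pte_present(*(ptep)) && !pte_same(*(ptep), (pte)));\
	set_pte(ptep, pte);						\
})

The removed __flush_tlb_all() calls rested on the same invariant: entries populated here are new, so there are no stale TLB entries to flush, and the WARN above would catch any violation of that assumption.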
*/ if (sev_active()) diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h index 4e1f6e1b8159..319bde386d5f 100644 --- a/arch/x86/mm/mm_internal.h +++ b/arch/x86/mm/mm_internal.h @@ -19,4 +19,6 @@ extern int after_bootmem; void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache); +extern unsigned long tlb_single_page_flush_ceiling; + #endif /* __X86_MM_INTERNAL_H */ diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index 08f8f76a4852..facce271e8b9 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -23,7 +23,8 @@ static __read_mostly int print = 1; enum { - NTEST = 400, + NTEST = 3 * 100, + NPAGES = 100, #ifdef CONFIG_X86_64 LPS = (1 << PMD_SHIFT), #elif defined(CONFIG_X86_PAE) @@ -110,6 +111,9 @@ static int print_split(struct split_state *s) static unsigned long addr[NTEST]; static unsigned int len[NTEST]; +static struct page *pages[NPAGES]; +static unsigned long addrs[NPAGES]; + /* Change the global bit on random pages in the direct mapping */ static int pageattr_test(void) { @@ -120,7 +124,6 @@ static int pageattr_test(void) unsigned int level; int i, k; int err; - unsigned long test_addr; if (print) printk(KERN_INFO "CPA self-test:\n"); @@ -137,7 +140,7 @@ static int pageattr_test(void) unsigned long pfn = prandom_u32() % max_pfn_mapped; addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); - len[i] = prandom_u32() % 100; + len[i] = prandom_u32() % NPAGES; len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); if (len[i] == 0) @@ -167,14 +170,29 @@ static int pageattr_test(void) break; } __set_bit(pfn + k, bm); + addrs[k] = addr[i] + k*PAGE_SIZE; + pages[k] = pfn_to_page(pfn + k); } if (!addr[i] || !pte || !k) { addr[i] = 0; continue; } - test_addr = addr[i]; - err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0); + switch (i % 3) { + case 0: + err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0); + break; + + case 1: + err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1); + break; + + case 2: + err = cpa_set_pages_array(pages, len[i], PAGE_CPA_TEST); + break; + } + + if (err < 0) { printk(KERN_ERR "CPA %d failed %d\n", i, err); failed++; @@ -206,8 +224,7 @@ static int pageattr_test(void) failed++; continue; } - test_addr = addr[i]; - err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0); + err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0); if (err < 0) { printk(KERN_ERR "CPA reverting failed: %d\n", err); failed++; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index db7a10082238..4f8972311a77 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -26,6 +26,8 @@ #include <asm/pat.h> #include <asm/set_memory.h> +#include "mm_internal.h" + /* * The current flushing context - we pass it instead of 5 arguments: */ @@ -35,11 +37,11 @@ struct cpa_data { pgprot_t mask_set; pgprot_t mask_clr; unsigned long numpages; - int flags; + unsigned long curpage; unsigned long pfn; - unsigned force_split : 1, + unsigned int flags; + unsigned int force_split : 1, force_static_prot : 1; - int curpage; struct page **pages; }; @@ -228,19 +230,28 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) #endif +static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) +{ + if (cpa->flags & CPA_PAGES_ARRAY) { + struct page *page = cpa->pages[idx]; + + if (unlikely(PageHighMem(page))) + return 0; + + return (unsigned long)page_address(page); + } + + if (cpa->flags & CPA_ARRAY) + return cpa->vaddr[idx]; + + return *cpa->vaddr + idx * 
PAGE_SIZE; +} + /* * Flushing functions */ -/** - * clflush_cache_range - flush a cache range with clflush - * @vaddr: virtual start address - * @size: number of bytes to flush - * - * clflushopt is an unordered instruction which needs fencing with mfence or - * sfence to avoid ordering issues. - */ -void clflush_cache_range(void *vaddr, unsigned int size) +static void clflush_cache_range_opt(void *vaddr, unsigned int size) { const unsigned long clflush_size = boot_cpu_data.x86_clflush_size; void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1)); @@ -249,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size) if (p >= vend) return; - mb(); - for (; p < vend; p += clflush_size) clflushopt(p); +} +/** + * clflush_cache_range - flush a cache range with clflush + * @vaddr: virtual start address + * @size: number of bytes to flush + * + * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or + * SFENCE to avoid ordering issues. + */ +void clflush_cache_range(void *vaddr, unsigned int size) +{ + mb(); + clflush_cache_range_opt(vaddr, size); mb(); } EXPORT_SYMBOL_GPL(clflush_cache_range); @@ -285,79 +307,49 @@ static void cpa_flush_all(unsigned long cache) on_each_cpu(__cpa_flush_all, (void *) cache, 1); } -static bool __cpa_flush_range(unsigned long start, int numpages, int cache) +void __cpa_flush_tlb(void *data) { - BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); - - WARN_ON(PAGE_ALIGN(start) != start); - - if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) { - cpa_flush_all(cache); - return true; - } + struct cpa_data *cpa = data; + unsigned int i; - flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages); - - return !cache; + for (i = 0; i < cpa->numpages; i++) + __flush_tlb_one_kernel(__cpa_addr(cpa, i)); } -static void cpa_flush_range(unsigned long start, int numpages, int cache) +static void cpa_flush(struct cpa_data *data, int cache) { - unsigned int i, level; - unsigned long addr; + struct cpa_data *cpa = data; + unsigned int i; - if (__cpa_flush_range(start, numpages, cache)) - return; - - /* - * We only need to flush on one CPU, - * clflush is a MESI-coherent instruction that - * will cause all other CPUs to flush the same - * cachelines: - */ - for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) { - pte_t *pte = lookup_address(addr, &level); + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); - /* - * Only flush present addresses: - */ - if (pte && (pte_val(*pte) & _PAGE_PRESENT)) - clflush_cache_range((void *) addr, PAGE_SIZE); + if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) { + cpa_flush_all(cache); + return; } -} -static void cpa_flush_array(unsigned long baddr, unsigned long *start, - int numpages, int cache, - int in_flags, struct page **pages) -{ - unsigned int i, level; + if (cpa->numpages <= tlb_single_page_flush_ceiling) + on_each_cpu(__cpa_flush_tlb, cpa, 1); + else + flush_tlb_all(); - if (__cpa_flush_range(baddr, numpages, cache)) + if (!cache) return; - /* - * We only need to flush on one CPU, - * clflush is a MESI-coherent instruction that - * will cause all other CPUs to flush the same - * cachelines: - */ - for (i = 0; i < numpages; i++) { - unsigned long addr; - pte_t *pte; - - if (in_flags & CPA_PAGES_ARRAY) - addr = (unsigned long)page_address(pages[i]); - else - addr = start[i]; + mb(); + for (i = 0; i < cpa->numpages; i++) { + unsigned long addr = __cpa_addr(cpa, i); + unsigned int level; - pte = lookup_address(addr, &level); + pte_t *pte = lookup_address(addr, &level); /* * Only flush 
present addresses: */ if (pte && (pte_val(*pte) & _PAGE_PRESENT)) - clflush_cache_range((void *)addr, PAGE_SIZE); + clflush_cache_range_opt((void *)addr, PAGE_SIZE); } + mb(); } static bool overlaps(unsigned long r1_start, unsigned long r1_end, @@ -1468,15 +1460,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) unsigned int level; pte_t *kpte, old_pte; - if (cpa->flags & CPA_PAGES_ARRAY) { - struct page *page = cpa->pages[cpa->curpage]; - if (unlikely(PageHighMem(page))) - return 0; - address = (unsigned long)page_address(page); - } else if (cpa->flags & CPA_ARRAY) - address = cpa->vaddr[cpa->curpage]; - else - address = *cpa->vaddr; + address = __cpa_addr(cpa, cpa->curpage); repeat: kpte = _lookup_address_cpa(cpa, address, &level); if (!kpte) @@ -1557,22 +1541,14 @@ static int cpa_process_alias(struct cpa_data *cpa) * No need to redo, when the primary call touched the direct * mapping already: */ - if (cpa->flags & CPA_PAGES_ARRAY) { - struct page *page = cpa->pages[cpa->curpage]; - if (unlikely(PageHighMem(page))) - return 0; - vaddr = (unsigned long)page_address(page); - } else if (cpa->flags & CPA_ARRAY) - vaddr = cpa->vaddr[cpa->curpage]; - else - vaddr = *cpa->vaddr; - + vaddr = __cpa_addr(cpa, cpa->curpage); if (!(within(vaddr, PAGE_OFFSET, PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { alias_cpa = *cpa; alias_cpa.vaddr = &laddr; alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; ret = __change_page_attr_set_clr(&alias_cpa, 0); if (ret) @@ -1592,6 +1568,7 @@ static int cpa_process_alias(struct cpa_data *cpa) alias_cpa = *cpa; alias_cpa.vaddr = &temp_cpa_vaddr; alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; /* * The high mapping range is imprecise, so ignore the @@ -1607,14 +1584,15 @@ static int cpa_process_alias(struct cpa_data *cpa) static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) { unsigned long numpages = cpa->numpages; - int ret; + unsigned long rempages = numpages; + int ret = 0; - while (numpages) { + while (rempages) { /* * Store the remaining nr of pages for the large page * preservation check. */ - cpa->numpages = numpages; + cpa->numpages = rempages; /* for array changes, we can't use large page */ if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) cpa->numpages = 1; @@ -1625,12 +1603,12 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) if (!debug_pagealloc_enabled()) spin_unlock(&cpa_lock); if (ret) - return ret; + goto out; if (checkalias) { ret = cpa_process_alias(cpa); if (ret) - return ret; + goto out; } /* @@ -1638,15 +1616,15 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) * CPA operation. Either a large page has been * preserved or a single page update happened. 
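The pageattr.c hunks above funnel all three CPA addressing modes through the new __cpa_addr() helper instead of open-coding them at every call site. An illustrative sketch of what each mode resolves to (names from the patch; the variables and array sizes are arbitrary examples):

struct cpa_data cpa = { 0 };
unsigned long base, addr_array[4];
struct page *page_array[4];

/* Plain range: __cpa_addr(&cpa, idx) == *cpa.vaddr + idx * PAGE_SIZE */
cpa.vaddr = &base;
cpa.flags = 0;

/* CPA_ARRAY: __cpa_addr(&cpa, idx) == cpa.vaddr[idx] */
cpa.vaddr = addr_array;
cpa.flags = CPA_ARRAY;

/* CPA_PAGES_ARRAY: page_address(cpa.pages[idx]), or 0 for a highmem page */
cpa.pages = page_array;
cpa.flags = CPA_PAGES_ARRAY;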
*/ - BUG_ON(cpa->numpages > numpages || !cpa->numpages); - numpages -= cpa->numpages; - if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) - cpa->curpage++; - else - *cpa->vaddr += cpa->numpages * PAGE_SIZE; - + BUG_ON(cpa->numpages > rempages || !cpa->numpages); + rempages -= cpa->numpages; + cpa->curpage += cpa->numpages; } - return 0; + +out: + /* Restore the original numpages */ + cpa->numpages = numpages; + return ret; } /* @@ -1679,7 +1657,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, { struct cpa_data cpa; int ret, cache, checkalias; - unsigned long baddr = 0; memset(&cpa, 0, sizeof(cpa)); @@ -1704,7 +1681,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, } else if (!(in_flag & CPA_PAGES_ARRAY)) { /* * in_flag of CPA_PAGES_ARRAY implies it is aligned. - * No need to cehck in that case + * No need to check in that case */ if (*addr & ~PAGE_MASK) { *addr &= PAGE_MASK; @@ -1713,11 +1690,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, */ WARN_ON_ONCE(1); } - /* - * Save address for cache flush. *addr is modified in the call - * to __change_page_attr_set_clr() below. - */ - baddr = make_addr_canonical_again(*addr); } /* Must avoid aliasing mappings in the highmem code */ @@ -1765,13 +1737,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, goto out; } - if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { - cpa_flush_array(baddr, addr, numpages, cache, - cpa.flags, pages); - } else { - cpa_flush_range(baddr, numpages, cache); - } - + cpa_flush(&cpa, cache); out: return ret; } @@ -1842,14 +1808,14 @@ out_err: } EXPORT_SYMBOL(set_memory_uc); -static int _set_memory_array(unsigned long *addr, int addrinarray, +static int _set_memory_array(unsigned long *addr, int numpages, enum page_cache_mode new_type) { enum page_cache_mode set_type; int i, j; int ret; - for (i = 0; i < addrinarray; i++) { + for (i = 0; i < numpages; i++) { ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, new_type, NULL); if (ret) @@ -1860,11 +1826,11 @@ static int _set_memory_array(unsigned long *addr, int addrinarray, set_type = (new_type == _PAGE_CACHE_MODE_WC) ? 
_PAGE_CACHE_MODE_UC_MINUS : new_type; - ret = change_page_attr_set(addr, addrinarray, + ret = change_page_attr_set(addr, numpages, cachemode2pgprot(set_type), 1); if (!ret && new_type == _PAGE_CACHE_MODE_WC) - ret = change_page_attr_set_clr(addr, addrinarray, + ret = change_page_attr_set_clr(addr, numpages, cachemode2pgprot( _PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), @@ -1881,36 +1847,34 @@ out_free: return ret; } -int set_memory_array_uc(unsigned long *addr, int addrinarray) +int set_memory_array_uc(unsigned long *addr, int numpages) { - return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); + return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_UC_MINUS); } EXPORT_SYMBOL(set_memory_array_uc); -int set_memory_array_wc(unsigned long *addr, int addrinarray) +int set_memory_array_wc(unsigned long *addr, int numpages) { - return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); + return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WC); } EXPORT_SYMBOL(set_memory_array_wc); -int set_memory_array_wt(unsigned long *addr, int addrinarray) +int set_memory_array_wt(unsigned long *addr, int numpages) { - return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT); + return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WT); } EXPORT_SYMBOL_GPL(set_memory_array_wt); int _set_memory_wc(unsigned long addr, int numpages) { int ret; - unsigned long addr_copy = addr; ret = change_page_attr_set(&addr, numpages, cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), 0); if (!ret) { - ret = change_page_attr_set_clr(&addr_copy, numpages, - cachemode2pgprot( - _PAGE_CACHE_MODE_WC), + ret = change_page_attr_set_clr(&addr, numpages, + cachemode2pgprot(_PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), 0, 0, NULL); } @@ -1977,18 +1941,18 @@ int set_memory_wb(unsigned long addr, int numpages) } EXPORT_SYMBOL(set_memory_wb); -int set_memory_array_wb(unsigned long *addr, int addrinarray) +int set_memory_array_wb(unsigned long *addr, int numpages) { int i; int ret; /* WB cache mode is hard wired to all cache attribute bits being 0 */ - ret = change_page_attr_clear(addr, addrinarray, + ret = change_page_attr_clear(addr, numpages, __pgprot(_PAGE_CACHE_MASK), 1); if (ret) return ret; - for (i = 0; i < addrinarray; i++) + for (i = 0; i < numpages; i++) free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); return 0; @@ -2058,7 +2022,6 @@ int set_memory_global(unsigned long addr, int numpages) static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) { struct cpa_data cpa; - unsigned long start; int ret; /* Nothing to do if memory encryption is not active */ @@ -2069,8 +2032,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr)) addr &= PAGE_MASK; - start = addr; - memset(&cpa, 0, sizeof(cpa)); cpa.vaddr = &addr; cpa.numpages = numpages; @@ -2085,18 +2046,18 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) /* * Before changing the encryption attribute, we need to flush caches. */ - cpa_flush_range(start, numpages, 1); + cpa_flush(&cpa, 1); ret = __change_page_attr_set_clr(&cpa, 1); /* - * After changing the encryption attribute, we need to flush TLBs - * again in case any speculative TLB caching occurred (but no need - * to flush caches again). We could just use cpa_flush_all(), but - * in case TLB flushing gets optimized in the cpa_flush_range() - * path use the same logic as above. 
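With cpa_flush_range() gone, the resulting sequence in __set_memory_enc_dec() is worth spelling out, since the ordering is load-bearing (a sketch of the flow after this change, not additional patch content):

cpa_flush(&cpa, 1);		/* 1) cache + TLB flush: dirty lines must not
				 *    be written back with the stale C-bit */
ret = __change_page_attr_set_clr(&cpa, 1);
				/* 2) flip the encryption bit in the PTEs */
cpa_flush(&cpa, 0);		/* 3) TLB-only flush: drop speculatively cached
				 *    translations; caches are already clean */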
+ * After changing the encryption attribute, we need to flush TLBs again + * in case any speculative TLB caching occurred (but no need to flush + * caches again). We could just use cpa_flush_all(), but in case TLB + * flushing gets optimized in the cpa_flush() path use the same logic + * as above. */ - cpa_flush_range(start, numpages, 0); + cpa_flush(&cpa, 0); return ret; } @@ -2121,7 +2082,7 @@ int set_pages_uc(struct page *page, int numpages) } EXPORT_SYMBOL(set_pages_uc); -static int _set_pages_array(struct page **pages, int addrinarray, +static int _set_pages_array(struct page **pages, int numpages, enum page_cache_mode new_type) { unsigned long start; @@ -2131,7 +2092,7 @@ static int _set_pages_array(struct page **pages, int addrinarray, int free_idx; int ret; - for (i = 0; i < addrinarray; i++) { + for (i = 0; i < numpages; i++) { if (PageHighMem(pages[i])) continue; start = page_to_pfn(pages[i]) << PAGE_SHIFT; @@ -2144,10 +2105,10 @@ static int _set_pages_array(struct page **pages, int addrinarray, set_type = (new_type == _PAGE_CACHE_MODE_WC) ? _PAGE_CACHE_MODE_UC_MINUS : new_type; - ret = cpa_set_pages_array(pages, addrinarray, + ret = cpa_set_pages_array(pages, numpages, cachemode2pgprot(set_type)); if (!ret && new_type == _PAGE_CACHE_MODE_WC) - ret = change_page_attr_set_clr(NULL, addrinarray, + ret = change_page_attr_set_clr(NULL, numpages, cachemode2pgprot( _PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), @@ -2167,21 +2128,21 @@ err_out: return -EINVAL; } -int set_pages_array_uc(struct page **pages, int addrinarray) +int set_pages_array_uc(struct page **pages, int numpages) { - return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); + return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS); } EXPORT_SYMBOL(set_pages_array_uc); -int set_pages_array_wc(struct page **pages, int addrinarray) +int set_pages_array_wc(struct page **pages, int numpages) { - return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); + return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC); } EXPORT_SYMBOL(set_pages_array_wc); -int set_pages_array_wt(struct page **pages, int addrinarray) +int set_pages_array_wt(struct page **pages, int numpages) { - return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT); + return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT); } EXPORT_SYMBOL_GPL(set_pages_array_wt); @@ -2193,7 +2154,7 @@ int set_pages_wb(struct page *page, int numpages) } EXPORT_SYMBOL(set_pages_wb); -int set_pages_array_wb(struct page **pages, int addrinarray) +int set_pages_array_wb(struct page **pages, int numpages) { int retval; unsigned long start; @@ -2201,12 +2162,12 @@ int set_pages_array_wb(struct page **pages, int addrinarray) int i; /* WB cache mode is hard wired to all cache attribute bits being 0 */ - retval = cpa_clear_pages_array(pages, addrinarray, + retval = cpa_clear_pages_array(pages, numpages, __pgprot(_PAGE_CACHE_MASK)); if (retval) return retval; - for (i = 0; i < addrinarray; i++) { + for (i = 0; i < numpages; i++) { if (PageHighMem(pages[i])) continue; start = page_to_pfn(pages[i]) << PAGE_SHIFT; @@ -2338,8 +2299,8 @@ bool kernel_page_present(struct page *page) #endif /* CONFIG_DEBUG_PAGEALLOC */ -int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, - unsigned numpages, unsigned long page_flags) +int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags) { int retval = -EINVAL; @@ -2353,6 +2314,8 @@ int kernel_map_pages_in_pgd(pgd_t 
*pgd, u64 pfn, unsigned long address, .flags = 0, }; + WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP"); + if (!(__supported_pte_mask & _PAGE_NX)) goto out; @@ -2375,6 +2338,40 @@ out: } /* + * __flush_tlb_all() flushes mappings only on current CPU and hence this + * function shouldn't be used in an SMP environment. Presently, it's used only + * during boot (way before smp_init()) by EFI subsystem and hence is ok. + */ +int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, + unsigned long numpages) +{ + int retval; + + /* + * The typical sequence for unmapping is to find a pte through + * lookup_address_in_pgd() (ideally, it should never return NULL because + * the address is already mapped) and change its protections. As pfn is + * the *target* of a mapping, it's not useful while unmapping. + */ + struct cpa_data cpa = { + .vaddr = &address, + .pfn = 0, + .pgd = pgd, + .numpages = numpages, + .mask_set = __pgprot(0), + .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), + .flags = 0, + }; + + WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP"); + + retval = __change_page_attr_set_clr(&cpa, 0); + __flush_tlb_all(); + + return retval; +} + +/* * The testcases use internal knowledge of the implementation that shouldn't * be exposed to the rest of the kernel. Include these directly here. */ diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 08013524fba1..4fe956a63b25 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address) * for a "decoy" virtual address (bit 63 clear) passed to * set_memory_X(). __pa() on a "decoy" address results in a * physical address with bit 63 set. + * + * Decoy addresses are not present for 32-bit builds, see + * set_mce_nospec(). */ - return address & __PHYSICAL_MASK; + if (IS_ENABLED(CONFIG_X86_64)) + return address & __PHYSICAL_MASK; + return address; } /* @@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, start = sanitize_phys(start); end = sanitize_phys(end); - BUG_ON(start >= end); /* end is exclusive */ + if (start >= end) { + WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, + start, end - 1, cattr_name(req_type)); + return -EINVAL; + } if (!pat_enabled()) { /* This is identical to page table setting without PAT */ diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 6e98e0a7c923..047a77f6a10c 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -131,6 +131,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey * in the process's lifetime will not accidentally get access * to data which is pkey-protected later on. */ +static u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) | PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) | PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) | diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 03b6b4c2238d..999d6d8f0bef 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -15,6 +15,8 @@ #include <asm/apic.h> #include <asm/uv/uv.h> +#include "mm_internal.h" + /* * TLB flushing, formerly SMP-only * c/o Linus Torvalds. @@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask, * * This is in units of pages. 
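Making tlb_single_page_flush_ceiling visible through mm_internal.h lets cpa_flush() reuse the cost heuristic that the generic TLB code already applies; in sketch form (names as in the hunks above):

if (cpa->numpages <= tlb_single_page_flush_ceiling)
	on_each_cpu(__cpa_flush_tlb, cpa, 1);	/* one INVLPG per page */
else
	flush_tlb_all();	/* beyond ~33 pages a full flush is cheaper */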
*/ -static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; +unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int stride_shift, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 2580cd2e98b1..5542303c43d9 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1181,6 +1181,8 @@ out_image: } if (!image || !prog->is_func || extra_pass) { + if (image) + bpf_prog_fill_jited_linfo(prog, addrs); out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 8cd66152cdb0..9df652d3d927 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev) { struct pcibios_fwaddrmap *map; - WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock)); + lockdep_assert_held(&pcibios_fwaddrmap_lock); list_for_each_entry(map, &pcibios_fwaddrmappings, list) if (map->dev == dev) diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 7a5bafb76d77..3cdafea55ab6 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -168,7 +168,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev) return; pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); - pdev->dev.dma_ops = &swiotlb_dma_ops; pdev->dev.archdata.is_sta2x11 = true; /* We must enable all devices as master, for audio DMA to work */ diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index ce4b06733c09..b3233b1835ea 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value) } static void ce4100_serial_fixup(int port, struct uart_port *up, - u32 *capabilites) + u32 *capabilities) { #ifdef CONFIG_EARLY_PRINTK /* @@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up, up->serial_in = ce4100_mem_serial_in; up->serial_out = ce4100_mem_serial_out; - *capabilites |= (1 << 12); + *capabilities |= (1 << 12); } static __init void sdv_serial_fixup(void) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 7ae939e353cd..e1cb01a22fa8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -993,6 +993,8 @@ static void __init __efi_enter_virtual_mode(void) panic("EFI call to SetVirtualAddressMap() failed!"); } + efi_free_boot_services(); + /* * Now that EFI is in virtual mode, update the function * pointers in the runtime service table to the new virtual addresses. diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 95e77a667ba5..17456a1d3f04 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -369,6 +369,40 @@ void __init efi_reserve_boot_services(void) } } +/* + * Apart from having VA mappings for EFI boot services code/data regions, + * (duplicate) 1:1 mappings were also created as a quirk for buggy firmware. So, + * unmap both 1:1 and VA mappings. + */ +static void __init efi_unmap_pages(efi_memory_desc_t *md) +{ + pgd_t *pgd = efi_mm.pgd; + u64 pa = md->phys_addr; + u64 va = md->virt_addr; + + /* + * To Do: Remove this check after adding functionality to unmap EFI boot + * services code/data regions from direct mapping area because + * "efi=old_map" maps EFI regions in swapper_pg_dir. 
+ */ + if (efi_enabled(EFI_OLD_MEMMAP)) + return; + + /* + * EFI mixed mode has all RAM mapped to access arguments while making + * EFI runtime calls, hence don't unmap EFI boot services code/data + * regions. + */ + if (!efi_is_native()) + return; + + if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages)) + pr_err("Failed to unmap 1:1 mapping for 0x%llx\n", pa); + + if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages)) + pr_err("Failed to unmap VA mapping for 0x%llx\n", va); +} + void __init efi_free_boot_services(void) { phys_addr_t new_phys, new_size; @@ -394,6 +428,13 @@ void __init efi_free_boot_services(void) } /* + * Before calling set_virtual_address_map(), EFI boot services + * code/data regions were mapped as a quirk for buggy firmware. + * Unmap them from efi_pgd before freeing them up. + */ + efi_unmap_pages(md); + + /* * Nasty quirk: if all sub-1MB memory is used for boot * services, we can get here without having allocated the * real mode trampoline. It's too late to hand boot services diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c index dbfc5cf2aa93..96f438d4b026 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c @@ -1,5 +1,5 @@ /* - * platform_bcm43xx.c: bcm43xx platform data initilization file + * platform_bcm43xx.c: bcm43xx platform data initialization file * * (C) Copyright 2016 Intel Corporation * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c index 27186ad654c9..7a7fc54c449b 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c @@ -1,5 +1,5 @@ /* - * spidev platform data initilization file + * spidev platform data initialization file * * (C) Copyright 2014, 2016 Intel Corporation * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c index 429a94192671..8344d5a928c9 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c @@ -1,5 +1,5 @@ /* - * PCAL9555a platform data initilization file + * PCAL9555a platform data initialization file * * Copyright (C) 2016, Intel Corporation * diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index 2e569d10f2d0..a9f2e888e135 100644 --- a/arch/x86/platform/intel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -13,7 +13,7 @@ * * * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a - * mailbox interface (MBI) to communicate with mutiple devices. This + * mailbox interface (MBI) to communicate with multiple devices. This * driver implements access to this interface for those platforms that can * enumerate the device using PCI. */ diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 7fa8b3b53bc0..d9b8a1c1ab0f 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -109,7 +109,7 @@ static void detect_lid_state(void) * the edge detector hookup on the gpio inputs on the geode is * odd, to say the least. 
See http://dev.laptop.org/ticket/5703 * for details, but in a nutshell: we don't use the edge - * detectors. instead, we make use of an anomoly: with the both + * detectors. instead, we make use of an anomaly: with the both * edge detectors turned off, we still get an edge event on a * positive edge transition. to take advantage of this, we use the * front-end inverter to ensure that that's the edge we're always diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 24d2175a9480..b4ab779f1d47 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -19,7 +19,6 @@ #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_pdt.h> #include <asm/olpc.h> #include <asm/olpc_ofw.h> @@ -285,20 +284,3 @@ void __init olpc_dt_build_devicetree(void) pr_info("PROM DT: Built device tree with %u bytes of memory.\n", prom_early_allocated); } - -/* A list of DT node/bus matches that we want to expose as platform devices */ -static struct of_device_id __initdata of_ids[] = { - { .compatible = "olpc,xo1-battery" }, - { .compatible = "olpc,xo1-dcon" }, - { .compatible = "olpc,xo1-rtc" }, - {}, -}; - -static int __init olpc_create_platform_devices(void) -{ - if (machine_is_olpc()) - return of_platform_bus_probe(NULL, of_ids, NULL); - else - return 0; -} -device_initcall(olpc_create_platform_devices); diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile new file mode 100644 index 000000000000..5dec5067c9fb --- /dev/null +++ b/arch/x86/platform/pvh/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +OBJECT_FILES_NON_STANDARD_head.o := y + +obj-$(CONFIG_PVH) += enlighten.o +obj-$(CONFIG_PVH) += head.o diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c new file mode 100644 index 000000000000..62f5c7045944 --- /dev/null +++ b/arch/x86/platform/pvh/enlighten.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/acpi.h> + +#include <xen/hvc-console.h> + +#include <asm/io_apic.h> +#include <asm/hypervisor.h> +#include <asm/e820/api.h> +#include <asm/x86_init.h> + +#include <asm/xen/interface.h> + +#include <xen/xen.h> +#include <xen/interface/hvm/start_info.h> + +/* + * PVH variables. + * + * pvh_bootparams and pvh_start_info need to live in the data segment since + * they are used after startup_{32|64}, which clear .bss, are invoked. + */ +struct boot_params pvh_bootparams __attribute__((section(".data"))); +struct hvm_start_info pvh_start_info __attribute__((section(".data"))); + +unsigned int pvh_start_info_sz = sizeof(pvh_start_info); + +static u64 pvh_get_root_pointer(void) +{ + return pvh_start_info.rsdp_paddr; +} + +/* + * Xen guests are able to obtain the memory map from the hypervisor via the + * HYPERVISOR_memory_op hypercall. + * If we are trying to boot a Xen PVH guest, it is expected that the kernel + * will have been configured to provide an override for this routine to do + * just that. 
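The split of the PVH entry code out of the Xen tree relies on weak symbols: arch/x86/platform/pvh/enlighten.c carries panicking __weak fallbacks (xen_pvh_init(), mem_map_via_hcall()), and the Xen-specific objects supply the strong definitions that win at link time. The pattern, sketched with the names from this patch:

/* arch/x86/platform/pvh/enlighten.c -- always built with CONFIG_PVH: */
void __init __weak xen_pvh_init(void)
{
	/* Reached only if no strong definition was linked in. */
	BUG();
}

/* arch/x86/xen/enlighten_pvh.c -- built for CONFIG_XEN_PVH; this strong
 * definition overrides the weak stub at link time: */
void __init xen_pvh_init(void)
{
	/* Xen-specific setup; see the enlighten_pvh.c hunk further below. */
}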
+ */ +void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused) +{ + xen_raw_printk("Error: Could not find memory map\n"); + BUG(); +} + +static void __init init_pvh_bootparams(bool xen_guest) +{ + memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); + + if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) { + struct hvm_memmap_table_entry *ep; + int i; + + ep = __va(pvh_start_info.memmap_paddr); + pvh_bootparams.e820_entries = pvh_start_info.memmap_entries; + + for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) { + pvh_bootparams.e820_table[i].addr = ep->addr; + pvh_bootparams.e820_table[i].size = ep->size; + pvh_bootparams.e820_table[i].type = ep->type; + } + } else if (xen_guest) { + mem_map_via_hcall(&pvh_bootparams); + } else { + /* Non-xen guests are not supported by version 0 */ + BUG(); + } + + if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) { + pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr = + ISA_START_ADDRESS; + pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size = + ISA_END_ADDRESS - ISA_START_ADDRESS; + pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type = + E820_TYPE_RESERVED; + pvh_bootparams.e820_entries++; + } else + xen_raw_printk("Warning: Can't fit ISA range into e820\n"); + + pvh_bootparams.hdr.cmd_line_ptr = + pvh_start_info.cmdline_paddr; + + /* The first module is always ramdisk. */ + if (pvh_start_info.nr_modules) { + struct hvm_modlist_entry *modaddr = + __va(pvh_start_info.modlist_paddr); + pvh_bootparams.hdr.ramdisk_image = modaddr->paddr; + pvh_bootparams.hdr.ramdisk_size = modaddr->size; + } + + /* + * See Documentation/x86/boot.txt. + * + * Version 2.12 supports Xen entry point but we will use default x86/PC + * environment (i.e. hardware_subarch 0). + */ + pvh_bootparams.hdr.version = (2 << 8) | 12; + pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0; + + x86_init.acpi.get_root_pointer = pvh_get_root_pointer; +} + +/* + * If we are trying to boot a Xen PVH guest, it is expected that the kernel + * will have been configured to provide the required override for this routine. + */ +void __init __weak xen_pvh_init(void) +{ + xen_raw_printk("Error: Missing xen PVH initialization\n"); + BUG(); +} + +static void hypervisor_specific_init(bool xen_guest) +{ + if (xen_guest) + xen_pvh_init(); +} + +/* + * This routine (and those that it might call) should not use + * anything that lives in .bss since that segment will be cleared later. 
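This .bss constraint is also why the PVH globals above carry an explicit section attribute: a global without a non-zero initializer would normally be placed in .bss and be wiped by startup_{32|64} after the early PVH path has already written it. In sketch form:

/* Pinned to .data so the value written in xen_prepare_pvh() survives
 * the later clearing of .bss: */
struct boot_params pvh_bootparams __attribute__((section(".data")));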
+ */ +void __init xen_prepare_pvh(void) +{ + + u32 msr = xen_cpuid_base(); + bool xen_guest = !!msr; + + if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) { + xen_raw_printk("Error: Unexpected magic value (0x%08x)\n", + pvh_start_info.magic); + BUG(); + } + + hypervisor_specific_init(xen_guest); + + init_pvh_bootparams(xen_guest); +} diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/platform/pvh/head.S index 1f8825bbaffb..1f8825bbaffb 100644 --- a/arch/x86/xen/xen-pvh.S +++ b/arch/x86/platform/pvh/head.S diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 5f64f30873e2..b21a932c220c 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu) } } -/* Ping non-responding CPU's attemping to force them into the NMI handler */ +/* Ping non-responding CPU's attempting to force them into the NMI handler */ static void uv_nmi_nr_cpus_ping(void) { int cpu; diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 822ccdba93ad..bf94060fc06f 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -26,7 +26,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) CPPFLAGS_vdso.lds += -P -C VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ - -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 + -Wl,-z,max-page-size=4096 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1ef391aa184d..e07abefd3d26 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -74,6 +74,7 @@ config XEN_DEBUG_FS Enabling this option may incur a significant performance overhead. config XEN_PVH - bool "Support for running as a PVH guest" + bool "Support for running as a Xen PVH guest" depends on XEN && XEN_PVHVM && ACPI + select PVH def_bool n diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index dd2550d33b38..084de77a109e 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y -OBJECT_FILES_NON_STANDARD_xen-pvh.o := y ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities @@ -38,7 +37,6 @@ obj-$(CONFIG_XEN_PV) += xen-asm.o obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o -obj-$(CONFIG_XEN_PVH) += xen-pvh.o obj-$(CONFIG_EVENT_TRACING) += trace.o diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index 02e3ab7ff242..35b7599d2d0b 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -6,103 +6,45 @@ #include <asm/io_apic.h> #include <asm/hypervisor.h> #include <asm/e820/api.h> -#include <asm/x86_init.h> +#include <xen/xen.h> #include <asm/xen/interface.h> #include <asm/xen/hypercall.h> -#include <xen/xen.h> #include <xen/interface/memory.h> -#include <xen/interface/hvm/start_info.h> /* * PVH variables. * - * xen_pvh pvh_bootparams and pvh_start_info need to live in data segment - * since they are used after startup_{32|64}, which clear .bss, are invoked. + * The variable xen_pvh needs to live in the data segment since it is used + * after startup_{32|64} is invoked, which will clear the .bss segment. 
*/ bool xen_pvh __attribute__((section(".data"))) = 0; -struct boot_params pvh_bootparams __attribute__((section(".data"))); -struct hvm_start_info pvh_start_info __attribute__((section(".data"))); - -unsigned int pvh_start_info_sz = sizeof(pvh_start_info); -static u64 pvh_get_root_pointer(void) +void __init xen_pvh_init(void) { - return pvh_start_info.rsdp_paddr; + u32 msr; + u64 pfn; + + xen_pvh = 1; + xen_start_flags = pvh_start_info.flags; + + msr = cpuid_ebx(xen_cpuid_base() + 2); + pfn = __pa(hypercall_page); + wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); } -static void __init init_pvh_bootparams(void) +void __init mem_map_via_hcall(struct boot_params *boot_params_p) { struct xen_memory_map memmap; int rc; - memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); - - memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table); - set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table); + memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table); + set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table); rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); if (rc) { xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc); BUG(); } - pvh_bootparams.e820_entries = memmap.nr_entries; - - if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) { - pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr = - ISA_START_ADDRESS; - pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size = - ISA_END_ADDRESS - ISA_START_ADDRESS; - pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type = - E820_TYPE_RESERVED; - pvh_bootparams.e820_entries++; - } else - xen_raw_printk("Warning: Can fit ISA range into e820\n"); - - pvh_bootparams.hdr.cmd_line_ptr = - pvh_start_info.cmdline_paddr; - - /* The first module is always ramdisk. */ - if (pvh_start_info.nr_modules) { - struct hvm_modlist_entry *modaddr = - __va(pvh_start_info.modlist_paddr); - pvh_bootparams.hdr.ramdisk_image = modaddr->paddr; - pvh_bootparams.hdr.ramdisk_size = modaddr->size; - } - - /* - * See Documentation/x86/boot.txt. - * - * Version 2.12 supports Xen entry point but we will use default x86/PC - * environment (i.e. hardware_subarch 0). - */ - pvh_bootparams.hdr.version = (2 << 8) | 12; - pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */ - - x86_init.acpi.get_root_pointer = pvh_get_root_pointer; -} - -/* - * This routine (and those that it might call) should not use - * anything that lives in .bss since that segment will be cleared later. - */ -void __init xen_prepare_pvh(void) -{ - u32 msr; - u64 pfn; - - if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) { - xen_raw_printk("Error: Unexpected magic value (0x%08x)\n", - pvh_start_info.magic); - BUG(); - } - - xen_pvh = 1; - xen_start_flags = pvh_start_info.flags; - - msr = cpuid_ebx(xen_cpuid_base() + 2); - pfn = __pa(hypercall_page); - wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); - - init_pvh_bootparams(); + boot_params_p->e820_entries = memmap.nr_entries; } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index a5d7ed125337..0f4fe206dcc2 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, unsigned long limit) { int i, nr, flush = 0; - unsigned hole_low, hole_high; + unsigned hole_low = 0, hole_high = 0; /* The limit is the last byte to be touched */ limit--; BUG_ON(limit >= FIXADDR_TOP); +#ifdef CONFIG_X86_64 /* * 64-bit has a great big hole in the middle of the address - * space, which contains the Xen mappings. 
On 32-bit these - will end up making a zero-sized hole and so is a no-op. + * space, which contains the Xen mappings. */ - hole_low = pgd_index(USER_LIMIT); - hole_high = pgd_index(PAGE_OFFSET); + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR); + hole_high = pgd_index(GUARD_HOLE_END_ADDR); +#endif nr = pgd_index(limit) + 1; for (i = 0; i < nr; i++) { diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 075ed47993bb..d5f303c0e656 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, * The remap information (which mfn remap to which pfn) is contained in the * to be remapped memory itself in a linked list anchored at xen_remap_mfn. * This scheme allows to remap the different chunks in arbitrary order while - * the resulting mapping will be independant from the order. + * the resulting mapping will be independent from the order. */ void __init xen_remap_memory(void) { diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index bb1c2da0381d..1e9ef0ba30a5 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -12,6 +12,7 @@ #include <asm/segment.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> +#include <asm/asm.h> #include <xen/interface/xen.h> @@ -24,6 +25,7 @@ ENTRY(xen_\name) pop %r11 jmp \name END(xen_\name) +_ASM_NOKPROBE(xen_\name) .endm xen_pv_trap divide_error diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index d29b7365da8d..36338e7564a3 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 config XTENSA def_bool y - select ARCH_HAS_SG_CHAIN select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_COHERENT_DMA_MMAP if !MMU @@ -10,7 +9,7 @@ config XTENSA select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS + select DMA_REMAP if MMU select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_IRQ_SHOW diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 1fc138b6bc0a..9171bff76fc4 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -160,7 +160,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, flag & __GFP_NOWARN); if (!page) - page = alloc_pages(flag, get_order(size)); + page = alloc_pages(flag | __GFP_ZERO, get_order(size)); if (!page) return NULL;