 Documentation/devicetree/bindings/Makefile | 4
 Documentation/devicetree/bindings/net/fsl-fec.txt | 30
 Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 3
 MAINTAINERS | 1
 arch/arm/mm/dma-mapping.c | 4
 arch/arm64/kernel/cpufeature.c | 14
 arch/arm64/kernel/ftrace.c | 22
 arch/arm64/mm/dma-mapping.c | 4
 arch/powerpc/Kconfig | 1
 arch/powerpc/kernel/Makefile | 3
 arch/powerpc/kernel/dma-common.c | 17
 arch/sh/kernel/disassemble.c | 5
 arch/sh/kernel/hw_breakpoint.c | 1
 drivers/auxdisplay/Kconfig | 5
 drivers/auxdisplay/charlcd.c | 2
 include/misc/charlcd.h => drivers/auxdisplay/charlcd.h | 5
 drivers/auxdisplay/hd44780.c | 3
 drivers/auxdisplay/panel.c | 4
 drivers/cpufreq/cpufreq.c | 2
 drivers/dma/dw-edma/dw-edma-core.h | 2
 drivers/dma/dw-edma/dw-edma-pcie.c | 18
 drivers/dma/dw-edma/dw-edma-v0-core.c | 34
 drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 29
 drivers/dma/ste_dma40.c | 4
 drivers/dma/stm32-mdma.c | 2
 drivers/dma/tegra210-adma.c | 4
 drivers/dma/ti/omap-dma.c | 4
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
 drivers/gpu/drm/amd/display/dc/core/dc.c | 11
 drivers/gpu/drm/ast/ast_main.c | 5
 drivers/gpu/drm/ast/ast_mode.c | 2
 drivers/gpu/drm/ast/ast_post.c | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 4
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 22
 drivers/gpu/drm/scheduler/sched_entity.c | 4
 drivers/infiniband/core/counters.c | 6
 drivers/infiniband/core/nldev.c | 8
 drivers/infiniband/core/umem_odp.c | 4
 drivers/infiniband/hw/mlx5/devx.c | 11
 drivers/infiniband/hw/mlx5/odp.c | 24
 drivers/infiniband/sw/siw/Kconfig | 2
 drivers/infiniband/sw/siw/siw.h | 2
 drivers/infiniband/sw/siw/siw_main.c | 4
 drivers/infiniband/sw/siw/siw_qp.c | 14
 drivers/infiniband/sw/siw/siw_verbs.c | 16
 drivers/iommu/arm-smmu-v3.c | 4
 drivers/iommu/dma-iommu.c | 25
 drivers/iommu/intel-iommu-debugfs.c | 2
 drivers/iommu/intel-iommu.c | 11
 drivers/media/platform/omap/omap_vout_vrfb.c | 3
 drivers/nvme/host/pci.c | 13
 drivers/of/irq.c | 2
 drivers/of/resolver.c | 12
 drivers/pci/pcie/aspm.c | 20
 drivers/scsi/lpfc/lpfc_init.c | 23
 fs/afs/cmservice.c | 10
 fs/afs/dir.c | 89
 fs/afs/file.c | 12
 fs/afs/vlclient.c | 11
 fs/seq_file.c | 2
 fs/xfs/libxfs/xfs_bmap.c | 29
 fs/xfs/libxfs/xfs_da_btree.c | 19
 fs/xfs/libxfs/xfs_dir2_node.c | 3
 fs/xfs/xfs_log.c | 5
 include/asm-generic/5level-fixup.h | 21
 include/linux/dma-noncoherent.h | 13
 include/linux/gfp.h | 12
 include/linux/memcontrol.h | 19
 include/linux/mempolicy.h | 2
 include/linux/mm_types.h | 11
 include/linux/pci.h | 2
 include/uapi/rdma/siw-abi.h | 3
 kernel/dma/direct.c | 10
 kernel/dma/mapping.c | 19
 kernel/dma/remap.c | 2
 kernel/sched/cpufreq_schedutil.c | 14
 mm/huge_memory.c | 51
 mm/hugetlb.c | 19
 mm/kmemleak.c | 2
 mm/memcontrol.c | 59
 mm/mempolicy.c | 134
 mm/memremap.c | 24
 mm/rmap.c | 8
 mm/shmem.c | 2
 mm/usercopy.c | 2
 mm/vmalloc.c | 12
 mm/vmscan.c | 13
 mm/workingset.c | 10
 mm/z3fold.c | 14
 samples/auxdisplay/cfag12864b-example.c | 2
 security/keys/trusted.c | 13
 sound/pci/hda/hda_generic.c | 21
 sound/pci/hda/hda_generic.h | 1
 sound/pci/hda/hda_intel.c | 3
 sound/pci/hda/patch_conexant.c | 15
 sound/pci/hda/patch_realtek.c | 12
 sound/usb/mixer.c | 37
 97 files changed, 807 insertions(+), 440 deletions(-)
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6b0dfd5c17ba..5138a2f6232a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA $@
DT_DOCS = $(shell \
cd $(srctree)/$(src) && \
- find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+ find * \( -name '*.yaml' ! \
+ -name $(DT_TMP_SCHEMA) ! \
+ -name '*.example.dt.yaml' \) \
)
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 2d41fb96ce0a..5b88fae0307d 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,18 +7,6 @@ Required properties:
- phy-mode : See ethernet.txt file in the same directory
Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds. Should present
- only if property "phy-reset-gpios" is available. Missing the property
- will have the duration be 1 millisecond. Numbers greater than 1000 are
- invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
- specified in the "phy-reset-gpios" property is reversed (H=reset state,
- L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
- a delay of phy-reset-post-delay milliseconds will be observed after the
- phy-reset-gpios has been toggled. Can be omitted thus no delay is
- observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
- phy-supply : regulator that powers the Ethernet PHY.
- phy-handle : phandle to the PHY device connected to this device.
- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
per second interrupt associated with 1588 precision time protocol(PTP).
-
Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
according to phy.txt in the same directory
+Deprecated optional properties:
+ To avoid these, create a phy node according to phy.txt in the same
+ directory, and point the fec's "phy-handle" property to it. Then use
+ the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds. Should present
+ only if property "phy-reset-gpios" is available. Missing the property
+ will have the duration be 1 millisecond. Numbers greater than 1000 are
+ invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+ specified in the "phy-reset-gpios" property is reversed (H=reset state,
+ L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+ a delay of phy-reset-post-delay milliseconds will be observed after the
+ phy-reset-gpios has been toggled. Can be omitted thus no delay is
+ observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
Example:
ethernet@83fec000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 91d3e78b3395..400df2da018a 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -37,7 +37,8 @@ properties:
hwlocks: true
st,syscfg:
- $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: Should be phandle/offset/mask
items:
- description: Phandle to the syscon node which includes IRQ mux selection.
diff --git a/MAINTAINERS b/MAINTAINERS
index e81e60bd7c26..997a4f8fe88e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8415,7 +8415,6 @@ L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
-F: fs/iomap.c
F: fs/iomap/
F: include/linux/iomap.h
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6774b03aa405..d42557ee69c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev))
- return __get_dma_pgprot(attrs, prot);
- return prot;
+ return __get_dma_pgprot(attrs, prot);
}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d19d14ba9ae4..b1fdc486aed8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ /*
+ * We already refuse to boot CPUs that don't support our configured
+ * page size, so we can only detect mismatches for a page size other
+ * than the one we're currently using. Unfortunately, SoCs like this
+ * exist in the wild so, even though we don't like it, we'll have to go
+ * along with it and treat them as non-strict.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 1285c7b2947f..171773257974 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
+ struct plt_entry trampoline, *dst;
struct module *mod;
/*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* to check if the actual opcodes are in fact identical,
* regardless of the offset in memory so use memcmp() instead.
*/
- trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
- if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
- sizeof(trampoline))) {
- if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+ dst = mod->arch.ftrace_trampoline;
+ trampoline = get_plt_entry(addr, dst);
+ if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+ if (plt_entry_is_initialized(dst)) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
/* point the trampoline to our ftrace entry point */
module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
+ *dst = trampoline;
module_enable_ro(mod, true);
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /*
+ * Ensure updated trampoline is visible to instruction
+ * fetch before we patch in the branch.
+ */
+ __flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1d3f0b5a9940..bd2b039f43a6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
- return pgprot_writecombine(prot);
- return prot;
+ return pgprot_writecombine(prot);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 77f6ebf97113..d8dcd8820369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ea0c69236789..56dfa7a2a6f2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o \
- dma-common.o
+ of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
deleted file mode 100644
index dc7ef6b17b69..000000000000
--- a/arch/powerpc/kernel/dma-common.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs)
-{
- if (!dev_is_dma_coherent(dev))
- return pgprot_noncached(prot);
- return prot;
-}
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index defebf1a9c8a..845543780cc5 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("dbr");
break;
case FD_REG_N:
- if (0)
- goto d_reg_n;
case F_REG_N:
printk("fr%d", rn);
break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("xd%d", rn & ~1);
break;
}
- d_reg_n:
+ /* else, fall through */
case D_REG_N:
printk("dr%d", rn);
break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("xd%d", rm & ~1);
break;
}
+ /* else, fall through */
case D_REG_M:
printk("dr%d", rm);
break;
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 3bd010b4c55f..f10d64311127 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
+ break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..68489d1f00bb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
choice
prompt "Backlight initial state"
default CHARLCD_BL_FLASH
+ ---help---
+ Select the initial backlight state on boot or module load.
+
+ Previously, there was no option for this: the backlight flashed
+ briefly on init. Now you can also turn it off/on.
config CHARLCD_BL_OFF
bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
#include <generated/utsrelease.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define LCD_MINOR 156
diff --git a/include/misc/charlcd.h b/drivers/auxdisplay/charlcd.h
index 8cf6c18b0adb..00911ad0f3de 100644
--- a/include/misc/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -6,6 +6,9 @@
* Copyright (C) 2016-2017 Glider bvba
*/
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
struct charlcd {
const struct charlcd_ops *ops;
const unsigned char *char_conv; /* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
int charlcd_unregister(struct charlcd *lcd);
void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
#include <linux/property.h>
#include <linux/slab.h>
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
enum hd44780_pin {
/* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define KEYPAD_MINOR 185
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
return;
err_lcd_unreg:
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c28ebf2810f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
}
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
- if (ret)
+ if (ret < 0)
break;
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
struct dw_edma_region {
phys_addr_t paddr;
- dma_addr_t vaddr;
+ void __iomem *vaddr;
size_t sz;
};
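
As an aside, a minimal sketch (not from this patch; the helper name and offset are illustrative) of what the void __iomem * type buys over the old dma_addr_t: MMIO accessors such as readl() now type-check under sparse without casts.

#include <linux/io.h>

/* Illustrative only -- not part of this series. */
static u32 dw_edma_example_readl(struct dw_edma_region *rg, unsigned int off)
{
	/* void __iomem * pointer arithmetic plus readl(); a dma_addr_t
	 * vaddr would have required an explicit (and misleading) cast.
	 */
	return readl(rg->vaddr + off);
}
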
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
- dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+ dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
- dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+ dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
- &dw->rg_region.vaddr, &dw->rg_region.paddr);
+ dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- &dw->ll_region.vaddr, &dw->ll_region.paddr);
+ dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- &dw->dt_region.vaddr, &dw->dt_region.paddr);
+ dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+ return dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_v0_lli *lli;
- struct dw_edma_v0_llp *llp;
+ struct dw_edma_v0_lli __iomem *lli;
+ struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
- u64 sar, dar, addr;
int j;
- lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+ lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
- sar = cpu_to_le64(child->sar);
- SET_LL(&lli[i].sar_low, lower_32_bits(sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+ SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+ SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
/* DAR - low, high */
- dar = cpu_to_le64(child->dar);
- SET_LL(&lli[i].dar_low, lower_32_bits(dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+ SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+ SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
i++;
}
- llp = (struct dw_edma_v0_llp *)&lli[i];
+ llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
- addr = cpu_to_le64(chunk->ll_region.paddr);
- SET_LL(&llp->llp_low, lower_32_bits(addr));
- SET_LL(&llp->llp_high, upper_32_bits(addr));
+ SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
- u64 llp;
dw_edma_v0_core_write_chunk(chunk);
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
- llp = cpu_to_le64(chunk->ll_region.paddr);
- SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
- SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+ SET_CH(dw, chan->dir, chan->id, llp_low,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH(dw, chan->dir, chan->id, llp_high,
+ upper_32_bits(chunk->ll_region.paddr));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
- ((dma_addr_t *)&regs->name)
+ ((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
@@ -40,36 +40,37 @@
static struct dentry *base_dir;
static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
static struct {
- void *start;
- void *end;
+ void __iomem *start;
+ void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
- char name[24];
+ const char *name;
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
+ void __iomem *reg = (void __force __iomem *)data;
if (dw->mode == EDMA_MODE_LEGACY &&
- data >= (void *)&regs->type.legacy.ch) {
- void *ptr = (void *)&regs->type.legacy.ch;
+ reg >= (void __iomem *)&regs->type.legacy.ch) {
+ void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= data && data < lim[0][ch].end) {
- ptr += (data - lim[0][ch].start);
+ if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+ ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= data && data < lim[1][ch].end) {
- ptr += (data - lim[1][ch].start);
+ if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+ ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
@@ -86,7 +87,7 @@ legacy_sel_wr:
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
- *val = readl(data);
+ *val = readl(reg);
}
return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
{
int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw)
return;
- regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+ regs = dw->rg_region.vaddr;
if (!regs)
return;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..1311de74bfdd 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
goto exit;
}
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..b33cf6e8ab8e 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
return chan;
}
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
return 0;
}
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..ba27802efcd0 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (src_icg) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
d->ei = 1;
- d->fi = src_icg;
+ d->fi = src_icg + 1;
} else if (xt->src_inc) {
d->ccr |= CCR_SRC_AMODE_POSTINC;
d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (dst_icg) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
sg->ei = 1;
- sg->fi = dst_icg;
+ sg->fi = dst_icg + 1;
} else if (xt->dst_inc) {
d->ccr |= CCR_DST_AMODE_POSTINC;
sg->fi = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a51178..04b8ac4432c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- WREG32(mmSQ_CMD, value);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fa20201eef3a..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
*/
#include <linux/slab.h>
+#include <linux/mm.h>
#include "dm_services.h"
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
struct dc_state *dc_create_state(struct dc *dc)
{
- struct dc_state *context = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
if (!context)
return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
- struct dc_state *new_ctx = kmemdup(src_ctx,
- sizeof(struct dc_state), GFP_KERNEL);
+ struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
if (!new_ctx)
return NULL;
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
- kfree(context);
+ kvfree(context);
}
void dc_release_state(struct dc_state *context)
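
As background for the allocator switch above, a small hedged sketch (function names are illustrative): kvzalloc() tries kmalloc() first and falls back to vmalloc() for large or fragmented allocations, so the memory must be released with kvfree(), which is exactly what dc_state_free() now does.

#include <linux/mm.h>

/* Illustrative only -- not part of this series. */
static struct dc_state *example_alloc_state(void)
{
	/* May be kmalloc- or vmalloc-backed; callers must not assume which. */
	return kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
}

static void example_free_state(struct dc_state *ctx)
{
	kvfree(ctx);	/* correct for both backing allocators */
}
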
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
/* Enable extended register access */
- ast_enable_mmio(dev);
ast_open_key(ast);
+ ast_enable_mmio(dev);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
ast_open_key(ast);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9f3fd7d96a69..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.indirect_ctx.guest_gma,
workload->wa_ctx.indirect_ctx.size)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
workload->wa_ctx.indirect_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.per_ctx.guest_gma,
CACHELINE_BYTES)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
workload->wa_ctx.per_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 126703816794..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
- /* When restoring duplicated states, we need to make sure that the
- * bw remains the same and avoid recalculating it, as the connector's
- * bpc may have changed after the state was duplicated
- */
- if (!state->duplicated)
- asyh->dp.pbn =
- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
- connector->display_info.bpc * 3);
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ /*
+ * When restoring duplicated states, we need to make sure that
+ * the bw remains the same and avoid recalculating it, as the
+ * connector's bpc may have changed after the state was
+ * duplicated
+ */
+ if (!state->duplicated) {
+ const int bpp = connector->display_info.bpc * 3;
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+ }
- if (crtc_state->mode_changed) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
- spsc_queue_peek(&entity->job_queue) == NULL)
+ spsc_queue_count(&entity->job_queue) == 0)
return true;
return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
- if (spsc_queue_peek(&entity->job_queue)) {
+ if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
/* Park the kernel for a moment to make sure it isn't processing
* our entity.
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 45d5164e9574..b79890739a2c 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
int ret;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
mutex_lock(&port_counter->lock);
if (on) {
ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
+ if (!dev->port_data[port].port_counter.hstats)
+ return -EOPNOTSUPP;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..87d40d1ecdde 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
if ((mode == RDMA_COUNTER_MODE_AUTO) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
nlmsg_end(msg, nlh);
ib_device_put(device);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
* prevent any further fault handling on this MR.
*/
ib_umem_notifier_start_account(umem_odp);
- umem_odp->dying = 1;
- /* Make sure that the fact the umem is dying is out before we release
- * all pending page faults. */
- smp_wmb();
complete_all(&umem_odp->notifier_completion);
umem_odp->umem.context->invalidate_range(
umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
event_sub->eventfd =
eventfd_ctx_fdget(redirect_fd);
- if (IS_ERR(event_sub)) {
+ if (IS_ERR(event_sub->eventfd)) {
err = PTR_ERR(event_sub->eventfd);
event_sub->eventfd = NULL;
goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub, *event_sub_tmp;
struct devx_async_event_data *entry, *tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
- mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
/* delete the subscriptions which are related to this FD */
list_for_each_entry_safe(event_sub, event_sub_tmp,
&ev_file->subscribed_events_list, file_list) {
- devx_cleanup_subscription(ev_file->dev, event_sub);
+ devx_cleanup_subscription(dev, event_sub);
if (event_sub->eventfd)
eventfd_ctx_put(event_sub->eventfd);
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
kfree_rcu(event_sub, rcu);
}
- mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
/* free the pending events allocation */
if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
}
uverbs_close_fd(filp);
- put_device(&ev_file->dev->ib_dev.dev);
+ put_device(&dev->ib_dev.dev);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 81da82050d05..1d257d1b3b0d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u32 flags)
{
int npages = 0, current_seq, page_shift, ret, np;
- bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
- implicit = true;
} else {
odp = odp_mr;
}
@@ -682,19 +680,15 @@ next_mr:
out:
if (ret == -EAGAIN) {
- if (implicit || !odp->dying) {
- unsigned long timeout =
- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(
- &odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq, odp->notifiers_count);
- }
- } else {
- /* The MR is being killed, kill the QP as well. */
- ret = -EFAULT;
+ unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(&odp->notifier_completion,
+ timeout)) {
+ mlx5_ib_warn(
+ dev,
+ "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+ current_seq, odp->notifiers_seq,
+ odp->notifiers_count);
}
}
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+ depends on INET && INFINIBAND && LIBCRC32C
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..77b1aabf6ff3 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -214,7 +214,7 @@ struct siw_wqe {
struct siw_cq {
struct ib_cq base_cq;
spinlock_t lock;
- u64 *notify;
+ struct siw_cq_ctrl *notify;
struct siw_cqe *queue;
u32 cq_put;
u32 cq_get;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index d0f140daf659..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
out_err:
siw_cpu_info.num_nodes = 0;
- while (i) {
+ while (--i >= 0)
kfree(siw_cpu_info.tx_valid_cpus[i]);
- siw_cpu_info.tx_valid_cpus[i--] = NULL;
- }
kfree(siw_cpu_info.tx_valid_cpus);
siw_cpu_info.tx_valid_cpus = NULL;
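
The rewritten loop above restores the usual partial-allocation unwind: on failure at index i, free only entries 0..i-1, which the old "while (i)" form got wrong (it freed the never-allocated slot i and skipped slot 0). A generic hedged sketch of that pattern, with illustrative names:

#include <linux/slab.h>

/* Illustrative only -- not part of this series. */
static int example_alloc_array(void **arr, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		arr[i] = kzalloc(sz, GFP_KERNEL);
		if (!arr[i])
			goto out_err;
	}
	return 0;

out_err:
	while (--i >= 0)
		kfree(arr[i]);
	return -ENOMEM;
}
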
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e27bd5b35b96..0990307c5d2c 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1013,18 +1013,24 @@ out:
*/
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
- u64 cq_notify;
+ u32 cq_notify;
if (!cq->base_cq.comp_handler)
return false;
- cq_notify = READ_ONCE(*cq->notify);
+ /* Read application shared notification state */
+ cq_notify = READ_ONCE(cq->notify->flags);
if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
((cq_notify & SIW_NOTIFY_SOLICITED) &&
(flags & SIW_WQE_SOLICITED))) {
- /* dis-arm CQ */
- smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+ /*
+ * CQ notification is one-shot: Since the
+ * current CQE causes user notification,
+ * the CQ gets dis-armed and must be re-armed
+ * by the user for a new notification.
+ */
+ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
return true;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..e7f3a2379d9d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->lock);
- cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+ cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
if (udata) {
struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- /* CQ event for next solicited completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+ /*
+ * Enable CQ event for next solicited completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
else
- /* CQ event for any signalled completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+ /*
+ * Enable CQ event for any signalled completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
if (flags & IB_CQ_REPORT_MISSED_EVENTS)
return cq->cq_put - cq->cq_get;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..c5c93e48b4db 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
ste_live = true;
break;
case STRTAB_STE_0_CFG_ABORT:
- if (disable_bypass)
- break;
+ BUG_ON(!disable_bypass);
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..d991d40f797f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
- (cur_len + s_length <= max_len)) {
+ (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
@@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn, off = vma->vm_pgoff;
int ret;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
- if (iova == DMA_MAPPING_ERROR)
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
goto out_free_page;
+ if (iommu_map(domain, iova, msi_addr, size, prot))
+ goto out_free_iova;
+
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 2b25d9c59336..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
tbl_wlk.ctx_entry = context;
m->private = &tbl_wlk;
- if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
pasid_dir_size = get_pasid_dir_size(context);
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdaed2da8a55..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
dmar_domain = to_dmar_domain(domain);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
}
+ dmar_remove_one_dev_info(dev);
get_private_domain_for_dev(dev);
}
@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
/* free the private domain */
if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
domain_exit(info->domain);
free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
+ if (info)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
domain_add_dev_info(si_domain, dev);
dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
ret = iommu_request_dma_domain_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
if (!iommu)
return;
+ dmar_remove_one_dev_info(dev);
+
iommu_group_remove_device(dev);
iommu_device_unlink(&iommu->iommu, dev);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..11ec048929e8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
*/
pixsize = vout->bpp * vout->vrfb_bpp;
- dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
+ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
xt->src_start = vout->buf_phy_addr[vb->i];
xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index db160cee42ad..108e109e99f1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2846,7 +2846,7 @@ static int nvme_resume(struct device *dev)
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
struct nvme_ctrl *ctrl = &ndev->ctrl;
- if (pm_resume_via_firmware() || !ctrl->npss ||
+ if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
nvme_reset_ctrl(ctrl);
return 0;
@@ -2859,6 +2859,8 @@ static int nvme_suspend(struct device *dev)
struct nvme_ctrl *ctrl = &ndev->ctrl;
int ret = -EBUSY;
+ ndev->last_ps = U32_MAX;
+
/*
* The platform does not remove power for a kernel managed suspend so
* use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2868,14 @@ static int nvme_suspend(struct device *dev)
* shutdown. But if the firmware is involved after the suspend or the
* device does not support any non-default power states, shut down the
* device fully.
+ *
+ * If ASPM is not enabled for the device, shut down the device and allow
+ * the PCI bus layer to put it into D3 in order to take the PCIe link
+ * down, so as to allow the platform to achieve its minimum low-power
+ * state (which may not be possible if the link is up).
*/
- if (pm_suspend_via_firmware() || !ctrl->npss) {
+ if (pm_suspend_via_firmware() || !ctrl->npss ||
+ !pcie_aspm_enabled(pdev)) {
nvme_dev_disable(ndev, true);
return 0;
}
@@ -2880,7 +2888,6 @@ static int nvme_suspend(struct device *dev)
ctrl->state != NVME_CTRL_ADMIN_ONLY)
goto unfreeze;
- ndev->last_ps = 0;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
*
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
for_each_child_of_node(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
- if (!node_name_cmp(child, overlay_child))
+ if (!node_name_cmp(child, overlay_child)) {
+ of_node_put(overlay_child);
break;
+ }
- if (!overlay_child)
+ if (!overlay_child) {
+ of_node_put(child);
return -EINVAL;
+ }
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
NULL, 0644);
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+ bool ret;
+
+ if (!bridge)
+ return false;
+
+ mutex_lock(&aspm_lock);
+ ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+ mutex_unlock(&aspm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
#ifdef CONFIG_PCIEASPM_DEBUG
static ssize_t link_state_show(struct device *dev,
struct device_attribute *attr,
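
A hedged sketch of how a driver's suspend path can consult the new helper (nvme_suspend() earlier in this diff does essentially this); the example_* callees are purely illustrative, not real APIs:

#include <linux/pci.h>

/* Illustrative only -- not part of this series. */
static int example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	if (!pcie_aspm_enabled(pdev)) {
		/*
		 * The link will not enter a low-power state on its own,
		 * so shut the device down fully and let the PCI core put
		 * it into D3 instead.
		 */
		return example_full_shutdown(pdev);
	}
	return example_enter_low_power_state(pdev);
}
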
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..a7549ae32542 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
/* This loop sets up all CPUs that are affinitized with a
* irq vector assigned to the driver. All affinitized CPUs
* will get a link to that vectors IRQ and EQ.
+ *
+ * NULL affinity mask handling:
+ * If irq count is greater than one, log an error message.
+ * If the null mask is received for the first irq, find the
+ * first present cpu, and assign the eq index to ensure at
+ * least one EQ is assigned.
*/
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
/* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp)
- continue;
+ if (!maskp) {
+ if (phba->cfg_irq_chann > 1)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3329 No affinity mask found "
+ "for vector %d (%d)\n",
+ idx, phba->cfg_irq_chann);
+ if (!idx) {
+ cpu = cpumask_first(cpu_present_mask);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->eq = idx;
+ cpup->irq = pci_irq_vector(phba->pcidev, idx);
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ }
+ break;
+ }
i = 0;
/* Loop through all CPUs associated with vector idx */
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
struct afs_call *call = container_of(work, struct afs_call, work);
struct afs_uuid *r = call->request;
- struct {
- __be32 match;
- } reply;
-
_enter("");
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
- reply.match = htonl(0);
+ afs_send_empty_reply(call);
else
- reply.match = htonl(1);
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ 1, 1, "K-1");
- afs_send_simple_reply(call, &reply, sizeof(reply));
afs_put_call(call);
_leave("");
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..81207dc3c997 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
* iterate through the data blob that lists the contents of an AFS directory
*/
static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key)
+ struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
req = afs_read_dir(dvnode, key);
if (IS_ERR(req))
return PTR_ERR(req);
+ *_dir_version = req->data_version;
/* round the file position up to the next entry boundary */
ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
*/
static int afs_readdir(struct file *file, struct dir_context *ctx)
{
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+ afs_dataversion_t dir_version;
+
+ return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+ &dir_version);
}
/*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* - just returns the FID the dentry name maps to if found
*/
static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key)
+ struct afs_fid *fid, struct key *key,
+ afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key);
+ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_server *server;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
+ afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
int ret, i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->fids[i].vid = as->volume->vid;
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key);
+ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
if (ret < 0) {
inode = ERR_PTR(ret);
goto out;
}
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
+
inode = ERR_PTR(-ENOENT);
if (!cookie->found)
goto out;
@@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
struct dentry *parent;
struct inode *inode;
struct key *key;
- long dir_version, de_version;
+ afs_dataversion_t dir_version;
+ long de_version;
int ret;
if (flags & LOOKUP_RCU)
@@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
* on a 32-bit system, we only have 32 bits in the dentry to store the
* version.
*/
- dir_version = (long)dir->status.data_version;
+ dir_version = dir->status.data_version;
de_version = (long)dentry->d_fsdata;
- if (de_version == dir_version)
- goto out_valid;
+ if (de_version == (long)dir_version)
+ goto out_valid_noupdate;
- dir_version = (long)dir->invalid_before;
- if (de_version - dir_version >= 0)
+ dir_version = dir->invalid_before;
+ if (de_version - (long)dir_version >= 0)
goto out_valid;
_debug("dir modified");
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
out_valid:
- dentry->d_fsdata = (void *)dir_version;
+ dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
@@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
}
/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+ struct dentry *dentry,
+ struct afs_status_cb *scb)
+{
+ if (fc->ac.error == 0)
+ dentry->d_fsdata =
+ (void *)(unsigned long)scb->status.data_version;
+}
+
+/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, scb);
+ afs_update_dentry_version(&fc, dentry, scb);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
afs_dir_remove_subdir(dentry);
@@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
&data_version, &scb[0]);
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
&data_version_2, &scb[1]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
ret = afs_end_vnode_operation(&fc);
if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
NULL, &scb[1]);
ihold(&vnode->vfs_inode);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
d_instantiate(dentry, &vnode->vfs_inode);
mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ /* This bit is potentially nasty as there's a potential race with
+ * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
+ * to reflect it's new parent's new data_version after the op, but
+ * d_revalidate may see old_dentry between the op having taken place
+ * and the version being updated.
+ *
+ * So drop the old_dentry for now to make other threads go through
+ * lookup instead - which we hold a lock against.
+ */
+ d_drop(old_dentry);
+
ret = -ERESTARTSYS;
if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
afs_dataversion_t orig_data_version;
@@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- goto error_rehash;
+ goto error_rehash_old;
}
- new_data_version = new_dvnode->status.data_version;
+ new_data_version = new_dvnode->status.data_version + 1;
} else {
new_data_version = orig_data_version;
new_scb = &scb[0];
@@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
- goto error_rehash;
+ goto error_rehash_old;
}
if (ret == 0) {
@@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ if (new_dvnode != orig_dvnode) {
+ afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+ } else {
+ afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+ }
d_move(old_dentry, new_dentry);
goto error_tmp;
}
+error_rehash_old:
+ d_rehash(new_dentry);
error_rehash:
if (rehash)
d_rehash(rehash);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
int i;
if (refcount_dec_and_test(&req->usage)) {
- for (i = 0; i < req->nr_pages; i++)
- if (req->pages[i])
- put_page(req->pages[i]);
- if (req->pages != req->array)
- kfree(req->pages);
+ if (req->pages) {
+ for (i = 0; i < req->nr_pages; i++)
+ if (req->pages[i])
+ put_page(req->pages[i]);
+ if (req->pages != req->array)
+ kfree(req->pages);
+ }
kfree(req);
}
}
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
int j;
+ int n = entry->nr_servers;
tmp = ntohl(uvldb->serverFlags[i]);
if (tmp & AFS_VLSF_DONTUSE ||
(new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
continue;
if (tmp & AFS_VLSF_RWVOL) {
- entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+ entry->fs_mask[n] |= AFS_VOL_VTM_RW;
if (vlflags & AFS_VLF_BACKEXISTS)
- entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+ entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
}
if (tmp & AFS_VLSF_ROVOL)
- entry->fs_mask[i] |= AFS_VOL_VTM_RO;
- if (!entry->fs_mask[i])
+ entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+ if (!entry->fs_mask[n])
continue;
xdr = &uvldb->serverNumber[i];
- uuid = (struct afs_uuid *)&entry->fs_server[i];
+ uuid = (struct afs_uuid *)&entry->fs_server[n];
uuid->time_low = xdr->time_low;
uuid->time_mid = htons(ntohl(xdr->time_mid));
uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
}
if (seq_has_overflowed(m))
goto Eoverflow;
+ p = m->op->next(m, p, &m->index);
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
}
pos += m->count;
m->count = 0;
- p = m->op->next(m, p, &m->index);
if (pos == offset)
break;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
XFS_STATS_INC(mp, xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!ifp) {
+ /* No CoW fork? Return a hole. */
+ if (whichfork == XFS_COW_FORK) {
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount = len;
+ mval->br_state = XFS_EXT_NORM;
+ *nmap = 1;
+ return 0;
+ }
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK && !ifp) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
+ /*
+ * A missing attr ifork implies that the inode says we're in
+ * extents or btree format but failed to pass the inode fork
+ * verifier while trying to load it. Treat that as a file
+ * corruption too.
+ */
+#ifdef DEBUG
+ xfs_alert(mp, "%s: inode %llu missing fork %d",
+ __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
- if (error) {
- addblk->bp = NULL;
- return error; /* GROT: dir is inconsistent */
- }
+ if (error)
+ goto out;
/*
* Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
- ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
- ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
+out:
addblk->bp = NULL;
- return 0;
+ return error;
}
/*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
ents = dp->d_ops->leaf_ents_p(leaf);
xfs_dir3_leaf_check(dp, bp);
- ASSERT(leafhdr.count > 0);
+ if (leafhdr.count <= 0)
+ return -EFSCORRUPTED;
/*
* Look up the hash value in the leaf entries.
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
ASSERT(*ticp == NULL);
tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
- KM_SLEEP | KM_MAYFAIL);
- if (!tic)
- return -ENOMEM;
-
+ KM_SLEEP);
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index bb6cb347018c..f6947da70d71 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,9 +19,24 @@
#define p4d_alloc(mm, pgd, address) (pgd)
#define p4d_offset(pgd, start) (pgd)
-#define p4d_none(p4d) 0
-#define p4d_bad(p4d) 0
-#define p4d_present(p4d) 1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+ return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return 1;
+}
+#endif
+
#define p4d_ERROR(p4d) do { } while (0)
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
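Turning the constant macros into static inlines leaves the generated code unchanged (the bodies are still constants) but means the p4d argument is actually consumed, so callers that only build a p4d_t for these checks no longer trip "variable set but not used" warnings. A small illustrative caller, hypothetical and not from the patch:

#include <linux/mm.h>
#include <linux/errno.h>

/* With '#define p4d_none(p4d) 0' the argument is never expanded, so 'p4d'
 * below is assigned but never read and the compiler may warn; with the
 * static inline the parameter counts as a use.
 */
static int foo_probe_p4d(pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d = p4d_offset(pgd, addr);

	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return -ENOENT;
	return 0;
}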
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 3813211a9aad..0bff3d7fac92 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
-# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..f33881688f42 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+ int node);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
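With the hugepage argument and the alloc_hugepage_vma() wrapper gone, THP callers pass the order and node explicitly and express any node preference through the gfp mask instead. An illustrative caller using the new five-argument form (it mirrors the converted call sites further down in this series; the function name is hypothetical):

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/topology.h>

/* Allocate a PMD-sized THP for a fault at 'haddr'; node selection such as
 * __GFP_THISNODE now travels in 'gfp' rather than in a separate flag.
 */
static struct page *foo_alloc_thp(gfp_t gfp, struct vm_area_struct *vma,
				  unsigned long haddr)
{
	return alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr,
			       numa_node_id());
}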
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44c41462be33..2cd4359cb38c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
mod_node_page_state(page_pgdat(page), idx, val);
}
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+ int val)
+{
+ struct page *page = virt_to_head_page(p);
+
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
__mod_lruvec_page_state(page, idx, -1);
}
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+ __mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+ __mod_lruvec_slab_state(p, idx, -1);
+}
+
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
int idx)
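The new *_lruvec_slab_state() helpers take a pointer into a slab-allocated object rather than a struct page, resolving the page (and, with memcg enabled, the owning cgroup's lruvec) internally; in the !CONFIG_MEMCG case above they fall back to plain node counters. A minimal illustrative pairing, matching how the workingset code later in this series uses them (the wrapper names are hypothetical):

#include <linux/memcontrol.h>

/* The '__' variants update the counters non-atomically, so call them with
 * the same protection you would use for __mod_node_page_state() (e.g.
 * under a lock with interrupts disabled, as the shadow-node LRU does).
 */
static void foo_account_shadow_node(void *node)
{
	__inc_lruvec_slab_state(node, WORKINGSET_NODES);
}

static void foo_unaccount_shadow_node(void *node)
{
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);
}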
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..bac395f1d00a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a37a89eb7a7..6a7a1083b6fb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -159,7 +159,16 @@ struct page {
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
void *zone_device_data;
- unsigned long _zd_pad_1; /* uses mapping */
+ /*
+ * ZONE_DEVICE private pages are counted as being
+ * mapped so the next 3 words hold the mapping, index,
+ * and private fields from the source anonymous or
+ * page cache page while the page is migrated to device
+ * private memory.
+ * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+ * use the mapping, index, and private fields when
+ * pmem backed DAX files are mapped.
+ */
};
/** @rcu_head: You can use this to free a page by RCU. */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e700d9f9f28..82e4cd1b7ac3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
#ifdef CONFIG_PCIEASPM
bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
#else
static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_PCIEAER
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index 7de68f1dc707..af735f55b291 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -180,6 +180,7 @@ struct siw_cqe {
* to control CQ arming.
*/
struct siw_cq_ctrl {
- __aligned_u64 notify;
+ __u32 flags;
+ __u32 pad;
};
#endif
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 59bdceea3737..795c9b095d75 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
{
u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
- if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
- max_dma = dev->bus_dma_mask;
-
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
if (!page)
return NULL;
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev)) {
/* remove any dirty cache lines on the kernel alias */
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
/* return the page pointer as the opaque cookie */
return page;
}
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
__dma_direct_free_pages(dev, size, cpu_addr);
return;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+ if (dev_is_dma_coherent(dev) ||
+ (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+ (attrs & DMA_ATTR_NON_CONSISTENT)))
+ return prot;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+ return arch_dma_mmap_pgprot(dev, prot, attrs);
+ return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
/*
* Create userspace mapping for the DMA-coherent memory.
*/
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn;
int ret = -ENXIO;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* create a coherent mapping */
ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
- arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+ dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
if (!ret) {
__dma_direct_free_pages(dev, size, page);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 636ca6f88c8e..867b4bb6d4be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -40,6 +40,7 @@ struct sugov_policy {
struct task_struct *thread;
bool work_in_progress;
+ bool limits_changed;
bool need_freq_update;
};
@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
- if (unlikely(sg_policy->need_freq_update))
+ if (unlikely(sg_policy->limits_changed)) {
+ sg_policy->limits_changed = false;
+ sg_policy->need_freq_update = true;
return true;
+ }
delta_ns = time - sg_policy->last_freq_update_time;
@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time))
return;
- busy = sugov_cpu_is_busy(sg_cpu);
+ /* Limits may have changed, don't skip frequency update */
+ busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->last_freq_update_time = 0;
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
+ sg_policy->limits_changed = false;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
mutex_unlock(&sg_policy->work_lock);
}
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
struct cpufreq_governor schedutil_gov = {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1334ede667a8..738065f765ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -644,30 +644,40 @@ release:
* available
* never: never stall for any thp allocation
*/
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
{
const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+ gfp_t this_node = 0;
+
+#ifdef CONFIG_NUMA
+ struct mempolicy *pol;
+ /*
+ * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
+ * specified, to express a general desire to stay on the current
+ * node for optimistic allocation attempts. If the defrag mode
+ * and/or madvise hint requires the direct reclaim then we prefer
+ * to fallback to other node rather than node reclaim because that
+ * can lead to excessive reclaim even though there is free memory
+ * on other nodes. We expect that NUMA preferences are specified
+ * by memory policies.
+ */
+ pol = get_vma_policy(vma, addr);
+ if (pol->mode != MPOL_BIND)
+ this_node = __GFP_THISNODE;
+ mpol_cond_put(pol);
+#endif
- /* Always do synchronous compaction */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
-
- /* Kick kcompactd and fail quickly */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
-
- /* Synchronous compaction if madvised, otherwise kick kcompactd */
+ return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT |
- (vma_madvised ? __GFP_DIRECT_RECLAIM :
- __GFP_KSWAPD_RECLAIM);
-
- /* Only do synchronous compaction if madvised */
+ return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ __GFP_KSWAPD_RECLAIM | this_node);
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT |
- (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
-
- return GFP_TRANSHUGE_LIGHT;
+ return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ this_node);
+ return GFP_TRANSHUGE_LIGHT | this_node;
}
/* Caller must hold page table lock. */
@@ -739,8 +749,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
pte_free(vma->vm_mm, pgtable);
return ret;
}
- gfp = alloc_hugepage_direct_gfpmask(vma);
- page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+ gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1347,8 +1357,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
alloc:
if (__transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
- huge_gfp = alloc_hugepage_direct_gfpmask(vma);
- new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+ huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+ haddr, numa_node_id());
} else
new_page = NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ede7e7f5d1ab..6d7296dd11b8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3856,6 +3856,25 @@ retry:
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
+ /*
+ * Returning error will result in faulting task being
+ * sent SIGBUS. The hugetlb fault mutex prevents two
+ * tasks from racing to fault in the same page which
+ * could result in false unable to allocate errors.
+ * Page migration does not take the fault mutex, but
+ * does a clear then write of pte's under page table
+ * lock. Page fault code could race with migration,
+ * notice the clear pte and try to allocate a page
+ * here. Before returning error, get ptl and make
+ * sure there really is no pte entry.
+ */
+ ptl = huge_pte_lock(h, mm, ptep);
+ if (!huge_pte_none(huge_ptep_get(ptep))) {
+ ret = 0;
+ spin_unlock(ptl);
+ goto out;
+ }
+ spin_unlock(ptl);
ret = vmf_error(PTR_ERR(page));
goto out;
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6e9e8cca663e..f6e602918dac 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1966,6 +1966,7 @@ static void kmemleak_disable(void)
/* stop any memory operation tracing */
kmemleak_enabled = 0;
+ kmemleak_early_log = 0;
/* check whether it is too early for a kernel thread */
if (kmemleak_initialized)
@@ -2009,7 +2010,6 @@ void __init kmemleak_init(void)
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
- kmemleak_early_log = 0;
kmemleak_disable();
return;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cdbb7a84cb6e..6f5c0c517c49 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -768,6 +768,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
+{
+ struct page *page = virt_to_head_page(p);
+ pg_data_t *pgdat = page_pgdat(page);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = memcg_from_slab_page(page);
+
+ /* Untracked pages have no memcg, no lruvec. Update only the node */
+ if (!memcg || memcg == root_mem_cgroup) {
+ __mod_node_page_state(pgdat, idx, val);
+ } else {
+ lruvec = mem_cgroup_lruvec(pgdat, memcg);
+ __mod_lruvec_state(lruvec, idx, val);
+ }
+ rcu_read_unlock();
+}
+
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
@@ -1130,26 +1150,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
css_put(&prev->css);
}
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+ struct mem_cgroup *dead_memcg)
{
- struct mem_cgroup *memcg = dead_memcg;
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_node *mz;
int nid;
int i;
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- for_each_node(nid) {
- mz = mem_cgroup_nodeinfo(memcg, nid);
- for (i = 0; i <= DEF_PRIORITY; i++) {
- iter = &mz->iter[i];
- cmpxchg(&iter->position,
- dead_memcg, NULL);
- }
+ for_each_node(nid) {
+ mz = mem_cgroup_nodeinfo(from, nid);
+ for (i = 0; i <= DEF_PRIORITY; i++) {
+ iter = &mz->iter[i];
+ cmpxchg(&iter->position,
+ dead_memcg, NULL);
}
}
}
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+ struct mem_cgroup *memcg = dead_memcg;
+ struct mem_cgroup *last;
+
+ do {
+ __invalidate_reclaim_iterators(memcg, dead_memcg);
+ last = memcg;
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ /*
+ * When cgroup1 non-hierarchy mode is used,
+ * parent_mem_cgroup() does not walk all the way up to the
+ * cgroup root (root_mem_cgroup). So we have to handle
+ * dead_memcg from cgroup root separately.
+ */
+ if (last != root_mem_cgroup)
+ __invalidate_reclaim_iterators(root_mem_cgroup,
+ dead_memcg);
+}
+
/**
* mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
* @memcg: hierarchy root
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f48693f75b37..65e0874fce17 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
},
};
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
}
/*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- * page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 2 - THP was split.
+ * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+ * existing page was already on a node that does not follow the
+ * policy.
*/
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+ ret = 2;
goto out;
}
- if (!queue_pages_required(page, qp)) {
- ret = 1;
+ if (!queue_pages_required(page, qp))
goto unlock;
- }
- ret = 1;
flags = qp->flags;
/* go to thp migration */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(walk->vma)) {
- ret = -EIO;
+ if (!vma_migratable(walk->vma) ||
+ migrate_page_add(page, qp->pagelist, flags)) {
+ ret = 1;
goto unlock;
}
-
- migrate_page_add(page, qp->pagelist, flags);
} else
ret = -EIO;
unlock:
@@ -479,6 +479,13 @@ out:
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ * on a node that does not follow the policy.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int ret;
+ bool has_unmovable = false;
pte_t *pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
- if (ret > 0)
- return 0;
- else if (ret < 0)
+ if (ret != 2)
return ret;
}
+ /* THP was split, fall through to pte walk */
if (pmd_trans_unstable(pmd))
return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (!queue_pages_required(page, qp))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(vma))
+ /* MPOL_MF_STRICT must be specified if we get here */
+ if (!vma_migratable(vma)) {
+ has_unmovable = true;
break;
- migrate_page_add(page, qp->pagelist, flags);
+ }
+
+ /*
+ * Do not abort immediately since there may be
+ * temporary off LRU pages in the range. Still
+ * need to migrate other LRU pages.
+ */
+ if (migrate_page_add(page, qp->pagelist, flags))
+ has_unmovable = true;
} else
break;
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
+
+ if (has_unmovable)
+ return 1;
+
return addr != end ? -EIO : 0;
}
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
*
* If pages found in a given range are on a set of nodes (determined by
* @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
/*
* page migration, thp tail pages can be passed.
*/
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
struct page *head = compound_head(page);
@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
+ } else if (flags & MPOL_MF_STRICT) {
+ /*
+ * Non-movable page may reach here. And, there may be
+ * temporary off LRU pages or non-LRU movable pages.
+ * Treat them as unmovable pages since they can't be
+ * isolated, so they can't be moved at the moment. It
+ * should return -EIO for this case too.
+ */
+ return -EIO;
}
}
+
+ return 0;
}
/* page allocation callback for NUMA node migration */
@@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start)
} else if (PageTransHuge(page)) {
struct page *thp;
- thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
- HPAGE_PMD_ORDER);
+ thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
+ address, numa_node_id());
if (!thp)
return NULL;
prep_transhuge_page(thp);
@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
}
#else
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
+ return -EIO;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
struct mempolicy *new;
unsigned long end;
int err;
+ int ret;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
if (err)
goto mpol_out;
- err = queue_pages_range(mm, start, end, nmask,
+ ret = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
- if (!err)
- err = mbind_range(mm, start, end, new);
+
+ if (ret < 0) {
+ err = -EIO;
+ goto up_out;
+ }
+
+ err = mbind_range(mm, start, end, new);
if (!err) {
int nr_failed = 0;
@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
putback_movable_pages(&pagelist);
}
- if (nr_failed && (flags & MPOL_MF_STRICT))
+ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
} else
putback_movable_pages(&pagelist);
+up_out:
up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
mpol_put(new);
return err;
}
@@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
-static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
- * @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
@@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, int node, bool hugepage)
+ unsigned long addr, int node)
{
struct mempolicy *pol;
struct page *page;
@@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
goto out;
}
- if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
- int hpage_node = node;
-
- /*
- * For hugepage allocation and non-interleave policy which
- * allows the current node (or other explicitly preferred
- * node) we only try to allocate from the current/preferred
- * node and don't fall back to other nodes, as the cost of
- * remote accesses would likely offset THP benefits.
- *
- * If the policy is interleave, or does not allow the current
- * node in its nodemask, we allocate the standard way.
- */
- if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
- hpage_node = pol->v.preferred_node;
-
- nmask = policy_nodemask(gfp, pol);
- if (!nmask || node_isset(hpage_node, *nmask)) {
- mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
- goto out;
- }
- }
-
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
diff --git a/mm/memremap.c b/mm/memremap.c
index 86432650f829..ed70c4e8e52a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -403,6 +403,30 @@ void __put_devmap_managed_page(struct page *page)
mem_cgroup_uncharge(page);
+ /*
+ * When a device_private page is freed, the page->mapping field
+ * may still contain a (stale) mapping value. For example, the
+ * lower bits of page->mapping may still identify the page as
+ * an anonymous page. Ultimately, this entire field is just
+ * stale and wrong, and it will cause errors if not cleared.
+ * One example is:
+ *
+ * migrate_vma_pages()
+ * migrate_vma_insert_page()
+ * page_add_new_anon_rmap()
+ * __page_set_anon_rmap()
+ * ...checks page->mapping, via PageAnon(page) call,
+ * and incorrectly concludes that the page is an
+ * anonymous page. Therefore, it incorrectly,
+ * silently fails to set up the new anon rmap.
+ *
+ * For other types of ZONE_DEVICE pages, migration is either
+ * handled differently or not done at all, so there is no need
+ * to clear page->mapping.
+ */
+ if (is_device_private_page(page))
+ page->mapping = NULL;
+
page->pgmap->ops->page_free(page);
} else if (!count)
__put_page(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..003377e24232 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* No need to invalidate here it will synchronize on
* against the special swap migration pte.
+ *
+ * The assignment to subpage above was computed from a
+ * swap PTE which results in an invalid pointer.
+ * Since only PAGE_SIZE pages can currently be
+ * migrated, just set it to page. This will need to be
+ * changed when hugepage migrations to device private
+ * memory are supported.
*/
+ subpage = page;
goto discard;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 626d8c74b973..2bed4761f279 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
- HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+ HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2a09796edef8..98e924864554 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
bool to_user)
{
/* Reject if object wraps past end of memory. */
- if (ptr + n < ptr)
+ if (ptr + (n - 1) < ptr)
usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
/* Reject if NULL or ZERO-allocation. */
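The wrap test was off by one: an object that ends exactly at the top of the address space is legitimate, but ptr + n wraps to 0 and the old comparison rejects it, whereas the address of the last byte, ptr + (n - 1), does not wrap. A worked example with 32-bit addresses (userspace-style sketch, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Object occupying the last 0x100 bytes of a 32-bit address space. */
static bool wraps_old(uint32_t ptr, uint32_t n)
{
	return ptr + n < ptr;		/* 0xffffff00 + 0x100 wraps to 0: falsely rejected */
}

static bool wraps_new(uint32_t ptr, uint32_t n)
{
	return ptr + (n - 1) < ptr;	/* 0xffffff00 + 0xff = 0xffffffff: accepted */
}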
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e0fc963acc41..7ba11e12a11f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3279,9 +3279,19 @@ retry:
goto overflow;
/*
+ * If required width exceeds current VA block, move
+ * base downwards and then recheck.
+ */
+ if (base + end > va->va_end) {
+ base = pvm_determine_end_from_reverse(&va, align) - end;
+ term_area = area;
+ continue;
+ }
+
+ /*
* If this VA does not fit, move base downwards and recheck.
*/
- if (base + start < va->va_start || base + end > va->va_end) {
+ if (base + start < va->va_start) {
va = node_to_va(rb_prev(&va->rb_node));
base = pvm_determine_end_from_reverse(&va, align) - end;
term_area = area;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dbdc46a84f63..c77d1e3761a7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -88,9 +88,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
unsigned int may_swap:1;
- /* e.g. boosted watermark reclaim leaves slabs alone */
- unsigned int may_shrinkslab:1;
-
/*
* Cgroups are not reclaimed below their configured memory.low,
* unless we threaten to OOM. If any cgroups are skipped due to
@@ -2714,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
- if (sc->may_shrinkslab) {
- shrink_slab(sc->gfp_mask, pgdat->node_id,
- memcg, sc->priority);
- }
+ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+ sc->priority);
/* Record the group's reclaim efficiency */
vmpressure(sc->gfp_mask, memcg, false,
@@ -3194,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
- .may_shrinkslab = 1,
};
/*
@@ -3238,7 +3232,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
- .may_shrinkslab = 1,
};
unsigned long lru_pages;
@@ -3286,7 +3279,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = may_swap,
- .may_shrinkslab = 1,
};
set_task_reclaim_state(current, &sc.reclaim_state);
@@ -3598,7 +3590,6 @@ restart:
*/
sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
sc.may_swap = !nr_boost_reclaim;
- sc.may_shrinkslab = !nr_boost_reclaim;
/*
* Do some background aging of the anon list, to give
diff --git a/mm/workingset.c b/mm/workingset.c
index e0b4edcb88c8..c963831d354f 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node)
if (node->count && node->count == node->nr_values) {
if (list_empty(&node->private_list)) {
list_lru_add(&shadow_nodes, &node->private_list);
- __inc_lruvec_page_state(virt_to_page(node),
- WORKINGSET_NODES);
+ __inc_lruvec_slab_state(node, WORKINGSET_NODES);
}
} else {
if (!list_empty(&node->private_list)) {
list_lru_del(&shadow_nodes, &node->private_list);
- __dec_lruvec_page_state(virt_to_page(node),
- WORKINGSET_NODES);
+ __dec_lruvec_slab_state(node, WORKINGSET_NODES);
}
}
}
@@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
}
list_lru_isolate(lru, item);
- __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);
+ __dec_lruvec_slab_state(node, WORKINGSET_NODES);
spin_unlock(lru_lock);
@@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
* shadow entries we were tracking ...
*/
xas_store(&xas, NULL);
- __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
+ __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
out_invalid:
xa_unlock_irq(&mapping->i_pages);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 1a029a7432ee..ed19d98c9dcd 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -817,9 +817,19 @@ out:
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
kmem_cache_destroy(pool->c_handle);
- z3fold_unregister_migration(pool);
- destroy_workqueue(pool->release_wq);
+
+ /*
+ * We need to destroy pool->compact_wq before pool->release_wq,
+ * as any pending work on pool->compact_wq will call
+ * queue_work(pool->release_wq, &pool->work).
+ *
+ * There are still outstanding pages until both workqueues are drained,
+ * so we cannot unregister migration until then.
+ */
+
destroy_workqueue(pool->compact_wq);
+ destroy_workqueue(pool->release_wq);
+ z3fold_unregister_migration(pool);
kfree(pool);
}
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index 85571e90191f..bfeab44f81d0 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
if (argc != 2) {
printf(
- "Sintax: %s fbdev\n"
+ "Syntax: %s fbdev\n"
"Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
return -1;
}
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 9a94672e7adc..ade699131065 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,24 +1228,11 @@ hashalg_fail:
static int __init init_digests(void)
{
- u8 digest[TPM_MAX_DIGEST_SIZE];
- int ret;
- int i;
-
- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
- if (ret < 0)
- return ret;
- if (ret < TPM_MAX_DIGEST_SIZE)
- return -EFAULT;
-
digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
GFP_KERNEL);
if (!digests)
return -ENOMEM;
- for (i = 0; i < chip->nr_allocated_banks; i++)
- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
-
return 0;
}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 485edaba0037..5bf24fb819d2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_free);
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+ /* Make the codec enter D3 to avoid spurious noises from the internal
+ * speaker during (and after) reboot
+ */
+ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
#ifdef CONFIG_PM
/**
* snd_hda_gen_check_power_status - check the loopback power save state
@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
.init = snd_hda_gen_init,
.free = snd_hda_gen_free,
.unsol_event = snd_hda_jack_unsol_event,
+ .reboot_notify = snd_hda_gen_reboot_notify,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
#endif
@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
if (err < 0)
- return err;
+ goto error;
err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 35a670a71c42..5f199dcb0d18 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
struct auto_pin_cfg *cfg);
int snd_hda_gen_build_controls(struct hda_codec *codec);
int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
/* standard jack event callbacks */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a6d8c0d77b84..99fc0917339b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2508,6 +2508,9 @@ static const struct pci_device_id azx_ids[] = {
/* AMD, X370 & co */
{ PCI_DEVICE(0x1022, 0x1457),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD, X570 & co */
+ { PCI_DEVICE(0x1022, 0x1487),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
/* AMD Stoney */
{ PCI_DEVICE(0x1022, 0x157a),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f299f137eaea..14298ef45b21 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- switch (codec->core.vendor_id) {
- case 0x14f12008: /* CX8200 */
- case 0x14f150f2: /* CX20722 */
- case 0x14f150f4: /* CX20724 */
- break;
- default:
- return;
- }
-
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
+ snd_hda_gen_reboot_notify(codec);
}
static void cx_auto_free(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index de224cbea7a0..e333b3e30e31 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
alc_shutup(codec);
}
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
-}
-
#define alc_free snd_hda_gen_free
#ifdef CONFIG_PM
@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 7498b5191b68..b5927c3d5bc0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -68,6 +68,7 @@ struct mixer_build {
unsigned char *buffer;
unsigned int buflen;
DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
struct usb_audio_term oterm;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
@@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
return -EINVAL;
if (!desc->bNrInPins)
return -EINVAL;
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+ return -EINVAL;
switch (state->mixer->protocol) {
case UAC_VERSION_1:
@@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
* parse the source unit recursively until it reaches to a terminal
* or a branched unit.
*/
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
struct usb_audio_term *term)
{
int protocol = state->mixer->protocol;
int err;
void *p1;
+ unsigned char *hdr;
memset(term, 0, sizeof(*term));
- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
- unsigned char *hdr = p1;
+ for (;;) {
+ /* a loop in the terminal chain? */
+ if (test_and_set_bit(id, state->termbitmap))
+ return -EINVAL;
+
+ p1 = find_audio_control_unit(state, id);
+ if (!p1)
+ break;
+
+ hdr = p1;
term->id = id;
if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id,
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
@@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id,
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
@@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC3_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id,
return -EINVAL;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
@@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id,
return -ENODEV;
}
+
+static int check_input_term(struct mixer_build *state, int id,
+ struct usb_audio_term *term)
+{
+ memset(term, 0, sizeof(*term));
+ memset(state->termbitmap, 0, sizeof(state->termbitmap));
+ return __check_input_term(state, id, term);
+}
+
/*
* Feature Unit
*/